mirror of https://github.com/rancher/os.git synced 2025-06-29 08:16:49 +00:00

Merge pull request #1010 from joshwget/merge-v0.5

Merge v0.5 branch
Darren Shepherd, 2016-06-13 23:39:20 -07:00 (committed by GitHub)
commit 07d475cecb
2224 changed files with 249529 additions and 131533 deletions


@@ -1,2 +0,0 @@
DOCKER_HOST="tcp://192.168.2.2:2375"
DOCKER_TLS_VERIFY=


@@ -1,16 +1,16 @@
.DS_Store
.git
.idea
.vendor
.dockerignore
bin
gopath
tmp
state
build
images/*/build
scripts/images/*/dist/
dist
Godeps/_workspace/pkg
tests/integration/.venv*
tests/integration/.tox
*/*/*/*.pyc
*/*/*/__pycache__
.trash-cache
.dapper
vendor/*/*/*/.git
tmp


@@ -1,15 +0,0 @@
.DS_Store
.git
.idea
.vendor
.dockerignore
bin
gopath
tmp
state
build
Godeps/_workspace/pkg
tests/integration/.venv*
tests/integration/.tox
*/*/*/*.pyc
*/*/*/__pycache__


@@ -3,4 +3,4 @@ build:
volumes:
- /var/run/docker.sock:/var/run/docker.sock
commands:
- ./scripts/ci
- dapper ci

.gitignore vendored (7 changes)

@@ -5,11 +5,14 @@
/build
/dist
/gopath
/images/*/build
.dockerfile
*.swp
/tests/integration/MANIFEST
/tests/integration/.venv*
/tests/integration/.tox
/tests/integration/.idea
*.pyc
__pychache__
.docker-env.*
__pycache__
/.dapper
/.trash-cache


@@ -1 +0,0 @@
github.com/rancher/os


@@ -1,11 +0,0 @@
FROM debian:jessie
ENV DEBIAN_FRONTEND noninteractive
RUN apt-get update && apt-get install -y grub2 parted kexec-tools
COPY ./scripts/installer /scripts
COPY ./build.conf /scripts/
COPY ./dist/artifacts/vmlinuz /dist/vmlinuz
COPY ./dist/artifacts/initrd /dist/initrd
ENTRYPOINT ["/scripts/lay-down-os"]


@@ -1,45 +1,176 @@
FROM rancher/os-dapper-base
RUN apt-get update && \
apt-get -y install locales sudo vim less curl wget git rsync build-essential isolinux xorriso gccgo uuid \
libblkid-dev libmount-dev libselinux1-dev cpio genisoimage qemu-kvm qemu python-pip ca-certificates pkg-config tox module-init-tools
ARG HOST_ARCH
ENV HOST_ARCH ${HOST_ARCH}
RUN ln -sf go-6 /usr/bin/go && mkdir -p /usr/local && cd /usr/local && \
wget -O - https://storage.googleapis.com/golang/go1.6.2.src.tar.gz | tar -xz && \
cd go/src && GOROOT_BOOTSTRAP=/usr GOARCH=${HOST_ARCH} GOHOSTARCH=${HOST_ARCH} ./make.bash
ENV PATH /usr/local/go/bin:$PATH
RUN mkdir -p /go/src /go/bin && chmod -R 777 /go
ENV GOPATH /go
ENV PATH /go/bin:$PATH
ARG HOST_DOCKER_BINARY_URL
ENV HOST_DOCKER_BINARY_URL ${HOST_DOCKER_BINARY_URL}
RUN wget -O - ${HOST_DOCKER_BINARY_URL} > /usr/local/bin/docker
RUN chmod +x /usr/local/bin/docker
FROM ubuntu:16.04
# FROM arm64=aarch64/ubuntu:16.04 arm=armhf/ubuntu:16.04
ENV DAPPER_ENV VERSION DEV_BUILD
ENV DAPPER_DOCKER_SOCKET true
ENV DAPPER_SOURCE /go/src/github.com/rancher/os
ENV DAPPER_OUTPUT ./bin ./dist ./build/os-config.yml
ENV DAPPER_OUTPUT ./bin ./dist ./build/initrd
ENV DAPPER_RUN_ARGS --privileged
ENV TRASH_CACHE ${DAPPER_SOURCE}/.trash-cache
ENV SHELL /bin/bash
WORKDIR ${DAPPER_SOURCE}
COPY .dockerignore.dapper .dockerignore
########## General Configuration #####################
ARG DAPPER_HOST_ARCH=amd64
ARG HOST_ARCH=${DAPPER_HOST_ARCH}
ARG ARCH=${HOST_ARCH}
CMD make
ARG OS_REPO=rancher
ARG HOSTNAME_DEFAULT=rancher
ARG DISTRIB_ID=RancherOS
ARG TOOLCHAIN
ENV TOOLCHAIN ${TOOLCHAIN}
ARG DOCKER_VERSION=1.11.2
ARG DOCKER_PATCH_VERSION=v${DOCKER_VERSION}-ros1
ARG DOCKER_BUILD_VERSION=1.10.3
ARG DOCKER_BUILD_PATCH_VERSION=v${DOCKER_BUILD_VERSION}-ros1
ARG SELINUX_POLICY_URL=https://github.com/rancher/refpolicy/releases/download/v0.0.2/policy.29
RUN if [ "${TOOLCHAIN}" != "" ] && ! which ${TOOLCHAIN}-gcc; then \
apt-get update && \
ARG KERNEL_URL_amd64=https://github.com/rancher/os-kernel/releases/download/Ubuntu-4.4.0-23.41-rancher2/linux-4.4.10-rancher-x86.tar.gz
ARG KERNEL_URL_arm64=https://github.com/imikushin/os-kernel/releases/download/Estuary-4.4.0-arm64.8/linux-4.4.0-rancher-arm64.tar.gz
ARG DOCKER_URL_amd64=https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION}.tgz
ARG DOCKER_URL_arm=https://github.com/rancher/docker/releases/download/${DOCKER_PATCH_VERSION}/docker-${DOCKER_VERSION}_arm.tgz
ARG DOCKER_URL_arm64=https://github.com/rancher/docker/releases/download/${DOCKER_PATCH_VERSION}/docker-${DOCKER_VERSION}_arm64.tgz
ARG BUILD_DOCKER_URL_amd64=https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_BUILD_VERSION}
ARG BUILD_DOCKER_URL_arm=https://github.com/rancher/docker/releases/download/${DOCKER_BUILD_PATCH_VERSION}/docker-${DOCKER_BUILD_VERSION}_arm
ARG BUILD_DOCKER_URL_arm64=https://github.com/rancher/docker/releases/download/${DOCKER_BUILD_PATCH_VERSION}/docker-${DOCKER_BUILD_VERSION}_arm64
ARG TOOLCHAIN_arm64=aarch64-linux-gnu
ARG TOOLCHAIN_arm=arm-linux-gnueabihf
ARG OS_RELEASES_YML=https://releases.rancher.com/os/releases.yml
ARG VBOX_MODULES_URL_amd64=https://github.com/rancher/os-vbox/releases/download/v0.0.2/vbox-modules.tar.gz
ARG OS_SERVICES_REPO=https://raw.githubusercontent.com/${OS_REPO}/os-services
ARG IMAGE_NAME=${OS_REPO}/os
ARG DFS_IMAGE=${OS_REPO}/docker:v${DOCKER_VERSION}
ARG OS_BASE_URL_amd64=https://github.com/rancher/os-base/releases/download/v2016.05-3/os-base_amd64.tar.xz
ARG OS_BASE_URL_arm64=https://github.com/rancher/os-base/releases/download/v2016.05-3/os-base_arm64.tar.xz
ARG OS_BASE_URL_arm=https://github.com/rancher/os-base/releases/download/v2016.05-3/os-base_arm.tar.xz
######################################################
# Set up environment and export all ARGS as ENV
ENV ARCH ${ARCH}
ENV BUILD_DOCKER_URL BUILD_DOCKER_URL_${ARCH}
ENV BUILD_DOCKER_URL_amd64 ${BUILD_DOCKER_URL_amd64}
ENV BUILD_DOCKER_URL_arm ${BUILD_DOCKER_URL_arm}
ENV BUILD_DOCKER_URL_arm64 ${BUILD_DOCKER_URL_arm64}
ENV DAPPER_HOST_ARCH ${DAPPER_HOST_ARCH}
ENV DFS_IMAGE ${DFS_IMAGE}
ENV DISTRIB_ID ${DISTRIB_ID}
ENV DOCKER_PATCH_VERSION ${DOCKER_PATCH_VERSION}
ENV DOCKER_URL DOCKER_URL_${ARCH}
ENV DOCKER_URL_amd64 ${DOCKER_URL_amd64}
ENV DOCKER_URL_arm ${DOCKER_URL_arm}
ENV DOCKER_URL_arm64 ${DOCKER_URL_arm64}
ENV DOCKER_VERSION ${DOCKER_VERSION}
ENV DOWNLOADS /usr/src/downloads
ENV GOPATH /go
ENV GO_VERSION 1.6.2
ENV GOARCH $ARCH
ENV HOSTNAME_DEFAULT ${HOSTNAME_DEFAULT}
ENV HOST_ARCH ${HOST_ARCH}
ENV IMAGE_NAME ${IMAGE_NAME}
ENV KERNEL_URL KERNEL_URL_${ARCH}
ENV KERNEL_URL_amd64 ${KERNEL_URL_amd64}
ENV KERNEL_URL_arm64 ${KERNEL_URL_arm64}
ENV OS_BASE_SHA1 OS_BASE_SHA1_${ARCH}
ENV OS_BASE_URL OS_BASE_URL_${ARCH}
ENV OS_BASE_URL_amd64 ${OS_BASE_URL_amd64}
ENV OS_BASE_URL_arm ${OS_BASE_URL_arm}
ENV OS_BASE_URL_arm64 ${OS_BASE_URL_arm64}
ENV OS_RELEASES_YML ${OS_RELEASES_YML}
ENV OS_REPO ${OS_REPO}
ENV OS_SERVICES_REPO ${OS_SERVICES_REPO}
ENV PATH ${GOPATH}/bin:/usr/local/go/bin:$PATH
ENV REPO_VERSION master
ENV SELINUX_POLICY_URL ${SELINUX_POLICY_URL}
ENV TOOLCHAIN_arm ${TOOLCHAIN_arm}
ENV TOOLCHAIN_arm64 ${TOOLCHAIN_arm64}
ENV VBOX_MODULES_URL ${VBOX_MODULES_URL}
ENV VBOX_MODULES_URL VBOX_MODULES_URL_${ARCH}
ENV VBOX_MODULES_URL_amd64 ${VBOX_MODULES_URL_amd64}
RUN mkdir -p ${DOWNLOADS}
RUN apt-get update && \
apt-get install -y \
build-essential \
ca-certificates \
cpio \
curl \
dosfstools \
gccgo \
genisoimage \
git \
isolinux \
less \
libblkid-dev \
libmount-dev \
libselinux1-dev \
locales \
module-init-tools \
pkg-config \
python-pip \
qemu \
qemu-kvm \
rsync \
sudo \
tox \
vim \
wget \
xorriso
# Download kernel
RUN rm /bin/sh && ln -s /bin/bash /bin/sh
RUN if [ -n "${!KERNEL_URL}" ]; then \
curl -fL ${!KERNEL_URL} > ${DOWNLOADS}/kernel.tar.gz \
;fi
# Download SELinux Policy
RUN curl -pfL ${SELINUX_POLICY_URL} > ${DOWNLOADS}/$(basename ${SELINUX_POLICY_URL})
# VBox URL
RUN if [ -n "${!VBOX_MODULES_URL}" ]; then \
curl -pfL ${!VBOX_MODULES_URL} > ${DOWNLOADS}/vbox-modules.tar.gz \
;fi
# Install Go
COPY assets/go-dnsclient.patch ${DAPPER_SOURCE}
RUN ln -sf go-6 /usr/bin/go && \
curl -sfL https://storage.googleapis.com/golang/go${GO_VERSION}.src.tar.gz | tar -xzf - -C /usr/local && \
patch /usr/local/go/src/net/dnsclient_unix.go ${DAPPER_SOURCE}/go-dnsclient.patch && \
cd /usr/local/go/src && \
GOROOT_BOOTSTRAP=/usr GOARCH=${HOST_ARCH} GOHOSTARCH=${HOST_ARCH} ./make.bash && \
rm /usr/bin/go
# Install Host Docker
RUN curl -fL ${!BUILD_DOCKER_URL} > /usr/bin/docker && \
chmod +x /usr/bin/docker
# Install Target Docker
RUN curl -fL ${!DOCKER_URL} > ${DOWNLOADS}/docker.tgz
# Install Trash
RUN go get github.com/rancher/trash
# Install dapper
RUN curl -sL https://releases.rancher.com/dapper/latest/dapper-`uname -s`-`uname -m | sed 's/arm.*/arm/'` > /usr/bin/dapper && \
chmod +x /usr/bin/dapper
# Install toolchain
RUN TOOLCHAIN=TOOLCHAIN_${ARCH} && \
echo export TOOLCHAIN=${!TOOLCHAIN} > /usr/src/toolchain-env
RUN source /usr/src/toolchain-env && \
if [ "${TOOLCHAIN}" != "" ] && ! which ${TOOLCHAIN}-gcc; then \
apt-get install -y gcc-${TOOLCHAIN} g++-${TOOLCHAIN} \
;fi
RUN if [ "${TOOLCHAIN}" != "" ]; then \
RUN source /usr/src/toolchain-env; if [ "${TOOLCHAIN}" != "" ]; then \
apt-get update && \
cd /usr/local/src && \
for i in libselinux libsepol pcre3 util-linux; do \
@@ -48,7 +179,7 @@ RUN if [ "${TOOLCHAIN}" != "" ]; then \
;done \
;fi
RUN if [ "${TOOLCHAIN}" != "" ]; then \
RUN source /usr/src/toolchain-env; if [ "${TOOLCHAIN}" != "" ]; then \
cd /usr/local/src/pcre3-* && \
autoreconf && \
CC=${TOOLCHAIN}-gcc CXX=${TOOLCHAIN}-g++ ./configure --host=${TOOLCHAIN} --prefix=/usr/${TOOLCHAIN} && \
@@ -56,7 +187,7 @@ RUN if [ "${TOOLCHAIN}" != "" ]; then \
make install \
;fi
RUN if [ "${TOOLCHAIN}" != "" ]; then \
RUN source /usr/src/toolchain-env; if [ "${TOOLCHAIN}" != "" ]; then \
cd /usr/local/src/libselinux-* && \
CC=${TOOLCHAIN}-gcc CXX=${TOOLCHAIN}-g++ make CFLAGS=-Wall && \
make PREFIX=/usr/${TOOLCHAIN} DESTDIR=/usr/${TOOLCHAIN} install && \
@@ -65,7 +196,7 @@ RUN if [ "${TOOLCHAIN}" != "" ]; then \
make PREFIX=/usr/${TOOLCHAIN} DESTDIR=/usr/${TOOLCHAIN} install \
;fi
RUN if [ "${TOOLCHAIN}" != "" ]; then \
RUN source /usr/src/toolchain-env; if [ "${TOOLCHAIN}" != "" ]; then \
cd /usr/local/src/util-linux-* && \
autoreconf && \
CC=${TOOLCHAIN}-gcc CXX=${TOOLCHAIN}-g++ ./configure --host=${TOOLCHAIN} --prefix=/usr/${TOOLCHAIN} \
@@ -78,5 +209,8 @@ RUN if [ "${TOOLCHAIN}" != "" ]; then \
make install \
;fi
RUN apt-get update && \
apt-get -y install dosfstools
RUN mkdir -p images/00-rootfs/build && \
curl -pfL ${!OS_BASE_URL} | tar xvJf - -C images/00-rootfs/build
ENTRYPOINT ["./scripts/entry"]
CMD ["ci"]

Makefile (120 changes)

@@ -1,105 +1,39 @@
FORCE_PULL := 0
DEV_BUILD := 0
HOST_ARCH := amd64
ARCH := amd64
SUFFIX := $(if $(filter-out amd64,$(ARCH)),_$(ARCH))
HOST_SUFFIX := $(if $(filter-out amd64,$(HOST_ARCH)),_$(HOST_ARCH))
TARGETS := $(shell ls scripts | grep -vE 'clean|run|help')
include build.conf
include build.conf.$(ARCH)
.dapper:
@echo Downloading dapper
@curl -sL https://releases.rancher.com/dapper/latest/dapper-`uname -s`-`uname -m` > .dapper.tmp
@@chmod +x .dapper.tmp
@./.dapper.tmp -v
@mv .dapper.tmp .dapper
$(TARGETS): .dapper
./.dapper $@
bin/ros:
mkdir -p $(dir $@)
ARCH=$(ARCH) VERSION=$(VERSION) ./scripts/mk-ros.sh $@
trash: .dapper
./.dapper -m bind trash
build/host_ros: bin/ros
mkdir -p $(dir $@)
ifeq "$(ARCH)" "$(HOST_ARCH)"
ln -sf ../bin/ros $@
else
ARCH=$(HOST_ARCH) TOOLCHAIN= VERSION=$(VERSION) ./scripts/mk-ros.sh $@
endif
trash-keep: .dapper
./.dapper -m bind trash -k
deps: trash
assets/docker:
mkdir -p $(dir $@)
wget -O - "$(DOCKER_BINARY_URL)" > $@
chmod +x $@
build/initrd/.id:
dapper prepare
assets/selinux/policy.29:
mkdir -p $(dir $@)
wget -O - "$(SELINUX_POLICY_URL)" > $@
run: build/initrd/.id
dapper -m bind build-target
./scripts/run
assets/modules.tar.gz:
mkdir -p $(dir $@)
ifeq "$(ARCH)" "amd64"
curl -L "$(VBOX_MODULES_URL)" > $@
else
touch $@
endif
shell-bind:
dapper -m bind -s
ifdef COMPILED_KERNEL_URL
clean:
@./scripts/clean
installer: minimal
docker build -t $(IMAGE_NAME):$(VERSION)$(SUFFIX) -f Dockerfile.$(ARCH) .
help:
@./scripts/help
dist/artifacts/vmlinuz: build/kernel/
mkdir -p $(dir $@)
mv $(or $(wildcard build/kernel/boot/vmlinuz*), $(wildcard build/kernel/boot/vmlinux*)) $@
.DEFAULT_GOAL := default
build/kernel/:
mkdir -p $@
wget -O - "$(COMPILED_KERNEL_URL)" | tar -xzf - -C $@
dist/artifacts/initrd: bin/ros assets/docker assets/selinux/policy.29 build/kernel/ build/images.tar assets/modules.tar.gz
mkdir -p $(dir $@)
HOST_SUFFIX=$(HOST_SUFFIX) SUFFIX=$(SUFFIX) DFS_IMAGE=$(DFS_IMAGE) DEV_BUILD=$(DEV_BUILD) \
KERNEL_RELEASE=$(KERNEL_RELEASE) ARCH=$(ARCH) ./scripts/mk-initrd.sh $@
dist/artifacts/rancheros.iso: minimal
./scripts/mk-rancheros-iso.sh
all: minimal installer iso
initrd: dist/artifacts/initrd
minimal: initrd dist/artifacts/vmlinuz
iso: dist/artifacts/rancheros.iso dist/artifacts/iso-checksums.txt
test: minimal
./scripts/unit-test
cd tests/integration && HOST_ARCH=$(HOST_ARCH) ARCH=$(ARCH) tox
.PHONY: all minimal initrd iso installer test
endif
build/os-config.yml: build/host_ros
ARCH=$(ARCH) VERSION=$(VERSION) ./scripts/gen-os-config.sh $@
build/images.tar: build/host_ros build/os-config.yml
ARCH=$(ARCH) FORCE_PULL=$(FORCE_PULL) ./scripts/mk-images-tar.sh
dist/artifacts/rootfs.tar.gz: bin/ros assets/docker build/images.tar assets/selinux/policy.29 assets/modules.tar.gz
mkdir -p $(dir $@)
HOST_SUFFIX=$(HOST_SUFFIX) SUFFIX=$(SUFFIX) DFS_IMAGE=$(DFS_IMAGE) DEV_BUILD=$(DEV_BUILD) IS_ROOTFS=1 ./scripts/mk-initrd.sh $@
dist/artifacts/iso-checksums.txt: dist/artifacts/rancheros.iso
./scripts/mk-iso-checksums-txt.sh
version:
@echo $(VERSION)
rootfs: dist/artifacts/rootfs.tar.gz
.PHONY: rootfs version bin/ros
.PHONY: $(TARGETS)
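
The upshot of the Makefile rewrite: the old host-side artifact targets (dist/artifacts/initrd, dist/artifacts/vmlinuz, the kernel and VBox downloads) move out of make entirely. Target names are now discovered from the scripts directory at parse time (TARGETS := $(shell ls scripts | grep -vE 'clean|run|help')) and each one is simply forwarded into the dapper build container via ./.dapper $@, with the .dapper binary itself bootstrapped on first use by the download rule above.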

assets/go-dnsclient.patch (new file, 24 lines)

@@ -0,0 +1,24 @@
265,270d264
< // Ensure only one update at a time checks resolv.conf.
< if !conf.tryAcquireSema() {
< return
< }
< defer conf.releaseSema()
<
276a271,280
> conf.update(name)
> }
>
> func (conf *resolverConfig) update(name string) {
> // Ensure only one update at a time checks resolv.conf.
> if !conf.tryAcquireSema() {
> return
> }
> defer conf.releaseSema()
>
293a298,302
> }
>
> func UpdateDnsConf() {
> resolvConf.initOnce.Do(resolvConf.init)
> resolvConf.update("/etc/resolv.conf")
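
In effect the patch factors the body of the resolver's periodic tryUpdate into a separate update method and adds an exported UpdateDnsConf, so a RancherOS binary can force an immediate re-read of /etc/resolv.conf after rewriting it instead of waiting for the standard library's next scheduled check. A minimal sketch of a caller, assuming the patched toolchain built in the Dockerfile above:

    package main

    import (
        "net"

        "github.com/docker/libnetwork/resolvconf"
    )

    func rewriteDNS(nameservers, search []string) error {
        // Same call the network command uses further down in this diff.
        if _, err := resolvconf.Build("/etc/resolv.conf", nameservers, search, nil); err != nil {
            return err
        }
        // UpdateDnsConf exists only in the patched net package; it re-reads
        // resolv.conf under the resolver's semaphore.
        net.UpdateDnsConf()
        return nil
    }

    func main() {
        _ = rewriteDNS([]string{"8.8.8.8"}, nil)
    }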


@@ -1,8 +0,0 @@
IMAGE_NAME=rancher/os
VERSION=v0.4.4-dev
DFS_IMAGE=rancher/docker:v1.10.3
SELINUX_POLICY_URL=https://github.com/rancher/refpolicy/releases/download/v0.0.2/policy.29
HOSTNAME_DEFAULT=rancher
OS_IMAGES_ROOT=rancher
OS_SERVICES_REPO=https://raw.githubusercontent.com/rancher/os-services


@@ -1,7 +0,0 @@
DAPPER_BASE=ubuntu:16.04
TOOLCHAIN= #empty
COMPILED_KERNEL_URL=https://github.com/rancher/os-kernel/releases/download/Ubuntu-4.2.0-34.39-rancher/linux-4.2.8-ckt4-rancher-x86.tar.gz
DOCKER_BINARY_URL=https://get.docker.com/builds/Linux/x86_64/docker-1.10.3
OS_RELEASES_YML=https://releases.rancher.com/os/releases.yml
VBOX_MODULES_URL=https://github.com/rancher/os-vbox/releases/download/v0.0.2/vbox-modules.tar.gz


@@ -1,6 +0,0 @@
DAPPER_BASE=armhf/ubuntu:16.04
TOOLCHAIN=arm-linux-gnueabihf
COMPILED_KERNEL_URL= #empty
DOCKER_BINARY_URL=https://github.com/rancher/docker/releases/download/v1.10.3-ros1/docker-1.10.3_arm
OS_RELEASES_YML=https://releases.rancher.com/os/releases_arm.yml


@@ -1,6 +0,0 @@
DAPPER_BASE=aarch64/ubuntu:16.04
TOOLCHAIN=aarch64-linux-gnu
COMPILED_KERNEL_URL=https://github.com/imikushin/os-kernel/releases/download/Estuary-4.1.18-arm64-3/linux-4.1.18-arm64.tar.gz
DOCKER_BINARY_URL=https://github.com/rancher/docker/releases/download/v1.10.3-ros1/docker-1.10.3_arm64
OS_RELEASES_YML=https://releases.rancher.com/os/releases_arm64.yml


@@ -1,20 +0,0 @@
#!/bin/bash
set -e
export ARCH=${ARCH:-amd64}
cd $(dirname $0)
if [ "$1" != "--dev" ]; then
echo
echo Running \"production\" build. Will use lzma to compress initrd, which is somewhat slow...
echo Ctrl+C if you don\'t want this.
echo
echo For \"developer\" builds, run ./build.sh --dev
echo
./scripts/make.sh all
else
./scripts/make.sh DEV_BUILD=1 all
fi
ls -lh dist/artifacts


@@ -18,8 +18,9 @@ package cloudinit
import (
"errors"
"flag"
"io/ioutil"
"fmt"
"os"
"os/exec"
"strings"
"sync"
"time"
@@ -33,6 +34,7 @@ import (
"github.com/coreos/coreos-cloudinit/datasource/file"
"github.com/coreos/coreos-cloudinit/datasource/metadata/digitalocean"
"github.com/coreos/coreos-cloudinit/datasource/metadata/ec2"
"github.com/coreos/coreos-cloudinit/datasource/metadata/gce"
"github.com/coreos/coreos-cloudinit/datasource/metadata/packet"
"github.com/coreos/coreos-cloudinit/datasource/proc_cmdline"
"github.com/coreos/coreos-cloudinit/datasource/url"
@@ -40,6 +42,7 @@ import (
"github.com/coreos/coreos-cloudinit/system"
"github.com/rancher/netconf"
rancherConfig "github.com/rancher/os/config"
"github.com/rancher/os/util"
)
const (
@@ -47,6 +50,7 @@ const (
datasourceMaxInterval = 30 * time.Second
datasourceTimeout = 5 * time.Minute
sshKeyName = "rancheros-cloud-config"
resizeStamp = "/var/lib/rancher/resizefs.done"
)
var (
@@ -71,13 +75,13 @@ func saveFiles(cloudConfigBytes, scriptBytes []byte, metadata datasource.Metadat
if len(scriptBytes) > 0 {
log.Infof("Writing to %s", rancherConfig.CloudConfigScriptFile)
if err := ioutil.WriteFile(rancherConfig.CloudConfigScriptFile, scriptBytes, 500); err != nil {
if err := util.WriteFileAtomic(rancherConfig.CloudConfigScriptFile, scriptBytes, 500); err != nil {
log.Errorf("Error while writing file %s: %v", rancherConfig.CloudConfigScriptFile, err)
return err
}
}
if err := ioutil.WriteFile(rancherConfig.CloudConfigBootFile, cloudConfigBytes, 400); err != nil {
if err := util.WriteFileAtomic(rancherConfig.CloudConfigBootFile, cloudConfigBytes, 400); err != nil {
return err
}
log.Infof("Written to %s:\n%s", rancherConfig.CloudConfigBootFile, string(cloudConfigBytes))
@@ -87,7 +91,7 @@ func saveFiles(cloudConfigBytes, scriptBytes []byte, metadata datasource.Metadat
return err
}
if err = ioutil.WriteFile(rancherConfig.MetaDataFile, metaDataBytes, 400); err != nil {
if err = util.WriteFileAtomic(rancherConfig.MetaDataFile, metaDataBytes, 400); err != nil {
return err
}
log.Infof("Written to %s:\n%s", rancherConfig.MetaDataFile, string(metaDataBytes))
@@ -96,11 +100,7 @@ func saveFiles(cloudConfigBytes, scriptBytes []byte, metadata datasource.Metadat
}
func currentDatasource() (datasource.Datasource, error) {
cfg, err := rancherConfig.LoadConfig()
if err != nil {
log.WithFields(log.Fields{"err": err}).Error("Failed to read rancher config")
return nil, err
}
cfg := rancherConfig.LoadConfig()
dss := getDatasources(cfg)
if len(dss) == 0 {
@@ -168,12 +168,31 @@ func fetchUserData() ([]byte, datasource.Metadata, error) {
return userDataBytes, metadata, nil
}
func executeCloudConfig() error {
cc, err := rancherConfig.LoadConfig()
func resizeDevice(cfg *rancherConfig.CloudConfig) error {
cmd := exec.Command("growpart", cfg.Rancher.ResizeDevice, "1")
err := cmd.Run()
if err != nil {
return err
}
cmd = exec.Command("partprobe")
err = cmd.Run()
if err != nil {
return err
}
cmd = exec.Command("resize2fs", fmt.Sprintf("%s1", cfg.Rancher.ResizeDevice))
err = cmd.Run()
if err != nil {
return err
}
return nil
}
func executeCloudConfig() error {
cc := rancherConfig.LoadConfig()
if len(cc.SSHAuthorizedKeys) > 0 {
authorizeSSHKeys("rancher", cc.SSHAuthorizedKeys, sshKeyName)
authorizeSSHKeys("docker", cc.SSHAuthorizedKeys, sshKeyName)
@@ -189,6 +208,14 @@ func executeCloudConfig() error {
log.Printf("Wrote file %s to filesystem", fullPath)
}
if _, err := os.Stat(resizeStamp); os.IsNotExist(err) && cc.Rancher.ResizeDevice != "" {
if err := resizeDevice(cc); err == nil {
os.Create(resizeStamp)
} else {
log.Errorf("Failed to resize %s: %s", cc.Rancher.ResizeDevice, err)
}
}
return nil
}
@@ -261,12 +288,7 @@ func getDatasources(cfg *rancherConfig.CloudConfig) []datasource.Datasource {
}
case "gce":
if network {
gceCloudConfigFile, err := GetAndCreateGceDataSourceFilename()
if err != nil {
log.Errorf("Could not retrieve GCE CloudConfig %s", err)
continue
}
dss = append(dss, file.NewDatasource(gceCloudConfigFile))
dss = append(dss, gce.NewDatasource("http://metadata.google.internal/"))
}
case "packet":
if !network {
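
Two independent changes run through this file: writes now go through util.WriteFileAtomic instead of ioutil.WriteFile, and the file-based GCE datasource is replaced by a direct metadata datasource. util.WriteFileAtomic itself is not shown in this diff; a conventional implementation (an assumption here, not the rancher/os source) writes to a temp file in the same directory and renames it into place, so a crash mid-write cannot leave a truncated cloud-config behind:

    package util

    import (
        "io/ioutil"
        "os"
        "path/filepath"
    )

    // WriteFileAtomic: sketch of the usual temp-file-then-rename pattern.
    func WriteFileAtomic(filename string, data []byte, perm os.FileMode) error {
        dir, name := filepath.Split(filename)
        tmp, err := ioutil.TempFile(dir, name)
        if err != nil {
            return err
        }
        defer os.Remove(tmp.Name()) // a no-op once the rename succeeds
        if _, err := tmp.Write(data); err != nil {
            tmp.Close()
            return err
        }
        if err := tmp.Chmod(perm); err != nil {
            tmp.Close()
            return err
        }
        if err := tmp.Close(); err != nil {
            return err
        }
        // rename(2) is atomic on POSIX filesystems: readers see either the
        // old contents or the new, never a partial write.
        return os.Rename(tmp.Name(), filename)
    }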


@@ -1,140 +0,0 @@
package cloudinit
import (
"io/ioutil"
"strings"
log "github.com/Sirupsen/logrus"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"google.golang.org/cloud/compute/metadata"
)
type GceCloudConfig struct {
FileName string
UserData string
NonUserDataSSHKeys []string
}
const (
gceCloudConfigFile = "/var/lib/rancher/conf/gce_cloudinit_config.yml"
)
func NewGceCloudConfig() *GceCloudConfig {
userData, err := metadata.InstanceAttributeValue("user-data")
if err != nil {
log.Errorf("Could not retrieve user-data: %s", err)
}
projectSSHKeys, err := metadata.ProjectAttributeValue("sshKeys")
if err != nil {
log.Errorf("Could not retrieve project SSH Keys: %s", err)
}
instanceSSHKeys, err := metadata.InstanceAttributeValue("sshKeys")
if err != nil {
log.Errorf("Could not retrieve instance SSH Keys: %s", err)
}
nonUserDataSSHKeysRaw := projectSSHKeys + "\n" + instanceSSHKeys
nonUserDataSSHKeys := gceSshKeyFormatter(nonUserDataSSHKeysRaw)
gceCC := &GceCloudConfig{
FileName: gceCloudConfigFile,
UserData: userData,
NonUserDataSSHKeys: nonUserDataSSHKeys,
}
return gceCC
}
func GetAndCreateGceDataSourceFilename() (string, error) {
gceCC := NewGceCloudConfig()
err := gceCC.saveToFile(gceCC.FileName)
if err != nil {
log.Errorf("Error: %s", err)
return "", err
}
return gceCC.FileName, nil
}
func (cc *GceCloudConfig) saveToFile(filename string) error {
//Get Merged UserData sshkeys
data, err := cc.getMergedUserData()
if err != nil {
log.Errorf("Could not process userdata: %s", err)
return err
}
//write file
writeFile(filename, data)
return nil
}
func (cc *GceCloudConfig) getMergedUserData() ([]byte, error) {
var returnUserData []byte
userdata := make(map[string]interface{})
if cc.UserData != "" {
log.Infof("Found UserData Config")
err := yaml.Unmarshal([]byte(cc.UserData), &userdata)
if err != nil {
log.Errorf("Could not unmarshal data: %s", err)
return nil, err
}
}
var auth_keys []string
if _, exists := userdata["ssh_authorized_keys"]; exists {
udSshKeys := userdata["ssh_authorized_keys"].([]interface{})
log.Infof("userdata %s", udSshKeys)
for _, value := range udSshKeys {
auth_keys = append(auth_keys, value.(string))
}
}
if cc.NonUserDataSSHKeys != nil {
for _, value := range cc.NonUserDataSSHKeys {
auth_keys = append(auth_keys, value)
}
}
userdata["ssh_authorized_keys"] = auth_keys
yamlUserData, err := yaml.Marshal(&userdata)
if err != nil {
log.Errorf("Could not Marshal userdata: %s", err)
return nil, err
} else {
returnUserData = append([]byte("#cloud-config\n"), yamlUserData...)
}
return returnUserData, nil
}
func writeFile(filename string, data []byte) error {
if err := ioutil.WriteFile(filename, data, 400); err != nil {
log.Errorf("Could not write file %v", err)
return err
}
return nil
}
func gceSshKeyFormatter(rawKeys string) []string {
keySlice := strings.Split(rawKeys, "\n")
var cloudFormatedKeys []string
if len(keySlice) > 0 {
for i := range keySlice {
keyString := keySlice[i]
sIdx := strings.Index(keyString, ":")
if sIdx != -1 {
key := strings.TrimSpace(keyString[sIdx+1:])
keyA := strings.Split(key, " ")
key = strings.Join(keyA, " ")
if key != "" {
cloudFormatedKeys = append(cloudFormatedKeys, key)
}
}
}
}
return cloudFormatedKeys
}


@@ -31,6 +31,12 @@ func Main() {
HideHelp: true,
Subcommands: configSubcommands(),
},
{
Name: "console",
Usage: "console container commands",
HideHelp: true,
Subcommands: consoleSubcommands(),
},
{
Name: "dev",
ShortName: "d",


@@ -14,6 +14,7 @@ import (
"github.com/codegangsta/cli"
"github.com/rancher/os/config"
"github.com/rancher/os/util"
)
func configSubcommands() []cli.Command {
@@ -28,17 +29,6 @@ func configSubcommands() []cli.Command {
Usage: "set a value",
Action: configSet,
},
{
Name: "import",
Usage: "import configuration from standard in or a file",
Action: runImport,
Flags: []cli.Flag{
cli.StringFlag{
Name: "input, i",
Usage: "File from which to read",
},
},
},
{
Name: "images",
Usage: "List Docker images for a configuration from a file",
@@ -106,7 +96,7 @@ func imagesFromConfig(cfg *config.CloudConfig) []string {
return images
}
func runImages(c *cli.Context) {
func runImages(c *cli.Context) error {
configFile := c.String("input")
cfg, err := config.ReadConfig(nil, false, configFile)
if err != nil {
@@ -114,12 +104,14 @@ func runImages(c *cli.Context) {
}
images := imagesFromConfig(cfg)
fmt.Println(strings.Join(images, " "))
return nil
}
func runGenerate(c *cli.Context) {
func runGenerate(c *cli.Context) error {
if err := genTpl(os.Stdin, os.Stdout); err != nil {
log.Fatalf("Failed to generate config, err: '%s'", err)
}
return nil
}
func genTpl(in io.Reader, out io.Writer) error {
@@ -140,76 +132,30 @@ func env2map(env []string) map[string]string {
return m
}
func runImport(c *cli.Context) {
var input io.ReadCloser
var err error
input = os.Stdin
cfg, err := config.LoadConfig()
if err != nil {
log.Fatal(err)
}
inputFile := c.String("input")
if inputFile != "" {
input, err = os.Open(inputFile)
if err != nil {
log.Fatal(err)
}
defer input.Close()
}
bytes, err := ioutil.ReadAll(input)
if err != nil {
log.Fatal(err)
}
cfg, err = cfg.Import(bytes)
if err != nil {
log.Fatal(err)
}
if err := cfg.Save(); err != nil {
log.Fatal(err)
}
}
func configSet(c *cli.Context) {
func configSet(c *cli.Context) error {
key := c.Args().Get(0)
value := c.Args().Get(1)
if key == "" {
return
return nil
}
cfg, err := config.LoadConfig()
err := config.Set(key, value)
if err != nil {
log.Fatal(err)
}
cfgDiff, err := cfg.Set(key, value)
if err != nil {
log.Fatal(err)
}
if err := cfg.Save(cfgDiff); err != nil {
log.Fatal(err)
}
return nil
}
func configGet(c *cli.Context) {
func configGet(c *cli.Context) error {
arg := c.Args().Get(0)
if arg == "" {
return
return nil
}
cfg, err := config.LoadConfig()
val, err := config.Get(arg)
if err != nil {
log.WithFields(log.Fields{"err": err}).Fatal("config get: failed to load config")
}
val, err := cfg.GetIgnoreOmitEmpty(arg)
if err != nil {
log.WithFields(log.Fields{"cfg": cfg, "key": arg, "val": val, "err": err}).Fatal("config get: failed to retrieve value")
log.WithFields(log.Fields{"key": arg, "val": val, "err": err}).Fatal("config get: failed to retrieve value")
}
printYaml := false
@@ -229,31 +175,26 @@ func configGet(c *cli.Context) {
} else {
fmt.Println(val)
}
return nil
}
func merge(c *cli.Context) {
func merge(c *cli.Context) error {
bytes, err := ioutil.ReadAll(os.Stdin)
if err != nil {
log.Fatal(err)
}
cfg, err := config.LoadConfig()
err = config.Merge(bytes)
if err != nil {
log.Fatal(err)
}
cfg, err = cfg.MergeBytes(bytes)
if err != nil {
log.Fatal(err)
}
if err := cfg.Save(); err != nil {
log.Fatal(err)
}
return nil
}
func export(c *cli.Context) {
content, err := config.Dump(c.Bool("private"), c.Bool("full"))
func export(c *cli.Context) error {
content, err := config.Export(c.Bool("private"), c.Bool("full"))
if err != nil {
log.Fatal(err)
}
@@ -262,9 +203,11 @@ func export(c *cli.Context) {
if output == "" {
fmt.Println(content)
} else {
err := ioutil.WriteFile(output, []byte(content), 0400)
err := util.WriteFileAtomic(output, []byte(content), 0400)
if err != nil {
log.Fatal(err)
}
}
return nil
}
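
The signature churn in this file (func runImages(c *cli.Context) becoming func runImages(c *cli.Context) error, and likewise for every other action) tracks an update of the codegangsta/cli dependency: newer versions accept actions that return an error, so failures can propagate through the library instead of every command exiting via log.Fatal. The same reshaping repeats across the rest of this merge; the pattern in isolation:

    package main

    import (
        "os"

        "github.com/codegangsta/cli"
    )

    func main() {
        app := cli.NewApp()
        // Old style: Action = func(c *cli.Context) { ... } with no return.
        // New style: the action returns an error for the library to surface.
        app.Action = func(c *cli.Context) error {
            return nil
        }
        app.Run(os.Args)
    }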

cmd/control/console.go (new file, 128 lines)

@@ -0,0 +1,128 @@
package control
import (
"bufio"
"fmt"
"os"
"golang.org/x/net/context"
log "github.com/Sirupsen/logrus"
"github.com/codegangsta/cli"
composeConfig "github.com/docker/libcompose/config"
"github.com/docker/libcompose/project/options"
"github.com/rancher/os/compose"
"github.com/rancher/os/config"
"github.com/rancher/os/util/network"
)
func consoleSubcommands() []cli.Command {
return []cli.Command{
{
Name: "switch",
Usage: "switch console without a reboot",
Action: consoleSwitch,
Flags: []cli.Flag{
cli.BoolFlag{
Name: "force, f",
Usage: "do not prompt for input",
},
},
},
{
Name: "enable",
Usage: "set console to be switched on next reboot",
Action: consoleEnable,
},
{
Name: "list",
Usage: "list available consoles",
Action: consoleList,
},
}
}
func consoleSwitch(c *cli.Context) error {
if len(c.Args()) != 1 {
log.Fatal("Must specify exactly one console to switch to")
}
newConsole := c.Args()[0]
if !c.Bool("force") {
in := bufio.NewReader(os.Stdin)
fmt.Println("Switching consoles will destroy the current console container and restart Docker.")
fmt.Println("Note: You will also be logged out.")
if !yes(in, "Continue") {
return nil
}
}
cfg := config.LoadConfig()
if newConsole != "default" {
if err := compose.StageServices(cfg, newConsole); err != nil {
return err
}
}
service, err := compose.CreateService(nil, "switch-console", &composeConfig.ServiceConfigV1{
LogDriver: "json-file",
Privileged: true,
Net: "host",
Pid: "host",
Image: fmt.Sprintf("rancher/os-base:%s", config.VERSION),
Labels: map[string]string{
config.SCOPE: config.SYSTEM,
},
Command: []string{"/usr/bin/switch-console", newConsole},
VolumesFrom: []string{"all-volumes"},
})
if err != nil {
return err
}
if err = service.Delete(context.Background(), options.Delete{}); err != nil {
return err
}
if err = service.Up(context.Background(), options.Up{}); err != nil {
return err
}
return service.Log(context.Background(), true)
}
func consoleEnable(c *cli.Context) error {
if len(c.Args()) != 1 {
log.Fatal("Must specify exactly one console to enable")
}
newConsole := c.Args()[0]
cfg := config.LoadConfig()
if newConsole != "default" {
if err := compose.StageServices(cfg, newConsole); err != nil {
return err
}
}
if err := config.Set("rancher.console", newConsole); err != nil {
log.Errorf("Failed to update 'rancher.console': %v", err)
}
return nil
}
func consoleList(c *cli.Context) error {
cfg := config.LoadConfig()
consoles, err := network.GetConsoles(cfg.Rancher.Repositories.ToArray())
if err != nil {
return err
}
fmt.Println("default")
for _, console := range consoles {
fmt.Println(console)
}
return nil
}
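
Wired into Main() above, this gives the ros control binary a console command group: ros console list prints the built-in default plus whatever consoles the configured service repositories offer, ros console switch <name> tears the running console container down immediately (prompting first unless -f is given), and ros console enable <name> only records the choice in rancher.console for the next boot.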


@@ -7,8 +7,9 @@ import (
"github.com/rancher/os/util"
)
func devAction(c *cli.Context) {
func devAction(c *cli.Context) error {
if len(c.Args()) > 0 {
fmt.Println(util.ResolveDevice(c.Args()[0]))
}
return nil
}


@@ -11,15 +11,12 @@ import (
"github.com/rancher/os/util"
)
func envAction(c *cli.Context) {
cfg, err := config.LoadConfig()
if err != nil {
log.Fatal(err)
}
func envAction(c *cli.Context) error {
cfg := config.LoadConfig()
args := c.Args()
if len(args) == 0 {
return
return nil
}
osEnv := os.Environ()
@@ -39,4 +36,6 @@ func envAction(c *cli.Context) {
if err := syscall.Exec(args[0], args, util.Map2KVPairs(envMap)); err != nil {
log.Fatal(err)
}
return nil
}


@@ -48,7 +48,7 @@ var installCommand = cli.Command{
},
}
func installAction(c *cli.Context) {
func installAction(c *cli.Context) error {
if c.Args().Present() {
log.Fatalf("invalid arguments %v", c.Args())
}
@@ -58,10 +58,7 @@ func installAction(c *cli.Context) {
}
image := c.String("image")
cfg, err := config.LoadConfig()
if err != nil {
log.WithFields(log.Fields{"err": err}).Fatal("ros install: failed to load config")
}
cfg := config.LoadConfig()
if image == "" {
image = cfg.Rancher.Upgrade.Image + ":" + config.VERSION + config.SUFFIX
}
@@ -89,6 +86,8 @@ func installAction(c *cli.Context) {
if err := runInstall(image, installType, cloudConfig, device, force, reboot); err != nil {
log.WithFields(log.Fields{"err": err}).Fatal("Failed to run install")
}
return nil
}
func runInstall(image, installType, cloudConfig, device string, force, reboot bool) error {


@@ -9,13 +9,15 @@ import (
"os"
"strings"
"golang.org/x/net/context"
log "github.com/Sirupsen/logrus"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
dockerClient "github.com/fsouza/go-dockerclient"
"github.com/codegangsta/cli"
"github.com/docker/libcompose/project"
dockerClient "github.com/docker/engine-api/client"
composeConfig "github.com/docker/libcompose/config"
"github.com/docker/libcompose/project/options"
"github.com/rancher/os/cmd/power"
"github.com/rancher/os/compose"
"github.com/rancher/os/config"
@@ -115,7 +117,7 @@ func getImages() (*Images, error) {
return parseBody(body)
}
func osMetaDataGet(c *cli.Context) {
func osMetaDataGet(c *cli.Context) error {
images, err := getImages()
if err != nil {
log.Fatal(err)
@@ -127,13 +129,15 @@ func osMetaDataGet(c *cli.Context) {
}
for _, image := range images.Available {
_, err := client.InspectImage(image)
if err == dockerClient.ErrNoSuchImage {
_, _, err := client.ImageInspectWithRaw(context.Background(), image, false)
if dockerClient.IsErrImageNotFound(err) {
fmt.Println(image, "remote")
} else {
fmt.Println(image, "local")
}
}
return nil
}
func getLatestImage() (string, error) {
@@ -145,7 +149,7 @@ func getLatestImage() (string, error) {
return images.Current, nil
}
func osUpgrade(c *cli.Context) {
func osUpgrade(c *cli.Context) error {
image := c.String("image")
if image == "" {
@@ -164,20 +168,13 @@ func osUpgrade(c *cli.Context) {
if err := startUpgradeContainer(image, c.Bool("stage"), c.Bool("force"), !c.Bool("no-reboot"), c.Bool("kexec"), c.Bool("upgrade-console"), c.String("append")); err != nil {
log.Fatal(err)
}
return nil
}
func osVersion(c *cli.Context) {
func osVersion(c *cli.Context) error {
fmt.Println(config.VERSION)
}
func yes(in *bufio.Reader, question string) bool {
fmt.Printf("%s [y/N]: ", question)
line, err := in.ReadString('\n')
if err != nil {
log.Fatal(err)
}
return strings.ToLower(line[0:1]) == "y"
return nil
}
func startUpgradeContainer(image string, stage, force, reboot, kexec bool, upgradeConsole bool, kernelArgs string) error {
@@ -198,27 +195,31 @@ func startUpgradeContainer(image string, stage, force, reboot, kexec bool, upgra
}
if upgradeConsole {
cfg, err := config.LoadConfig()
if err != nil {
log.Fatal(err)
}
cfg.Rancher.ForceConsoleRebuild = true
if err := cfg.Save(); err != nil {
if err := config.Set("rancher.force_console_rebuild", true); err != nil {
log.Fatal(err)
}
}
container, err := compose.CreateService(nil, "os-upgrade", &project.ServiceConfig{
fmt.Printf("Upgrading to %s\n", image)
confirmation := "Continue"
imageSplit := strings.Split(image, ":")
if len(imageSplit) > 1 && imageSplit[1] == config.VERSION+config.SUFFIX {
confirmation = fmt.Sprintf("Already at version %s. Continue anyway", imageSplit[1])
}
if !force && !yes(in, confirmation) {
os.Exit(1)
}
container, err := compose.CreateService(nil, "os-upgrade", &composeConfig.ServiceConfigV1{
LogDriver: "json-file",
Privileged: true,
Net: "host",
Pid: "host",
Image: image,
Labels: project.NewSliceorMap(map[string]string{
Labels: map[string]string{
config.SCOPE: config.SYSTEM,
}),
Command: project.NewCommand(command...),
},
Command: command,
})
if err != nil {
return err
@@ -230,41 +231,30 @@ func startUpgradeContainer(image string, stage, force, reboot, kexec bool, upgra
}
// Only pull image if not found locally
if _, err := client.InspectImage(image); err != nil {
if err := container.Pull(); err != nil {
if _, _, err := client.ImageInspectWithRaw(context.Background(), image, false); err != nil {
if err := container.Pull(context.Background()); err != nil {
return err
}
}
if !stage {
imageSplit := strings.Split(image, ":")
if len(imageSplit) > 1 && imageSplit[1] == config.VERSION {
if !force && !yes(in, fmt.Sprintf("Already at version %s. Continue anyways", imageSplit[1])) {
os.Exit(1)
}
} else {
fmt.Printf("Upgrading to %s\n", image)
if !force && !yes(in, "Continue") {
os.Exit(1)
}
}
// If there is already an upgrade container, delete it
// Up() should do this, but currently does not due to a bug
if err := container.Delete(); err != nil {
if err := container.Delete(context.Background(), options.Delete{}); err != nil {
return err
}
if err := container.Up(); err != nil {
if err := container.Up(context.Background(), options.Up{
Log: true,
}); err != nil {
return err
}
if err := container.Log(); err != nil {
if err := container.Log(context.Background(), true); err != nil {
return err
}
if err := container.Delete(); err != nil {
if err := container.Delete(context.Background(), options.Delete{}); err != nil {
return err
}
@@ -288,10 +278,6 @@ func parseBody(body []byte) (*Images, error) {
}
func getUpgradeUrl() (string, error) {
cfg, err := config.LoadConfig()
if err != nil {
return "", err
}
cfg := config.LoadConfig()
return cfg.Rancher.Upgrade.Url, nil
}


@@ -2,16 +2,17 @@ package control
import (
"fmt"
"syscall"
"github.com/codegangsta/cli"
"github.com/rancher/os/config"
"syscall"
)
func selinuxCommand() cli.Command {
app := cli.Command{}
app.Name = "selinux"
app.Usage = "Launch SELinux tools container."
app.Action = func(c *cli.Context) {
app.Action = func(c *cli.Context) error {
argv := []string{"system-docker", "run", "-it", "--privileged", "--rm",
"--net", "host", "--pid", "host", "--ipc", "host",
"-v", "/usr/bin/docker:/usr/bin/docker.dist:ro",
@@ -49,8 +50,9 @@ func selinuxCommand() cli.Command {
"-v", "/etc/selinux:/etc/selinux",
"-v", "/var/lib/selinux:/var/lib/selinux",
"-v", "/usr/share/selinux:/usr/share/selinux",
fmt.Sprintf("rancher/os-selinuxtools:%s", config.VERSION + config.SUFFIX), "bash"}
fmt.Sprintf("rancher/os-selinuxtools:%s", config.VERSION+config.SUFFIX), "bash"}
syscall.Exec("/bin/system-docker", argv, []string{})
return nil
}
return app


@@ -17,13 +17,9 @@ import (
type projectFactory struct {
}
func (p *projectFactory) Create(c *cli.Context) (*project.Project, error) {
cfg, err := config.LoadConfig()
if err != nil {
return nil, err
}
return compose.GetProject(cfg, true)
func (p *projectFactory) Create(c *cli.Context) (project.APIProject, error) {
cfg := config.LoadConfig()
return compose.GetProject(cfg, true, false)
}
func beforeApp(c *cli.Context) error {
@@ -86,12 +82,13 @@ func serviceSubCommands() []cli.Command {
}
}
func disable(c *cli.Context) {
func updateIncludedServices(cfg *config.CloudConfig) error {
return config.Set("rancher.services_include", cfg.Rancher.ServicesInclude)
}
func disable(c *cli.Context) error {
changed := false
cfg, err := config.LoadConfig()
if err != nil {
logrus.Fatal(err)
}
cfg := config.LoadConfig()
for _, service := range c.Args() {
if _, ok := cfg.Rancher.ServicesInclude[service]; !ok {
@@ -103,18 +100,17 @@ func disable(c *cli.Context) {
}
if changed {
if err = cfg.Save(); err != nil {
if err := updateIncludedServices(cfg); err != nil {
logrus.Fatal(err)
}
}
return nil
}
func del(c *cli.Context) {
func del(c *cli.Context) error {
changed := false
cfg, err := config.LoadConfig()
if err != nil {
logrus.Fatal(err)
}
cfg := config.LoadConfig()
for _, service := range c.Args() {
if _, ok := cfg.Rancher.ServicesInclude[service]; !ok {
@@ -125,17 +121,16 @@ func del(c *cli.Context) {
}
if changed {
if err = cfg.Save(); err != nil {
if err := updateIncludedServices(cfg); err != nil {
logrus.Fatal(err)
}
}
return nil
}
func enable(c *cli.Context) {
cfg, err := config.LoadConfig()
if err != nil {
logrus.Fatal(err)
}
func enable(c *cli.Context) error {
cfg := config.LoadConfig()
var enabledServices []string
@@ -155,17 +150,16 @@ func enable(c *cli.Context) {
logrus.Fatal(err)
}
if err := cfg.Save(); err != nil {
if err := updateIncludedServices(cfg); err != nil {
logrus.Fatal(err)
}
}
return nil
}
func list(c *cli.Context) {
cfg, err := config.LoadConfig()
if err != nil {
logrus.Fatal(err)
}
func list(c *cli.Context) error {
cfg := config.LoadConfig()
clone := make(map[string]bool)
for service, enabled := range cfg.Rancher.ServicesInclude {
@@ -197,4 +191,6 @@ func list(c *cli.Context) {
fmt.Printf("disabled %s\n", service)
}
}
return nil
}


@@ -10,11 +10,20 @@ import (
"github.com/codegangsta/cli"
machineUtil "github.com/docker/machine/utils"
"github.com/rancher/os/config"
"github.com/rancher/os/util"
)
const (
NAME string = "rancher"
BITS int = 2048
ServerTlsPath string = "/etc/docker/tls"
ClientTlsPath string = "/home/rancher/.docker"
Cert string = "cert.pem"
Key string = "key.pem"
ServerCert string = "server-cert.pem"
ServerKey string = "server-key.pem"
CaCert string = "ca.pem"
CaKey string = "ca-key.pem"
)
func tlsConfCommands() []cli.Command {
@@ -44,14 +53,12 @@
}
}
func writeCerts(generateServer bool, hostname []string, cfg *config.CloudConfig, certPath, keyPath, caCertPath, caKeyPath string) error {
func writeCerts(generateServer bool, hostname []string, certPath, keyPath, caCertPath, caKeyPath string) error {
if !generateServer {
return machineUtil.GenerateCert([]string{""}, certPath, keyPath, caCertPath, caKeyPath, NAME, BITS)
}
if cfg.Rancher.Docker.ServerKey == "" || cfg.Rancher.Docker.ServerCert == "" {
err := machineUtil.GenerateCert(hostname, certPath, keyPath, caCertPath, caKeyPath, NAME, BITS)
if err != nil {
if err := machineUtil.GenerateCert(hostname, certPath, keyPath, caCertPath, caKeyPath, NAME, BITS); err != nil {
return err
}
@@ -65,80 +72,62 @@ func writeCerts(generateServer bool, hostname []string, cfg *config.CloudConfig,
return err
}
cfg, err = cfg.Merge(map[interface{}]interface{}{
"rancher": map[interface{}]interface{}{
"docker": map[interface{}]interface{}{
"server_cert": string(cert),
"server_key": string(key),
},
},
})
if err != nil {
// certPath, keyPath are already written to by machineUtil.GenerateCert()
if err := config.Set("rancher.docker.server_cert", string(cert)); err != nil {
return err
}
if err := config.Set("rancher.docker.server_key", string(key)); err != nil {
return err
}
return cfg.Save() // certPath, keyPath are already written to by machineUtil.GenerateCert()
}
if err := ioutil.WriteFile(certPath, []byte(cfg.Rancher.Docker.ServerCert), 0400); err != nil {
return err
}
return ioutil.WriteFile(keyPath, []byte(cfg.Rancher.Docker.ServerKey), 0400)
return nil
}
func writeCaCerts(cfg *config.CloudConfig, caCertPath, caKeyPath string) (*config.CloudConfig, error) {
func writeCaCerts(cfg *config.CloudConfig, caCertPath, caKeyPath string) error {
if cfg.Rancher.Docker.CACert == "" {
if err := machineUtil.GenerateCACertificate(caCertPath, caKeyPath, NAME, BITS); err != nil {
return nil, err
return err
}
caCert, err := ioutil.ReadFile(caCertPath)
if err != nil {
return nil, err
return err
}
caKey, err := ioutil.ReadFile(caKeyPath)
if err != nil {
return nil, err
return err
}
cfg, err = cfg.Merge(map[interface{}]interface{}{
"rancher": map[interface{}]interface{}{
"docker": map[interface{}]interface{}{
"ca_key": string(caKey),
"ca_cert": string(caCert),
},
},
})
if err != nil {
return nil, err
// caCertPath, caKeyPath are already written to by machineUtil.GenerateCACertificate()
if err := config.Set("rancher.docker.ca_cert", string(caCert)); err != nil {
return err
}
if err := config.Set("rancher.docker.ca_key", string(caKey)); err != nil {
return err
}
} else {
cfg = config.LoadConfig()
if err := util.WriteFileAtomic(caCertPath, []byte(cfg.Rancher.Docker.CACert), 0400); err != nil {
return err
}
if err = cfg.Save(); err != nil {
return nil, err
if err := util.WriteFileAtomic(caKeyPath, []byte(cfg.Rancher.Docker.CAKey), 0400); err != nil {
return err
}
}
return cfg, nil // caCertPath, caKeyPath are already written to by machineUtil.GenerateCACertificate()
}
if err := ioutil.WriteFile(caCertPath, []byte(cfg.Rancher.Docker.CACert), 0400); err != nil {
return nil, err
}
if err := ioutil.WriteFile(caKeyPath, []byte(cfg.Rancher.Docker.CAKey), 0400); err != nil {
return nil, err
}
return cfg, nil
return nil
}
func tlsConfCreate(c *cli.Context) {
func tlsConfCreate(c *cli.Context) error {
err := generate(c)
if err != nil {
log.Fatal(err)
}
return nil
}
func generate(c *cli.Context) error {
@@ -150,27 +139,22 @@ func generate(c *cli.Context) error {
}
func Generate(generateServer bool, outDir string, hostnames []string) error {
cfg, err := config.LoadConfig()
if err != nil {
return err
}
if outDir == "" {
if generateServer {
outDir = "/etc/docker/tls"
outDir = ServerTlsPath
} else {
outDir = "/home/rancher/.docker"
outDir = ClientTlsPath
}
log.Infof("Out directory (-d, --dir) not specified, using default: %s", outDir)
}
caCertPath := filepath.Join(outDir, "ca.pem")
caKeyPath := filepath.Join(outDir, "ca-key.pem")
certPath := filepath.Join(outDir, "cert.pem")
keyPath := filepath.Join(outDir, "key.pem")
caCertPath := filepath.Join(outDir, CaCert)
caKeyPath := filepath.Join(outDir, CaKey)
certPath := filepath.Join(outDir, Cert)
keyPath := filepath.Join(outDir, Key)
if generateServer {
certPath = filepath.Join(outDir, "server-cert.pem")
keyPath = filepath.Join(outDir, "server-key.pem")
certPath = filepath.Join(outDir, ServerCert)
keyPath = filepath.Join(outDir, ServerKey)
}
if _, err := os.Stat(outDir); os.IsNotExist(err) {
@@ -179,11 +163,11 @@ func Generate(generateServer bool, outDir string, hostnames []string) error {
}
}
cfg, err = writeCaCerts(cfg, caCertPath, caKeyPath)
if err != nil {
cfg := config.LoadConfig()
if err := writeCaCerts(cfg, caCertPath, caKeyPath); err != nil {
return err
}
if err := writeCerts(generateServer, hostnames, cfg, certPath, keyPath, caCertPath, caKeyPath); err != nil {
if err := writeCerts(generateServer, hostnames, certPath, keyPath, caCertPath, caKeyPath); err != nil {
return err
}

cmd/control/util.go (new file, 19 lines)

@@ -0,0 +1,19 @@
package control
import (
"bufio"
"fmt"
"strings"
log "github.com/Sirupsen/logrus"
)
func yes(in *bufio.Reader, question string) bool {
fmt.Printf("%s [y/N]: ", question)
line, err := in.ReadString('\n')
if err != nil {
log.Fatal(err)
}
return strings.ToLower(line[0:1]) == "y"
}


@@ -3,55 +3,58 @@ package network
import (
"flag"
"os"
"os/exec"
"golang.org/x/net/context"
log "github.com/Sirupsen/logrus"
"github.com/docker/libnetwork/resolvconf"
"github.com/rancher/netconf"
"github.com/rancher/os/config"
"github.com/rancher/os/docker"
"github.com/rancher/os/hostname"
)
const (
NETWORK_DONE = "/var/run/network.done"
WAIT_FOR_NETWORK = "wait-for-network"
)
var (
daemon bool
stopNetworkPre bool
flags *flag.FlagSet
)
func init() {
flags = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
flags.BoolVar(&daemon, "daemon", false, "run dhcpd as daemon")
}
func sendTerm(proc string) {
cmd := exec.Command("killall", "-TERM", proc)
cmd.Stderr = os.Stderr
cmd.Stdout = os.Stdout
cmd.Run()
flags.BoolVar(&stopNetworkPre, "stop-network-pre", false, "")
}
func Main() {
flags.Parse(os.Args[1:])
log.Infof("Running network: daemon=%v", daemon)
log.Infof("Running network: stop-network-pre=%v", stopNetworkPre)
os.Remove(NETWORK_DONE) // ignore error
cfg, err := config.LoadConfig()
if stopNetworkPre {
client, err := docker.NewSystemClient()
if err != nil {
log.Fatal(err)
log.Error(err)
}
err = client.ContainerStop(context.Background(), "network-pre", 10)
if err != nil {
log.Error(err)
}
_, err = client.ContainerWait(context.Background(), "network-pre")
if err != nil {
log.Error(err)
}
}
cfg := config.LoadConfig()
nameservers := cfg.Rancher.Network.Dns.Nameservers
search := cfg.Rancher.Network.Dns.Search
userSetDns := len(nameservers) > 0 || len(search) > 0
if !userSetDns {
nameservers = cfg.Rancher.DefaultNetwork.Dns.Nameservers
search = cfg.Rancher.DefaultNetwork.Dns.Search
nameservers = cfg.Rancher.Defaults.Network.Dns.Nameservers
search = cfg.Rancher.Defaults.Network.Dns.Search
}
if _, err := resolvconf.Build("/etc/resolv.conf", nameservers, search, nil); err != nil {
@@ -75,14 +78,5 @@ func Main() {
log.Error(err)
}
if f, err := os.Create(NETWORK_DONE); err != nil {
log.Error(err)
} else {
f.Close()
}
sendTerm(WAIT_FOR_NETWORK)
if daemon {
select {}
}
}


@@ -1,7 +1,6 @@
package power
import (
"bufio"
"errors"
"os"
"path/filepath"
@@ -9,14 +8,15 @@ import (
"strings"
"syscall"
"golang.org/x/net/context"
log "github.com/Sirupsen/logrus"
dockerClient "github.com/fsouza/go-dockerclient"
"github.com/docker/engine-api/types"
"github.com/docker/engine-api/types/container"
"github.com/docker/engine-api/types/filters"
"github.com/rancher/os/docker"
)
const (
DOCKER_CGROUPS_FILE = "/proc/self/cgroup"
"github.com/rancher/os/util"
)
func runDocker(name string) error {
@@ -36,11 +36,10 @@ func runDocker(name string) error {
cmd = os.Args
}
exiting, err := client.InspectContainer(name)
if exiting != nil {
err := client.RemoveContainer(dockerClient.RemoveContainerOptions{
ID: exiting.ID,
Force: true,
existing, err := client.ContainerInspect(context.Background(), name)
if err == nil && existing.ID != "" {
err := client.ContainerRemove(context.Background(), types.ContainerRemoveOptions{
ContainerID: existing.ID,
})
if err != nil {
@@ -48,53 +47,50 @@ func runDocker(name string) error {
}
}
currentContainerId, err := getCurrentContainerId()
currentContainerId, err := util.GetCurrentContainerId()
if err != nil {
return err
}
currentContainer, err := client.InspectContainer(currentContainerId)
currentContainer, err := client.ContainerInspect(context.Background(), currentContainerId)
if err != nil {
return err
}
powerContainer, err := client.CreateContainer(dockerClient.CreateContainerOptions{
Name: name,
Config: &dockerClient.Config{
powerContainer, err := client.ContainerCreate(context.Background(),
&container.Config{
Image: currentContainer.Config.Image,
Cmd: cmd,
Env: []string{
"IN_DOCKER=true",
},
},
HostConfig: &dockerClient.HostConfig{
&container.HostConfig{
PidMode: "host",
VolumesFrom: []string{
currentContainer.ID,
},
Privileged: true,
},
})
}, nil, name)
if err != nil {
return err
}
go func() {
client.AttachToContainer(dockerClient.AttachToContainerOptions{
Container: powerContainer.ID,
OutputStream: os.Stdout,
ErrorStream: os.Stderr,
client.ContainerAttach(context.Background(), types.ContainerAttachOptions{
ContainerID: powerContainer.ID,
Stream: true,
Stderr: true,
Stdout: true,
})
}()
err = client.StartContainer(powerContainer.ID, powerContainer.HostConfig)
err = client.ContainerStart(context.Background(), powerContainer.ID)
if err != nil {
return err
}
_, err = client.WaitContainer(powerContainer.ID)
_, err = client.ContainerWait(context.Background(), powerContainer.ID)
if err != nil {
log.Fatal(err)
@@ -172,19 +168,20 @@ func shutDownContainers() error {
return err
}
opts := dockerClient.ListContainersOptions{
filter := filters.NewArgs()
filter.Add("status", "running")
opts := types.ContainerListOptions{
All: true,
Filters: map[string][]string{
"status": {"running"},
},
Filter: filter,
}
containers, err := client.ListContainers(opts)
containers, err := client.ContainerList(context.Background(), opts)
if err != nil {
return err
}
currentContainerId, err := getCurrentContainerId()
currentContainerId, err := util.GetCurrentContainerId()
if err != nil {
return err
}
@@ -197,7 +194,7 @@ func shutDownContainers() error {
}
log.Infof("Stopping %s : %v", container.ID[:12], container.Names)
stopErr := client.StopContainer(container.ID, uint(timeout))
stopErr := client.ContainerStop(context.Background(), container.ID, timeout)
if stopErr != nil {
stopErrorStrings = append(stopErrorStrings, " ["+container.ID+"] "+stopErr.Error())
}
@@ -209,7 +206,7 @@ func shutDownContainers() error {
if container.ID == currentContainerId {
continue
}
_, waitErr := client.WaitContainer(container.ID)
_, waitErr := client.ContainerWait(context.Background(), container.ID)
if waitErr != nil {
waitErrorStrings = append(waitErrorStrings, " ["+container.ID+"] "+waitErr.Error())
}
@@ -221,35 +218,3 @@ func shutDownContainers() error {
return nil
}
func getCurrentContainerId() (string, error) {
file, err := os.Open(DOCKER_CGROUPS_FILE)
if err != nil {
return "", err
}
fileReader := bufio.NewScanner(file)
if !fileReader.Scan() {
return "", errors.New("Empty file /proc/self/cgroup")
}
line := fileReader.Text()
parts := strings.Split(line, "/")
for len(parts) != 3 {
if !fileReader.Scan() {
return "", errors.New("Found no docker cgroups")
}
line = fileReader.Text()
parts = strings.Split(line, "/")
if len(parts) == 3 {
if strings.HasSuffix(parts[1], "docker") {
break
} else {
parts = nil
}
}
}
return parts[len(parts)-1:][0], nil
}
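
The container-ID parser deleted here moves to util.GetCurrentContainerId (note the new rancher/os/util import above). It works because, inside a Docker container, each /proc/self/cgroup line ends with the container's ID. An illustrative example; the ID below is made up:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // A typical line inside a Docker container; the hierarchy number and
        // controller vary, but the last path component is the container ID.
        line := "8:memory:/docker/52fdfc072182654f163f5f0f9a621d7296fdfc0721"
        parts := strings.Split(line, "/")
        fmt.Println(parts[len(parts)-1])
    }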


@@ -31,7 +31,7 @@ func Main() {
app.Run(os.Args)
}
func shutdown(c *cli.Context) {
func shutdown(c *cli.Context) error {
common("")
reboot := c.String("r")
poweroff := c.String("h")
@@ -41,4 +41,6 @@ func shutdown(c *cli.Context) {
} else if poweroff == "now" {
PowerOff()
}
return nil
}


@@ -48,7 +48,7 @@ func setupSigterm() {
}()
}
func run(c *cli.Context) {
func run(c *cli.Context) error {
setupSigterm()
var stream io.Reader = os.Stdin
@@ -79,6 +79,7 @@ func run(c *cli.Context) {
}
wg.Wait()
return nil
}
func addProcess(process *os.Process) {


@@ -0,0 +1,45 @@
package switchconsole
import (
"os"
log "github.com/Sirupsen/logrus"
"github.com/docker/libcompose/project/options"
"github.com/rancher/os/compose"
"github.com/rancher/os/config"
"golang.org/x/net/context"
)
func Main() {
if len(os.Args) != 2 {
log.Fatal("Must specify exactly one existing container")
}
newConsole := os.Args[1]
cfg := config.LoadConfig()
project, err := compose.GetProject(cfg, true, false)
if err != nil {
log.Fatal(err)
}
if newConsole != "default" {
if err = compose.LoadService(project, cfg, true, newConsole); err != nil {
log.Fatal(err)
}
}
if err = project.Up(context.Background(), options.Up{
Log: true,
}, "console"); err != nil {
log.Fatal(err)
}
if err = project.Restart(context.Background(), 10, "docker"); err != nil {
log.Errorf("Failed to restart Docker: %v", err)
}
if err = config.Set("rancher.console", newConsole); err != nil {
log.Errorf("Failed to update 'rancher.console': %v", err)
}
}


@@ -1,30 +1,21 @@
package systemdocker
import (
"log"
"os"
"strings"
"syscall"
log "github.com/Sirupsen/logrus"
"github.com/docker/docker/docker"
"github.com/rancher/os/config"
)
func Main() {
var newEnv []string
for _, env := range os.Environ() {
if !strings.HasPrefix(env, "DOCKER_HOST=") {
newEnv = append(newEnv, env)
}
}
newEnv = append(newEnv, "DOCKER_HOST="+config.DOCKER_SYSTEM_HOST)
if os.Geteuid() != 0 {
log.Fatalf("%s: Need to be root", os.Args[0])
}
os.Args[0] = config.DOCKER_DIST_BIN
if err := syscall.Exec(os.Args[0], os.Args, newEnv); err != nil {
log.Fatal(err)
if os.Getenv("DOCKER_HOST") == "" {
os.Setenv("DOCKER_HOST", config.DOCKER_SYSTEM_HOST)
}
docker.Main()
}


@@ -1,153 +1,211 @@
package userdocker
import (
"bufio"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"os/signal"
"strings"
"strconv"
"syscall"
"time"
"golang.org/x/net/context"
"path/filepath"
log "github.com/Sirupsen/logrus"
"github.com/docker/libcompose/docker"
"github.com/docker/engine-api/types"
composeClient "github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/project"
"github.com/rancher/os/cmd/control"
"github.com/rancher/os/compose"
"github.com/rancher/os/config"
"github.com/opencontainers/runc/libcontainer/cgroups"
_ "github.com/opencontainers/runc/libcontainer/nsenter"
"github.com/opencontainers/runc/libcontainer/system"
rosDocker "github.com/rancher/os/docker"
"github.com/rancher/os/util"
)
const (
DEFAULT_STORAGE_CONTEXT = "console"
DOCKER_PID_FILE = "/var/run/docker.pid"
DOCKER_COMMAND = "docker-init"
userDocker = "user-docker"
)
func Main() {
cfg, err := config.LoadConfig()
cfg := config.LoadConfig()
execID, resp, err := startDocker(cfg)
if err != nil {
log.Fatal(err)
}
if len(os.Args) == 1 {
if err := enter(cfg); err != nil {
process, err := getDockerProcess()
if err != nil {
log.Fatal(err)
}
} else {
if err := main(cfg); err != nil {
handleTerm(process)
// Wait for Docker daemon to exit
io.Copy(ioutil.Discard, resp.Reader)
resp.Close()
client, err := rosDocker.NewSystemClient()
if err != nil {
log.Fatal(err)
}
}
}
func enter(cfg *config.CloudConfig) error {
context := cfg.Rancher.Docker.StorageContext
if context == "" {
context = DEFAULT_STORAGE_CONTEXT
}
log.Infof("Starting Docker in context: %s", context)
p, err := compose.GetProject(cfg, true)
state, err := client.ContainerExecInspect(context.Background(), execID)
if err != nil {
log.Fatal(err)
}
// Proxy exit code
os.Exit(state.ExitCode)
}
func writeCerts(cfg *config.CloudConfig) error {
outDir := control.ServerTlsPath
if err := os.MkdirAll(outDir, 0700); err != nil {
return err
}
caCertPath := filepath.Join(outDir, control.CaCert)
caKeyPath := filepath.Join(outDir, control.CaKey)
serverCertPath := filepath.Join(outDir, control.ServerCert)
serverKeyPath := filepath.Join(outDir, control.ServerKey)
if cfg.Rancher.Docker.CACert != "" {
if err := util.WriteFileAtomic(caCertPath, []byte(cfg.Rancher.Docker.CACert), 0400); err != nil {
return err
}
pid, err := waitForPid(context, p)
if err := util.WriteFileAtomic(caKeyPath, []byte(cfg.Rancher.Docker.CAKey), 0400); err != nil {
return err
}
}
if cfg.Rancher.Docker.ServerCert != "" {
if err := util.WriteFileAtomic(serverCertPath, []byte(cfg.Rancher.Docker.ServerCert), 0400); err != nil {
return err
}
if err := util.WriteFileAtomic(serverKeyPath, []byte(cfg.Rancher.Docker.ServerKey), 0400); err != nil {
return err
}
}
return nil
}
func startDocker(cfg *config.CloudConfig) (string, types.HijackedResponse, error) {
storageContext := cfg.Rancher.Docker.StorageContext
if storageContext == "" {
storageContext = DEFAULT_STORAGE_CONTEXT
}
log.Infof("Starting Docker in context: %s", storageContext)
p, err := compose.GetProject(cfg, true, false)
if err != nil {
return err
return "", types.HijackedResponse{}, err
}
log.Infof("%s PID %d", context, pid)
return runNsenter(pid)
}
type result struct {
Pid int `json:"Pid"`
}
func findProgram(searchPaths ...string) string {
prog := ""
for _, i := range searchPaths {
var err error
prog, err = exec.LookPath(i)
if err == nil {
break
}
prog = i
}
return prog
}
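
findProgram tries each candidate with exec.LookPath and, if none resolve on PATH, falls back to the last name verbatim. A quick hypothetical call, assuming the function above is in scope:

prog := findProgram("docker-init", "dockerlaunch", "docker")
// e.g. "/usr/local/bin/dockerlaunch" if that is the first hit on PATH;
// if nothing resolves, prog falls back to the literal "docker"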
func runNsenter(pid int) error {
args := []string{findProgram(userDocker), "main"}
r, w, err := os.Pipe()
pid, err := waitForPid(storageContext, p)
if err != nil {
return err
return "", types.HijackedResponse{}, err
}
cmd := &exec.Cmd{
Path: args[0],
Args: args,
Stdin: os.Stdin,
Stdout: os.Stdout,
Stderr: os.Stderr,
ExtraFiles: []*os.File{w},
Env: append(os.Environ(),
"_LIBCONTAINER_INITPIPE=3",
fmt.Sprintf("_LIBCONTAINER_INITPID=%d", pid),
),
}
log.Infof("%s PID %d", storageContext, pid)
if err := cmd.Start(); err != nil {
return err
}
w.Close()
var result result
if err := json.NewDecoder(r).Decode(&result); err != nil {
return err
}
if err := cmd.Wait(); err != nil {
return err
}
log.Infof("Docker PID %d", result.Pid)
p, err := os.FindProcess(result.Pid)
client, err := rosDocker.NewSystemClient()
if err != nil {
return err
return "", types.HijackedResponse{}, err
}
handleTerm(p)
if err := switchCgroup(result.Pid, pid); err != nil {
return err
if err := os.Remove(DOCKER_PID_FILE); err != nil && !os.IsNotExist(err) {
return "", types.HijackedResponse{}, err
}
_, err = p.Wait()
return err
dockerCfg := cfg.Rancher.Docker
args := dockerCfg.FullArgs()
log.Debugf("User Docker args: %v", args)
if dockerCfg.TLS {
if err := writeCerts(cfg); err != nil {
return "", types.HijackedResponse{}, err
}
}
cmd := []string{"env"}
log.Info(dockerCfg.AppendEnv())
cmd = append(cmd, dockerCfg.AppendEnv()...)
cmd = append(cmd, DOCKER_COMMAND)
cmd = append(cmd, args...)
log.Infof("Running %v", cmd)
resp, err := client.ContainerExecCreate(context.Background(), types.ExecConfig{
Container: storageContext,
Privileged: true,
AttachStderr: true,
AttachStdout: true,
Cmd: cmd,
})
if err != nil {
return "", types.HijackedResponse{}, err
}
attachResp, err := client.ContainerExecAttach(context.Background(), resp.ID, types.ExecConfig{
Detach: false,
AttachStderr: true,
AttachStdout: true,
})
if err != nil {
return "", types.HijackedResponse{}, err
}
if err := client.ContainerExecStart(context.Background(), resp.ID, types.ExecStartCheck{
Detach: false,
}); err != nil {
return "", types.HijackedResponse{}, err
}
return resp.ID, attachResp, nil
}
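
startDocker drives the three-step engine-api exec lifecycle: ContainerExecCreate registers the command against the storage container, ContainerExecAttach returns the hijacked output stream, and ContainerExecStart kicks it off. A condensed, self-contained sketch of that sequence against an arbitrary container; the target container name and command are assumptions:

package main

import (
	"io"
	"log"
	"os"

	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"

	rosDocker "github.com/rancher/os/docker"
)

func main() {
	ctx := context.Background()
	client, err := rosDocker.NewSystemClient()
	if err != nil {
		log.Fatal(err)
	}
	// 1) Register the command against a running container.
	exec, err := client.ContainerExecCreate(ctx, types.ExecConfig{
		Container:    "console", // hypothetical target container
		Cmd:          []string{"echo", "hello"},
		AttachStdout: true,
		AttachStderr: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	// 2) Attach to get the hijacked output stream.
	stream, err := client.ContainerExecAttach(ctx, exec.ID, types.ExecConfig{
		AttachStdout: true,
		AttachStderr: true,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer stream.Close()
	// 3) Start it, then drain output until the exec exits.
	if err := client.ContainerExecStart(ctx, exec.ID, types.ExecStartCheck{}); err != nil {
		log.Fatal(err)
	}
	io.Copy(os.Stdout, stream.Reader)
}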
func handleTerm(p *os.Process) {
func getDockerProcess() (*os.Process, error) {
pidBytes, err := waitForFile(DOCKER_PID_FILE)
if err != nil {
return nil, err
}
dockerPid, err := strconv.Atoi(string(pidBytes))
if err != nil {
return nil, err
}
return os.FindProcess(dockerPid)
}
func handleTerm(process *os.Process) {
term := make(chan os.Signal)
signal.Notify(term, syscall.SIGTERM)
go func() {
<-term
p.Signal(syscall.SIGTERM)
process.Signal(syscall.SIGTERM)
}()
}
func waitForFile(file string) ([]byte, error) {
for {
contents, err := ioutil.ReadFile(file)
if os.IsNotExist(err) {
log.Infof("Waiting for %s", file)
time.Sleep(1 * time.Second)
} else if err != nil {
return nil, err
} else {
return contents, nil
}
}
}
func waitForPid(service string, project *project.Project) (int, error) {
log.Infof("Getting PID for service: %s", service)
for {
@ -166,7 +224,7 @@ func getPid(service string, project *project.Project) (int, error) {
return 0, err
}
containers, err := s.Containers()
containers, err := s.Containers(context.Background())
if err != nil {
return 0, err
}
@ -175,7 +233,7 @@ func getPid(service string, project *project.Project) (int, error) {
return 0, nil
}
client, err := docker.CreateClient(docker.ClientOpts{
client, err := composeClient.Create(composeClient.Options{
Host: config.DOCKER_SYSTEM_HOST,
})
if err != nil {
@ -187,8 +245,8 @@ func getPid(service string, project *project.Project) (int, error) {
return 0, err
}
info, err := client.InspectContainer(id)
if err != nil || info == nil {
info, err := client.ContainerInspect(context.Background(), id)
if err != nil || info.ID == "" {
return 0, err
}
@ -198,71 +256,3 @@ func getPid(service string, project *project.Project) (int, error) {
return 0, nil
}
func main(cfg *config.CloudConfig) error {
os.Unsetenv("_LIBCONTAINER_INITPIPE")
os.Unsetenv("_LIBCONTAINER_INITPID")
if err := system.ParentDeathSignal(syscall.SIGKILL).Set(); err != nil {
return err
}
if err := os.Remove("/var/run/docker.pid"); err != nil && !os.IsNotExist(err) {
return err
}
dockerCfg := cfg.Rancher.Docker
args := dockerCfg.FullArgs()
log.Debugf("User Docker args: %v", args)
if dockerCfg.TLS {
log.Debug("Generating TLS certs if needed")
if err := control.Generate(true, "/etc/docker/tls", []string{"127.0.0.1", "*", "*.*", "*.*.*", "*.*.*.*"}); err != nil {
return err
}
}
prog := findProgram("docker-init", "dockerlaunch", "docker")
if strings.Contains(prog, "dockerlaunch") {
args = append([]string{prog, "docker"}, args...)
} else {
args = append([]string{prog}, args...)
}
log.Infof("Running %v", args)
return syscall.Exec(args[0], args, dockerCfg.AppendEnv())
}
func switchCgroup(src, target int) error {
cgroupFile := fmt.Sprintf("/proc/%d/cgroup", target)
f, err := os.Open(cgroupFile)
if err != nil {
return err
}
defer f.Close()
targetCgroups := map[string]string{}
s := bufio.NewScanner(f)
for s.Scan() {
text := s.Text()
parts := strings.Split(text, ":")
subparts := strings.Split(parts[1], "=")
subsystem := subparts[0]
if len(subparts) > 1 {
subsystem = subparts[1]
}
targetPath := fmt.Sprintf("/host/sys/fs/cgroup/%s%s", subsystem, parts[2])
log.Infof("Moving Docker to cgroup %s", targetPath)
targetCgroups[subsystem] = targetPath
}
if err := s.Err(); err != nil {
return err
}
return cgroups.EnterPid(targetCgroups, src)
}
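
switchCgroup derives each target cgroup path from /proc/<pid>/cgroup, whose lines look like hierarchy:subsystem:path; the subsystem field may carry a name= prefix, which the subparts handling strips. A worked parse of one hypothetical line using the same splitting rules:

package main

import (
	"fmt"
	"strings"
)

func main() {
	line := "1:name=systemd:/docker/abc123" // hypothetical /proc/<pid>/cgroup entry
	parts := strings.Split(line, ":")       // ["1", "name=systemd", "/docker/abc123"]
	subparts := strings.Split(parts[1], "=")
	subsystem := subparts[0]
	if len(subparts) > 1 {
		subsystem = subparts[1] // named hierarchies keep only the name
	}
	fmt.Println("/host/sys/fs/cgroup/" + subsystem + parts[2])
	// Output: /host/sys/fs/cgroup/systemd/docker/abc123
}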

View File

@ -1,23 +0,0 @@
package waitfornetwork
import (
"github.com/rancher/os/cmd/network"
"os"
"os/signal"
"syscall"
)
func handleTerm() {
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGTERM)
<-c
os.Exit(0)
}
func Main() {
go handleTerm()
if _, err := os.Stat(network.NETWORK_DONE); err == nil {
os.Exit(0)
}
select {}
}

View File

@ -3,27 +3,29 @@ package compose
import (
"fmt"
"golang.org/x/net/context"
log "github.com/Sirupsen/logrus"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"github.com/docker/libcompose/cli/logger"
composeConfig "github.com/docker/libcompose/config"
"github.com/docker/libcompose/docker"
composeClient "github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/project"
"github.com/docker/libcompose/project/events"
"github.com/docker/libcompose/project/options"
"github.com/rancher/os/config"
rosDocker "github.com/rancher/os/docker"
"github.com/rancher/os/util"
"github.com/rancher/os/util/network"
)
func CreateService(cfg *config.CloudConfig, name string, serviceConfig *project.ServiceConfig) (project.Service, error) {
func CreateService(cfg *config.CloudConfig, name string, serviceConfig *composeConfig.ServiceConfigV1) (project.Service, error) {
if cfg == nil {
var err error
cfg, err = config.LoadConfig()
if err != nil {
return nil, err
}
cfg = config.LoadConfig()
}
p, err := CreateServiceSet("once", cfg, map[string]*project.ServiceConfig{
p, err := CreateServiceSet("once", cfg, map[string]*composeConfig.ServiceConfigV1{
name: serviceConfig,
})
if err != nil {
@ -33,8 +35,8 @@ func CreateService(cfg *config.CloudConfig, name string, serviceConfig *project.
return p.CreateService(name)
}
func CreateServiceSet(name string, cfg *config.CloudConfig, configs map[string]*project.ServiceConfig) (*project.Project, error) {
p, err := newProject(name, cfg, nil)
func CreateServiceSet(name string, cfg *config.CloudConfig, configs map[string]*composeConfig.ServiceConfigV1) (*project.Project, error) {
p, err := newProject(name, cfg, nil, nil)
if err != nil {
return nil, err
}
@ -44,21 +46,22 @@ func CreateServiceSet(name string, cfg *config.CloudConfig, configs map[string]*
return p, nil
}
func RunServiceSet(name string, cfg *config.CloudConfig, configs map[string]*project.ServiceConfig) (*project.Project, error) {
func RunServiceSet(name string, cfg *config.CloudConfig, configs map[string]*composeConfig.ServiceConfigV1) (*project.Project, error) {
p, err := CreateServiceSet(name, cfg, configs)
if err != nil {
return nil, err
}
return p, p.Up()
return p, p.Up(context.Background(), options.Up{
Log: cfg.Rancher.Log,
})
}
func GetProject(cfg *config.CloudConfig, networkingAvailable bool) (*project.Project, error) {
return newCoreServiceProject(cfg, networkingAvailable)
func GetProject(cfg *config.CloudConfig, networkingAvailable, loadConsole bool) (*project.Project, error) {
return newCoreServiceProject(cfg, networkingAvailable, loadConsole)
}
func newProject(name string, cfg *config.CloudConfig, environmentLookup project.EnvironmentLookup) (*project.Project, error) {
clientFactory, err := rosDocker.NewClientFactory(docker.ClientOpts{})
func newProject(name string, cfg *config.CloudConfig, environmentLookup composeConfig.EnvironmentLookup, authLookup *rosDocker.ConfigAuthLookup) (*project.Project, error) {
clientFactory, err := rosDocker.NewClientFactory(composeClient.Options{})
if err != nil {
return nil, err
}
@ -66,31 +69,89 @@ func newProject(name string, cfg *config.CloudConfig, environmentLookup project.
if environmentLookup == nil {
environmentLookup = rosDocker.NewConfigEnvironment(cfg)
}
if authLookup == nil {
authLookup = rosDocker.NewConfigAuthLookup(cfg)
}
serviceFactory := &rosDocker.ServiceFactory{
Deps: map[string][]string{},
}
context := &docker.Context{
ClientFactory: clientFactory,
AuthLookup: authLookup,
Context: project.Context{
ProjectName: name,
NoRecreate: true, // keep libcompose from recreating containers on project reload, which would loop the boot :)
EnvironmentLookup: environmentLookup,
ServiceFactory: serviceFactory,
Log: cfg.Rancher.Log,
LoggerFactory: logger.NewColorLoggerFactory(),
},
}
serviceFactory.Context = context
return docker.NewProject(context)
authLookup.SetContext(context)
return docker.NewProject(context, &composeConfig.ParseOptions{
Interpolate: true,
Validate: false,
Preprocess: preprocessServiceMap,
})
}
func addServices(p *project.Project, enabled map[interface{}]interface{}, configs map[string]*project.ServiceConfig) map[interface{}]interface{} {
func preprocessServiceMap(serviceMap composeConfig.RawServiceMap) (composeConfig.RawServiceMap, error) {
newServiceMap := make(composeConfig.RawServiceMap)
for k, v := range serviceMap {
newServiceMap[k] = make(composeConfig.RawService)
for k2, v2 := range v {
if k2 == "environment" || k2 == "labels" {
newServiceMap[k][k2] = preprocess(v2, true)
} else {
newServiceMap[k][k2] = preprocess(v2, false)
}
}
}
return newServiceMap, nil
}
func preprocess(item interface{}, replaceTypes bool) interface{} {
switch typedDatas := item.(type) {
case map[interface{}]interface{}:
newMap := make(map[interface{}]interface{})
for key, value := range typedDatas {
newMap[key] = preprocess(value, replaceTypes)
}
return newMap
case []interface{}:
// newArray := make([]interface{}, 0) will cause golint to complain
var newArray []interface{}
newArray = make([]interface{}, 0)
for _, value := range typedDatas {
newArray = append(newArray, preprocess(value, replaceTypes))
}
return newArray
default:
if replaceTypes {
return fmt.Sprint(item)
}
return item
}
}
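
preprocess recurses through the raw service map and, only when replaceTypes is set (i.e. under the environment and labels keys), coerces scalar values to strings so YAML booleans and numbers survive libcompose's stricter validation. A small illustration, assuming the preprocess function above is in scope:

labels := map[interface{}]interface{}{
	"some-bool": true,
	"some-num":  42,
}
fmt.Printf("%#v\n", preprocess(labels, true))
// both values come back as the strings "true" and "42"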
func addServices(p *project.Project, enabled map[interface{}]interface{}, configs map[string]*composeConfig.ServiceConfigV1) map[interface{}]interface{} {
serviceConfigsV2, _ := composeConfig.ConvertServices(configs)
// Note: we ignore errors while loading services
unchanged := true
for name, serviceConfig := range configs {
hash := project.GetServiceHash(name, serviceConfig)
for name, serviceConfig := range serviceConfigsV2 {
hash := composeConfig.GetServiceHash(name, serviceConfig)
if enabled[name] == hash {
continue
@ -123,71 +184,24 @@ func adjustContainerNames(m map[interface{}]interface{}) map[interface{}]interfa
return m
}
func newCoreServiceProject(cfg *config.CloudConfig, useNetwork bool) (*project.Project, error) {
projectEvents := make(chan project.Event)
enabled := map[interface{}]interface{}{}
func newCoreServiceProject(cfg *config.CloudConfig, useNetwork, loadConsole bool) (*project.Project, error) {
environmentLookup := rosDocker.NewConfigEnvironment(cfg)
authLookup := rosDocker.NewConfigAuthLookup(cfg)
p, err := newProject("os", cfg, environmentLookup)
p, err := newProject("os", cfg, environmentLookup, authLookup)
if err != nil {
return nil, err
}
projectEvents := make(chan events.Event)
p.AddListener(project.NewDefaultListener(p))
p.AddListener(projectEvents)
p.ReloadCallback = func() error {
var err error
cfg, err = config.LoadConfig()
if err != nil {
return err
}
environmentLookup.SetConfig(cfg)
enabled = addServices(p, enabled, cfg.Rancher.Services)
for service, serviceEnabled := range cfg.Rancher.ServicesInclude {
if _, ok := enabled[service]; ok || !serviceEnabled {
continue
}
bytes, err := LoadServiceResource(service, useNetwork, cfg)
if err != nil {
if err == network.ErrNoNetwork {
log.Debugf("Can not load %s, networking not enabled", service)
} else {
log.Errorf("Failed to load %s : %v", service, err)
}
continue
}
m := map[interface{}]interface{}{}
if err := yaml.Unmarshal(bytes, &m); err != nil {
log.Errorf("Failed to parse YAML configuration: %s : %v", service, err)
continue
}
bytes, err = yaml.Marshal(adjustContainerNames(config.StringifyValues(m)))
if err != nil {
log.Errorf("Failed to marshal YAML configuration: %s : %v", service, err)
continue
}
err = p.Load(bytes)
if err != nil {
log.Errorf("Failed to load %s : %v", service, err)
continue
}
enabled[service] = service
}
return nil
}
p.ReloadCallback = projectReload(p, &useNetwork, loadConsole, environmentLookup, authLookup)
go func() {
for event := range projectEvents {
if event.EventType == project.EventContainerStarted && event.ServiceName == "ntp" {
if event.EventType == events.ContainerStarted && event.ServiceName == "ntp" {
useNetwork = true
}
}
@ -203,13 +217,13 @@ func newCoreServiceProject(cfg *config.CloudConfig, useNetwork bool) (*project.P
}
func StageServices(cfg *config.CloudConfig, services ...string) error {
p, err := newProject("stage-services", cfg, nil)
p, err := newProject("stage-services", cfg, nil, nil)
if err != nil {
return err
}
for _, service := range services {
bytes, err := LoadServiceResource(service, true, cfg)
bytes, err := network.LoadServiceResource(service, true, cfg)
if err != nil {
return fmt.Errorf("Failed to load %s : %v", service, err)
}
@ -219,29 +233,25 @@ func StageServices(cfg *config.CloudConfig, services ...string) error {
return fmt.Errorf("Failed to parse YAML configuration: %s : %v", service, err)
}
bytes, err = yaml.Marshal(config.StringifyValues(m))
bytes, err = yaml.Marshal(m)
if err != nil {
fmt.Errorf("Failed to marshal YAML configuration: %s : %v", service, err)
return fmt.Errorf("Failed to marshal YAML configuration: %s : %v", service, err)
}
err = p.Load(bytes)
if err != nil {
fmt.Errorf("Failed to load %s : %v", service, err)
return fmt.Errorf("Failed to load %s : %v", service, err)
}
}
// Reduce service configurations to just image and labels
for serviceName, serviceConfig := range p.Configs {
p.Configs[serviceName] = &project.ServiceConfig{
for _, serviceName := range p.ServiceConfigs.Keys() {
serviceConfig, _ := p.ServiceConfigs.Get(serviceName)
p.ServiceConfigs.Add(serviceName, &composeConfig.ServiceConfig{
Image: serviceConfig.Image,
Labels: serviceConfig.Labels,
})
}
}
return p.Pull()
}
func LoadServiceResource(name string, useNetwork bool, cfg *config.CloudConfig) ([]byte, error) {
return network.LoadResource(name, useNetwork, cfg.Rancher.Repositories.ToArray())
return p.Pull(context.Background())
}

74
compose/reload.go Normal file
View File

@ -0,0 +1,74 @@
package compose
import (
"fmt"
log "github.com/Sirupsen/logrus"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"github.com/docker/libcompose/project"
"github.com/rancher/os/config"
"github.com/rancher/os/docker"
"github.com/rancher/os/util/network"
)
func LoadService(p *project.Project, cfg *config.CloudConfig, useNetwork bool, service string) error {
bytes, err := network.LoadServiceResource(service, useNetwork, cfg)
if err != nil {
return err
}
m := map[interface{}]interface{}{}
if err = yaml.Unmarshal(bytes, &m); err != nil {
return fmt.Errorf("Failed to parse YAML configuration for %s: %v", service, err)
}
m = adjustContainerNames(m)
bytes, err = yaml.Marshal(m)
if err != nil {
return fmt.Errorf("Failed to marshal YAML configuration for %s: %v", service, err)
}
if err = p.Load(bytes); err != nil {
return fmt.Errorf("Failed to load %s: %v", service, err)
}
return nil
}
func projectReload(p *project.Project, useNetwork *bool, loadConsole bool, environmentLookup *docker.ConfigEnvironment, authLookup *docker.ConfigAuthLookup) func() error {
enabled := map[interface{}]interface{}{}
return func() error {
cfg := config.LoadConfig()
environmentLookup.SetConfig(cfg)
authLookup.SetConfig(cfg)
enabled = addServices(p, enabled, cfg.Rancher.Services)
for service, serviceEnabled := range cfg.Rancher.ServicesInclude {
if _, ok := enabled[service]; ok || !serviceEnabled {
continue
}
if err := LoadService(p, cfg, *useNetwork, service); err != nil {
if err != network.ErrNoNetwork {
log.Error(err)
}
continue
}
enabled[service] = service
}
if !loadConsole || cfg.Rancher.Console == "" || cfg.Rancher.Console == "default" {
return nil
}
if err := LoadService(p, cfg, *useNetwork, cfg.Rancher.Console); err != nil && err != network.ErrNoNetwork {
log.Error(err)
}
return nil
}
}
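
projectReload returns a closure so libcompose can re-resolve config every time the project reloads; the enabled map memoizes which service includes have already been loaded, and useNetwork is a pointer so the callback sees the network coming up. A minimal wiring sketch mirroring newCoreServiceProject; the surrounding project p and cfg are assumed to exist:

useNetwork := false
environmentLookup := docker.NewConfigEnvironment(cfg)
authLookup := docker.NewConfigAuthLookup(cfg)
p.ReloadCallback = projectReload(p, &useNetwork, true, environmentLookup, authLookup)
// Once networking comes up, flip the flag so the next reload can fetch remote includes:
useNetwork = true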

View File

@ -1,135 +1,37 @@
package config
import (
"fmt"
log "github.com/Sirupsen/logrus"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"github.com/rancher/os/util"
)
func (c *CloudConfig) Import(bytes []byte) (*CloudConfig, error) {
data, err := readConfig(bytes, false, CloudConfigPrivateFile)
func Merge(bytes []byte) error {
data, err := readConfigs(bytes, false, true)
if err != nil {
return c, err
return err
}
return NewConfig().Merge(data)
}
func (c *CloudConfig) MergeBytes(bytes []byte) (*CloudConfig, error) {
data, err := readConfig(bytes, false)
existing, err := readConfigs(nil, false, true, CloudConfigFile)
if err != nil {
return c, err
return err
}
return c.Merge(data)
return WriteToFile(util.Merge(existing, data), CloudConfigFile)
}
var keysToStringify = []string{
"command",
"dns",
"dns_search",
"entrypoint",
"env_file",
"environment",
"labels",
"links",
}
func isPathToStringify(path []interface{}) bool {
l := len(path)
if l == 0 {
return false
}
if sk, ok := path[l-1].(string); ok {
return util.Contains(keysToStringify, sk)
}
return false
}
func stringifyValue(data interface{}, path []interface{}) interface{} {
switch data := data.(type) {
case map[interface{}]interface{}:
result := make(map[interface{}]interface{}, len(data))
if isPathToStringify(path) {
for k, v := range data {
switch v := v.(type) {
case []interface{}:
result[k] = stringifyValue(v, append(path, k))
case map[interface{}]interface{}:
result[k] = stringifyValue(v, append(path, k))
default:
result[k] = fmt.Sprint(v)
}
}
} else {
for k, v := range data {
result[k] = stringifyValue(v, append(path, k))
}
}
return result
case []interface{}:
result := make([]interface{}, len(data))
if isPathToStringify(path) {
for k, v := range data {
result[k] = fmt.Sprint(v)
}
} else {
for k, v := range data {
result[k] = stringifyValue(v, append(path, k))
}
}
return result
default:
return data
}
}
func StringifyValues(data map[interface{}]interface{}) map[interface{}]interface{} {
return stringifyValue(data, nil).(map[interface{}]interface{})
}
func (c *CloudConfig) Merge(values map[interface{}]interface{}) (*CloudConfig, error) {
d := map[interface{}]interface{}{}
if err := util.Convert(c, &d); err != nil {
return c, err
}
r := util.MapsUnion(d, StringifyValues(values))
t := &CloudConfig{}
if err := util.Convert(r, t); err != nil {
return c, err
}
return t, nil
}
func Dump(private, full bool) (string, error) {
var cfg *CloudConfig
var err error
if full {
cfg, err = LoadConfig()
} else {
files := []string{CloudConfigBootFile, CloudConfigPrivateFile, CloudConfigFile}
func Export(private, full bool) (string, error) {
rawCfg := loadRawDiskConfig(full)
if !private {
files = util.FilterStrings(files, func(x string) bool { return x != CloudConfigPrivateFile })
}
cfg, err = ChainCfgFuncs(nil,
func(_ *CloudConfig) (*CloudConfig, error) { return ReadConfig(nil, true, files...) },
amendNils,
)
rawCfg = filterPrivateKeys(rawCfg)
}
if err != nil {
return "", err
}
bytes, err := yaml.Marshal(*cfg)
bytes, err := yaml.Marshal(rawCfg)
return string(bytes), err
}
func (c *CloudConfig) Get(key string) (interface{}, error) {
func Get(key string) (interface{}, error) {
cfg := LoadConfig()
data := map[interface{}]interface{}{}
if err := util.Convert(c, &data); err != nil {
if err := util.ConvertIgnoreOmitEmpty(cfg, &data); err != nil {
return nil, err
}
@ -137,58 +39,12 @@ func (c *CloudConfig) Get(key string) (interface{}, error) {
return v, nil
}
func (c *CloudConfig) GetIgnoreOmitEmpty(key string) (interface{}, error) {
data := map[interface{}]interface{}{}
if err := util.ConvertIgnoreOmitEmpty(c, &data); err != nil {
return nil, err
}
v, _ := getOrSetVal(key, data, nil)
return v, nil
}
func (c *CloudConfig) Set(key string, value interface{}) (map[interface{}]interface{}, error) {
data := map[interface{}]interface{}{}
_, data = getOrSetVal(key, data, value)
return data, nil
}
func (c *CloudConfig) Save(cfgDiffs ...map[interface{}]interface{}) error {
files := append([]string{OsConfigFile, OemConfigFile}, CloudConfigDirFiles()...)
files = util.FilterStrings(files, func(x string) bool { return x != CloudConfigPrivateFile })
exCfg, err := ChainCfgFuncs(nil,
func(_ *CloudConfig) (*CloudConfig, error) {
return ReadConfig(nil, true, files...)
},
readCmdline,
amendNils)
func Set(key string, value interface{}) error {
existing, err := readConfigs(nil, false, true, CloudConfigFile)
if err != nil {
return err
}
exCfg = mergeMetadata(exCfg, readMetadata())
exData := map[interface{}]interface{}{}
if err := util.Convert(exCfg, &exData); err != nil {
return err
}
data := map[interface{}]interface{}{}
if err := util.Convert(c, &data); err != nil {
return err
}
data = util.MapsDifference(data, exData)
// Apply any additional config diffs
for _, diff := range cfgDiffs {
data = util.MapsUnion(data, diff)
}
log.WithFields(log.Fields{"diff": data}).Debug("The diff we're about to save")
if err := saveToDisk(data); err != nil {
return err
}
return nil
_, modified := getOrSetVal(key, existing, value)
return WriteToFile(modified, CloudConfigFile)
}
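
The new package-level Get reads a dotted key out of the fully merged runtime config, while Set rewrites only the user-editable cloud-config file, merging the new value into what is already on disk. A hedged usage sketch:

// Hypothetical usage of the package-level helpers above:
if err := config.Set("rancher.log", true); err != nil {
	log.Fatal(err)
}
v, err := config.Get("rancher.log")
if err != nil {
	log.Fatal(err)
}
fmt.Println(v) // true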

View File

@ -1,13 +1,11 @@
package config
import (
"fmt"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"testing"
"github.com/rancher/os/util"
"github.com/stretchr/testify/require"
"strings"
)
func TestFilterKey(t *testing.T) {
@ -55,104 +53,6 @@ func TestFilterKey(t *testing.T) {
assert.Equal(expectedRest, rest)
}
func TestStringifyValues(t *testing.T) {
assert := require.New(t)
data := map[interface{}]interface{}{
"ssh_authorized_keys": []string{"pubk1", "pubk2"},
"hostname": "ros-test",
"rancher": map[interface{}]interface{}{
"services": map[interface{}]interface{}{
"my-service": map[interface{}]interface{}{
"command": []interface{}{"echo", 1, false, "nothing"},
"labels": map[interface{}]interface{}{
"some-bool": true,
"some-num": 42,
},
"dsa-pub": "dsa-test2",
},
},
"docker": map[interface{}]interface{}{
"ca_key": "ca_key-test3",
"ca_cert": "ca_cert-test4",
"args": []string{"args_test5"},
},
},
}
expected := map[interface{}]interface{}{
"ssh_authorized_keys": []string{"pubk1", "pubk2"},
"hostname": "ros-test",
"rancher": map[interface{}]interface{}{
"services": map[interface{}]interface{}{
"my-service": map[interface{}]interface{}{
"command": []interface{}{"echo", "1", "false", "nothing"},
"labels": map[interface{}]interface{}{
"some-bool": "true",
"some-num": "42",
},
"dsa-pub": "dsa-test2",
},
},
"docker": map[interface{}]interface{}{
"ca_key": "ca_key-test3",
"ca_cert": "ca_cert-test4",
"args": []string{"args_test5"},
},
},
}
assert.Equal(expected, StringifyValues(data))
}
func TestFilterDottedKeys(t *testing.T) {
assert := require.New(t)
data := map[interface{}]interface{}{
"ssh_authorized_keys": []string{"pubk1", "pubk2"},
"hostname": "ros-test",
"rancher": map[interface{}]interface{}{
"ssh": map[interface{}]interface{}{
"keys": map[interface{}]interface{}{
"dsa": "dsa-test1",
"dsa-pub": "dsa-test2",
},
},
"docker": map[interface{}]interface{}{
"ca_key": "ca_key-test3",
"ca_cert": "ca_cert-test4",
"args": []string{"args_test5"},
},
},
}
expectedFiltered := map[interface{}]interface{}{
"ssh_authorized_keys": []string{"pubk1", "pubk2"},
"rancher": map[interface{}]interface{}{
"ssh": map[interface{}]interface{}{
"keys": map[interface{}]interface{}{
"dsa": "dsa-test1",
"dsa-pub": "dsa-test2",
},
},
},
}
expectedRest := map[interface{}]interface{}{
"hostname": "ros-test",
"rancher": map[interface{}]interface{}{
"docker": map[interface{}]interface{}{
"ca_key": "ca_key-test3",
"ca_cert": "ca_cert-test4",
"args": []string{"args_test5"},
},
},
}
assert.Equal([]string{"rancher", "ssh"}, strings.Split("rancher.ssh", "."))
assert.Equal([]string{"ssh_authorized_keys"}, strings.Split("ssh_authorized_keys", "."))
filtered, rest := filterDottedKeys(data, []string{"ssh_authorized_keys", "rancher.ssh"})
assert.Equal(expectedFiltered, filtered)
assert.Equal(expectedRest, rest)
}
func TestUnmarshalOrReturnString(t *testing.T) {
assert := require.New(t)
@ -358,8 +258,6 @@ func TestUserDocker(t *testing.T) {
err = util.Convert(config, &data)
assert.Nil(err)
fmt.Println(data)
val, ok := data["rancher"].(map[interface{}]interface{})["docker"]
assert.True(ok)

View File

@ -4,8 +4,9 @@ import (
log "github.com/Sirupsen/logrus"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"github.com/rancher/os/util"
"strings"
"github.com/rancher/os/util"
)
type CfgFunc func(*CloudConfig) (*CloudConfig, error)
@ -58,17 +59,12 @@ func filterKey(data map[interface{}]interface{}, key []string) (filtered, rest m
return
}
func filterDottedKeys(data map[interface{}]interface{}, keys []string) (filtered, rest map[interface{}]interface{}) {
filtered = map[interface{}]interface{}{}
rest = util.MapCopy(data)
for _, key := range keys {
f, r := filterKey(data, strings.Split(key, "."))
filtered = util.MapsUnion(filtered, f)
rest = util.MapsIntersection(rest, r)
func filterPrivateKeys(data map[interface{}]interface{}) map[interface{}]interface{} {
for _, privateKey := range PrivateKeys {
_, data = filterKey(data, strings.Split(privateKey, "."))
}
return
return data
}
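
filterPrivateKeys drops every dotted path listed in PrivateKeys (rancher.ssh plus the Docker CA/server key material) from the raw map before it is exported. For instance, with hypothetical data:

raw := map[interface{}]interface{}{
	"hostname": "ros-test",
	"rancher": map[interface{}]interface{}{
		"ssh":    map[interface{}]interface{}{"keys": map[interface{}]interface{}{"dsa": "..."}},
		"docker": map[interface{}]interface{}{"tls": true},
	},
}
raw = filterPrivateKeys(raw)
// raw no longer contains rancher.ssh; hostname and rancher.docker.tls survive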
func getOrSetVal(args string, data map[interface{}]interface{}, value interface{}) (interface{}, map[interface{}]interface{}) {

View File

@ -11,70 +11,60 @@ import (
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"github.com/coreos/coreos-cloudinit/datasource"
"github.com/coreos/coreos-cloudinit/initialize"
"github.com/docker/libcompose/project"
"github.com/docker/engine-api/types"
composeConfig "github.com/docker/libcompose/config"
"github.com/rancher/os/util"
)
var osConfig *CloudConfig
func NewConfig() *CloudConfig {
if osConfig == nil {
osConfig, _ = ReadConfig(nil, true, OsConfigFile, OemConfigFile)
}
newCfg := *osConfig
return &newCfg
}
func ReadConfig(bytes []byte, substituteMetadataVars bool, files ...string) (*CloudConfig, error) {
if data, err := readConfig(bytes, substituteMetadataVars, files...); err == nil {
data, err := readConfigs(bytes, substituteMetadataVars, true, files...)
if err != nil {
return nil, err
}
c := &CloudConfig{}
if err := util.Convert(data, c); err != nil {
return nil, err
}
c, _ = amendNils(c)
c, _ = amendContainerNames(c)
c = amendNils(c)
c = amendContainerNames(c)
return c, nil
} else {
return nil, err
}
}
func LoadConfig() (*CloudConfig, error) {
cfg, err := ChainCfgFuncs(NewConfig(),
readFilesAndMetadata,
readCmdline,
amendNils,
amendContainerNames)
if err != nil {
log.WithFields(log.Fields{"cfg": cfg, "err": err}).Error("Failed to load config")
return nil, err
func loadRawDiskConfig(full bool) map[interface{}]interface{} {
var rawCfg map[interface{}]interface{}
if full {
rawCfg, _ = readConfigs(nil, true, false, OsConfigFile, OemConfigFile)
}
log.Debug("Merging cloud-config from meta-data and user-data")
cfg = mergeMetadata(cfg, readMetadata())
files := append(CloudConfigDirFiles(), CloudConfigFile)
additionalCfgs, _ := readConfigs(nil, true, false, files...)
if cfg.Rancher.Debug {
log.SetLevel(log.DebugLevel)
if !util.Contains(cfg.Rancher.Docker.Args, "-D") {
cfg.Rancher.Docker.Args = append(cfg.Rancher.Docker.Args, "-D")
}
if !util.Contains(cfg.Rancher.SystemDocker.Args, "-D") {
cfg.Rancher.SystemDocker.Args = append(cfg.Rancher.SystemDocker.Args, "-D")
}
} else {
if util.Contains(cfg.Rancher.Docker.Args, "-D") {
cfg.Rancher.Docker.Args = util.FilterStrings(cfg.Rancher.Docker.Args, func(x string) bool { return x != "-D" })
}
if util.Contains(cfg.Rancher.SystemDocker.Args, "-D") {
cfg.Rancher.SystemDocker.Args = util.FilterStrings(cfg.Rancher.SystemDocker.Args, func(x string) bool { return x != "-D" })
}
}
return util.Merge(rawCfg, additionalCfgs)
}
return cfg, nil
func loadRawConfig() map[interface{}]interface{} {
rawCfg := loadRawDiskConfig(true)
rawCfg = util.Merge(rawCfg, readCmdline())
rawCfg = applyDebugFlags(rawCfg)
return mergeMetadata(rawCfg, readMetadata())
}
func LoadConfig() *CloudConfig {
rawCfg := loadRawConfig()
cfg := &CloudConfig{}
if err := util.Convert(rawCfg, cfg); err != nil {
log.Errorf("Failed to parse configuration: %s", err)
return &CloudConfig{}
}
cfg = amendNils(cfg)
cfg = amendContainerNames(cfg)
return cfg
}
func CloudConfigDirFiles() []string {
files, err := util.DirLs(CloudConfigDir)
files, err := ioutil.ReadDir(CloudConfigDir)
if err != nil {
if os.IsNotExist(err) {
// do nothing
@ -85,36 +75,56 @@ func CloudConfigDirFiles() []string {
return []string{}
}
files = util.Filter(files, func(x interface{}) bool {
f := x.(os.FileInfo)
if f.IsDir() || strings.HasPrefix(f.Name(), ".") {
return false
var finalFiles []string
for _, file := range files {
if !file.IsDir() && !strings.HasPrefix(file.Name(), ".") {
finalFiles = append(finalFiles, path.Join(CloudConfigDir, file.Name()))
}
}
return true
})
return util.ToStrings(util.Map(files, func(x interface{}) interface{} {
return path.Join(CloudConfigDir, x.(os.FileInfo).Name())
}))
return finalFiles
}
func applyDebugFlags(rawCfg map[interface{}]interface{}) map[interface{}]interface{} {
cfg := &CloudConfig{}
if err := util.Convert(rawCfg, cfg); err != nil {
return rawCfg
}
if cfg.Rancher.Debug {
log.SetLevel(log.DebugLevel)
if !util.Contains(cfg.Rancher.Docker.Args, "-D") {
cfg.Rancher.Docker.Args = append(cfg.Rancher.Docker.Args, "-D")
}
if !util.Contains(cfg.Rancher.SystemDocker.Args, "-D") {
cfg.Rancher.SystemDocker.Args = append(cfg.Rancher.SystemDocker.Args, "-D")
}
}
_, rawCfg = getOrSetVal("rancher.docker.args", rawCfg, cfg.Rancher.Docker.Args)
_, rawCfg = getOrSetVal("rancher.system_docker.args", rawCfg, cfg.Rancher.SystemDocker.Args)
return rawCfg
}
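
applyDebugFlags round-trips the raw map through the typed CloudConfig so a rancher.debug: true setting injects -D into both daemons' argument lists, then writes those lists back with dotted-key setters. The write path in isolation, with hypothetical values (getOrSetVal is defined elsewhere in this package):

raw := map[interface{}]interface{}{}
_, raw = getOrSetVal("rancher.docker.args", raw, []string{"daemon", "-D"})
// raw == map[rancher:map[docker:map[args:[daemon -D]]]]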
// mergeMetadata merges certain options from md (meta-data from the datasource)
// onto cc (a CloudConfig derived from user-data), if they are not already set
// on cc (i.e. user-data always takes precedence)
func mergeMetadata(cc *CloudConfig, md datasource.Metadata) *CloudConfig {
if cc == nil {
return cc
func mergeMetadata(rawCfg map[interface{}]interface{}, md datasource.Metadata) map[interface{}]interface{} {
if rawCfg == nil {
return nil
}
out := util.MapCopy(rawCfg)
outHostname, ok := out["hostname"]
if !ok {
outHostname = ""
}
out := cc
dirty := false
if md.Hostname != "" {
if out.Hostname != "" {
log.Debugf("Warning: user-data hostname (%s) overrides metadata hostname (%s)\n", out.Hostname, md.Hostname)
if outHostname != "" {
log.Debugf("Warning: user-data hostname (%s) overrides metadata hostname (%s)\n", outHostname, md.Hostname)
} else {
out = &(*cc)
dirty = true
out.Hostname = md.Hostname
out["hostname"] = md.Hostname
}
}
@ -126,14 +136,18 @@ func mergeMetadata(cc *CloudConfig, md datasource.Metadata) *CloudConfig {
sort.Sort(sort.StringSlice(keys))
currentKeys, ok := out["ssh_authorized_keys"]
if !ok {
return out
}
finalKeys := currentKeys.([]interface{})
for _, k := range keys {
if !dirty {
out = &(*cc)
dirty = true
}
out.SSHAuthorizedKeys = append(out.SSHAuthorizedKeys, md.SSHPublicKeys[k])
finalKeys = append(finalKeys, md.SSHPublicKeys[k])
}
out["ssh_authorized_keys"] = finalKeys
return out
}
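
mergeMetadata fills hostname and appends datasource SSH keys only where user-data left them unset, so user-data always takes precedence. A small illustration with hypothetical values:

// Hypothetical inputs for mergeMetadata above:
raw := map[interface{}]interface{}{
	"hostname":            "", // user-data left it unset
	"ssh_authorized_keys": []interface{}{"user-key"},
}
md := datasource.Metadata{
	Hostname:      "md-host",
	SSHPublicKeys: map[string]string{"0": "md-key"},
}
out := mergeMetadata(raw, md)
// out["hostname"] == "md-host"
// out["ssh_authorized_keys"] == [user-key md-key]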
@ -145,68 +159,50 @@ func readMetadata() datasource.Metadata {
return metadata
}
func readFilesAndMetadata(c *CloudConfig) (*CloudConfig, error) {
files := append(CloudConfigDirFiles(), CloudConfigFile)
data, err := readConfig(nil, true, files...)
if err != nil {
log.WithFields(log.Fields{"err": err, "files": files}).Error("Error reading config files")
return c, err
}
t, err := c.Merge(data)
if err != nil {
log.WithFields(log.Fields{"cfg": c, "data": data, "err": err}).Error("Error merging config data")
return c, err
}
return t, nil
}
func readCmdline(c *CloudConfig) (*CloudConfig, error) {
func readCmdline() map[interface{}]interface{} {
log.Debug("Reading config cmdline")
cmdLine, err := ioutil.ReadFile("/proc/cmdline")
if err != nil {
log.WithFields(log.Fields{"err": err}).Error("Failed to read kernel params")
return c, err
return nil
}
if len(cmdLine) == 0 {
return c, nil
return nil
}
log.Debugf("Config cmdline %s", cmdLine)
cmdLineObj := parseCmdline(strings.TrimSpace(string(cmdLine)))
t, err := c.Merge(cmdLineObj)
if err != nil {
log.WithFields(log.Fields{"cfg": c, "cmdLine": cmdLine, "data": cmdLineObj, "err": err}).Warn("Error adding kernel params to config")
}
return t, nil
return cmdLineObj
}
func amendNils(c *CloudConfig) (*CloudConfig, error) {
func amendNils(c *CloudConfig) *CloudConfig {
t := *c
if t.Rancher.Environment == nil {
t.Rancher.Environment = map[string]string{}
}
if t.Rancher.Autoformat == nil {
t.Rancher.Autoformat = map[string]*project.ServiceConfig{}
t.Rancher.Autoformat = map[string]*composeConfig.ServiceConfigV1{}
}
if t.Rancher.BootstrapContainers == nil {
t.Rancher.BootstrapContainers = map[string]*project.ServiceConfig{}
t.Rancher.BootstrapContainers = map[string]*composeConfig.ServiceConfigV1{}
}
if t.Rancher.Services == nil {
t.Rancher.Services = map[string]*project.ServiceConfig{}
t.Rancher.Services = map[string]*composeConfig.ServiceConfigV1{}
}
if t.Rancher.ServicesInclude == nil {
t.Rancher.ServicesInclude = map[string]bool{}
}
return &t, nil
if t.Rancher.RegistryAuths == nil {
t.Rancher.RegistryAuths = map[string]types.AuthConfig{}
}
return &t
}
func amendContainerNames(c *CloudConfig) (*CloudConfig, error) {
for _, scm := range []map[string]*project.ServiceConfig{
func amendContainerNames(c *CloudConfig) *CloudConfig {
for _, scm := range []map[string]*composeConfig.ServiceConfigV1{
c.Rancher.Autoformat,
c.Rancher.BootstrapContainers,
c.Rancher.Services,
@ -215,7 +211,7 @@ func amendContainerNames(c *CloudConfig) (*CloudConfig, error) {
v.ContainerName = k
}
}
return c, nil
return c
}
func WriteToFile(data interface{}, filename string) error {
@ -224,27 +220,10 @@ func WriteToFile(data interface{}, filename string) error {
return err
}
return ioutil.WriteFile(filename, content, 0400)
return util.WriteFileAtomic(filename, content, 0400)
}
func saveToDisk(data map[interface{}]interface{}) error {
private, config := filterDottedKeys(data, []string{
"rancher.ssh",
"rancher.docker.ca_key",
"rancher.docker.ca_cert",
"rancher.docker.server_key",
"rancher.docker.server_cert",
})
err := WriteToFile(config, CloudConfigFile)
if err != nil {
return err
}
return WriteToFile(private, CloudConfigPrivateFile)
}
func readConfig(bytes []byte, substituteMetadataVars bool, files ...string) (map[interface{}]interface{}, error) {
func readConfigs(bytes []byte, substituteMetadataVars, returnErr bool, files ...string) (map[interface{}]interface{}, error) {
// You can't just overlay yaml bytes onto maps; it won't merge, but will
// instead just override the keys and not merge the map values.
left := make(map[interface{}]interface{})
@ -252,8 +231,12 @@ func readConfig(bytes []byte, substituteMetadataVars bool, files ...string) (map
for _, file := range files {
content, err := readConfigFile(file)
if err != nil {
if returnErr {
return nil, err
}
log.Errorf("Failed to read config file %s: %s", file, err)
continue
}
if len(content) == 0 {
continue
}
@ -264,24 +247,53 @@ func readConfig(bytes []byte, substituteMetadataVars bool, files ...string) (map
right := make(map[interface{}]interface{})
err = yaml.Unmarshal(content, &right)
if err != nil {
if returnErr {
return nil, err
}
left = util.MapsUnion(left, right)
log.Errorf("Failed to parse config file %s: %s", file, err)
continue
}
// Verify there are no issues converting to CloudConfig
c := &CloudConfig{}
if err := util.Convert(right, c); err != nil {
if returnErr {
return nil, err
}
log.Errorf("Failed to parse config file %s: %s", file, err)
continue
}
left = util.Merge(left, right)
}
if bytes == nil || len(bytes) == 0 {
return left, nil
}
if bytes != nil && len(bytes) > 0 {
right := make(map[interface{}]interface{})
if substituteMetadataVars {
bytes = substituteVars(bytes, metadata)
}
if err := yaml.Unmarshal(bytes, &right); err != nil {
if returnErr {
return nil, err
}
left = util.MapsUnion(left, right)
log.Errorf("Failed to parse bytes: %s", err)
return left, nil
}
c := &CloudConfig{}
if err := util.Convert(right, c); err != nil {
if returnErr {
return nil, err
}
log.Errorf("Failed to parse bytes: %s", err)
return left, nil
}
left = util.Merge(left, right)
return left, nil
}

View File

@ -1,10 +1,12 @@
package config
import (
"github.com/coreos/coreos-cloudinit/config"
"github.com/docker/libcompose/project"
"github.com/rancher/netconf"
"runtime"
"github.com/coreos/coreos-cloudinit/config"
"github.com/docker/engine-api/types"
composeConfig "github.com/docker/libcompose/config"
"github.com/rancher/netconf"
)
const (
@ -21,6 +23,7 @@ const (
MODULES_ARCHIVE = "/modules.tar"
DEBUG = false
SYSTEM_DOCKER_LOG = "/var/log/system-docker.log"
SYSTEM_DOCKER_BIN = "/usr/bin/system-docker"
LABEL = "label"
HASH = "io.rancher.os.hash"
@ -28,6 +31,7 @@ const (
DETACH = "io.rancher.os.detach"
CREATE_ONLY = "io.rancher.os.createonly"
RELOAD_CONFIG = "io.rancher.os.reloadconfig"
CONSOLE = "io.rancher.os.console"
SCOPE = "io.rancher.os.scope"
REBUILD = "io.docker.compose.rebuild"
SYSTEM = "system"
@ -35,7 +39,6 @@ const (
OsConfigFile = "/usr/share/ros/os-config.yml"
CloudConfigDir = "/var/lib/rancher/conf/cloud-config.d"
CloudConfigBootFile = "/var/lib/rancher/conf/cloud-config.d/boot.yml"
CloudConfigPrivateFile = "/var/lib/rancher/conf/cloud-config.d/private.yml"
CloudConfigNetworkFile = "/var/lib/rancher/conf/cloud-config.d/network.yml"
CloudConfigScriptFile = "/var/lib/rancher/conf/cloud-config-script"
MetaDataFile = "/var/lib/rancher/conf/metadata"
@ -47,6 +50,13 @@ var (
VERSION string
ARCH string
SUFFIX string
PrivateKeys = []string{
"rancher.ssh",
"rancher.docker.ca_key",
"rancher.docker.ca_cert",
"rancher.docker.server_key",
"rancher.docker.server_cert",
}
)
func init() {
@ -71,16 +81,16 @@ type CloudConfig struct {
SSHAuthorizedKeys []string `yaml:"ssh_authorized_keys"`
WriteFiles []config.File `yaml:"write_files"`
Hostname string `yaml:"hostname"`
DefaultHostname string `yaml:"default_hostname"`
Rancher RancherConfig `yaml:"rancher,omitempty"`
}
type RancherConfig struct {
Console string `yaml:"console,omitempty"`
Environment map[string]string `yaml:"environment,omitempty"`
Services map[string]*project.ServiceConfig `yaml:"services,omitempty"`
BootstrapContainers map[string]*project.ServiceConfig `yaml:"bootstrap,omitempty"`
Autoformat map[string]*project.ServiceConfig `yaml:"autoformat,omitempty"`
Services map[string]*composeConfig.ServiceConfigV1 `yaml:"services,omitempty"`
BootstrapContainers map[string]*composeConfig.ServiceConfigV1 `yaml:"bootstrap,omitempty"`
Autoformat map[string]*composeConfig.ServiceConfigV1 `yaml:"autoformat,omitempty"`
BootstrapDocker DockerConfig `yaml:"bootstrap_docker,omitempty"`
CloudInit CloudInit `yaml:"cloud_init,omitempty"`
Debug bool `yaml:"debug,omitempty"`
@ -98,6 +108,9 @@ type RancherConfig struct {
SystemDocker DockerConfig `yaml:"system_docker,omitempty"`
Upgrade UpgradeConfig `yaml:"upgrade,omitempty"`
Docker DockerConfig `yaml:"docker,omitempty"`
RegistryAuths map[string]types.AuthConfig `yaml:"registry_auths,omitempty"`
Defaults Defaults `yaml:"defaults,omitempty"`
ResizeDevice string `yaml:"resize_device,omitempty"`
}
type UpgradeConfig struct {
@ -141,6 +154,11 @@ type CloudInit struct {
Datasources []string `yaml:"datasources,omitempty"`
}
type Defaults struct {
Hostname string `yaml:"hostname,omitempty"`
Network netconf.NetworkConfig `yaml:"network,omitempty"`
}
func (r Repositories) ToArray() []string {
result := make([]string, 0, len(r))
for _, repo := range r {

82
docker/auth.go Normal file
View File

@ -0,0 +1,82 @@
package docker
import (
"encoding/base64"
"fmt"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/docker/docker/registry"
"github.com/docker/engine-api/types"
"github.com/docker/libcompose/docker"
"github.com/rancher/os/config"
)
// ConfigAuthLookup will lookup registry auth info from cloud config
// if a context is set, it will also lookup auth info from the Docker config file
type ConfigAuthLookup struct {
cfg *config.CloudConfig
context *docker.Context
dockerConfigAuthLookup *docker.ConfigAuthLookup
}
func NewConfigAuthLookup(cfg *config.CloudConfig) *ConfigAuthLookup {
return &ConfigAuthLookup{
cfg: cfg,
}
}
func populateRemaining(authConfig *types.AuthConfig) error {
if authConfig.Auth == "" {
return nil
}
decoded, err := base64.URLEncoding.DecodeString(authConfig.Auth)
if err != nil {
return err
}
decodedSplit := strings.Split(string(decoded), ":")
if len(decodedSplit) != 2 {
return fmt.Errorf("Invalid auth: %s", authConfig.Auth)
}
authConfig.Username = decodedSplit[0]
authConfig.Password = decodedSplit[1]
return nil
}
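
populateRemaining recovers Username and Password from the combined auth field, which is base64("user:password"), so cloud config can supply either form. For example, with a hypothetical credential run through the function above:

auth := types.AuthConfig{
	Auth: base64.URLEncoding.EncodeToString([]byte("alice:secret")),
}
if err := populateRemaining(&auth); err != nil {
	log.Fatal(err)
}
fmt.Println(auth.Username, auth.Password) // alice secret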
func (c *ConfigAuthLookup) SetConfig(cfg *config.CloudConfig) {
c.cfg = cfg
}
func (c *ConfigAuthLookup) SetContext(context *docker.Context) {
c.context = context
c.dockerConfigAuthLookup = docker.NewConfigAuthLookup(context)
}
func (c *ConfigAuthLookup) Lookup(repoInfo *registry.RepositoryInfo) types.AuthConfig {
if repoInfo == nil || repoInfo.Index == nil {
return types.AuthConfig{}
}
authConfig := registry.ResolveAuthConfig(c.All(), repoInfo.Index)
err := populateRemaining(&authConfig)
if err != nil {
log.Error(err)
return types.AuthConfig{}
}
return authConfig
}
func (c *ConfigAuthLookup) All() map[string]types.AuthConfig {
registryAuths := c.cfg.Rancher.RegistryAuths
if c.dockerConfigAuthLookup != nil {
for registry, authConfig := range c.dockerConfigAuthLookup.All() {
registryAuths[registry] = authConfig
}
}
return registryAuths
}

View File

@ -1,26 +1,27 @@
package docker
import (
dockerClient "github.com/fsouza/go-dockerclient"
dockerClient "github.com/docker/engine-api/client"
"github.com/rancher/os/config"
"golang.org/x/net/context"
)
func NewSystemClient() (*dockerClient.Client, error) {
func NewSystemClient() (dockerClient.APIClient, error) {
return NewClient(config.DOCKER_SYSTEM_HOST)
}
func NewDefaultClient() (*dockerClient.Client, error) {
func NewDefaultClient() (dockerClient.APIClient, error) {
return NewClient(config.DOCKER_HOST)
}
func NewClient(endpoint string) (*dockerClient.Client, error) {
client, err := dockerClient.NewClient(endpoint)
func NewClient(endpoint string) (dockerClient.APIClient, error) {
client, err := dockerClient.NewClient(endpoint, "", nil, nil)
if err != nil {
return nil, err
}
err = ClientOK(endpoint, func() bool {
_, err := client.Info()
_, err := client.Info(context.Background())
return err == nil
})

View File

@ -4,34 +4,36 @@ import (
"fmt"
"sync"
"golang.org/x/net/context"
log "github.com/Sirupsen/logrus"
"github.com/docker/libcompose/docker"
dockerclient "github.com/docker/engine-api/client"
composeClient "github.com/docker/libcompose/docker/client"
"github.com/docker/libcompose/project"
dockerclient "github.com/fsouza/go-dockerclient"
"github.com/rancher/os/config"
"github.com/rancher/os/util"
)
type ClientFactory struct {
userClient *dockerclient.Client
systemClient *dockerclient.Client
userClient dockerclient.APIClient
systemClient dockerclient.APIClient
userOnce sync.Once
systemOnce sync.Once
}
func NewClientFactory(opts docker.ClientOpts) (docker.ClientFactory, error) {
func NewClientFactory(opts composeClient.Options) (project.ClientFactory, error) {
userOpts := opts
systemOpts := opts
userOpts.Host = config.DOCKER_HOST
systemOpts.Host = config.DOCKER_SYSTEM_HOST
userClient, err := docker.CreateClient(userOpts)
userClient, err := composeClient.Create(userOpts)
if err != nil {
return nil, err
}
systemClient, err := docker.CreateClient(systemOpts)
systemClient, err := composeClient.Create(systemOpts)
if err != nil {
return nil, err
}
@ -42,7 +44,7 @@ func NewClientFactory(opts docker.ClientOpts) (docker.ClientFactory, error) {
}, nil
}
func (c *ClientFactory) Create(service project.Service) *dockerclient.Client {
func (c *ClientFactory) Create(service project.Service) dockerclient.APIClient {
if IsSystemContainer(service.Config()) {
waitFor(&c.systemOnce, c.systemClient, config.DOCKER_SYSTEM_HOST)
return c.systemClient
@ -52,10 +54,10 @@ func (c *ClientFactory) Create(service project.Service) *dockerclient.Client {
return c.userClient
}
func waitFor(once *sync.Once, client *dockerclient.Client, endpoint string) {
func waitFor(once *sync.Once, client dockerclient.APIClient, endpoint string) {
once.Do(func() {
err := ClientOK(endpoint, func() bool {
_, err := client.Info()
_, err := client.Info(context.Background())
return err == nil
})
if err != nil {

View File

@ -4,7 +4,7 @@ import (
"fmt"
"strings"
"github.com/docker/libcompose/project"
composeConfig "github.com/docker/libcompose/config"
"github.com/rancher/os/config"
)
@ -72,7 +72,7 @@ func (c *ConfigEnvironment) SetConfig(cfg *config.CloudConfig) {
c.cfg = cfg
}
func (c *ConfigEnvironment) Lookup(key, serviceName string, serviceConfig *project.ServiceConfig) []string {
func (c *ConfigEnvironment) Lookup(key, serviceName string, serviceConfig *composeConfig.ServiceConfig) []string {
fullKey := fmt.Sprintf("%s/%s", serviceName, key)
return lookupKeys(c.cfg, fullKey, key)
}

View File

@ -1,11 +1,17 @@
package docker
import (
"fmt"
"github.com/Sirupsen/logrus"
dockerclient "github.com/docker/engine-api/client"
"github.com/docker/engine-api/types"
composeConfig "github.com/docker/libcompose/config"
"github.com/docker/libcompose/docker"
"github.com/docker/libcompose/project"
dockerclient "github.com/fsouza/go-dockerclient"
"github.com/docker/libcompose/project/options"
"github.com/rancher/os/config"
"golang.org/x/net/context"
)
type Service struct {
@ -15,7 +21,7 @@ type Service struct {
project *project.Project
}
func NewService(factory *ServiceFactory, name string, serviceConfig *project.ServiceConfig, context *docker.Context, project *project.Project) *Service {
func NewService(factory *ServiceFactory, name string, serviceConfig *composeConfig.ServiceConfig, context *docker.Context, project *project.Project) *Service {
return &Service{
Service: docker.NewService(name, serviceConfig, context),
deps: factory.Deps,
@ -50,20 +56,20 @@ func (s *Service) missingImage() bool {
return false
}
client := s.context.ClientFactory.Create(s)
i, err := client.InspectImage(s.Config().Image)
return err != nil || i == nil
_, _, err := client.ImageInspectWithRaw(context.Background(), s.Config().Image, false)
return err != nil
}
func (s *Service) requiresSyslog() bool {
return s.Config().LogDriver == "syslog"
return s.Config().Logging.Driver == "syslog"
}
func (s *Service) requiresUserDocker() bool {
return s.Config().Labels.MapParts()[config.SCOPE] != config.SYSTEM
return s.Config().Labels[config.SCOPE] != config.SYSTEM
}
func appendLink(deps []project.ServiceRelationship, name string, optional bool, p *project.Project) []project.ServiceRelationship {
if _, ok := p.Configs[name]; !ok {
if _, ok := p.ServiceConfigs.Get(name); !ok {
return deps
}
rel := project.NewServiceRelationship(name, project.RelTypeLink)
@ -71,29 +77,26 @@ func appendLink(deps []project.ServiceRelationship, name string, optional bool,
return append(deps, rel)
}
func (s *Service) shouldRebuild() (bool, error) {
containers, err := s.Containers()
if err != nil {
return false, err
}
cfg, err := config.LoadConfig()
func (s *Service) shouldRebuild(ctx context.Context) (bool, error) {
containers, err := s.Containers(ctx)
if err != nil {
return false, err
}
cfg := config.LoadConfig()
for _, c := range containers {
outOfSync, err := c.(*docker.Container).OutOfSync(s.Service.Config().Image)
outOfSync, err := c.(*docker.Container).OutOfSync(ctx, s.Service.Config().Image)
if err != nil {
return false, err
}
_, containerInfo, err := s.getContainer()
if containerInfo == nil || err != nil {
_, containerInfo, err := s.getContainer(ctx)
if err != nil {
return false, err
}
name := containerInfo.Name[1:]
origRebuildLabel := containerInfo.Config.Labels[config.REBUILD]
newRebuildLabel := s.Config().Labels.MapParts()[config.REBUILD]
newRebuildLabel := s.Config().Labels[config.REBUILD]
rebuildLabelChanged := newRebuildLabel != origRebuildLabel
logrus.WithFields(logrus.Fields{
"origRebuildLabel": origRebuildLabel,
@ -101,59 +104,66 @@ func (s *Service) shouldRebuild() (bool, error) {
"rebuildLabelChanged": rebuildLabelChanged,
"outOfSync": outOfSync}).Debug("Rebuild values")
rebuilding := false
if newRebuildLabel == "always" {
return true, nil
}
if outOfSync {
if cfg.Rancher.ForceConsoleRebuild && s.Name() == "console" {
cfg.Rancher.ForceConsoleRebuild = false
if err := cfg.Save(); err != nil {
if s.Name() == "console" {
if cfg.Rancher.ForceConsoleRebuild {
if err := config.Set("rancher.force_console_rebuild", false); err != nil {
return false, err
}
rebuilding = true
} else if origRebuildLabel == "always" || rebuildLabelChanged || origRebuildLabel != "false" {
rebuilding = true
return true, nil
}
origConsoleLabel := containerInfo.Config.Labels[config.CONSOLE]
newConsoleLabel := s.Config().Labels[config.CONSOLE]
if newConsoleLabel != origConsoleLabel {
return true, nil
}
} else if rebuildLabelChanged || origRebuildLabel != "false" {
return true, nil
} else {
logrus.Warnf("%s needs rebuilding", name)
}
}
if rebuilding {
logrus.Infof("Rebuilding %s", name)
return true, nil
}
}
return false, nil
}
func (s *Service) Up() error {
labels := s.Config().Labels.MapParts()
func (s *Service) Up(ctx context.Context, options options.Up) error {
labels := s.Config().Labels
if err := s.Service.Create(); err != nil {
if err := s.Service.Create(ctx, options.Create); err != nil {
return err
}
shouldRebuild, err := s.shouldRebuild()
shouldRebuild, err := s.shouldRebuild(ctx)
if err != nil {
return err
}
if shouldRebuild {
cs, err := s.Service.Containers()
logrus.Infof("Rebuilding %s", s.Name())
cs, err := s.Service.Containers(ctx)
if err != nil {
return err
}
for _, c := range cs {
if _, err := c.(*docker.Container).Recreate(s.Config().Image); err != nil {
if _, err := c.(*docker.Container).Recreate(ctx, s.Config().Image); err != nil {
return err
}
}
s.rename()
if err = s.rename(ctx); err != nil {
return err
}
}
if labels[config.CREATE_ONLY] == "true" {
return s.checkReload(labels)
}
if err := s.Service.Up(); err != nil {
if err := s.Service.Up(ctx, options); err != nil {
return err
}
if labels[config.DETACH] == "false" {
if err := s.wait(); err != nil {
if err := s.wait(ctx); err != nil {
return err
}
}
@ -168,52 +178,53 @@ func (s *Service) checkReload(labels map[string]string) error {
return nil
}
func (s *Service) Create() error {
return s.Service.Create()
func (s *Service) Create(ctx context.Context, options options.Create) error {
return s.Service.Create(ctx, options)
}
func (s *Service) getContainer() (*dockerclient.Client, *dockerclient.Container, error) {
containers, err := s.Service.Containers()
func (s *Service) getContainer(ctx context.Context) (dockerclient.APIClient, types.ContainerJSON, error) {
containers, err := s.Service.Containers(ctx)
if err != nil {
return nil, nil, err
return nil, types.ContainerJSON{}, err
}
if len(containers) == 0 {
return nil, nil, nil
return nil, types.ContainerJSON{}, fmt.Errorf("No containers found for %s", s.Name())
}
id, err := containers[0].ID()
if err != nil {
return nil, nil, err
return nil, types.ContainerJSON{}, err
}
client := s.context.ClientFactory.Create(s)
info, err := client.InspectContainer(id)
info, err := client.ContainerInspect(context.Background(), id)
return client, info, err
}
func (s *Service) wait() error {
client, info, err := s.getContainer()
if err != nil || info == nil {
func (s *Service) wait(ctx context.Context) error {
client, info, err := s.getContainer(ctx)
if err != nil {
return err
}
if _, err := client.WaitContainer(info.ID); err != nil {
if _, err := client.ContainerWait(context.Background(), info.ID); err != nil {
return err
}
return nil
}
func (s *Service) rename() error {
client, info, err := s.getContainer()
if err != nil || info == nil {
func (s *Service) rename(ctx context.Context) error {
client, info, err := s.getContainer(ctx)
if err != nil {
return err
}
if len(info.Name) > 0 && info.Name[1:] != s.Name() {
logrus.Debugf("Renaming container %s => %s", info.Name[1:], s.Name())
return client.RenameContainer(dockerclient.RenameContainerOptions{ID: info.ID, Name: s.Name()})
return client.ContainerRename(context.Background(), info.ID, s.Name())
} else {
return nil
}

View File

@ -1,6 +1,7 @@
package docker
import (
composeConfig "github.com/docker/libcompose/config"
"github.com/docker/libcompose/docker"
"github.com/docker/libcompose/project"
"github.com/rancher/os/util"
@ -11,13 +12,13 @@ type ServiceFactory struct {
Deps map[string][]string
}
func (s *ServiceFactory) Create(project *project.Project, name string, serviceConfig *project.ServiceConfig) (project.Service, error) {
if after := serviceConfig.Labels.MapParts()["io.rancher.os.after"]; after != "" {
func (s *ServiceFactory) Create(project *project.Project, name string, serviceConfig *composeConfig.ServiceConfig) (project.Service, error) {
if after := serviceConfig.Labels["io.rancher.os.after"]; after != "" {
for _, dep := range util.TrimSplit(after, ",") {
s.Deps[name] = append(s.Deps[name], dep)
}
}
if before := serviceConfig.Labels.MapParts()["io.rancher.os.before"]; before != "" {
if before := serviceConfig.Labels["io.rancher.os.before"]; before != "" {
for _, dep := range util.TrimSplit(before, ",") {
s.Deps[dep] = append(s.Deps[dep], name)
}

View File

@ -1,11 +1,11 @@
package docker
import (
"github.com/docker/libcompose/project"
composeConfig "github.com/docker/libcompose/config"
"github.com/rancher/os/config"
)
func IsSystemContainer(serviceConfig *project.ServiceConfig) bool {
return serviceConfig.Labels.MapParts()[config.SCOPE] == config.SYSTEM
func IsSystemContainer(serviceConfig *composeConfig.ServiceConfig) bool {
return serviceConfig.Labels[config.SCOPE] == config.SYSTEM
}

Binary file not shown (image replaced: 743 KiB before, 30 KiB after).

View File

@ -2,17 +2,18 @@ package hostname
import (
"bufio"
"github.com/rancher/os/config"
"io/ioutil"
"os"
"strings"
"syscall"
"github.com/rancher/os/config"
)
func SetHostnameFromCloudConfig(cc *config.CloudConfig) error {
var hostname string
if cc.Hostname == "" {
hostname = cc.DefaultHostname
hostname = cc.Rancher.Defaults.Hostname
} else {
hostname = cc.Hostname
}

View File

@ -0,0 +1,2 @@
assets
build/dist/kernel

View File

@ -0,0 +1,2 @@
FROM scratch
ADD build/rootfs.tar /

38
images/01-base/Dockerfile Normal file
View File

@ -0,0 +1,38 @@
FROM rancher/os-rootfs
RUN ln -s /dev/null /etc/udev/rules.d/80-net-name-slot.rules
# Cleanup Buildroot
RUN rm /sbin/poweroff /sbin/reboot /sbin/halt && \
sed -i '/^root/s!/bin/sh!/bin/bash!' /etc/passwd && \
echo 'RancherOS \n \l' > /etc/issue && \
rm -rf /run \
/linuxrc \
/etc/os-release \
/var/cache \
/var/lock \
/var/log \
/var/run \
/var/spool \
/var/lib/misc && \
mkdir -p \
/home \
/run \
/var/cache \
/var/lock \
/var/log \
/var/run \
/var/spool && \
passwd -l root && \
addgroup -g 1100 rancher && \
addgroup -g 1101 docker && \
addgroup -g 1103 sudo && \
adduser -u 1100 -G rancher -D -h /home/rancher -s /bin/bash rancher && \
adduser -u 1101 -G docker -D -h /home/docker -s /bin/bash docker && \
adduser rancher docker && \
adduser rancher sudo && \
adduser docker sudo && \
echo '%sudo ALL=(ALL) ALL' >> /etc/sudoers
COPY inputrc /etc/inputrc
COPY entry.sh /usr/sbin/entry.sh
COPY growpart /usr/bin/growpart
ENTRYPOINT ["/usr/sbin/entry.sh"]

images/01-base/entry.sh Executable file
View File

@ -0,0 +1,14 @@
#!/bin/bash
if [ -e /host/dev ]; then
mount --rbind /host/dev /dev
fi
CA_BASE=/etc/ssl/certs/ca-certificates.crt.rancher
CA=/etc/ssl/certs/ca-certificates.crt
if [[ -e ${CA_BASE} && ! -e ${CA} ]]; then
cp $CA_BASE $CA
fi
exec "$@"

images/01-base/growpart Executable file
View File

@ -0,0 +1,780 @@
#!/bin/sh
# Copyright (C) 2011 Canonical Ltd.
# Copyright (C) 2013 Hewlett-Packard Development Company, L.P.
#
# Authors: Scott Moser <smoser@canonical.com>
# Juerg Haefliger <juerg.haefliger@hp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# the fudge factor. if within this many bytes, don't bother
FUDGE=${GROWPART_FUDGE:-$((1024*1024))}
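# e.g. GROWPART_FUDGE=$((4*1024*1024)) would skip any resize gaining less than 4MiB (hypothetical override)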
TEMP_D=""
RESTORE_FUNC=""
RESTORE_HUMAN=""
VERBOSITY=0
DISK=""
PART=""
PT_UPDATE=false
DRY_RUN=0
SFDISK_VERSION=""
SFDISK_2_26="22600"
SFDISK_V_WORKING_GPT="22603"
MBR_BACKUP=""
GPT_BACKUP=""
_capture=""
error() {
echo "$@" 1>&2
}
fail() {
[ $# -eq 0 ] || echo "FAILED:" "$@"
exit 2
}
nochange() {
echo "NOCHANGE:" "$@"
exit 1
}
changed() {
echo "CHANGED:" "$@"
exit 0
}
change() {
echo "CHANGE:" "$@"
exit 0
}
cleanup() {
if [ -n "${RESTORE_FUNC}" ]; then
error "***** WARNING: Resize failed, attempting to revert ******"
if ${RESTORE_FUNC} ; then
error "***** Appears to have gone OK ****"
else
error "***** FAILED! ******"
if [ -n "${RESTORE_HUMAN}" -a -f "${RESTORE_HUMAN}" ]; then
error "**** original table looked like: ****"
cat "${RESTORE_HUMAN}" 1>&2
else
error "We seem to have not saved the partition table!"
fi
fi
fi
[ -z "${TEMP_D}" -o ! -d "${TEMP_D}" ] || rm -Rf "${TEMP_D}"
}
debug() {
local level=${1}
shift
[ "${level}" -gt "${VERBOSITY}" ] && return
if [ "${DEBUG_LOG}" ]; then
echo "$@" >>"${DEBUG_LOG}"
else
error "$@"
fi
}
debugcat() {
local level="$1"
shift;
[ "${level}" -gt "$VERBOSITY" ] && return
if [ "${DEBUG_LOG}" ]; then
cat "$@" >>"${DEBUG_LOG}"
else
cat "$@" 1>&2
fi
}
mktemp_d() {
# just a mktemp -d that doesn't need mktemp if it's not there.
_RET=$(mktemp -d "${TMPDIR:-/tmp}/${0##*/}.XXXXXX" 2>/dev/null) &&
return
_RET=$(umask 077 && t="${TMPDIR:-/tmp}/${0##*/}.$$" &&
mkdir "${t}" && echo "${t}")
return
}
Usage() {
cat <<EOF
${0##*/} disk partition
rewrite partition table so that partition takes up all the space it can
options:
-h | --help print Usage and exit
--fudge F if part could be resized, but change would be
less than 'F' bytes, do not resize (default: ${FUDGE})
-N | --dry-run only report what would be done, show new 'sfdisk -d'
-v | --verbose increase verbosity / debug
-u | --update R update the kernel partition table info after growing
this requires kernel support and 'partx --update'
R is one of:
- 'auto' : [default] update partition if possible
- 'force' : try despite sanity checks (fail on failure)
- 'off' : do not attempt
- 'on' : fail if sanity checks indicate no support
Example:
- ${0##*/} /dev/sda 1
Resize partition 1 on /dev/sda
EOF
}
bad_Usage() {
Usage 1>&2
error "$@"
exit 2
}
sfdisk_restore_legacy() {
sfdisk --no-reread "${DISK}" -I "${MBR_BACKUP}"
}
sfdisk_restore() {
# files are named: sfdisk-<device>-<offset>.bak
local f="" offset="" fails=0
for f in "${MBR_BACKUP}"*.bak; do
[ -f "$f" ] || continue
offset=${f##*-}
offset=${offset%.bak}
[ "$offset" = "$f" ] && {
error "WARN: confused by file $f";
continue;
}
dd "if=$f" "of=${DISK}" seek=$(($offset)) bs=1 conv=notrunc ||
{ error "WARN: failed restore from $f"; fails=$(($fails+1)); }
done
return $fails
}
sfdisk_worked_but_blkrrpart_failed() {
local ret="$1" output="$2"
# exit code found was just 1, but don't insist on that
#[ $ret -eq 1 ] || return 1
# Successfully wrote the new partition table
grep -qi "Success.* wrote.* new.* partition" "$output" &&
grep -qi "BLKRRPART: Device or resource busy" "$output"
return
}
get_sfdisk_version() {
# set SFDISK_VERSION to MAJOR*10000+MINOR*100+MICRO
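# e.g. "sfdisk from util-linux 2.26.3" encodes to 2*10000+26*100+3 = 22603 (cf. SFDISK_V_WORKING_GPT above)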
local out oifs="$IFS" ver=""
[ -n "$SFDISK_VERSION" ] && return 0
# expected output: sfdisk from util-linux 2.25.2
out=$(sfdisk --version) ||
{ error "failed to get sfdisk version"; return 1; }
set -- $out
ver=$4
case "$ver" in
[0-9]*.[0-9]*.[0-9]|[0-9].[0-9]*)
IFS="."; set -- $ver; IFS="$oifs"
SFDISK_VERSION=$(($1*10000+$2*100+${3:-0}))
return 0;;
*) error "unexpected output in sfdisk --version [$out]"
return 1;;
esac
}
resize_sfdisk() {
local humanpt="${TEMP_D}/recovery"
local mbr_backup="${TEMP_D}/orig.save"
local restore_func=""
local format="$1"
local change_out=${TEMP_D}/change.out
local dump_out=${TEMP_D}/dump.out
local new_out=${TEMP_D}/new.out
local dump_mod=${TEMP_D}/dump.mod
local tmp="${TEMP_D}/tmp.out"
local err="${TEMP_D}/err.out"
local mbr_max_512="4294967296"
local pt_start pt_size pt_end max_end new_size change_info dpart
local sector_num sector_size disk_size tot out
rqe sfd_list sfdisk --list --unit=S "$DISK" >"$tmp" ||
fail "failed: sfdisk --list $DISK"
if [ "${SFDISK_VERSION}" -lt ${SFDISK_2_26} ]; then
# expected output contains: Units: sectors of 512 bytes, ...
out=$(awk '$1 == "Units:" && $5 ~ /bytes/ { print $4 }' "$tmp") ||
fail "failed to read sfdisk output"
if [ -z "$out" ]; then
error "WARN: sector size not found in sfdisk output, assuming 512"
sector_size=512
else
sector_size="$out"
fi
local _w _cyl _w1 _heads _w2 sectors _w3 t s
# show-size is in units of 1024 bytes (same as /proc/partitions)
t=$(sfdisk --show-size "${DISK}") ||
fail "failed: sfdisk --show-size $DISK"
disk_size=$((t*1024))
sector_num=$(($disk_size/$sector_size))
msg="disk size '$disk_size' not evenly div by sector size '$sector_size'"
[ "$((${disk_size}%${sector_size}))" -eq 0 ] ||
error "WARN: $msg"
restore_func=sfdisk_restore_legacy
else
# --list first line output:
# Disk /dev/vda: 20 GiB, 21474836480 bytes, 41943040 sectors
local _x
read _x _x _x _x disk_size _x sector_num _x < "$tmp"
sector_size=$((disk_size/$sector_num))
restore_func=sfdisk_restore
fi
debug 1 "$sector_num sectors of $sector_size. total size=${disk_size} bytes"
[ $(($disk_size/512)) -gt $mbr_max_512 ] &&
debug 1 "WARN: disk is larger than 2TB. additional space will go unused."
rqe sfd_dump sfdisk --unit=S --dump "${DISK}" >"${dump_out}" ||
fail "failed to dump sfdisk info for ${DISK}"
RESTORE_HUMAN="$dump_out"
{
echo "## sfdisk --unit=S --dump ${DISK}"
cat "${dump_out}"
} >"$humanpt"
[ $? -eq 0 ] || fail "failed to save sfdisk -d output"
RESTORE_HUMAN="$humanpt"
debugcat 1 "$humanpt"
sed -e 's/,//g; s/start=/start /; s/size=/size /' "${dump_out}" \
>"${dump_mod}" ||
fail "sed failed on dump output"
dpart="${DISK}${PART}" # disk and partition number
if [ -b "${DISK}p${PART}" -a "${DISK%[0-9]}" != "${DISK}" ]; then
# for block devices that end in a number (/dev/nbd0)
# the partition is "<name>p<partition_number>" (/dev/nbd0p1)
dpart="${DISK}p${PART}"
elif [ "${DISK#/dev/loop[0-9]}" != "${DISK}" ]; then
# for /dev/loop devices, sfdisk output will be <name>p<number>
# format also, even though there is not a device there.
dpart="${DISK}p${PART}"
fi
pt_start=$(awk '$1 == pt { print $4 }' "pt=${dpart}" <"${dump_mod}") &&
pt_size=$(awk '$1 == pt { print $6 }' "pt=${dpart}" <"${dump_mod}") &&
[ -n "${pt_start}" -a -n "${pt_size}" ] &&
pt_end=$((${pt_size}+${pt_start})) ||
fail "failed to get start and end for ${dpart} in ${DISK}"
# find the minimal starting location that is >= pt_end
max_end=$(awk '$3 == "start" { if($4 >= pt_end && $4 < min)
{ min = $4 } } END { printf("%s\n",min); }' \
min=${sector_num} pt_end=${pt_end} "${dump_mod}") &&
[ -n "${max_end}" ] ||
fail "failed to get max_end for partition ${PART}"
mbr_max_sectors=$((mbr_max_512*$((sector_size/512))))
if [ "$max_end" -gt "$mbr_max_sectors" ]; then
max_end=$mbr_max_sectors
fi
if [ "$format" = "gpt" ]; then
# sfdisk respects 'last-lba' in input, and complains about
# partitions that go past that. without it, it does the right thing.
sed -i '/^last-lba:/d' "$dump_out" ||
fail "failed to remove last-lba from output"
fi
local gpt_second_size="33"
if [ "${max_end}" -gt "$((${sector_num}-${gpt_second_size}))" ]; then
# if mbr, allow subsequent conversion to gpt without shrinking the
# partition. safety net at the cost of 33 sectors, seems reasonable.
# if gpt, we can't write there anyway.
debug 1 "padding ${gpt_second_size} sectors for gpt secondary header"
max_end=$((${sector_num}-${gpt_second_size}))
fi
debug 1 "max_end=${max_end} tot=${sector_num} pt_end=${pt_end}" \
"pt_start=${pt_start} pt_size=${pt_size}"
[ $((${pt_end})) -eq ${max_end} ] &&
nochange "partition ${PART} is size ${pt_size}. it cannot be grown"
[ $((${pt_end}+(${FUDGE}/$sector_size))) -gt ${max_end} ] &&
nochange "partition ${PART} could only be grown by" \
"$((${max_end}-${pt_end})) [fudge=$((${FUDGE}/$sector_size))]"
# now, change the size for this partition in ${dump_out} to be the
# new size
new_size=$((${max_end}-${pt_start}))
sed "\|^\s*${dpart} |s/${pt_size},/${new_size},/" "${dump_out}" \
>"${new_out}" ||
fail "failed to change size in output"
change_info="partition=${PART} start=${pt_start} old: size=${pt_size} end=${pt_end} new: size=${new_size},end=${max_end}"
if [ ${DRY_RUN} -ne 0 ]; then
echo "CHANGE: ${change_info}"
{
echo "# === old sfdisk -d ==="
cat "${dump_out}"
echo "# === new sfdisk -d ==="
cat "${new_out}"
} 1>&2
exit 0
fi
MBR_BACKUP="${mbr_backup}"
LANG=C sfdisk --no-reread "${DISK}" --force \
-O "${mbr_backup}" <"${new_out}" >"${change_out}" 2>&1
ret=$?
[ $ret -eq 0 ] || RESTORE_FUNC="${restore_func}"
if [ $ret -eq 0 ]; then
:
elif $PT_UPDATE &&
sfdisk_worked_but_blkrrpart_failed "$ret" "${change_out}"; then
# if the command failed, but it looks like only because
# the device was busy and we have pt_update, then go on
debug 1 "sfdisk failed, but likely only because of blkrrpart"
else
error "attempt to resize ${DISK} failed. sfdisk output below:"
sed 's,^,| ,' "${change_out}" 1>&2
fail "failed to resize"
fi
rq pt_update pt_update "$DISK" "$PART" ||
fail "pt_resize failed"
RESTORE_FUNC=""
changed "${change_info}"
# dump_out looks something like:
## partition table of /tmp/out.img
#unit: sectors
#
#/tmp/out.img1 : start= 1, size= 48194, Id=83
#/tmp/out.img2 : start= 48195, size= 963900, Id=83
#/tmp/out.img3 : start= 1012095, size= 305235, Id=82
#/tmp/out.img4 : start= 1317330, size= 771120, Id= 5
#/tmp/out.img5 : start= 1317331, size= 642599, Id=83
#/tmp/out.img6 : start= 1959931, size= 48194, Id=83
#/tmp/out.img7 : start= 2008126, size= 80324, Id=83
}
gpt_restore() {
sgdisk -l "${GPT_BACKUP}" "${DISK}"
}
resize_sgdisk() {
GPT_BACKUP="${TEMP_D}/pt.backup"
local pt_info="${TEMP_D}/pt.info"
local pt_pretend="${TEMP_D}/pt.pretend"
local pt_data="${TEMP_D}/pt.data"
local out="${TEMP_D}/out"
local dev="disk=${DISK} partition=${PART}"
local pt_start pt_end pt_size last pt_max code guid name new_size
local old new change_info sector_size
# Dump the original partition information and details to disk. This is
# used in case something goes wrong and human interaction is required
# to revert any changes.
rqe sgd_info sgdisk "--info=${PART}" --print "${DISK}" >"${pt_info}" ||
fail "${dev}: failed to dump original sgdisk info"
RESTORE_HUMAN="${pt_info}"
sector_size=$(awk '$0 ~ /^Logical sector size:.*bytes/ { print $4 }' \
"$pt_info") && [ -n "$sector_size" ] || {
sector_size=512
error "WARN: did not find sector size, assuming 512"
}
debug 1 "$dev: original sgdisk info:"
debugcat 1 "${pt_info}"
# Pretend to move the backup GPT header to the end of the disk and dump
# the resulting partition information. We use this info to determine if
# we have to resize the partition.
rqe sgd_pretend sgdisk --pretend --move-second-header \
--print "${DISK}" >"${pt_pretend}" ||
fail "${dev}: failed to dump pretend sgdisk info"
debug 1 "$dev: pretend sgdisk info"
debugcat 1 "${pt_pretend}"
# Extract the partition data from the pretend dump
awk 'found { print } ; $1 == "Number" { found = 1 }' \
"${pt_pretend}" >"${pt_data}" ||
fail "${dev}: failed to parse pretend sgdisk info"
# Get the start and end sectors of the partition to be grown
pt_start=$(awk '$1 == '"${PART}"' { print $2 }' "${pt_data}") &&
[ -n "${pt_start}" ] ||
fail "${dev}: failed to get start sector"
pt_end=$(awk '$1 == '"${PART}"' { print $3 }' "${pt_data}") &&
[ -n "${pt_end}" ] ||
fail "${dev}: failed to get end sector"
pt_size="$((${pt_end} - ${pt_start}))"
# Get the last usable sector
last=$(awk '/last usable sector is/ { print $NF }' \
"${pt_pretend}") && [ -n "${last}" ] ||
fail "${dev}: failed to get last usable sector"
# Find the minimal start sector that is >= pt_end
pt_max=$(awk '{ if ($2 >= pt_end && $2 < min) { min = $2 } } END \
{ print min }' min="${last}" pt_end="${pt_end}" \
"${pt_data}") && [ -n "${pt_max}" ] ||
fail "${dev}: failed to find max end sector"
debug 1 "${dev}: pt_start=${pt_start} pt_end=${pt_end}" \
"pt_size=${pt_size} pt_max=${pt_max} last=${last}"
# Check if the partition can be grown
[ "${pt_end}" -eq "${pt_max}" ] &&
nochange "${dev}: size=${pt_size}, it cannot be grown"
[ "$((${pt_end} + ${FUDGE}/${sector_size}))" -gt "${pt_max}" ] &&
nochange "${dev}: could only be grown by" \
"$((${pt_max} - ${pt_end})) [fudge=$((${FUDGE}/$sector_size))]"
# The partition can be grown if we made it here. Get some more info
# about it so we can do it properly.
# FIXME: Do we care about the attribute flags?
code=$(awk '/^Partition GUID code:/ { print $4 }' "${pt_info}")
guid=$(awk '/^Partition unique GUID:/ { print $4 }' "${pt_info}")
name=$(awk '/^Partition name:/ { gsub(/'"'"'/, "") ; \
if (NF >= 3) print substr($0, index($0, $3)) }' "${pt_info}")
[ -n "${code}" -a -n "${guid}" ] ||
fail "${dev}: failed to parse sgdisk details"
debug 1 "${dev}: code=${code} guid=${guid} name='${name}'"
local wouldrun=""
[ "$DRY_RUN" -ne 0 ] && wouldrun="would-run"
# Calculate the new size of the partition
new_size=$((${pt_max} - ${pt_start}))
old="old: size=${pt_size},end=${pt_end}"
new="new: size=${new_size},end=${pt_max}"
change_info="${dev}: start=${pt_start} ${old} ${new}"
# Backup the current partition table, we're about to modify it
rq sgd_backup $wouldrun sgdisk "--backup=${GPT_BACKUP}" "${DISK}" ||
fail "${dev}: failed to backup the partition table"
# Modify the partition table. We do it all in one go (the order is
# important!):
# - move the GPT backup header to the end of the disk
# - delete the partition
# - recreate the partition with the new size
# - set the partition code
# - set the partition GUID
# - set the partition name
rq sgdisk_mod $wouldrun sgdisk --move-second-header "--delete=${PART}" \
"--new=${PART}:${pt_start}:${pt_max}" \
"--typecode=${PART}:${code}" \
"--partition-guid=${PART}:${guid}" \
"--change-name=${PART}:${name}" "${DISK}" &&
rq pt_update $wouldrun pt_update "$DISK" "$PART" || {
RESTORE_FUNC=gpt_restore
fail "${dev}: failed to repartition"
}
# Dry run
[ "${DRY_RUN}" -ne 0 ] && change "${change_info}"
changed "${change_info}"
}
kver_to_num() {
local kver="$1" maj="" min="" mic="0"
kver=${kver%%-*}
maj=${kver%%.*}
min=${kver#${maj}.}
min=${min%%.*}
mic=${kver#${maj}.${min}.}
[ "$kver" = "$mic" ] && mic=0
_RET=$(($maj*1000*1000+$min*1000+$mic))
}
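# e.g. kver_to_num "3.13.0-24-generic" sets _RET to 3*1000*1000+13*1000+0 = 3013000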
kver_cmp() {
local op="$2" n1="" n2=""
kver_to_num "$1"
n1="$_RET"
kver_to_num "$3"
n2="$_RET"
[ $n1 $op $n2 ]
}
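# e.g. kver_cmp "$(uname -r)" -ge 3.8.0, the partx --update support check used in verify_ptupdate below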
rq() {
# runquieterror(label, command)
# gobble stderr of a command unless it errors
local label="$1" ret="" efile=""
efile="$TEMP_D/$label.err"
shift;
local rlabel="running"
[ "$1" = "would-run" ] && rlabel="would-run" && shift
local cmd="" x=""
for x in "$@"; do
[ "${x#* }" != "$x" -o "${x#* \"}" != "$x" ] && x="'$x'"
cmd="$cmd $x"
done
cmd=${cmd# }
debug 2 "$rlabel[$label][$_capture]" "$cmd"
[ "$rlabel" = "would-run" ] && return 0
if [ "${_capture}" = "erronly" ]; then
"$@" 2>"$TEMP_D/$label.err"
ret=$?
else
"$@" >"$TEMP_D/$label.err" 2>&1
ret=$?
fi
if [ $ret -ne 0 ]; then
error "failed [$label:$ret]" "$@"
cat "$efile" 1>&2
fi
return $ret
}
rqe() {
local _capture="erronly"
rq "$@"
}
verify_ptupdate() {
local input="$1" found="" reason="" kver=""
# we can always satisfy 'off'
if [ "$input" = "off" ]; then
_RET="false";
return 0;
fi
if command -v partx >/dev/null 2>&1; then
local out="" ret=0
out=$(partx --help 2>&1)
ret=$?
if [ $ret -eq 0 ]; then
echo "$out" | grep -q -- --update || {
reason="partx has no '--update' flag in usage."
found="off"
}
else
reason="'partx --help' returned $ret. assuming it is old."
found="off"
fi
else
reason="no 'partx' command"
found="off"
fi
if [ -z "$found" ]; then
if [ "$(uname)" != "Linux" ]; then
reason="Kernel is not Linux per uname."
found="off"
fi
fi
if [ -z "$found" ]; then
kver=$(uname -r) || debug 1 "uname -r failed!"
if ! kver_cmp "${kver-0.0.0}" -ge 3.8.0; then
reason="Kernel '$kver' < 3.8.0."
found="off"
fi
fi
if [ -z "$found" ]; then
_RET="true"
return 0
fi
case "$input" in
on) error "$reason"; return 1;;
auto)
_RET="false";
debug 1 "partition update disabled: $reason"
return 0;;
force)
_RET="true"
error "WARNING: ptupdate forced on even though: $reason"
return 0;;
esac
error "unknown input '$input'";
return 1;
}
pt_update() {
local dev="$1" part="$2" update="${3:-$PT_UPDATE}"
if ! $update; then
return 0
fi
# partx only works on block devices (do not run on file)
[ -b "$dev" ] || return 0
partx --update "$part" "$dev"
}
has_cmd() {
command -v "${1}" >/dev/null 2>&1
}
resize_sgdisk_gpt() {
resize_sgdisk gpt
}
resize_sgdisk_dos() {
fail "unable to resize dos label with sgdisk"
}
resize_sfdisk_gpt() {
resize_sfdisk gpt
}
resize_sfdisk_dos() {
resize_sfdisk dos
}
get_table_format() {
local out="" disk="$1"
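# blkid -o value -s PTTYPE prints just the partition-table type ("dos" or "gpt") on reasonably recent util-linux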
if has_cmd blkid && out=$(blkid -o value -s PTTYPE "$disk") &&
[ "$out" = "dos" -o "$out" = "gpt" ]; then
_RET="$out"
return
fi
_RET="dos"
if [ ${SFDISK_VERSION} -lt ${SFDISK_2_26} ] &&
out=$(sfdisk --id --force "$disk" 1 2>/dev/null); then
if [ "$out" = "ee" ]; then
_RET="gpt"
else
_RET="dos"
fi
return
elif out=$(LANG=C sfdisk --list "$disk"); then
out=$(echo "$out" | sed -e '/Disklabel type/!d' -e 's/.*: //')
case "$out" in
gpt|dos) _RET="$out";;
*) error "WARN: unknown label $out";;
esac
fi
}
get_resizer() {
local format="$1" user=${2:-"auto"}
case "$user" in
sgdisk) _RET="resize_sgdisk_$format"; return;;
sfdisk) _RET="resize_sfdisk_$format"; return;;
auto) :;;
*) error "unexpected input: '$user'";;
esac
if [ "$format" = "dos" ]; then
_RET="resize_sfdisk_dos"
return 0
fi
if [ "${SFDISK_VERSION}" -ge ${SFDISK_V_WORKING_GPT} ]; then
# sfdisk 2.26.2 works for resize but loses type (LP: #1474090)
_RET="resize_sfdisk_gpt"
elif has_cmd sgdisk; then
_RET="resize_sgdisk_$format"
else
error "no tools available to resize disk with '$format'"
return 1
fi
return 0
}
pt_update="auto"
resizer=${GROWPART_RESIZER:-"auto"}
while [ $# -ne 0 ]; do
cur=${1}
next=${2}
case "$cur" in
-h|--help)
Usage
exit 0
;;
--fudge)
FUDGE=${next}
shift
;;
-N|--dry-run)
DRY_RUN=1
;;
-u|--update|--update=*)
if [ "${cur#--update=}" != "$cur" ]; then
next="${cur#--update=}"
else
shift
fi
case "$next" in
off|auto|force|on) pt_update=$next;;
*) fail "unknown --update option: $next";;
esac
;;
-v|--verbose)
VERBOSITY=$(($VERBOSITY+1))
;;
--)
shift
break
;;
-*)
fail "unknown option ${cur}"
;;
*)
if [ -z "${DISK}" ]; then
DISK=${cur}
else
[ -z "${PART}" ] || fail "confused by arg ${cur}"
PART=${cur}
fi
;;
esac
shift
done
[ -n "${DISK}" ] || bad_Usage "must supply disk and partition-number"
[ -n "${PART}" ] || bad_Usage "must supply partition-number"
has_cmd "sfdisk" || fail "sfdisk not found"
get_sfdisk_version || fail
[ -e "${DISK}" ] || fail "${DISK}: does not exist"
[ "${PART#*[!0-9]}" = "${PART}" ] || fail "partition-number must be a number"
verify_ptupdate "$pt_update" || fail
PT_UPDATE=$_RET
debug 1 "update-partition set to $PT_UPDATE"
mktemp_d && TEMP_D="${_RET}" || fail "failed to make temp dir"
trap cleanup 0 # EXIT - some shells may not like 'EXIT' but are ok with 0
# get the ID of the first partition to determine if it's MBR or GPT
get_table_format "$DISK" || fail
format=$_RET
get_resizer "$format" "$resizer" ||
fail "failed to get a resizer for id '$id'"
resizer=$_RET
debug 1 "resizing $PART on $DISK using $resizer"
"$resizer"
# vi: ts=4 noexpandtab

images/01-base/inputrc Normal file
View File

@ -0,0 +1,67 @@
# /etc/inputrc - global inputrc for libreadline
# See readline(3readline) and `info rluserman' for more information.
# Be 8 bit clean.
set input-meta on
set output-meta on
# To allow the use of 8bit-characters like the german umlauts, uncomment
# the line below. However this makes the meta key not work as a meta key,
# which is annoying to those which don't need to type in 8-bit characters.
# set convert-meta off
# try to enable the application keypad when it is called. Some systems
# need this to enable the arrow keys.
# set enable-keypad on
# see /usr/share/doc/bash/inputrc.arrows for other codes of arrow keys
# do not bell on tab-completion
# set bell-style none
# set bell-style visible
# some defaults / modifications for the emacs mode
$if mode=emacs
# allow the use of the Home/End keys
"\e[1~": beginning-of-line
"\e[4~": end-of-line
# allow the use of the Delete/Insert keys
"\e[3~": delete-char
"\e[2~": quoted-insert
# mappings for "page up" and "page down" to step to the beginning/end
# of the history
# "\e[5~": beginning-of-history
# "\e[6~": end-of-history
# alternate mappings for "page up" and "page down" to search the history
# "\e[5~": history-search-backward
# "\e[6~": history-search-forward
# mappings for Ctrl-left-arrow and Ctrl-right-arrow for word moving
"\e[1;5C": forward-word
"\e[1;5D": backward-word
"\e[5C": forward-word
"\e[5D": backward-word
"\e\e[C": forward-word
"\e\e[D": backward-word
$if term=rxvt
"\e[7~": beginning-of-line
"\e[8~": end-of-line
"\eOc": forward-word
"\eOd": backward-word
$endif
# for non RH/Debian xterm, can't hurt for RH/Debian xterm
# "\eOH": beginning-of-line
# "\eOF": end-of-line
# for freebsd console
# "\e[H": beginning-of-line
# "\e[F": end-of-line
$endif

View File

@ -0,0 +1,2 @@
FROM rancher/os-base
CMD ["/usr/sbin/acpid", "-f"]

View File

@ -0,0 +1,4 @@
FROM rancher/os-base
COPY auto-format.sh /usr/sbin/
COPY od-1m0 /
ENTRYPOINT ["/usr/sbin/auto-format.sh"]

View File

@ -0,0 +1,67 @@
#!/bin/bash
set -ex
MAGIC=${MAGIC:-"boot2docker, please format-me"}
AUTOFORMAT=${AUTOFORMAT:-"/dev/sda /dev/vda"}
DEVS=(${AUTOFORMAT})
FORMATZERO=${FORMATZERO:-false}
for dev in ${DEVS[@]}; do
if [ -b "${dev}" ]; then
# Test for our magic string (it means that the disk was made by ./boot2docker init)
HEADER=`dd if=${dev} bs=1 count=${#MAGIC} 2>/dev/null`
if [ "$HEADER" = "$MAGIC" ]; then
# save the preload userdata.tar file
dd if=${dev} of=/userdata.tar bs=1 count=8192
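# per boot2docker convention, the first 8KiB of the disk are a tar archive (the magic string is its leading entry name) carrying .ssh/authorized_keys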
elif [ "${FORMATZERO}" != "true" ]; then
# do not try to guess whether to auto-format a disk beginning with 1MB filled with 00
continue
elif ! od -A d -N 1048576 ${dev} | head -n 3 | diff ./od-1m0 - >/dev/null 2>&1; then
# do not auto-format if the disk does not begin with 1MB filled with 00
continue
fi
mkfs.ext4 -L RANCHER_STATE ${dev}
if [ -e "/userdata.tar" ]; then
mkdir -p /mnt/new-root
mount -t ext4 ${dev} /mnt/new-root
pushd /mnt/new-root
mkdir -p ./var/lib/rancher/conf/cloud-config.d
echo $(tar -xvf /userdata.tar)
AUTHORIZED_KEY1=$(cat ./.ssh/authorized_keys)
AUTHORIZED_KEY2=$(cat ./.ssh/authorized_keys2)
tee ./var/lib/rancher/conf/cloud-config.d/machine.yml << EOF
#cloud-config
rancher:
network:
interfaces:
eth0:
dhcp: true
eth1:
dhcp: true
lo:
address: 127.0.0.1/8
ssh_authorized_keys:
- ${AUTHORIZED_KEY1}
- ${AUTHORIZED_KEY2}
users:
- name: docker
ssh_authorized_keys:
- ${AUTHORIZED_KEY1}
- ${AUTHORIZED_KEY2}
EOF
popd
umount /mnt/new-root
fi
# do not check another device
break
fi
done

View File

@ -0,0 +1,3 @@
0000000 000000 000000 000000 000000 000000 000000 000000 000000
*
1048576

View File

@ -0,0 +1,3 @@
FROM rancher/os-base
COPY cloud-init.sh /
CMD ["/cloud-init.sh"]

View File

@ -0,0 +1,15 @@
#!/bin/bash
set -x -e
MOUNT_POINT=/media/config-2
CONFIG_DEV=$(ros dev "LABEL=config-2")
mkdir -p ${MOUNT_POINT}
if [ -e "${CONFIG_DEV}" ]; then
mount -t iso9660,vfat ${CONFIG_DEV} ${MOUNT_POINT}
else
mount -t 9p -o trans=virtio,version=9p2000.L config-2 ${MOUNT_POINT} 2>/dev/null || true
fi
cloud-init -save -network=${CLOUD_INIT_NETWORK:-true}

View File

@ -0,0 +1,16 @@
FROM rancher/os-base
COPY console.sh docker-init update-ssh-keys rancheros-install /usr/sbin/
COPY build/lsb-release /etc/
RUN sed -i 's/rancher:!/rancher:*/g' /etc/shadow && \
sed -i 's/docker:!/docker:*/g' /etc/shadow && \
sed -i 's/#ClientAliveInterval 0/ClientAliveInterval 180/g' /etc/ssh/sshd_config && \
echo '## allow password less for rancher user' >> /etc/sudoers && \
echo 'rancher ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers && \
echo '## allow password less for docker user' >> /etc/sudoers && \
echo 'docker ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers && \
ln -sf /usr/bin/docker.dist /usr/bin/docker && \
ln -sf /usr/bin/docker-containerd.dist /usr/bin/docker-containerd && \
ln -sf /usr/bin/docker-containerd-shim.dist /usr/bin/docker-containerd-shim && \
ln -sf /usr/bin/docker-runc.dist /usr/bin/docker-runc
COPY prompt.sh /etc/profile.d/
CMD ["/usr/sbin/console.sh"]

images/02-console/console.sh Executable file
View File

@ -0,0 +1,147 @@
#!/bin/bash
set -e -x
setup_ssh()
{
for i in rsa dsa ecdsa ed25519; do
local output=/etc/ssh/ssh_host_${i}_key
if [ ! -s $output ]; then
local saved="$(ros config get rancher.ssh.keys.${i})"
local pub="$(ros config get rancher.ssh.keys.${i}-pub)"
if [[ -n "$saved" && -n "$pub" ]]; then
(
umask 077
temp_file=$(mktemp)
echo "$saved" > ${temp_file}
mv ${temp_file} ${output}
temp_file=$(mktemp)
echo "$pub" > ${temp_file}
mv ${temp_file} ${output}.pub
)
else
ssh-keygen -f $output -N '' -t $i
ros config set -- rancher.ssh.keys.${i} "$(<${output})"
ros config set -- rancher.ssh.keys.${i}-pub "$(<${output}.pub)"
fi
fi
done
mkdir -p /var/run/sshd
}
setup_cgroup()
{
local cgroup=$(grep name=systemd /proc/$$/cgroup | cut -f3 -d:)
if [ -n "$cgroup" ]; then
mkdir -p /sys/fs/cgroup/systemd${cgroup}
fi
}
setup_cgroup || true
RANCHER_HOME=/home/rancher
if [ ! -d ${RANCHER_HOME} ]; then
mkdir -p ${RANCHER_HOME}
chown rancher:rancher ${RANCHER_HOME}
chmod 2755 ${RANCHER_HOME}
fi
DOCKER_HOME=/home/docker
if [ ! -d ${DOCKER_HOME} ]; then
mkdir -p ${DOCKER_HOME}
chown docker:docker ${DOCKER_HOME}
chmod 2755 ${DOCKER_HOME}
fi
echo 1000000000 > /proc/sys/fs/file-max
for i in $(</proc/cmdline); do
case $i in
rancher.password=*)
PASSWORD=$(echo $i | sed 's/rancher.password=//')
;;
esac
done
if [ -n "$PASSWORD" ]; then
echo "rancher:$PASSWORD" | chpasswd
fi
setup_ssh
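# each line of respawn.conf is a command the respawn supervisor keeps restarting (see the exec at the end of this script)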
cat > /etc/respawn.conf << EOF
/sbin/getty 115200 tty6
/sbin/getty 115200 tty5
/sbin/getty 115200 tty4
/sbin/getty 115200 tty3
/sbin/getty 115200 tty2
/sbin/getty 115200 tty1
/usr/sbin/sshd -D
EOF
for i in ttyS{0..4} ttyAMA0; do
if grep -q 'console='$i /proc/cmdline; then
echo '/sbin/getty 115200' $i >> /etc/respawn.conf
fi
done
if ! grep -q '^UseDNS no' /etc/ssh/sshd_config; then
echo "UseDNS no" >> /etc/ssh/sshd_config
fi
if ! grep -q '^PermitRootLogin no' /etc/ssh/sshd_config; then
echo "PermitRootLogin no" >> /etc/ssh/sshd_config
fi
if ! grep -q '^ServerKeyBits 2048' /etc/ssh/sshd_config; then
echo "ServerKeyBits 2048" >> /etc/ssh/sshd_config
fi
if ! grep -q '^AllowGroups docker' /etc/ssh/sshd_config; then
echo "AllowGroups docker" >> /etc/ssh/sshd_config
fi
VERSION="$(ros os version)"
ID_TYPE="busybox"
if [ -e /etc/os-release ] && grep -q 'ID_LIKE=' /etc/os-release; then
ID_TYPE=$(grep 'ID_LIKE=' /etc/os-release | cut -d'=' -f2)
fi
cat > /etc/os-release << EOF
NAME="RancherOS"
VERSION=$VERSION
ID=rancheros
ID_LIKE=$ID_TYPE
VERSION_ID=$VERSION
PRETTY_NAME="RancherOS"
HOME_URL=
SUPPORT_URL=
BUG_REPORT_URL=
BUILD_ID=
EOF
echo 'RancherOS \n \l' > /etc/issue
echo $(/sbin/ifconfig | grep -B1 "inet addr" |awk '{ if ( $1 == "inet" ) { print $2 } else if ( $2 == "Link" ) { printf "%s:" ,$1 } }' |awk -F: '{ print $1 ": " $3}') >> /etc/issue
cloud-init -execute
if [ -x /var/lib/rancher/conf/cloud-config-script ]; then
echo "Running /var/lib/rancher/conf/cloud-config-script"
/var/lib/rancher/conf/cloud-config-script || true
fi
if [ -x /opt/rancher/bin/start.sh ]; then
echo Executing custom script
/opt/rancher/bin/start.sh || true
fi
touch /run/console-done
if [ -x /etc/rc.local ]; then
echo Executing rc.local
/etc/rc.local || true
fi
export TERM=linux
exec respawn -f /etc/respawn.conf

images/02-console/docker-init Executable file
View File

@ -0,0 +1,22 @@
#!/bin/bash
set -e
if [ -e /var/lib/rancher/conf/docker ]; then
source /var/lib/rancher/conf/docker
fi
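# /var/lib/rancher/conf/docker may define the DOCKER_OPTS used below, e.g. DOCKER_OPTS="--storage-driver=overlay" (illustrative)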
while [ ! -e /run/console-done ]; do
sleep 1
done
DOCKER_BIN=$(which docker) || DOCKER_BIN=/usr/bin/docker
for i in /opt/bin /usr/local/bin; do
if [ -x ${i}/docker ]; then
PATH=${i}:$PATH
DOCKER_BIN=${i}/docker
break
fi
done
exec /usr/bin/dockerlaunch $DOCKER_BIN "$@" $DOCKER_OPTS >>/var/log/docker.log 2>&1

images/02-console/prebuild.sh Executable file
View File

@ -0,0 +1,15 @@
#!/bin/bash
set -e
VERSION=${VERSION:?"VERSION not set"}
cd $(dirname $0)
rm -rf ./build
mkdir -p ./build
cat > ./build/lsb-release << EOF
DISTRIB_ID=${DISTRIB_ID}
DISTRIB_RELEASE=${VERSION}
DISTRIB_DESCRIPTION="${DISTRIB_ID} ${VERSION}"
EOF

View File

@ -0,0 +1 @@
export PS1='[\u@\h \W]\$ '

View File

@ -0,0 +1,9 @@
#!/bin/bash
set -e
cat <<EOF
As of RancherOS v0.4.0 'rancheros-install' is obsolete.
Please use 'ros install' instead.
EOF
exit 1

View File

@ -0,0 +1,20 @@
#!/bin/bash
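# usage (inferred from the positional args below): update-ssh-keys <username> <public-key-line>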
USERNAME=$1
HOME_DIR=$(grep "^$USERNAME:" /etc/passwd | cut -f6 -d:)
if [ ! -d $HOME_DIR/.ssh ]; then
mkdir -p $HOME_DIR/.ssh
chmod 0700 $HOME_DIR/.ssh
fi
if [ ! -e $HOME_DIR/.ssh/authorized_keys ]; then
touch $HOME_DIR/.ssh/authorized_keys
chmod 0600 $HOME_DIR/.ssh/authorized_keys
fi
if ! grep -q "$2" $HOME_DIR/.ssh/authorized_keys; then
echo "$2" >> $HOME_DIR/.ssh/authorized_keys
fi
chown -R $USERNAME $HOME_DIR/.ssh

View File

@ -0,0 +1,2 @@
FROM rancher/os-base
CMD ["/usr/bin/user-docker"]

images/02-ntp/Dockerfile Normal file
View File

@ -0,0 +1,3 @@
FROM rancher/os-base
COPY ntp.sh /
CMD ["/ntp.sh"]

images/02-ntp/ntp.sh Executable file
View File

@ -0,0 +1,3 @@
#!/bin/bash
exec ntpd --nofork -g
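# --nofork keeps ntpd in the foreground as the container's main process; -g allows one large initial clock step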

View File

@ -0,0 +1,7 @@
FROM rancher/os-base
RUN ln -sf /usr/bin/docker.dist /usr/bin/docker && \
ln -sf /usr/bin/docker-containerd.dist /usr/bin/docker-containerd && \
ln -sf /usr/bin/docker-containerd-shim.dist /usr/bin/docker-containerd-shim && \
ln -sf /usr/bin/docker-runc.dist /usr/bin/docker-runc
COPY preload.sh /
CMD ["/preload.sh"]

images/02-preload/preload.sh Executable file
View File

@ -0,0 +1,39 @@
#!/bin/bash
set -e
BASE=${1:-${PRELOAD_DIR}}
BASE=${BASE:-/mnt/preload}
should_load() {
file=${1}
if [[ ${file} =~ \.done$ ]]; then echo false
elif [ -f ${file} ]; then
if [[ ${file} -nt ${file}.done ]]; then echo true
else echo false
fi
else echo false
fi
}
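# e.g. a new /mnt/preload/images.tar.gz is loaded once; afterwards images.tar.gz.done is newer, so it is skipped until the tar changes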
if [ -d ${BASE} ]; then
echo Preloading docker images from ${BASE}...
for file in $(ls ${BASE}); do
path=${BASE}/${file}
loading=$(should_load ${path})
if [ ${loading} == "true" ]; then
CAT="cat ${path}"
if [[ ${file} =~ \.t?gz$ ]]; then CAT="${CAT} | gunzip"; fi
if [[ ${file} =~ \.t?xz$ ]]; then CAT="${CAT} | unxz"; fi
CAT="${CAT} | docker load"
echo loading from ${path}
eval ${CAT} || :
touch ${path}.done || :
fi
done
echo Done.
else
echo Cannot preload images from ${BASE}: not a directory or does not exist.
fi

View File

@ -0,0 +1,2 @@
FROM rancher/os-base
CMD ["echo"]

View File

@ -0,0 +1,3 @@
FROM rancher/os-base
COPY state.sh /usr/sbin/
CMD ["/usr/sbin/state.sh"]

images/02-statescript/state.sh Executable file
View File

@ -0,0 +1,12 @@
#!/bin/bash
set -x
if [ "$(ros config get rancher.state.mdadm_scan)" = "true" ]; then
mdadm --assemble --scan
fi
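# setting rancher.state.mdadm_scan: true in cloud-config assembles RAID arrays here, so RAID-backed state devices exist when RancherOS looks for them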
ros config get rancher.state.script > config.sh
if [ -s config.sh ]; then
chmod +x config.sh
exec ./config.sh
fi

View File

@ -0,0 +1,4 @@
FROM rancher/os-base
COPY syslog.sh /
RUN sed -i 1,10d /etc/rsyslog.conf
CMD ["/syslog.sh"]

images/02-syslog/syslog.sh Executable file
View File

@ -0,0 +1,5 @@
#!/bin/bash
set -x -e
exec rsyslogd -n

View File

@ -0,0 +1,3 @@
FROM rancher/os-base
COPY udev.sh /
CMD ["/udev.sh"]

images/02-udev/udev.sh Executable file
View File

@ -0,0 +1,18 @@
#!/bin/bash
if [ "$DAEMON" = true ]; then
exec udevd
fi
udevd --daemon
udevadm trigger --action=add
udevadm settle
if [ "$BOOTSTRAP" = true ]; then
# This was needed to get USB devices to fully register
# There is probably a better way to do this
killall udevd
udevd --daemon
udevadm trigger --action=add
udevadm settle
fi

View File

@ -7,7 +7,6 @@ import (
"strings"
log "github.com/Sirupsen/logrus"
"github.com/docker/libcompose/project"
"github.com/rancher/docker-from-scratch"
"github.com/rancher/os/compose"
"github.com/rancher/os/config"
@ -21,7 +20,7 @@ func autoformat(cfg *config.CloudConfig) (*config.CloudConfig, error) {
AUTOFORMAT := "AUTOFORMAT=" + strings.Join(cfg.Rancher.State.Autoformat, " ")
FORMATZERO := "FORMATZERO=" + fmt.Sprint(cfg.Rancher.State.FormatZero)
t := *cfg
t.Rancher.Autoformat["autoformat"].Environment = project.NewMaporEqualSlice([]string{AUTOFORMAT, FORMATZERO})
t.Rancher.Autoformat["autoformat"].Environment = []string{AUTOFORMAT, FORMATZERO}
log.Info("Running Autoformat services")
_, err := compose.RunServiceSet("autoformat", &t, t.Rancher.Autoformat)
return &t, err
@ -34,13 +33,12 @@ func runBootstrapContainers(cfg *config.CloudConfig) (*config.CloudConfig, error
}
func startDocker(cfg *config.CloudConfig) (chan interface{}, error) {
launchConfig, args := getLaunchConfig(cfg, &cfg.Rancher.BootstrapDocker)
launchConfig.Fork = true
launchConfig.LogFile = ""
launchConfig.NoLog = true
cmd, err := dockerlaunch.LaunchDocker(launchConfig, config.DOCKER_BIN, args...)
cmd, err := dockerlaunch.LaunchDocker(launchConfig, config.SYSTEM_DOCKER_BIN, args...)
if err != nil {
return nil, err
}

View File

@ -118,10 +118,7 @@ func mountState(cfg *config.CloudConfig) error {
func mountOem(cfg *config.CloudConfig) (*config.CloudConfig, error) {
if cfg == nil {
var err error
if cfg, err = config.LoadConfig(); err != nil {
return cfg, err
}
cfg = config.LoadConfig()
}
if err := mountConfigured("oem", cfg.Rancher.State.OemDev, cfg.Rancher.State.OemFsType, config.OEM); err != nil {
log.Debugf("Not mounting OEM: %v", err)
@ -165,8 +162,8 @@ func getLaunchConfig(cfg *config.CloudConfig, dockerCfg *config.DockerConfig) (*
args := dockerlaunch.ParseConfig(&launchConfig, append(dockerCfg.Args, dockerCfg.ExtraArgs...)...)
launchConfig.DnsConfig.Nameservers = cfg.Rancher.DefaultNetwork.Dns.Nameservers
launchConfig.DnsConfig.Search = cfg.Rancher.DefaultNetwork.Dns.Search
launchConfig.DnsConfig.Nameservers = cfg.Rancher.Defaults.Network.Dns.Nameservers
launchConfig.DnsConfig.Search = cfg.Rancher.Defaults.Network.Dns.Search
launchConfig.Environment = dockerCfg.Environment
launchConfig.EmulateSystemd = true
@ -199,13 +196,10 @@ func RunInit() error {
},
mountOem,
func(_ *config.CloudConfig) (*config.CloudConfig, error) {
cfg, err := config.LoadConfig()
if err != nil {
return cfg, err
}
cfg := config.LoadConfig()
if cfg.Rancher.Debug {
cfgString, err := config.Dump(false, true)
cfgString, err := config.Export(false, true)
if err != nil {
log.WithFields(log.Fields{"err": err}).Error("Error serializing config")
} else {
@ -218,7 +212,7 @@ func RunInit() error {
loadModules,
tryMountAndBootstrap,
func(_ *config.CloudConfig) (*config.CloudConfig, error) {
return config.LoadConfig()
return config.LoadConfig(), nil
},
loadModules,
func(c *config.CloudConfig) (*config.CloudConfig, error) {
@ -241,7 +235,7 @@ func RunInit() error {
launchConfig.Fork = !cfg.Rancher.SystemDocker.Exec
log.Info("Launching System Docker")
_, err = dockerlaunch.LaunchDocker(launchConfig, config.DOCKER_BIN, args...)
_, err = dockerlaunch.LaunchDocker(launchConfig, config.SYSTEM_DOCKER_BIN, args...)
if err != nil {
return err
}

View File

@ -5,8 +5,10 @@ import (
"path"
"syscall"
"golang.org/x/net/context"
log "github.com/Sirupsen/logrus"
dockerClient "github.com/fsouza/go-dockerclient"
"github.com/docker/libcompose/project/options"
"github.com/rancher/os/compose"
"github.com/rancher/os/config"
"github.com/rancher/os/docker"
@ -76,10 +78,7 @@ func loadImages(cfg *config.CloudConfig) (*config.CloudConfig, error) {
defer input.Close()
log.Infof("Loading images from %s", inputFileName)
err = client.LoadImage(dockerClient.LoadImageOptions{
InputStream: input,
})
if err != nil {
if _, err = client.ImageLoad(context.Background(), input, true); err != nil {
return cfg, err
}
@ -90,19 +89,21 @@ func loadImages(cfg *config.CloudConfig) (*config.CloudConfig, error) {
}
func SysInit() error {
cfg, err := config.LoadConfig()
if err != nil {
return err
}
cfg := config.LoadConfig()
_, err = config.ChainCfgFuncs(cfg,
_, err := config.ChainCfgFuncs(cfg,
loadImages,
func(cfg *config.CloudConfig) (*config.CloudConfig, error) {
p, err := compose.GetProject(cfg, false)
p, err := compose.GetProject(cfg, false, true)
if err != nil {
return cfg, err
}
return cfg, p.Up()
return cfg, p.Up(context.Background(), options.Up{
Create: options.Create{
NoRecreate: true,
},
Log: cfg.Rancher.Log,
})
},
func(cfg *config.CloudConfig) (*config.CloudConfig, error) {
syscall.Sync()

main.go
View File

@ -1,10 +1,7 @@
package main
import (
"os"
"strings"
log "github.com/Sirupsen/logrus"
"github.com/docker/docker/docker"
"github.com/docker/docker/pkg/reexec"
"github.com/rancher/docker-from-scratch"
"github.com/rancher/os/cmd/cloudinit"
@ -12,54 +9,38 @@ import (
"github.com/rancher/os/cmd/network"
"github.com/rancher/os/cmd/power"
"github.com/rancher/os/cmd/respawn"
"github.com/rancher/os/cmd/switchconsole"
"github.com/rancher/os/cmd/sysinit"
"github.com/rancher/os/cmd/systemdocker"
"github.com/rancher/os/cmd/userdocker"
"github.com/rancher/os/cmd/wait"
"github.com/rancher/os/cmd/waitfornetwork"
"github.com/rancher/os/config"
osInit "github.com/rancher/os/init"
)
func registerCmd(cmd string, mainFunc func()) {
log.Debugf("Registering main %s", cmd)
reexec.Register(cmd, mainFunc)
parts := strings.Split(cmd, "/")
if len(parts) == 0 {
return
}
last := parts[len(parts)-1]
log.Debugf("Registering main %s", last)
reexec.Register(last, mainFunc)
log.Debugf("Registering main %s", "./"+last)
reexec.Register("./"+last, mainFunc)
var entrypoints = map[string]func(){
"cloud-init": cloudinit.Main,
"docker": docker.Main,
"dockerlaunch": dockerlaunch.Main,
"halt": power.Halt,
"init": osInit.MainInit,
"netconf": network.Main,
"poweroff": power.PowerOff,
"reboot": power.Reboot,
"respawn": respawn.Main,
"ros-sysinit": sysinit.Main,
"shutdown": power.Main,
"switch-console": switchconsole.Main,
"system-docker": systemdocker.Main,
"user-docker": userdocker.Main,
"wait-for-docker": wait.Main,
}
func main() {
registerCmd("/init", osInit.MainInit)
registerCmd(config.SYSINIT_BIN, sysinit.Main)
registerCmd("/usr/bin/dockerlaunch", dockerlaunch.Main)
registerCmd("/usr/bin/user-docker", userdocker.Main)
registerCmd("/usr/bin/system-docker", systemdocker.Main)
registerCmd("/sbin/poweroff", power.PowerOff)
registerCmd("/sbin/reboot", power.Reboot)
registerCmd("/sbin/halt", power.Halt)
registerCmd("/sbin/shutdown", power.Main)
registerCmd("/usr/bin/respawn", respawn.Main)
registerCmd("/usr/bin/ros", control.Main)
registerCmd("/usr/bin/cloud-init", cloudinit.Main)
registerCmd("/usr/sbin/netconf", network.Main)
registerCmd("/usr/sbin/wait-for-network", waitfornetwork.Main)
registerCmd("/usr/sbin/wait-for-docker", wait.Main)
for name, f := range entrypoints {
reexec.Register(name, f)
}
if !reexec.Init() {
reexec.Register(os.Args[0], control.Main)
if !reexec.Init() {
log.Fatalf("Failed to find an entry point for %s", os.Args[0])
}
control.Main()
}
}

View File

@ -1,8 +1,12 @@
default_hostname: {{.HOSTNAME_DEFAULT}}
rancher:
defaults:
hostname: {{.HOSTNAME_DEFAULT}}
network:
dns:
nameservers: [8.8.8.8, 8.8.4.4]
bootstrap:
state-script:
image: {{.OS_IMAGES_ROOT}}/os-statescript:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-statescript:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.detach: "false"
io.rancher.os.scope: system
@ -18,7 +22,7 @@ rancher:
- /usr/bin/ros:/usr/bin/ros:ro
- /usr/share/ros:/usr/share/ros:ro
udev-bootstrap:
image: {{.OS_IMAGES_ROOT}}/os-udev:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-udev:{{.VERSION}}{{.SUFFIX}}
environment:
- BOOTSTRAP=true
labels:
@ -34,7 +38,7 @@ rancher:
- /lib/firmware:/lib/firmware
autoformat:
autoformat:
image: {{.OS_IMAGES_ROOT}}/os-autoformat:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-autoformat:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.detach: "false"
io.rancher.os.scope: system
@ -42,7 +46,7 @@ rancher:
net: none
privileged: true
udev-autoformat:
image: {{.OS_IMAGES_ROOT}}/os-udev:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-udev:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.detach: "false"
io.rancher.os.scope: system
@ -58,15 +62,13 @@ rancher:
bootstrap_docker:
args: [daemon, -s, overlay, -b, none, --restart=false, -g, /var/lib/system-docker,
-G, root, -H, 'unix:///var/run/system-docker.sock', --userland-proxy=false]
console: default
cloud_init:
datasources:
- configdrive:/media/config-2
default_network:
dns:
nameservers: [8.8.8.8, 8.8.4.4]
repositories:
core:
url: {{.OS_SERVICES_REPO}}/{{.VERSION}}{{.SUFFIX}}
url: {{.OS_SERVICES_REPO}}/{{.REPO_VERSION}}{{.SUFFIX}}
state:
fstype: auto
dev: LABEL=RANCHER_STATE
@ -75,7 +77,7 @@ rancher:
services:
{{if eq "amd64" .ARCH -}}
acpid:
image: {{.OS_IMAGES_ROOT}}/os-acpid:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-acpid:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.scope: system
net: host
@ -86,7 +88,7 @@ rancher:
- system-volumes
{{end -}}
all-volumes:
image: {{.OS_IMAGES_ROOT}}/os-state:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-state:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.createonly: "true"
io.rancher.os.scope: system
@ -100,7 +102,7 @@ rancher:
- user-volumes
- system-volumes
cloud-init:
image: {{.OS_IMAGES_ROOT}}/os-cloudinit:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-cloudinit:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.detach: "false"
io.rancher.os.reloadconfig: "true"
@ -113,7 +115,7 @@ rancher:
- command-volumes
- system-volumes
cloud-init-pre:
image: {{.OS_IMAGES_ROOT}}/os-cloudinit:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-cloudinit:{{.VERSION}}{{.SUFFIX}}
environment:
- CLOUD_INIT_NETWORK=false
labels:
@ -128,7 +130,7 @@ rancher:
- command-volumes
- system-volumes
command-volumes:
image: {{.OS_IMAGES_ROOT}}/os-state:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-state:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.createonly: "true"
io.rancher.os.scope: system
@ -137,6 +139,9 @@ rancher:
privileged: true
read_only: true
volumes:
- /usr/bin/docker-containerd:/usr/bin/docker-containerd.dist:ro
- /usr/bin/docker-containerd-shim:/usr/bin/docker-containerd-shim.dist:ro
- /usr/bin/docker-runc:/usr/bin/docker-runc.dist:ro
- /usr/bin/docker:/usr/bin/docker.dist:ro
- /usr/bin/ros:/usr/bin/dockerlaunch:ro
- /usr/bin/ros:/usr/bin/user-docker:ro
@ -149,14 +154,15 @@ rancher:
- /usr/bin/ros:/usr/bin/ros:ro
- /usr/bin/ros:/usr/bin/cloud-init:ro
- /usr/bin/ros:/usr/sbin/netconf:ro
- /usr/bin/ros:/usr/sbin/wait-for-network:ro
- /usr/bin/ros:/usr/sbin/wait-for-docker:ro
- /usr/bin/ros:/usr/bin/switch-console:ro
console:
image: {{.OS_IMAGES_ROOT}}/os-console:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-console:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.scope: system
io.rancher.os.after: wait-for-network
io.rancher.os.after: network
io.docker.compose.rebuild: always
io.rancher.os.console: default
net: host
uts: host
pid: host
@ -168,7 +174,7 @@ rancher:
volumes:
- /usr/bin/iptables:/sbin/iptables:ro
container-data-volumes:
image: {{.OS_IMAGES_ROOT}}/os-state:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-state:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.createonly: "true"
io.rancher.os.scope: system
@ -180,7 +186,8 @@ rancher:
- /var/lib/docker:/var/lib/docker
- /var/lib/rkt:/var/lib/rkt
network-pre:
image: {{.OS_IMAGES_ROOT}}/os-network:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-base:{{.VERSION}}{{.SUFFIX}}
command: netconf
labels:
io.rancher.os.scope: system
io.rancher.os.after: cloud-init-pre
@ -192,12 +199,11 @@ rancher:
- command-volumes
- system-volumes
network:
image: {{.OS_IMAGES_ROOT}}/os-network:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-base:{{.VERSION}}{{.SUFFIX}}
command: netconf --stop-network-pre
labels:
io.rancher.os.scope: system
io.rancher.os.after: cloud-init
environment:
- DAEMON=true
net: host
uts: host
pid: host
@ -205,41 +211,17 @@ rancher:
volumes_from:
- command-volumes
- system-volumes
wait-for-network-pre:
image: {{.OS_IMAGES_ROOT}}/os-network:{{.VERSION}}{{.SUFFIX}}
command: wait-for-network
labels:
io.rancher.os.detach: "false"
io.rancher.os.scope: system
io.rancher.os.after: network-pre
pid: host
privileged: true
volumes_from:
- command-volumes
- system-volumes
wait-for-network:
image: {{.OS_IMAGES_ROOT}}/os-network:{{.VERSION}}{{.SUFFIX}}
command: wait-for-network
labels:
io.rancher.os.detach: "false"
io.rancher.os.scope: system
io.rancher.os.after: network
pid: host
privileged: true
volumes_from:
- command-volumes
- system-volumes
ntp:
image: {{.OS_IMAGES_ROOT}}/os-ntp:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-ntp:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.scope: system
io.rancher.os.after: wait-for-network-pre
io.rancher.os.after: network-pre
net: host
uts: host
privileged: true
restart: always
preload-system-images:
image: {{.OS_IMAGES_ROOT}}/os-preload:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-preload:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.detach: "false"
io.rancher.os.scope: system
@ -251,7 +233,7 @@ rancher:
- command-volumes
- system-volumes
preload-user-images:
image: {{.OS_IMAGES_ROOT}}/os-preload:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-preload:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.detach: "false"
io.rancher.os.scope: system
@ -264,7 +246,7 @@ rancher:
- command-volumes
- system-volumes
syslog:
image: {{.OS_IMAGES_ROOT}}/os-syslog:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-syslog:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.scope: system
log_driver: json-file
@ -275,7 +257,7 @@ rancher:
volumes_from:
- system-volumes
system-volumes:
image: {{.OS_IMAGES_ROOT}}/os-state:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-state:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.createonly: "true"
io.rancher.os.scope: system
@ -295,12 +277,13 @@ rancher:
- /lib/modules:/lib/modules
- /run:/run
- /usr/share/ros:/usr/share/ros
- /var/lib/rancher/cache:/var/lib/rancher/cache
- /var/lib/rancher/conf:/var/lib/rancher/conf
- /var/lib/rancher:/var/lib/rancher
- /var/log:/var/log
- /var/run:/var/run
udev-cold:
image: {{.OS_IMAGES_ROOT}}/os-udev:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-udev:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.scope: system
io.rancher.os.before: udev
@ -310,7 +293,7 @@ rancher:
volumes_from:
- system-volumes
udev:
image: {{.OS_IMAGES_ROOT}}/os-udev:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-udev:{{.VERSION}}{{.SUFFIX}}
environment:
- DAEMON=true
labels:
@ -323,7 +306,7 @@ rancher:
volumes_from:
- system-volumes
user-volumes:
image: {{.OS_IMAGES_ROOT}}/os-state:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-state:{{.VERSION}}{{.SUFFIX}}
labels:
io.rancher.os.createonly: "true"
io.rancher.os.scope: system
@ -335,7 +318,7 @@ rancher:
- /home:/home
- /opt:/opt
docker:
image: {{.OS_IMAGES_ROOT}}/os-docker:{{.VERSION}}{{.SUFFIX}}
image: {{.OS_REPO}}/os-docker:{{.VERSION}}{{.SUFFIX}}
environment:
- HTTP_PROXY
- HTTPS_PROXY
@ -354,12 +337,13 @@ rancher:
volumes:
- /sys/fs/cgroup:/host/sys/fs/cgroup
system_docker:
exec: true
args: [daemon, --log-opt, max-size=25m, --log-opt, max-file=2, -s, overlay, -b, docker-sys,
--fixed-cidr, 172.18.42.1/16, --restart=false, -g, /var/lib/system-docker, -G, root,
-H, 'unix:///var/run/system-docker.sock', --userland-proxy=false]
upgrade:
url: {{.OS_RELEASES_YML}}
image: {{.OS_IMAGES_ROOT}}/os
image: {{.OS_REPO}}/os
docker:
tls_args: [--tlsverify, --tlscacert=/etc/docker/tls/ca.pem, --tlscert=/etc/docker/tls/server-cert.pem, --tlskey=/etc/docker/tls/server-key.pem,
'-H=0.0.0.0:2376']

scripts/build Executable file
View File

@ -0,0 +1,5 @@
#!/bin/bash
set -e
$(dirname $0)/build-target
$(dirname $0)/build-host

View File

@ -1,2 +0,0 @@
BUILD=$(pwd)/build
DIST=$(pwd)/dist

scripts/build-host Executable file
View File

@ -0,0 +1,12 @@
#!/bin/bash
cd $(dirname $0)/..
export OUTPUT=bin/host_ros
if [[ -e bin/ros && "$HOST_ARCH" = "$ARCH" ]]; then
echo Creating $OUTPUT
cp bin/ros $OUTPUT
else
GOARCH=${HOST_ARCH} TOOLCHAIN= ./scripts/build-target
fi

scripts/build-images Executable file
View File

@ -0,0 +1,25 @@
#!/bin/bash
set -e
export ARCH=${ARCH:-"amd64"}
BASE=images
source $(dirname $0)/version
cd $(dirname $0)/..
for i in $BASE/[0-9]*; do
name="os-$(echo ${i} | cut -f2 -d-)"
tag="${OS_REPO}/${name}:${VERSION}${SUFFIX}"
echo Building ${tag}
if [ -x ${i}/prebuild.sh ]; then
${i}/prebuild.sh
fi
if dapper -d --build -f ${i}/Dockerfile -- -t rancher/${name} ${i}; then
docker tag rancher/${name} ${tag}
elif [ "$?" != "42" ]; then
exit 1
else
echo "WARN: Skipping ${tag}"
fi
done

scripts/build-target Executable file
View File

@ -0,0 +1,20 @@
#!/bin/bash
set -e
ros="$1"
source $(dirname $0)/version
cd $(dirname $0)/..
if [ "${!TOOLCHAIN}" != "" ]; then
export CC=/usr/bin/${!TOOLCHAIN}-gcc
export CGO_ENABLED=1
fi
OUTPUT=${OUTPUT:-bin/ros}
echo Building $OUTPUT
CONST="-X github.com/docker/docker/dockerversion.GitCommit=${COMMIT} -X github.com/docker/docker/dockerversion.Version=${DOCKER_PATCH_VERSION} -X github.com/docker/docker/dockerversion.BuildTime=$(date -u +'%Y-%m-%dT%H:%M:%SZ') -X github.com/docker/docker/dockerversion.IAmStatic=true -X github.com/rancher/os/config.VERSION=${VERSION}"
go build -tags "selinux cgo daemon netgo" -installsuffix netgo -ldflags "$CONST -linkmode external -extldflags -static" -o ${OUTPUT}
strip --strip-all ${OUTPUT}

View File

@ -1,38 +0,0 @@
#!/bin/bash
set -x -e
cd $(dirname $0)/..
: RANCHER_ISO=${RANCHER_ISO:="./dist/artifacts/rancheros.iso"}
if [[ -z $RANCHER_ISO ]]; then
echo "Need an ISO..." 1>&2
exit 1
fi
if [ ! -e ${RANCHER_ISO} ]; then
echo "Could not find ISO ${RANCHER_ISO}..." 1>&2
echo "have you run build.sh yet?" 1>&2
exit 1
fi
GITSHA=$(git rev-parse --short HEAD)
VM="RancherOS-${GITSHA}"
sudo chown -R `whoami` ./dist
VBoxManage createhd --format vmdk --filename ./dist/artifacts/$VM.vmdk --size 40000
VBoxManage createvm --name $VM --ostype "Linux_64" --register
VBoxManage storagectl $VM --name "SATA" --add sata --portcount 2
VBoxManage storageattach $VM --storagectl "SATA" --port 0 --type hdd --medium ./dist/artifacts/$VM.vmdk
VBoxManage storageattach $VM --storagectl "SATA" --port 1 --type dvddrive --medium ${RANCHER_ISO}
VBoxManage modifyvm $VM --memory 1024 --acpi on --boot1 disk --boot2 dvd
VBoxManage modifyvm $VM --rtcuseutc on
VBoxManage modifyvm $VM --usb off
VBoxManage modifyvm $VM --audio none
VBoxManage modifyvm $VM --nic1 nat
VBoxManage modifyvm $VM --nictype1 virtio
#VBoxManage startvm $VM

View File

@ -1,7 +1,11 @@
#!/bin/sh
set -ex
#!/bin/bash
set -e
cd $(dirname $0)/..
. ./scripts/dapper-common
cd $(dirname $0)
dapper -d -O make HOST_ARCH=${HOST_ARCH} ARCH=${ARCH} DEV_BUILD=1 test
./build
./test
#./validate
./prepare
./package
./integration-test

scripts/clean Executable file
View File

@ -0,0 +1,4 @@
#!/bin/bash
cd $(dirname $0)/..
rm -rf build dist bin images/*/build

View File

@ -1,20 +0,0 @@
#!/bin/sh
set -ex
HOST_ARCH=${HOST_ARCH:-$(docker version | grep 'OS/Arch:' | tail -n+2 | awk '{print $2}' | cut -f2 -d'/')}
HOST_ARCH=${HOST_ARCH:?"Failed to guess HOST_ARCH"}
ARCH=${ARCH:-"$HOST_ARCH"}
export HOST_ARCH ARCH
cd $(dirname $0)/..
[ -f "./.docker-env.${HOST_ARCH}" ] && . ./.docker-env.${HOST_ARCH} || echo "WARNING: missing .docker-env.${HOST_ARCH} (to use an ${HOST_ARCH} docker host)"
. ./build.conf.${HOST_ARCH}
export HOST_DOCKER_BINARY_URL=${DOCKER_BINARY_URL}
docker inspect $DAPPER_BASE >/dev/null 2>&1 || docker pull $DAPPER_BASE
docker tag $DAPPER_BASE rancher/os-dapper-base
set -a
. ./build.conf.${ARCH}
set +a

Some files were not shown because too many files have changed in this diff.