Remove projects/kubernetes, moved to https://github.com/linuxkit/kubernetes

Signed-off-by: Ian Campbell <ijc@docker.com>
Ian Campbell 2017-11-17 10:42:18 +00:00
parent b27c196719
commit 6539b78a72
27 changed files with 0 additions and 775 deletions

@@ -1,5 +0,0 @@
image-cache/common/*.tar
image-cache/common/Dockerfile
image-cache/control-plane/*.tar
image-cache/control-plane/Dockerfile
kube-weave.yaml

@@ -1,43 +0,0 @@
KUBE_RUNTIME ?= docker
KUBE_NETWORK ?= weave
KUBE_NETWORK_WEAVE ?= v2.0.5

ifeq ($(shell uname -s),Darwin)
KUBE_FORMATS ?= iso-efi
else
KUBE_FORMATS ?= iso-bios
endif

KUBE_FORMAT_ARGS := $(patsubst %,-format %,$(KUBE_FORMATS))

all: build-container-images build-vm-images

build-container-images:
	linuxkit pkg build kubelet

build-cache-images:
	$(MAKE) -C image-cache build

push-container-images:
	linuxkit pkg push kubelet
	$(MAKE) -C image-cache push

build-vm-images: kube-master.iso kube-node.iso

kube-master.iso: kube.yml $(KUBE_RUNTIME).yml $(KUBE_RUNTIME)-master.yml $(KUBE_NETWORK).yml
	moby build -name kube-master $(KUBE_FORMAT_ARGS) $^

kube-node.iso: kube.yml $(KUBE_RUNTIME).yml $(KUBE_NETWORK).yml
	moby build -name kube-node $(KUBE_FORMAT_ARGS) $^

weave.yml: kube-weave.yaml

kube-weave.yaml:
	curl -L -o $@ https://cloud.weave.works/k8s/v1.8/net?v=$(KUBE_NETWORK_WEAVE)

clean:
	rm -f -r \
		kube-*-kernel kube-*-cmdline kube-*-state kube-*-initrd.img *.iso \
		kube-weave.yaml
	$(MAKE) -C image-cache clean
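
As a usage sketch, the variables at the top of this Makefile can be overridden on the `make` command line, e.g. (illustrative invocation; assumes `moby` and `linuxkit` are installed):

```
# build both ISOs with cri-containerd as the runtime, in BIOS and EFI formats
make build-vm-images KUBE_RUNTIME=cri-containerd KUBE_FORMATS="iso-bios iso-efi"
```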

@@ -1,105 +0,0 @@
# Kubernetes and LinuxKit

This project aims to demonstrate how one can create minimal and immutable Kubernetes OS images with LinuxKit.

Make sure to `cd projects/kubernetes` first.

Build OS images:
```
make build-vm-images
```

By default this will build images which use Docker Engine as the container runtime. To use cri-containerd instead:
```
make build-vm-images KUBE_RUNTIME=cri-containerd
```

Boot the Kubernetes master OS image using `hyperkit` on macOS or `qemu` on Linux:
```
./boot.sh
```

or, to automatically initialise the cluster upon boot with no additional options:
```
KUBE_MASTER_AUTOINIT="" ./boot.sh
```

Get the IP address of the master:
```
ip addr show dev eth0
```

Log in to the kubelet container:
```
./ssh_into_kubelet.sh <master-ip>
```

Manually initialise the master with `kubeadm` if it was booted without `KUBE_MASTER_AUTOINIT`:
```
kubeadm-init.sh
```

Once `kubeadm` exits, make sure to copy the `kubeadm join` arguments,
and try `kubectl get nodes` from within the master.

If you just want to run a single-node cluster with jobs running on the master, you can use:
```
kubectl taint nodes --all node-role.kubernetes.io/master- --kubeconfig /etc/kubernetes/admin.conf
```

To boot a node use:
```
./boot.sh <n> [<join_args> ...]
```

For example, to start 3 nodes, run the following in 3 separate shells:
```
shell1> ./boot.sh 1 --token bb38c6.117e66eabbbce07d 192.168.65.22:6443
shell2> ./boot.sh 2 --token bb38c6.117e66eabbbce07d 192.168.65.22:6443
shell3> ./boot.sh 3 --token bb38c6.117e66eabbbce07d 192.168.65.22:6443
```

## Platform-specific information

### macOS

The above instructions should work as-is.

### Linux

By default `linuxkit run` uses user-mode networking, which does not
support access from the host. To work around this you can use port
forwarding, e.g.:
```
KUBE_RUN_ARGS="-publish 2222:22" ./boot.sh
ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 2222 root@localhost
```
However, you will not be able to run worker nodes this way, since the
individual instances cannot see each other.
Enabling networking between instances unfortunately requires `root`
privileges to configure a bridge and to set up the bridge-mode
privileged helper. See http://wiki.qemu.org/Features/HelperNetworking
for details. In brief, you will need (a consolidated sketch of the
host-side steps follows this list):

- To set up and configure a bridge (including e.g. DHCP) on the
  host. (You can reuse a bridge created by e.g. `virt-manager`.)
- To make `qemu-bridge-helper` setuid root. The location differs by
  distro; it could be `/usr/lib/qemu/qemu-bridge-helper`,
  `/usr/local/libexec/qemu-bridge-helper` or elsewhere. You need to
  `chmod u+s «PATH»`.
- To list the bridge created in the first step in `/etc/qemu/bridge.conf`
  with a line like `allow br0` (if your bridge is called `br0`).
- To set `KUBE_NETWORKING=bridge,«name»`, e.g.:
  ```
  KUBE_NETWORKING="bridge,br0" ./boot.sh
  KUBE_NETWORKING="bridge,br0" ./boot.sh 1 «options»
  ```
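
A consolidated sketch of the host-side steps, assuming the helper is at `/usr/lib/qemu/qemu-bridge-helper` and the bridge is called `br0` (both are assumptions; adjust for your distro):
```
# one-time setup, as root; the bridge br0 is assumed to already exist
chmod u+s /usr/lib/qemu/qemu-bridge-helper
echo "allow br0" >> /etc/qemu/bridge.conf
```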
## Configuration

The `boot.sh` script has various configuration variables at the top
which can be overridden via the environment, e.g.:
```
KUBE_VCPUS=4 ./boot.sh
```
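
Several variables can be combined, for example (illustrative values; the full list is at the top of `boot.sh`):
```
KUBE_MASTER_MEM=2048 KUBE_MASTER_UNTAINT=y KUBE_CLEAR_STATE=1 ./boot.sh
```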

@@ -1,91 +0,0 @@
#!/bin/sh

set -e

: ${KUBE_MASTER_VCPUS:=2}
: ${KUBE_MASTER_MEM:=1024}
: ${KUBE_MASTER_DISK:=4G}
: ${KUBE_MASTER_UNTAINT:=n}

: ${KUBE_NODE_VCPUS:=2}
: ${KUBE_NODE_MEM:=4096}
: ${KUBE_NODE_DISK:=8G}

: ${KUBE_NETWORKING:=default}
: ${KUBE_RUN_ARGS:=}

: ${KUBE_EFI:=}
: ${KUBE_MAC:=}
: ${KUBE_CLEAR_STATE:=}

[ "$(uname -s)" = "Darwin" ] && KUBE_EFI=1

suffix=".iso"
[ -n "${KUBE_EFI}" ] && suffix="-efi.iso" && uefi="--uefi"

if [ $# -eq 0 ] ; then
    img="kube-master"
    # If $KUBE_MASTER_AUTOINIT is set, including if it is set to ""
    # then we configure for auto init. If it is completely unset then
    # we do not.
    if [ -n "${KUBE_MASTER_AUTOINIT+x}" ] ; then
        kubeadm_data="${kubeadm_data+$kubeadm_data, }\"init\": { \"content\": \"${KUBE_MASTER_AUTOINIT}\" }"
    fi
    if [ "${KUBE_MASTER_UNTAINT}" = "y" ] ; then
        kubeadm_data="${kubeadm_data+$kubeadm_data, }\"untaint-master\": { \"content\": \"\" }"
    fi
    state="kube-master-state"
    : ${KUBE_VCPUS:=$KUBE_MASTER_VCPUS}
    : ${KUBE_MEM:=$KUBE_MASTER_MEM}
    : ${KUBE_DISK:=$KUBE_MASTER_DISK}
elif [ $# -ge 1 ] ; then
    case $1 in
        ''|*[!0-9]*)
            echo "Node number must be a number"
            exit 1
            ;;
        0)
            echo "Node number must be greater than 0"
            exit 1
            ;;
        *) ;;
    esac
    img="kube-node"
    name="node-${1}"
    shift
    if [ $# -ge 1 ] ; then
        kubeadm_data="\"join\": { \"content\": \"${*}\" }"
    fi
    state="kube-${name}-state"
    : ${KUBE_VCPUS:=$KUBE_NODE_VCPUS}
    : ${KUBE_MEM:=$KUBE_NODE_MEM}
    : ${KUBE_DISK:=$KUBE_NODE_DISK}
else
    echo "Usage:"
    echo " - Boot master:"
    echo " ${0}"
    echo " - Boot node:"
    echo " ${0} <node> <join_args>"
    exit 1
fi

set -x

if [ -n "${KUBE_CLEAR_STATE}" ] ; then
    rm -rf "${state}"
    mkdir "${state}"
    if [ -n "${KUBE_MAC}" ] ; then
        echo -n "${KUBE_MAC}" > "${state}"/mac-addr
    fi
fi

mkdir -p "${state}"
touch $state/metadata.json
if [ -n "${kubeadm_data}" ] ; then
    echo "{ \"kubeadm\": { \"entries\": { ${kubeadm_data} } } }" > $state/metadata.json
fi

linuxkit run ${KUBE_RUN_ARGS} -networking ${KUBE_NETWORKING} -cpus ${KUBE_VCPUS} -mem ${KUBE_MEM} -state "${state}" -disk size=${KUBE_DISK} -data $state/metadata.json ${uefi} "${img}${suffix}"
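
For reference, a sketch of the `metadata.json` the script above writes for a master booted with `KUBE_MASTER_AUTOINIT=""` and `KUBE_MASTER_UNTAINT=y` (pretty-printed here; the script emits a single line). The `linuxkit/metadata` onboot container surfaces these entries under `/var/config/kubeadm/`, which is where `kubelet.sh` and `kubeadm-init.sh` look for them:

```
{
  "kubeadm": {
    "entries": {
      "init": { "content": "" },
      "untaint-master": { "content": "" }
    }
  }
}
```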

@@ -1,8 +0,0 @@
onboot:
  - name: bridge
    image: busybox:latest
    command: ["/bin/sh", "-c", "set -ex; echo '{\"cniVersion\":\"0.3.1\",\"name\":\"default\",\"plugins\":[{\"type\":\"bridge\",\"bridge\":\"cni0\",\"isDefaultGateway\":true,\"ipMasq\":false,\"hairpinMode\":true,\"ipam\":{\"type\":\"host-local\",\"subnet\":\"10.1.0.0/16\",\"gateway\":\"10.1.0.1\"},\"dns\":{\"nameservers\":[\"10.1.0.1\"]}},{\"type\":\"portmap\",\"capabilities\":{\"portMappings\":true},\"snat\":true}]}' > /var/lib/cni/etc/net.d/10-default.conflist; echo '{\"cniVersion\":\"0.2.0\",\"type\":\"loopback\"}' > /var/lib/cni/etc/net.d/99-loopback.conf"]
    runtime:
      mkdir: ["/var/lib/cni/etc/net.d"]
    binds:
      - /var/lib:/var/lib
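
The CNI configuration embedded in the `command` above, pretty-printed for readability (content unchanged; the second file written, `99-loopback.conf`, is just `{"cniVersion":"0.2.0","type":"loopback"}`):

```
{
  "cniVersion": "0.3.1",
  "name": "default",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "cni0",
      "isDefaultGateway": true,
      "ipMasq": false,
      "hairpinMode": true,
      "ipam": {
        "type": "host-local",
        "subnet": "10.1.0.0/16",
        "gateway": "10.1.0.1"
      },
      "dns": { "nameservers": ["10.1.0.1"] }
    },
    {
      "type": "portmap",
      "capabilities": { "portMappings": true },
      "snat": true
    }
  ]
}
```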

@@ -1,7 +0,0 @@
services:
  - name: cri-containerd
    image: linuxkitprojects/cri-containerd:72863deaa81a749fe8ff72bd69f863bab719aa06
files:
  - path: /etc/kubelet.sh.conf
    contents: |
      KUBELET_ARGS="--container-runtime=remote --container-runtime-endpoint=unix:///var/run/cri-containerd.sock"

@@ -1,52 +0,0 @@
FROM linuxkit/alpine:07f7d136e427dc68154cd5edbb2b9576f9ac5213 AS build
RUN \
apk add \
bash \
gcc \
git \
go \
libc-dev \
libseccomp-dev \
linux-headers \
make \
socat \
&& true
ENV GOPATH=/go PATH=$PATH:/go/bin
ENV CRI_CONTAINERD_URL https://github.com/kubernetes-incubator/cri-containerd.git
#ENV CRI_CONTAINERD_BRANCH pull/NNN/head
ENV CRI_CONTAINERD_COMMIT ac8b0979fa634703e0a8d03df03eb51774fcff3d
RUN mkdir -p $GOPATH/src/github.com/kubernetes-incubator && \
cd $GOPATH/src/github.com/kubernetes-incubator && \
git clone $CRI_CONTAINERD_URL cri-containerd
WORKDIR $GOPATH/src/github.com/kubernetes-incubator/cri-containerd
RUN set -e; \
if [ -n "$CRI_CONTAINERD_BRANCH" ] ; then \
git fetch origin "$CRI_CONTAINERD_BRANCH"; \
fi; \
git checkout $CRI_CONTAINERD_COMMIT
RUN make static-binaries BUILD_TAGS="seccomp"
RUN mkdir -p /out/etc/apk && cp -r /etc/apk/* /out/etc/apk/
# util-linux because a full ns-enter is required.
# example commands: /usr/bin/nsenter --net= -F -- <ip commandline>
# /usr/bin/nsenter --net=/var/run/netns/cni-5e8acebe-810d-c1b9-ced0-47be2f312fa8 -F -- <ip commandline>
# NB the first ("--net=") is actually not valid -- see https://github.com/kubernetes-incubator/cri-containerd/issues/245
RUN apk add --no-cache --initdb -p /out \
alpine-baselayout \
busybox \
ca-certificates \
iptables \
util-linux \
&& true
# Remove apk residuals. We have a read-only rootfs, so apk is of no use.
RUN rm -rf /out/etc/apk /out/lib/apk /out/var/cache
RUN make DESTDIR=/out install
FROM scratch
WORKDIR /
ENTRYPOINT ["cri-containerd", "-v", "2", "--alsologtostderr", "--network-bin-dir", "/var/lib/cni/opt/bin", "--network-conf-dir", "/var/lib/cni/etc/net.d"]
COPY --from=build /out /
LABEL org.mobyproject.config='{"binds": ["/etc/resolv.conf:/etc/resolv.conf", "/run:/run:rshared,rbind", "/dev:/dev", "/tmp:/tmp", "/var:/var:rshared,rbind", "/var/lib/kubeadm:/etc/kubernetes", "/var/lib/cni/etc:/etc/cni:rshared,rbind", "/var/lib/cni/opt:/opt/cni:rshared,rbind", "/run/containerd/containerd.sock:/run/containerd/containerd.sock", "/var/lib/kubelet-plugins:/usr/libexec/kubernetes/kubelet-plugins:rshared,rbind"], "mounts": [{"type": "cgroup", "options": ["rw","nosuid","noexec","nodev","relatime"]}], "capabilities": ["all"], "rootfsPropagation": "shared", "pid": "host", "runtime": {"mkdir": ["/var/lib/kubeadm", "/var/lib/cni/etc/net.d", "/var/lib/cni/opt", "/var/lib/kubelet-plugins"]}}'

@@ -1,6 +0,0 @@
org: linuxkitprojects
image: cri-containerd
network: true
disable-content-trust: true
arches:
- amd64

@@ -1,3 +0,0 @@
services:
  - name: kubernetes-docker-image-cache-control-plane
    image: linuxkitprojects/kubernetes-docker-image-cache-control-plane:02d28e234458f29277f175e68fdca028403b3ed8

@@ -1,29 +0,0 @@
services:
  - name: docker
    image: docker:17.10.0-ce-dind
    capabilities:
      - all
    pid: host
    mounts:
      - type: cgroup
        options: ["rw","nosuid","noexec","nodev","relatime"]
    binds:
      - /dev:/dev
      - /etc/resolv.conf:/etc/resolv.conf
      - /etc/os-release:/etc/os-release
      - /lib/modules:/lib/modules
      - /run:/run
      - /var:/var:rshared,rbind
      - /var/lib/kubeadm:/etc/kubernetes
      - /var/lib/cni/etc:/etc/cni:rshared,rbind
      - /var/lib/cni/opt:/opt/cni:rshared,rbind
      - /var/lib/kubelet-plugins:/usr/libexec/kubernetes/kubelet-plugins:rshared,rbind
    rootfsPropagation: shared
    command: ["/usr/local/bin/docker-init", "/usr/local/bin/dockerd"]
    runtime:
      mkdir: ["/var/lib/kubeadm", "/var/lib/cni/etc", "/var/lib/cni/opt", "/var/lib/kubelet-plugins"]
  - name: kubernetes-docker-image-cache-common
    image: linuxkitprojects/kubernetes-docker-image-cache-common:02d28e234458f29277f175e68fdca028403b3ed8
files:
  - path: /etc/kubelet.sh.conf
    contents: ""

@@ -1 +0,0 @@
dl/

@@ -1,20 +0,0 @@
FROM linuxkit/alpine:07f7d136e427dc68154cd5edbb2b9576f9ac5213 AS build
RUN mkdir -p /out/etc/apk && cp -r /etc/apk/* /out/etc/apk/
RUN apk add --no-cache --initdb -p /out \
alpine-baselayout \
busybox
# Remove apk residuals. We have a read-only rootfs, so apk is of no use.
RUN rm -rf /out/etc/apk /out/lib/apk /out/var/cache
RUN rmdir /out/var/run && ln -nfs /run /out/var/run
FROM scratch
WORKDIR /
COPY --from=build /out /
COPY --from=docker:17.06.0-ce /usr/local/bin/docker /usr/local/bin/docker
COPY *.tar /images/
ENTRYPOINT [ "/bin/sh", "-c" ]
CMD [ "for image in /images/*.tar ; do docker image load -i $image && rm -f $image ; done" ]
LABEL org.mobyproject.config='{"binds": ["/var/run:/var/run"]}'

@@ -1,41 +0,0 @@
default: push

include versions.mk

dl/%.tar:
	mkdir -p $(dir $@)
	docker image pull gcr.io/google_containers/$(shell basename $@ .tar)
	docker image save -o $@ gcr.io/google_containers/$(shell basename $@ .tar)

%-pkg:
	@set -e ; \
	builddir=$$(mktemp -d $(CACHE).XXXXXX) ; \
	trap 'rm -rf $${builddir}' EXIT ; \
	ln $(IMAGES) $${builddir} ; \
	$(MAKE) -f Makefile.pkg BUILDDIR=$${builddir} CACHE=$(CACHE) $*

.PHONY: build-common forcebuild-common push-common forcepush-common show-tag-common
build-common forcebuild-common push-common forcepush-common show-tag-common: %-common: $(patsubst %,dl/%.tar,$(COMMON_IMAGES))
	@$(MAKE) CACHE=common IMAGES="$^" $*-pkg

.PHONY: build-control-plane forcebuild-control-plane push-control-plane forcepush-control-plane show-tag-control-plane
build-control-plane forcebuild-control-plane push-control-plane forcepush-control-plane show-tag-control-plane: %-control-plane: $(patsubst %,dl/%.tar,$(CONTROL_PLANE_IMAGES))
	@$(MAKE) CACHE=control-plane IMAGES="$^" $*-pkg

.PHONY: build forcebuild push forcepush show-tags
build: build-common build-control-plane
forcebuild: forcebuild-common forcebuild-control-plane
push: push-common push-control-plane
forcepush: forcepush-common forcepush-control-plane
show-tags: show-tag-common show-tag-control-plane

.PHONY: dl
dl: $(patsubst %,dl/%.tar,$(COMMON_IMAGES) $(CONTROL_PLANE_IMAGES))

.PHONY: clean
clean:
	rm -rf dl

.PHONY: refresh
refresh:
	./mkversions > versions.mk
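
A sketch of the intended workflow using the targets above (illustrative; assumes `docker` and `linuxkit` are on `PATH`, run from `projects/kubernetes`):

```
# refresh the pinned image digests, then rebuild and push both cache packages
make -C image-cache refresh
make -C image-cache build    # kubernetes-docker-image-cache-{common,control-plane}
make -C image-cache push
```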

@@ -1,22 +0,0 @@
$(BUILDDIR)/build.yml: build.yml.in
	@sed -e 's/@@CACHE@@/$(CACHE)/g' < $< > $@

$(BUILDDIR)/Dockerfile: Dockerfile
	@cp $< $@

.PHONY: push forcepush tag forcetag show-tag

push: $(BUILDDIR)/build.yml $(BUILDDIR)/Dockerfile
	linuxkit pkg push -hash-path . $(BUILDDIR)

forcepush: $(BUILDDIR)/build.yml $(BUILDDIR)/Dockerfile
	linuxkit pkg push -force -hash-path . $(BUILDDIR)

tag: $(BUILDDIR)/build.yml $(BUILDDIR)/Dockerfile
	linuxkit pkg build -hash-path . $(BUILDDIR)

forcetag: $(BUILDDIR)/build.yml $(BUILDDIR)/Dockerfile
	linuxkit pkg build --force -hash-path . $(BUILDDIR)

show-tag: $(BUILDDIR)/build.yml $(BUILDDIR)/Dockerfile
	@linuxkit pkg show-tag -hash-path . $(BUILDDIR)

@@ -1,5 +0,0 @@
org: linuxkitprojects
image: kubernetes-docker-image-cache-@@CACHE@@
disable-content-trust: true
arches:
- amd64

@@ -1 +0,0 @@
Dockerfile

@@ -1,45 +0,0 @@
#!/bin/sh

repo=gcr.io/google_containers

kube_version=v1.8.2
kube_dns_version=1.14.5
pause_version=3.0
etcd_version=3.0.17

common="
kube-proxy-amd64:$kube_version
k8s-dns-sidecar-amd64:$kube_dns_version
k8s-dns-kube-dns-amd64:$kube_dns_version
k8s-dns-dnsmasq-nanny-amd64:$kube_dns_version
pause-amd64:$pause_version"

control="
kube-apiserver-amd64:$kube_version
kube-controller-manager-amd64:$kube_version
kube-scheduler-amd64:$kube_version
etcd-amd64:$etcd_version"

for i in $common $control ; do
    docker image pull "$repo/$i" 1>&2
done

oi() {
    local i="$1"
    digest=$(docker image inspect --format '{{index .RepoDigests 0}}' "$repo/$i")
    i=$(echo "${i}@${digest#*@}" | sed -e 's/:/\\:/g')
    echo " \\"
    echo -n " ${i}"
}

rm -f $t

echo "# autogenerated by mkversions"
echo -n "COMMON_IMAGES :="
for i in $common ; do
    oi "$i"
done
echo ""
echo ""

echo -n "CONTROL_PLANE_IMAGES :="
for i in $control ; do
    oi "$i"
done
echo ""

@@ -1,13 +0,0 @@
# autogenerated by mkversions
COMMON_IMAGES := \
kube-proxy-amd64\:v1.8.2@sha256\:8b81f3be506ca13df59731a75cc7795aeeff64cad48202067b431b9c2a4d91a9 \
k8s-dns-sidecar-amd64\:1.14.5@sha256\:9aab42bf6a2a068b797fe7d91a5d8d915b10dbbc3d6f2b10492848debfba6044 \
k8s-dns-kube-dns-amd64\:1.14.5@sha256\:1a3fc069de481ae690188f6f1ba4664b5cc7760af37120f70c86505c79eea61d \
k8s-dns-dnsmasq-nanny-amd64\:1.14.5@sha256\:46b933bb70270c8a02fa6b6f87d440f6f1fce1a5a2a719e164f83f7b109f7544 \
pause-amd64\:3.0@sha256\:163ac025575b775d1c0f9bf0bdd0f086883171eb475b5068e7defa4ca9e76516
CONTROL_PLANE_IMAGES := \
kube-apiserver-amd64\:v1.8.2@sha256\:3e980f4b57292568ea8c87be462cf0583e40bbc2dbfff71d0d9e19beda3cb74b \
kube-controller-manager-amd64\:v1.8.2@sha256\:c2cd4acd4238b2f2526abf5ba546d4e6f4a46618ad5747a539e8a72c294a7482 \
kube-scheduler-amd64\:v1.8.2@sha256\:7c920b718509e8cf811c69178526d84ebfab2bdbb95949f6e82eb5233e7b5f0e \
etcd-amd64\:3.0.17@sha256\:d83d3545e06fb035db8512e33bd44afb55dea007a3abd7b17742d3ac6d235940

@@ -1,61 +0,0 @@
kernel:
  image: linuxkit/kernel:4.9.62
  cmdline: "console=tty0 console=ttyS0"
init:
  - linuxkit/init:42a92119e1ca10380e0d33e26c0cbcf85b9b3558
  - linuxkit/runc:1b0741d07949c0acc444cd6a04ee7f833443579d
  - linuxkit/containerd:bfb61cc1d26c39cd4b2bc08f7a9963fefa0ef3bf
  - linuxkit/ca-certificates:af4880e78edc28743f7c5e262678c67c6add4c26
onboot:
  - name: sysctl
    image: linuxkit/sysctl:a9ad57ed738a31ea9380cd73236866c312b35489
    binds:
      - /etc/sysctl.d/01-kubernetes.conf:/etc/sysctl.d/01-kubernetes.conf
    readonly: false
  - name: sysfs
    image: linuxkit/sysfs:5367b46211882278b84a9e8048855ca5df65beda
  - name: dhcpcd
    image: linuxkit/dhcpcd:48831507404049660b960e4055f544917d90378e
    command: ["/sbin/dhcpcd", "--nobackground", "-f", "/dhcpcd.conf", "-1"]
  - name: metadata
    image: linuxkit/metadata:026aca5c08c22589a7e319f79449bef2c65f04c5
  - name: format
    image: linuxkit/format:6b46d0450082f397177da36be6b4d74d93eacd1e
  - name: mounts
    image: linuxkit/mount:41685ecc8039643948e5dff46e17584753469a7a
    command: ["/usr/bin/mountie", "/var/lib/"]
services:
  - name: getty
    image: linuxkit/getty:6af22c32c98536a79230eef000e9abd06b037faa
    env:
      - INSECURE=true
  - name: rngd
    image: linuxkit/rngd:842e5e8ece7934f0cab9fd0027b595ff3471e5b9
  - name: ntpd
    image: linuxkit/openntpd:07a80c3e3e816658318ac027e1253ff9a228b8de
  - name: sshd
    image: linuxkit/sshd:b7f21ef1b13300a994e35eac3644e4f84f0ada8a
  - name: kubelet
    image: linuxkitprojects/kubelet:4f93f02fb13f8c3e98e922afb2ef8cd1e0da66b3
files:
  - path: etc/linuxkit.yml
    metadata: yaml
  - path: /etc/kubernetes
    symlink: "/var/lib/kubeadm"
  - path: /etc/os-release
    contents: |
      PRETTY_NAME="LinuxKit Kubernetes Project"
  - path: /usr/libexec/kubernetes/kubelet-plugins
    symlink: "/var/lib/kubelet-plugins"
  - path: /etc/kubeadm/
    directory: true
  - path: /etc/sysctl.d/01-kubernetes.conf
    contents: 'net.ipv4.ip_forward = 1'
  - path: /opt/cni
    directory: true
  - path: /etc/cni
    directory: true
  - path: root/.ssh/authorized_keys
    source: ~/.ssh/id_rsa.pub
    mode: "0600"
    optional: true

@@ -1,78 +0,0 @@
FROM linuxkit/alpine:07f7d136e427dc68154cd5edbb2b9576f9ac5213 AS build
ENV kubernetes_version v1.8.2
ENV cni_version v0.6.0
RUN apk add -U --no-cache \
bash \
coreutils \
curl \
findutils \
git \
go \
grep \
libc-dev \
linux-headers \
make \
rsync \
&& true
ENV GOPATH=/go PATH=$PATH:/go/bin
ENV KUBERNETES_URL https://github.com/kubernetes/kubernetes.git
#ENV KUBERNETES_BRANCH pull/NNN/head
ENV KUBERNETES_COMMIT ${kubernetes_version}
RUN mkdir -p $GOPATH/src/github.com/kubernetes && \
cd $GOPATH/src/github.com/kubernetes && \
git clone $KUBERNETES_URL kubernetes
WORKDIR $GOPATH/src/github.com/kubernetes/kubernetes
RUN set -e; \
if [ -n "$KUBERNETES_BRANCH" ] ; then \
git fetch origin "$KUBERNETES_BRANCH"; \
fi; \
git checkout $KUBERNETES_COMMIT
RUN make WHAT="cmd/kubelet cmd/kubectl cmd/kubeadm"
RUN mkdir -p /out/etc/apk && cp -r /etc/apk/* /out/etc/apk/
#coreutils needed for du -B for disk image checks made by kubelet
# example: $ du -s -B 1 /var/lib/kubelet/pods/...
# du: unrecognized option: B
RUN apk add --no-cache --initdb -p /out \
alpine-baselayout \
busybox \
ca-certificates \
coreutils \
curl \
ebtables \
ethtool \
iproute2 \
iptables \
libc6-compat \
musl \
openssl \
socat \
util-linux \
&& true
RUN cp _output/bin/kubelet /out/usr/bin/kubelet
RUN cp _output/bin/kubeadm /out/usr/bin/kubeadm
RUN cp _output/bin/kubectl /out/usr/bin/kubectl
# Remove apk residuals. We have a read-only rootfs, so apk is of no use.
RUN rm -rf /out/etc/apk /out/lib/apk /out/var/cache
RUN rmdir /out/var/run && ln -nfs /run /out/var/run
RUN curl -fSL -o /out/root/cni.tgz https://github.com/containernetworking/plugins/releases/download/${cni_version}/cni-plugins-amd64-${cni_version}.tgz
ADD kubelet.sh /out/usr/bin/kubelet.sh
ADD kubeadm-init.sh /kubeadm-init.sh
RUN sed -e "s/@KUBERNETES_VERSION@/${kubernetes_version}/g" </kubeadm-init.sh >/out/usr/bin/kubeadm-init.sh && chmod +x /out/usr/bin/kubeadm-init.sh
FROM scratch
WORKDIR /
ENTRYPOINT ["/usr/bin/kubelet.sh"]
COPY --from=build /out /
ENV KUBECONFIG "/etc/kubernetes/admin.conf"
LABEL org.mobyproject.config='{"binds": ["/dev:/dev", "/etc/resolv.conf:/etc/resolv.conf", "/etc/os-release:/etc/os-release", "/run:/run:rshared,rbind", "/var:/var:rshared,rbind", "/var/lib/kubeadm:/etc/kubernetes", "/etc/kubelet.sh.conf:/etc/kubelet.sh.conf", "/etc/kubeadm:/etc/kubeadm", "/var/lib/kubelet-plugins:/usr/libexec/kubernetes/kubelet-plugins:rshared,rbind"], "mounts": [{"type": "cgroup", "options": ["rw","nosuid","noexec","nodev","relatime"]}], "capabilities": ["all"], "rootfsPropagation": "shared", "pid": "host", "runtime": {"mkdir": ["/var/lib/kubeadm", "/var/lib/cni/etc", "/var/lib/cni/opt", "/var/lib/kubelet-plugins"], "mounts": [{"type": "bind", "source": "/var/lib/cni/opt", "destination": "/opt/cni", "options": ["rw", "bind"]}, {"type": "bind", "source": "/var/lib/cni/etc", "destination": "/etc/cni", "options": ["rw", "bind"]}]}}'

@@ -1,6 +0,0 @@
org: linuxkitprojects
image: kubelet
network: true
disable-content-trust: true
arches:
- amd64

@@ -1,39 +0,0 @@
#!/bin/sh

set -e

touch /var/lib/kubeadm/.kubeadm-init.sh-started

if [ -f /etc/kubeadm/kubeadm.yaml ]; then
    echo Using the configuration from /etc/kubeadm/kubeadm.yaml
    if [ $# -ne 0 ] ; then
        echo WARNING: Ignoring command line options: $@
    fi
    kubeadm init --skip-preflight-checks --config /etc/kubeadm/kubeadm.yaml
else
    kubeadm init --skip-preflight-checks --kubernetes-version @KUBERNETES_VERSION@ $@
fi

if [ -d /var/config/cni/etc/net.d ]; then
    cp /var/config/cni/etc/net.d/* /var/lib/cni/etc/net.d/
fi

# sorting by basename relies on the dirnames having the same number of directories
YAML=$(ls -1 /var/config/kube-system.init/*.yaml /etc/kubeadm/kube-system.init/*.yaml 2>/dev/null | sort --field-separator=/ --key=5)
for i in ${YAML}; do
    n=$(basename "$i")
    if [ -e "$i" ] ; then
        if [ ! -s "$i" ] ; then # ignore zero sized files
            echo "Ignoring zero size file $n"
            continue
        fi
        echo "Applying $n"
        if ! kubectl create -n kube-system -f "$i" ; then
            touch /var/lib/kubeadm/.kubeadm-init.sh-kube-system.init-failed
            touch /var/lib/kubeadm/.kubeadm-init.sh-kube-system.init-"$n"-failed
            echo "Failed to apply $n"
            continue
        fi
    fi
done

if [ -f /var/config/kubeadm/untaint-master ] ; then
    echo "Removing \"node-role.kubernetes.io/master\" taint from all nodes"
    kubectl taint nodes --all node-role.kubernetes.io/master-
fi

touch /var/lib/kubeadm/.kubeadm-init.sh-finished

@@ -1,72 +0,0 @@
#!/bin/sh

# Kubelet outputs only to stderr, so arrange for everything we do to go there too
exec 1>&2

if [ -e /etc/kubelet.sh.conf ] ; then
    . /etc/kubelet.sh.conf
fi

if [ -f /var/config/kubelet/disabled ] ; then
    echo "kubelet.sh: /var/config/kubelet/disabled file is present, exiting"
    exit 0
fi
if [ -n "$KUBELET_DISABLED" ] ; then
    echo "kubelet.sh: KUBELET_DISABLED environ variable is set, exiting"
    exit 0
fi

if [ ! -e /var/lib/cni/.opt.defaults-extracted ] ; then
    mkdir -p /var/lib/cni/opt/bin
    tar -xzf /root/cni.tgz -C /var/lib/cni/opt/bin
    touch /var/lib/cni/.opt.defaults-extracted
fi

await=/etc/kubernetes/kubelet.conf

if [ -f "/etc/kubernetes/kubelet.conf" ] ; then
    echo "kubelet.sh: kubelet already configured"
elif [ -d /var/config/kubeadm ] ; then
    if [ -f /var/config/kubeadm/init ] ; then
        echo "kubelet.sh: init cluster with metadata \"$(cat /var/config/kubeadm/init)\""
        # This needs to be in the background since it waits for kubelet to start.
        # We skip printing the token so it is not persisted in the log.
        kubeadm-init.sh --skip-token-print $(cat /var/config/kubeadm/init) &
    elif [ -e /var/config/kubeadm/join ] ; then
        echo "kubelet.sh: joining cluster with metadata \"$(cat /var/config/kubeadm/join)\""
        kubeadm join --skip-preflight-checks $(cat /var/config/kubeadm/join)
        await=/etc/kubernetes/bootstrap-kubelet.conf
    fi
elif [ -e /var/config/userdata ] ; then
    echo "kubelet.sh: joining cluster with metadata \"$(cat /var/config/userdata)\""
    kubeadm join --skip-preflight-checks $(cat /var/config/userdata)
    await=/etc/kubernetes/bootstrap-kubelet.conf
fi

echo "kubelet.sh: waiting for ${await}"
# TODO(ijc) is there a race between kubeadm creating this file and
# finishing the write where we might be able to fall through and
# start kubelet with an incomplete configuration file? I've tried
# to provoke such a race without success. An explicit
# synchronisation barrier or changing kubeadm to write
# kubelet.conf atomically might be good in any case.
until [ -f "${await}" ] ; do
    sleep 1
done
echo "kubelet.sh: ${await} has arrived" 2>&1

mkdir -p /etc/kubernetes/manifests

exec kubelet --kubeconfig=/etc/kubernetes/kubelet.conf \
    --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \
    --pod-manifest-path=/etc/kubernetes/manifests \
    --allow-privileged=true \
    --cluster-dns=10.96.0.10 \
    --cluster-domain=cluster.local \
    --cgroups-per-qos=false \
    --enforce-node-allocatable= \
    --network-plugin=cni \
    --cni-conf-dir=/var/lib/cni/etc/net.d \
    --cni-bin-dir=/var/lib/cni/opt/bin \
    --cadvisor-port=0 \
    $KUBELET_ARGS $@

@@ -1,18 +0,0 @@
#!/bin/bash -eu

sshopts="-o LogLevel=FATAL \
  -o StrictHostKeyChecking=no \
  -o UserKnownHostsFile=/dev/null \
  -o IdentitiesOnly=yes"

case $(uname -s) in
    Linux)
        ssh=ssh
        ;;
    *)
        ssh="docker run --rm -ti \
          -v $HOME/.ssh/:/root/.ssh \
          ijc25/alpine-ssh"
        ;;
esac

$ssh $sshopts -t root@"$1" ctr tasks exec --tty --exec-id ssh-$(hostname)-$$ kubelet ash -l

@@ -1,3 +0,0 @@
files:
  - path: /etc/kubeadm/kube-system.init/50-weave.yaml
    source: kube-weave.yaml