Merge pull request #125 from galal-hussein/add_kubelet_sidekick
Add services sidekick container
@@ -45,6 +45,7 @@ const (
     DNSMasqImage = "dnsmasq_image"
     KubeDNSSidecarImage = "kubedns_sidecar_image"
     KubeDNSAutoScalerImage = "kubedns_autoscaler_image"
+    ServiceSidekickImage = "service_sidekick_image"
 )

 func (c *Cluster) DeployClusterPlanes() error {
@@ -53,11 +54,18 @@ func (c *Cluster) DeployClusterPlanes() error {
     if err != nil {
         return fmt.Errorf("[etcd] Failed to bring up Etcd Plane: %v", err)
     }
-    err = services.RunControlPlane(c.ControlPlaneHosts, c.EtcdHosts, c.Services)
+    err = services.RunControlPlane(c.ControlPlaneHosts,
+        c.EtcdHosts,
+        c.Services,
+        c.SystemImages[ServiceSidekickImage])
     if err != nil {
         return fmt.Errorf("[controlPlane] Failed to bring up Control Plane: %v", err)
     }
-    err = services.RunWorkerPlane(c.ControlPlaneHosts, c.WorkerHosts, c.Services, c.SystemImages[NginxProxyImage])
+    err = services.RunWorkerPlane(c.ControlPlaneHosts,
+        c.WorkerHosts,
+        c.Services,
+        c.SystemImages[NginxProxyImage],
+        c.SystemImages[ServiceSidekickImage])
     if err != nil {
         return fmt.Errorf("[workerPlane] Failed to bring up Worker Plane: %v", err)
     }
@@ -157,6 +165,7 @@ func (c *Cluster) setClusterImageDefaults() {
         DNSMasqImage: DefaultDNSMasqImage,
         KubeDNSSidecarImage: DefaultKubeDNSSidecarImage,
         KubeDNSAutoScalerImage: DefaultKubeDNSAutoScalerImage,
+        ServiceSidekickImage: DefaultServiceSidekickImage,
     }
     for k, v := range systemImagesDefaultsMap {
         setDefaultIfEmptyMapValue(c.SystemImages, k, v)
@@ -13,10 +13,11 @@ const (

     DefaultNetworkPlugin = "flannel"

     DefaultInfraContainerImage = "gcr.io/google_containers/pause-amd64:3.0"
     DefaultAplineImage = "alpine:latest"
     DefaultNginxProxyImage = "rancher/rke-nginx-proxy:0.1.0"
     DefaultCertDownloaderImage = "rancher/rke-cert-deployer:0.1.0"
+    DefaultServiceSidekickImage = "rancher/rke-service-sidekick:0.1.0"

     DefaultFlannelImage = "quay.io/coreos/flannel:v0.9.1"
     DefaultFlannelCNIImage = "quay.io/coreos/flannel-cni:v0.2.0"
@@ -20,7 +20,7 @@ var K8sDockerVersions = map[string][]string{
 }

 func DoRunContainer(dClient *client.Client, imageCfg *container.Config, hostCfg *container.HostConfig, containerName string, hostname string, plane string) error {
-    isRunning, err := IsContainerRunning(dClient, hostname, containerName)
+    isRunning, err := IsContainerRunning(dClient, hostname, containerName, false)
     if err != nil {
         return err
     }
@@ -56,7 +56,7 @@ func DoRunContainer(dClient *client.Client, imageCfg *container.Config, hostCfg

 func DoRollingUpdateContainer(dClient *client.Client, imageCfg *container.Config, hostCfg *container.HostConfig, containerName, hostname, plane string) error {
     logrus.Debugf("[%s] Checking for deployed [%s]", plane, containerName)
-    isRunning, err := IsContainerRunning(dClient, hostname, containerName)
+    isRunning, err := IsContainerRunning(dClient, hostname, containerName, false)
     if err != nil {
         return err
     }
@@ -115,9 +115,9 @@ func DoRemoveContainer(dClient *client.Client, containerName, hostname string) e
     return nil
 }

-func IsContainerRunning(dClient *client.Client, hostname string, containerName string) (bool, error) {
+func IsContainerRunning(dClient *client.Client, hostname string, containerName string, all bool) (bool, error) {
     logrus.Debugf("Checking if container [%s] is running on host [%s]", containerName, hostname)
-    containers, err := dClient.ContainerList(context.Background(), types.ContainerListOptions{})
+    containers, err := dClient.ContainerList(context.Background(), types.ContainerListOptions{All: all})
     if err != nil {
         return false, fmt.Errorf("Can't get Docker containers for host [%s]: %v", hostname, err)
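The new all parameter is what makes the sidekick pattern work: the sidekick container runs /bin/true and exits immediately, so it only shows up in a container listing that includes stopped containers. Without All: true, runSidekick (added later in this commit) would recreate it on every invocation. Below is a minimal sketch of that lookup, not taken from the commit; it assumes the docker/docker Go client API of that era and a reachable Docker daemon.

package main

import (
    "context"
    "fmt"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/client"
)

// findContainer mirrors IsContainerRunning: with all=false only running
// containers are listed; with all=true, created or exited containers
// (like the sidekick) are found as well.
func findContainer(dClient *client.Client, name string, all bool) (bool, error) {
    containers, err := dClient.ContainerList(context.Background(), types.ContainerListOptions{All: all})
    if err != nil {
        return false, fmt.Errorf("can't list containers: %v", err)
    }
    for _, c := range containers {
        for _, n := range c.Names {
            if n == "/"+name { // Docker prefixes container names with a slash
                return true, nil
            }
        }
    }
    return false, nil
}

func main() {
    cli, err := client.NewEnvClient() // honours DOCKER_HOST / DOCKER_API_VERSION
    if err != nil {
        panic(err)
    }
    found, err := findContainer(cli, "service-sidekick", true)
    if err != nil {
        panic(err)
    }
    fmt.Println("sidekick present:", found)
}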
package/service-sidekick/Dockerfile (new file)
@@ -0,0 +1,7 @@
+FROM scratch
+
+VOLUME /opt/rke
+
+COPY entrypoint.sh /opt/rke/entrypoint.sh
+
+CMD /bin/true
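The Dockerfile above is a classic data-volume container: there is no long-running process (CMD /bin/true), only a volume holding entrypoint.sh, which the kube-* service containers later mount through VolumesFrom. A sketch of that consumption pattern with the Docker Go SDK follows; the image name, container names and client setup are illustrative, not taken from this commit, and the call signatures assume the docker/docker client of that era.

package main

import (
    "context"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/client"
)

// runWithSidekickVolume starts a service container that borrows /opt/rke
// from the already-created "service-sidekick" data-volume container, so
// /opt/rke/entrypoint.sh can be used as the entrypoint.
func runWithSidekickVolume(ctx context.Context, cli *client.Client) error {
    imageCfg := &container.Config{
        Image:      "rancher/k8s:illustrative-tag", // hypothetical image for the sketch
        Entrypoint: []string{"/opt/rke/entrypoint.sh", "kube-scheduler", "--v=2"},
    }
    hostCfg := &container.HostConfig{
        VolumesFrom: []string{"service-sidekick"}, // mounts /opt/rke from the sidekick
    }
    resp, err := cli.ContainerCreate(ctx, imageCfg, hostCfg, nil, "scheduler")
    if err != nil {
        return err
    }
    return cli.ContainerStart(ctx, resp.ID, types.ContainerStartOptions{})
}

func main() {
    cli, err := client.NewEnvClient()
    if err != nil {
        panic(err)
    }
    if err := runWithSidekickVolume(context.Background(), cli); err != nil {
        panic(err)
    }
}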
package/service-sidekick/build-and-push.sh (new executable file)
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+ACCT=${ACCT:-rancher}
+
+docker build -t $ACCT/rke-service-sidekick:0.1.0 .
+docker push $ACCT/rke-service-sidekick:0.1.0
package/service-sidekick/entrypoint.sh (new executable file)
@@ -0,0 +1,22 @@
+#!/bin/bash -x
+
+if [ "$1" == "kubelet" ]; then
+    for i in $(DOCKER_API_VERSION=1.24 ./docker info 2>&1 | grep -i 'docker root dir' | cut -f2 -d:) /var/lib/docker /run /var/run; do
+        for m in $(tac /proc/mounts | awk '{print $2}' | grep ^${i}/); do
+            if [ "$m" != "/var/run/nscd" ] && [ "$m" != "/run/nscd" ]; then
+                umount $m || true
+            fi
+        done
+    done
+    mount --rbind /host/dev /dev
+    mount -o rw,remount /sys/fs/cgroup 2>/dev/null || true
+    for i in /sys/fs/cgroup/*; do
+        if [ -d $i ]; then
+            mkdir -p $i/kubepods
+        fi
+    done
+    CGROUPDRIVER=$(docker info | grep -i 'cgroup driver' | awk '{print $3}')
+    exec "$@" --cgroup-driver=$CGROUPDRIVER
+fi
+
+exec "$@"
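The last two lines before the fi are the functional core of the wrapper: kubelet must use the same cgroup driver as the Docker daemon, so the script reads it from docker info and forwards it. That is also why the hard-coded --cgroup-driver=cgroupfs flag is dropped from the kubelet arguments further down in this commit. For reference only, the same lookup can be done through the Docker Go SDK; this is an illustration of the equivalence, not part of the commit, which shells out to the docker binary instead.

package main

import (
    "context"
    "fmt"

    "github.com/docker/docker/client"
)

// dockerCgroupDriver is the SDK equivalent of:
//   docker info | grep -i 'cgroup driver' | awk '{print $3}'
func dockerCgroupDriver() (string, error) {
    cli, err := client.NewEnvClient() // honours DOCKER_HOST / DOCKER_API_VERSION
    if err != nil {
        return "", err
    }
    info, err := cli.Info(context.Background())
    if err != nil {
        return "", err
    }
    return info.CgroupDriver, nil
}

func main() {
    driver, err := dockerCgroupDriver()
    if err != nil {
        panic(err)
    }
    fmt.Println(driver) // typically "cgroupfs" or "systemd"
}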
@@ -86,7 +86,7 @@ func doRunDeployer(host *hosts.Host, containerEnv []string, certDownloaderImage
     }
     logrus.Debugf("[certificates] Successfully started Certificate deployer container: %s", resp.ID)
     for {
-        isDeployerRunning, err := docker.IsContainerRunning(host.DClient, host.Address, CrtDownloaderContainer)
+        isDeployerRunning, err := docker.IsContainerRunning(host.DClient, host.Address, CrtDownloaderContainer, false)
         if err != nil {
             return err
         }
@@ -6,7 +6,7 @@ import (
     "github.com/sirupsen/logrus"
 )

-func RunControlPlane(controlHosts []*hosts.Host, etcdHosts []*hosts.Host, controlServices v3.RKEConfigServices) error {
+func RunControlPlane(controlHosts, etcdHosts []*hosts.Host, controlServices v3.RKEConfigServices, sidekickImage string) error {
     logrus.Infof("[%s] Building up Controller Plane..", ControlRole)
     for _, host := range controlHosts {
@@ -15,6 +15,10 @@ func RunControlPlane(controlHosts []*hosts.Host, etcdHosts []*hosts.Host, contro
                 return err
             }
         }
+        // run sidekick
+        if err := runSidekick(host, sidekickImage); err != nil {
+            return err
+        }
         // run kubeapi
         err := runKubeAPI(host, etcdHosts, controlServices.KubeAPI)
         if err != nil {
@@ -66,6 +70,10 @@ func RemoveControlPlane(controlHosts []*hosts.Host, force bool) error {
             if err := removeKubeproxy(host); err != nil {
                 return nil
             }
+            // remove Sidekick
+            if err := removeSidekick(host); err != nil {
+                return err
+            }
         }
     }
     logrus.Infof("[%s] Successfully teared down Controller Plane..", ControlRole)
@@ -24,7 +24,8 @@ func removeKubeAPI(host *hosts.Host) error {
 func buildKubeAPIConfig(host *hosts.Host, kubeAPIService v3.KubeAPIService, etcdConnString string) (*container.Config, *container.HostConfig) {
     imageCfg := &container.Config{
         Image: kubeAPIService.Image,
-        Entrypoint: []string{"kube-apiserver",
+        Entrypoint: []string{"/opt/rke/entrypoint.sh",
+            "kube-apiserver",
             "--insecure-bind-address=127.0.0.1",
             "--bind-address=0.0.0.0",
             "--insecure-port=8080",
@@ -44,6 +45,9 @@ func buildKubeAPIConfig(host *hosts.Host, kubeAPIService v3.KubeAPIService, etcd
             "--service-account-key-file=" + pki.KubeAPIKeyPath},
     }
     hostCfg := &container.HostConfig{
+        VolumesFrom: []string{
+            SidekickContainerName,
+        },
         Binds: []string{
             "/etc/kubernetes:/etc/kubernetes",
         },
@@ -22,7 +22,8 @@ func removeKubeController(host *hosts.Host) error {
 func buildKubeControllerConfig(kubeControllerService v3.KubeControllerService) (*container.Config, *container.HostConfig) {
     imageCfg := &container.Config{
         Image: kubeControllerService.Image,
-        Entrypoint: []string{"kube-controller-manager",
+        Entrypoint: []string{"/opt/rke/entrypoint.sh",
+            "kube-controller-manager",
             "--address=0.0.0.0",
             "--cloud-provider=",
             "--leader-elect=true",
@@ -39,6 +40,9 @@ func buildKubeControllerConfig(kubeControllerService v3.KubeControllerService) (
         },
     }
     hostCfg := &container.HostConfig{
+        VolumesFrom: []string{
+            SidekickContainerName,
+        },
         Binds: []string{
             "/etc/kubernetes:/etc/kubernetes",
         },
@@ -4,7 +4,6 @@ import (
     "fmt"

     "github.com/docker/docker/api/types/container"
-    "github.com/docker/go-connections/nat"
     "github.com/rancher/rke/docker"
     "github.com/rancher/rke/hosts"
     "github.com/rancher/rke/pki"
@@ -23,12 +22,12 @@ func removeKubelet(host *hosts.Host) error {
 func buildKubeletConfig(host *hosts.Host, kubeletService v3.KubeletService) (*container.Config, *container.HostConfig) {
     imageCfg := &container.Config{
         Image: kubeletService.Image,
-        Entrypoint: []string{"kubelet",
+        Entrypoint: []string{"/opt/rke/entrypoint.sh",
+            "kubelet",
             "--v=2",
             "--address=0.0.0.0",
             "--cluster-domain=" + kubeletService.ClusterDomain,
             "--pod-infra-container-image=" + kubeletService.InfraContainerImage,
-            "--cgroup-driver=cgroupfs",
             "--cgroups-per-qos=True",
             "--enforce-node-allocatable=",
             "--hostname-override=" + host.HostnameOverride,
@@ -54,30 +53,24 @@ func buildKubeletConfig(host *hosts.Host, kubeletService v3.KubeletService) (*co
         }
     }
     hostCfg := &container.HostConfig{
+        VolumesFrom: []string{
+            SidekickContainerName,
+        },
         Binds: []string{
             "/etc/kubernetes:/etc/kubernetes",
             "/etc/cni:/etc/cni:ro",
             "/opt/cni:/opt/cni:ro",
             "/etc/resolv.conf:/etc/resolv.conf",
-            "/sys:/sys:ro",
+            "/sys:/sys",
             "/var/lib/docker:/var/lib/docker:rw",
             "/var/lib/kubelet:/var/lib/kubelet:shared",
             "/var/run:/var/run:rw",
-            "/dev:/host/dev",
-            "/sys/fs/cgroup:/sys/fs/cgroup:rw"},
+            "/run:/run",
+            "/dev:/host/dev"},
         NetworkMode: "host",
         PidMode: "host",
         Privileged: true,
         RestartPolicy: container.RestartPolicy{Name: "always"},
-        PortBindings: nat.PortMap{
-            "8080/tcp": []nat.PortBinding{
-                {
-                    HostIP: "0.0.0.0",
-                    HostPort: "8080",
-                },
-            },
-        },
     }
     for arg, value := range kubeletService.ExtraArgs {
         cmd := fmt.Sprintf("--%s=%s", arg, value)
@@ -22,13 +22,17 @@ func removeKubeproxy(host *hosts.Host) error {
 func buildKubeproxyConfig(host *hosts.Host, kubeproxyService v3.KubeproxyService) (*container.Config, *container.HostConfig) {
     imageCfg := &container.Config{
         Image: kubeproxyService.Image,
-        Entrypoint: []string{"kube-proxy",
+        Entrypoint: []string{"/opt/rke/entrypoint.sh",
+            "kube-proxy",
             "--v=2",
             "--healthz-bind-address=0.0.0.0",
             "--kubeconfig=" + pki.KubeProxyConfigPath,
         },
     }
     hostCfg := &container.HostConfig{
+        VolumesFrom: []string{
+            SidekickContainerName,
+        },
         Binds: []string{
             "/etc/kubernetes:/etc/kubernetes",
         },
@@ -22,7 +22,8 @@ func removeScheduler(host *hosts.Host) error {
 func buildSchedulerConfig(host *hosts.Host, schedulerService v3.SchedulerService) (*container.Config, *container.HostConfig) {
     imageCfg := &container.Config{
         Image: schedulerService.Image,
-        Entrypoint: []string{"kube-scheduler",
+        Entrypoint: []string{"/opt/rke/entrypoint.sh",
+            "kube-scheduler",
             "--leader-elect=true",
             "--v=2",
             "--address=0.0.0.0",
@@ -30,6 +31,9 @@ func buildSchedulerConfig(host *hosts.Host, schedulerService v3.SchedulerService
         },
     }
     hostCfg := &container.HostConfig{
+        VolumesFrom: []string{
+            SidekickContainerName,
+        },
         Binds: []string{
             "/etc/kubernetes:/etc/kubernetes",
         },
@@ -3,6 +3,11 @@ package services
 import (
     "fmt"
     "net"
+
+    "github.com/docker/docker/api/types/container"
+    "github.com/rancher/rke/docker"
+    "github.com/rancher/rke/hosts"
+    "github.com/sirupsen/logrus"
 )

 const (
@@ -17,6 +22,7 @@ const (
     SchedulerContainerName = "scheduler"
     EtcdContainerName = "etcd"
     NginxProxyContainerName = "nginx-proxy"
+    SidekickContainerName = "service-sidekick"
 )

 func GetKubernetesServiceIP(serviceClusterRange string) (net.IP, error) {
@@ -33,3 +39,36 @@ func GetKubernetesServiceIP(serviceClusterRange string) (net.IP, error) {
     }
     return ip, nil
 }
+
+func buildSidekickConfig(sidekickImage string) (*container.Config, *container.HostConfig) {
+    imageCfg := &container.Config{
+        Image: sidekickImage,
+    }
+    hostCfg := &container.HostConfig{
+        NetworkMode: "none",
+    }
+    return imageCfg, hostCfg
+}
+
+func runSidekick(host *hosts.Host, sidekickImage string) error {
+    isRunning, err := docker.IsContainerRunning(host.DClient, host.Address, SidekickContainerName, true)
+    if err != nil {
+        return err
+    }
+    if isRunning {
+        logrus.Infof("[sidekick] Sidekick container already created on host [%s]", host.Address)
+        return nil
+    }
+    imageCfg, hostCfg := buildSidekickConfig(sidekickImage)
+    if err := docker.PullImage(host.DClient, host.Address, sidekickImage); err != nil {
+        return err
+    }
+    if _, err := docker.CreateContiner(host.DClient, host.Address, SidekickContainerName, imageCfg, hostCfg); err != nil {
+        return err
+    }
+    return nil
+}
+
+func removeSidekick(host *hosts.Host) error {
+    return docker.DoRemoveContainer(host.DClient, SidekickContainerName, host.Address)
+}
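Note that runSidekick only pulls the image and creates the container; it never starts it. A created-but-never-started container is enough for other containers to mount its volumes through VolumesFrom, and NetworkMode "none" keeps it off the network entirely. A quick way to verify the resulting state on a host, sketched with the Docker Go SDK (the container name comes from the constant above; the client setup is illustrative and not part of this commit):

package main

import (
    "context"
    "fmt"

    "github.com/docker/docker/client"
)

// Inspect the sidekick container: it should sit in the "created" state and
// expose /opt/rke as a volume for the kube-* containers to reuse.
func main() {
    cli, err := client.NewEnvClient()
    if err != nil {
        panic(err)
    }
    info, err := cli.ContainerInspect(context.Background(), "service-sidekick")
    if err != nil {
        panic(err)
    }
    fmt.Println("state:", info.State.Status) // expected: "created"
    for _, m := range info.Mounts {
        fmt.Println("mount:", m.Destination, "<-", m.Source)
    }
}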
@@ -6,9 +6,14 @@ import (
     "github.com/sirupsen/logrus"
 )

-func RunWorkerPlane(controlHosts []*hosts.Host, workerHosts []*hosts.Host, workerServices v3.RKEConfigServices, nginxProxyImage string) error {
+func RunWorkerPlane(controlHosts, workerHosts []*hosts.Host, workerServices v3.RKEConfigServices, nginxProxyImage, sidekickImage string) error {
     logrus.Infof("[%s] Building up Worker Plane..", WorkerRole)
     for _, host := range controlHosts {
+        // run sidekick
+        if err := runSidekick(host, sidekickImage); err != nil {
+            return err
+        }
+        // run kubelet
         // only one master for now
         if err := runKubelet(host, workerServices.Kubelet); err != nil {
             return err
@@ -24,6 +29,10 @@ func RunWorkerPlane(controlHosts []*hosts.Host, workerHosts []*hosts.Host, worke
                 return err
             }
         }
+        // run sidekick
+        if err := runSidekick(host, sidekickImage); err != nil {
+            return err
+        }
         // run kubelet
         if err := runKubelet(host, workerServices.Kubelet); err != nil {
             return err
@@ -55,6 +64,9 @@ func RemoveWorkerPlane(workerHosts []*hosts.Host, force bool) error {
         if err := removeNginxProxy(host); err != nil {
             return err
         }
+        if err := removeSidekick(host); err != nil {
+            return err
+        }
         logrus.Infof("[%s] Successfully teared down Worker Plane..", WorkerRole)
     }
