
Add user-configurable images

Refactor configuration defaults
 Add comments to config
 Add configurable utility images
 Add configurable network plugin images
 Add configurable kubedns images
moelsayed 2017-12-05 03:29:29 +02:00
parent 89ec803428
commit 0ee002b2cf
19 changed files with 276 additions and 133 deletions


@@ -1,6 +1,15 @@
 package addons

-func GetKubeDNSManifest(clusterDNSServer, clusterDomain string) string {
+const (
+    KubeDNSImage = "kubeDNSImage"
+    DNSMasqImage = "DNSMasqImage"
+    KubeDNSSidecarImage = "kubednsSidecarImage"
+    KubeDNSAutoScalerImage = "kubeDNSAutoScalerImage"
+    KubeDNSServer = "clusterDNSServer"
+    KubeDNSClusterDomain = "clusterDomain"
+)
+
+func GetKubeDNSManifest(kubeDNSConfig map[string]string) string {
 	return `---
 apiVersion: apps/v1beta1
 kind: Deployment
@@ -17,7 +26,7 @@ spec:
 spec:
 containers:
 - name: autoscaler
-image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0
+image: ` + kubeDNSConfig[KubeDNSAutoScalerImage] + `
 resources:
 requests:
 cpu: "20m"
@@ -81,7 +90,7 @@ spec:
 optional: true
 containers:
 - name: kubedns
-image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5
+image: ` + kubeDNSConfig[KubeDNSImage] + `
 resources:
 # TODO: Set memory limits when we've profiled the container for large
 # clusters, then set request = limit to keep this container in
@@ -111,7 +120,7 @@ spec:
 initialDelaySeconds: 3
 timeoutSeconds: 5
 args:
-- --domain=` + clusterDomain + `.
+- --domain=` + kubeDNSConfig[KubeDNSClusterDomain] + `.
 - --dns-port=10053
 - --config-dir=/kube-dns-config
 - --v=2
@@ -132,7 +141,7 @@ spec:
 - name: kube-dns-config
 mountPath: /kube-dns-config
 - name: dnsmasq
-image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5
+image: ` + kubeDNSConfig[DNSMasqImage] + `
 livenessProbe:
 httpGet:
 path: /healthcheck/dnsmasq
@@ -151,7 +160,7 @@ spec:
 - -k
 - --cache-size=1000
 - --log-facility=-
-- --server=/` + clusterDomain + `/127.0.0.1#10053
+- --server=/` + kubeDNSConfig[KubeDNSClusterDomain] + `/127.0.0.1#10053
 - --server=/in-addr.arpa/127.0.0.1#10053
 - --server=/ip6.arpa/127.0.0.1#10053
 ports:
@@ -170,7 +179,7 @@ spec:
 - name: kube-dns-config
 mountPath: /etc/k8s/dns/dnsmasq-nanny
 - name: sidecar
-image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
+image: ` + kubeDNSConfig[KubeDNSSidecarImage] + `
 livenessProbe:
 httpGet:
 path: /metrics
@@ -183,8 +192,8 @@ spec:
 args:
 - --v=2
 - --logtostderr
-- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.` + clusterDomain + `,5,A
-- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.` + clusterDomain + `,5,A
+- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.` + kubeDNSConfig[KubeDNSClusterDomain] + `,5,A
+- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.` + kubeDNSConfig[KubeDNSClusterDomain] + `,5,A
 ports:
 - containerPort: 10054
 name: metrics
@@ -209,7 +218,7 @@ metadata:
 spec:
 selector:
 k8s-app: kube-dns
-clusterIP: ` + clusterDNSServer + `
+clusterIP: ` + kubeDNSConfig[KubeDNSServer] + `
 ports:
 - name: dns
 port: 53
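The manifest builder now takes a single map keyed by the exported constants instead of two positional string arguments, so new images can be made configurable without changing the function signature. A minimal standalone sketch of the same splicing pattern follows; the helper name and the heavily trimmed template are illustrative only, not the real manifest:

package main

import "fmt"

// Keys mirror the constants introduced above.
const (
    KubeDNSImage         = "kubeDNSImage"
    KubeDNSServer        = "clusterDNSServer"
    KubeDNSClusterDomain = "clusterDomain"
)

// getKubeDNSManifest splices the configured values into a template,
// the same way GetKubeDNSManifest builds the full kube-dns manifest.
func getKubeDNSManifest(cfg map[string]string) string {
    return `---
containers:
- name: kubedns
  image: ` + cfg[KubeDNSImage] + `
  args:
  - --domain=` + cfg[KubeDNSClusterDomain] + `.
clusterIP: ` + cfg[KubeDNSServer]
}

func main() {
    fmt.Println(getKubeDNSManifest(map[string]string{
        KubeDNSImage:         "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5",
        KubeDNSServer:        "10.233.0.3",
        KubeDNSClusterDomain: "cluster.local",
    }))
}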


@@ -12,7 +12,8 @@ auth:
 network:
   plugin: flannel
   options:
-    foo: bar
+    flannel_image: quay.io/coreos/flannel:v0.9.1
+    flannel_cni_image: quay.io/coreos/flannel-cni:v0.2.0

 ssh_key_path: ~/.ssh/test
@@ -56,6 +57,16 @@ services:
   kubeproxy:
     image: rancher/k8s:v1.8.3-rancher2

+system_images:
+  alpine: alpine:latest
+  nginx_proxy: rancher/rke-nginx-proxy:0.1.0
+  cert_downloader: rancher/rke-cert-deployer:0.1.0
+  kubedns_image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5
+  dnsmasq_image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5
+  kubedns_sidecar_image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
+  kubedns_autoscaler_image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0
+
 # all addon manifests MUST specify a namespace
 addons: |-
   ---


@@ -36,9 +36,15 @@ func (c *Cluster) DeployUserAddOns() error {
 func (c *Cluster) deployKubeDNS() error {
     logrus.Infof("[addons] Setting up KubeDNS")
-    kubeDNSYaml := addons.GetKubeDNSManifest(c.ClusterDNSServer, c.ClusterDomain)
+    kubeDNSConfig := map[string]string{
+        addons.KubeDNSServer: c.ClusterDNSServer,
+        addons.KubeDNSClusterDomain: c.ClusterDomain,
+        addons.KubeDNSImage: c.SystemImages[KubeDNSImage],
+        addons.DNSMasqImage: c.SystemImages[DNSMasqImage],
+        addons.KubeDNSSidecarImage: c.SystemImages[KubeDNSSidecarImage],
+        addons.KubeDNSAutoScalerImage: c.SystemImages[KubeDNSAutoScalerImage],
+    }
+    kubeDNSYaml := addons.GetKubeDNSManifest(kubeDNSConfig)
     if err := c.doAddonDeploy(kubeDNSYaml, KubeDNSAddonResourceName); err != nil {
         return err
     }


@@ -33,20 +33,18 @@ type Cluster struct {
 }

 const (
     X509AuthenticationProvider = "x509"
-    DefaultClusterConfig = "cluster.yml"
-    DefaultServiceClusterIPRange = "10.233.0.0/18"
-    DefaultClusterCIDR = "10.233.64.0/18"
-    DefaultClusterDNSService = "10.233.0.3"
-    DefaultClusterDomain = "cluster.local"
-    DefaultInfraContainerImage = "gcr.io/google_containers/pause-amd64:3.0"
-    DefaultAuthStrategy = "x509"
-    DefaultNetworkPlugin = "flannel"
-    DefaultClusterSSHKeyPath = "~/.ssh/id_rsa"
-    StateConfigMapName = "cluster-state"
-    UpdateStateTimeout = 30
-    GetStateTimeout = 30
-    KubernetesClientTimeOut = 30
+    StateConfigMapName = "cluster-state"
+    UpdateStateTimeout = 30
+    GetStateTimeout = 30
+    KubernetesClientTimeOut = 30
+    AplineImage = "alpine"
+    NginxProxyImage = "nginx_proxy"
+    CertDownloaderImage = "cert_downloader"
+    KubeDNSImage = "kubedns_image"
+    DNSMasqImage = "dnsmasq_image"
+    KubeDNSSidecarImage = "kubedns_sidecar_image"
+    KubeDNSAutoScalerImage = "kubedns_autoscaler_image"
 )

 func (c *Cluster) DeployClusterPlanes() error {
@@ -59,7 +57,7 @@ func (c *Cluster) DeployClusterPlanes() error {
     if err != nil {
         return fmt.Errorf("[controlPlane] Failed to bring up Control Plane: %v", err)
     }
-    err = services.RunWorkerPlane(c.ControlPlaneHosts, c.WorkerHosts, c.Services)
+    err = services.RunWorkerPlane(c.ControlPlaneHosts, c.WorkerHosts, c.Services, c.SystemImages[NginxProxyImage])
     if err != nil {
         return fmt.Errorf("[workerPlane] Failed to bring up Worker Plane: %v", err)
     }
@@ -126,29 +124,42 @@ func (c *Cluster) setClusterDefaults() {
             c.Nodes[i].SSHKeyPath = c.SSHKeyPath
         }
     }
-    if len(c.Services.KubeAPI.ServiceClusterIPRange) == 0 {
-        c.Services.KubeAPI.ServiceClusterIPRange = DefaultServiceClusterIPRange
-    }
-    if len(c.Services.KubeController.ServiceClusterIPRange) == 0 {
-        c.Services.KubeController.ServiceClusterIPRange = DefaultServiceClusterIPRange
-    }
-    if len(c.Services.KubeController.ClusterCIDR) == 0 {
-        c.Services.KubeController.ClusterCIDR = DefaultClusterCIDR
-    }
-    if len(c.Services.Kubelet.ClusterDNSServer) == 0 {
-        c.Services.Kubelet.ClusterDNSServer = DefaultClusterDNSService
-    }
-    if len(c.Services.Kubelet.ClusterDomain) == 0 {
-        c.Services.Kubelet.ClusterDomain = DefaultClusterDomain
-    }
-    if len(c.Services.Kubelet.InfraContainerImage) == 0 {
-        c.Services.Kubelet.InfraContainerImage = DefaultInfraContainerImage
-    }
-    if len(c.Authentication.Strategy) == 0 {
-        c.Authentication.Strategy = DefaultAuthStrategy
-    }
-    if len(c.Network.Plugin) == 0 {
-        c.Network.Plugin = DefaultNetworkPlugin
+    c.setClusterServicesDefaults()
+    c.setClusterNetworkDefaults()
+    c.setClusterImageDefaults()
+}
+
+func (c *Cluster) setClusterServicesDefaults() {
+    serviceConfigDefaultsMap := map[string]string{
+        c.Services.KubeAPI.ServiceClusterIPRange: DefaultServiceClusterIPRange,
+        c.Services.KubeController.ServiceClusterIPRange: DefaultServiceClusterIPRange,
+        c.Services.KubeController.ClusterCIDR: DefaultClusterCIDR,
+        c.Services.Kubelet.ClusterDNSServer: DefaultClusterDNSService,
+        c.Services.Kubelet.ClusterDomain: DefaultClusterDomain,
+        c.Services.Kubelet.InfraContainerImage: DefaultInfraContainerImage,
+        c.Authentication.Strategy: DefaultAuthStrategy,
+    }
+    for k, v := range serviceConfigDefaultsMap {
+        setDefaultIfEmpty(&k, v)
+    }
+}
+
+func (c *Cluster) setClusterImageDefaults() {
+    if c.SystemImages == nil {
+        // don't break if the user didn't define rke_images
+        c.SystemImages = make(map[string]string)
+    }
+    systemImagesDefaultsMap := map[string]string{
+        AplineImage: DefaultAplineImage,
+        NginxProxyImage: DefaultNginxProxyImage,
+        CertDownloaderImage: DefaultCertDownloaderImage,
+        KubeDNSImage: DefaultKubeDNSImage,
+        DNSMasqImage: DefaultDNSMasqImage,
+        KubeDNSSidecarImage: DefaultKubeDNSSidecarImage,
+        KubeDNSAutoScalerImage: DefaultKubeDNSAutoScalerImage,
+    }
+    for k, v := range systemImagesDefaultsMap {
+        setDefaultIfEmptyMapValue(c.SystemImages, k, v)
     }
 }

cluster/defaults.go (new file)

@@ -0,0 +1,45 @@
+package cluster
+
+const (
+    DefaultClusterConfig = "cluster.yml"
+
+    DefaultServiceClusterIPRange = "10.233.0.0/18"
+    DefaultClusterCIDR = "10.233.64.0/18"
+    DefaultClusterDNSService = "10.233.0.3"
+    DefaultClusterDomain = "cluster.local"
+    DefaultClusterSSHKeyPath = "~/.ssh/id_rsa"
+
+    DefaultAuthStrategy = "x509"
+    DefaultNetworkPlugin = "flannel"
+
+    DefaultInfraContainerImage = "gcr.io/google_containers/pause-amd64:3.0"
+    DefaultAplineImage = "alpine:latest"
+    DefaultNginxProxyImage = "rancher/rke-nginx-proxy:0.1.0"
+    DefaultCertDownloaderImage = "rancher/rke-cert-deployer:0.1.0"
+
+    DefaultFlannelImage = "quay.io/coreos/flannel:v0.9.1"
+    DefaultFlannelCNIImage = "quay.io/coreos/flannel-cni:v0.2.0"
+
+    DefaultCalicoNodeImage = "quay.io/calico/node:v2.6.2"
+    DefaultCalicoCNIImage = "quay.io/calico/cni:v1.11.0"
+    DefaultCalicoControllersImage = "quay.io/calico/kube-controllers:v1.0.0"
+
+    DefaultCanalNodeImage = "quay.io/calico/node:v2.6.2"
+    DefaultCanalCNIImage = "quay.io/calico/cni:v1.11.0"
+    DefaultCanalFlannelImage = "quay.io/coreos/flannel:v0.9.1"
+
+    DefaultKubeDNSImage = "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5"
+    DefaultDNSMasqImage = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5"
+    DefaultKubeDNSSidecarImage = "gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5"
+    DefaultKubeDNSAutoScalerImage = "gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0"
+)
+
+func setDefaultIfEmptyMapValue(configMap map[string]string, key string, value string) {
+    if _, ok := configMap[key]; !ok {
+        configMap[key] = value
+    }
+}
+
+func setDefaultIfEmpty(varName *string, defaultValue string) {
+    if len(*varName) == 0 {
+        *varName = defaultValue
+    }
+}
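The two helpers give the defaulting code a single idiom: a map entry is only written when the key is missing, and a string is only overwritten when it is empty, so anything the user set in cluster.yml survives. A minimal standalone sketch of that behaviour (the myregistry.local value is a made-up override, not a shipped image):

package main

import "fmt"

func setDefaultIfEmptyMapValue(configMap map[string]string, key string, value string) {
    if _, ok := configMap[key]; !ok {
        configMap[key] = value
    }
}

func setDefaultIfEmpty(varName *string, defaultValue string) {
    if len(*varName) == 0 {
        *varName = defaultValue
    }
}

func main() {
    // The user pinned kubedns_image; dnsmasq_image is missing and falls back,
    // mirroring what setClusterImageDefaults does for c.SystemImages.
    systemImages := map[string]string{
        "kubedns_image": "myregistry.local/k8s-dns-kube-dns-amd64:1.14.5",
    }
    setDefaultIfEmptyMapValue(systemImages, "kubedns_image", "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5")
    setDefaultIfEmptyMapValue(systemImages, "dnsmasq_image", "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5")

    networkPlugin := ""
    setDefaultIfEmpty(&networkPlugin, "flannel")

    fmt.Println(systemImages["kubedns_image"]) // user override kept
    fmt.Println(systemImages["dnsmasq_image"]) // default filled in
    fmt.Println(networkPlugin)                 // flannel
}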


@@ -59,11 +59,11 @@ func (c *Cluster) InvertIndexHosts() error {
 func (c *Cluster) SetUpHosts() error {
     if c.Authentication.Strategy == X509AuthenticationProvider {
         logrus.Infof("[certificates] Deploying kubernetes certificates to Cluster nodes")
-        err := pki.DeployCertificatesOnMasters(c.ControlPlaneHosts, c.Certificates)
+        err := pki.DeployCertificatesOnMasters(c.ControlPlaneHosts, c.Certificates, c.SystemImages[CertDownloaderImage])
         if err != nil {
             return err
         }
-        err = pki.DeployCertificatesOnWorkers(c.WorkerHosts, c.Certificates)
+        err = pki.DeployCertificatesOnWorkers(c.WorkerHosts, c.Certificates, c.SystemImages[CertDownloaderImage])
         if err != nil {
             return err
         }


@@ -11,9 +11,20 @@ import (
 const (
     NetworkPluginResourceName = "rke-network-plugin"
-    FlannelNetworkPlugin = "flannel"
-    CalicoNetworkPlugin = "calico"
-    CanalNetworkPlugin = "canal"
+
+    FlannelNetworkPlugin = "flannel"
+    FlannelImage = "flannel_image"
+    FlannelCNIImage = "flannel_cni_image"
+
+    CalicoNetworkPlugin = "calico"
+    CalicoNodeImage = "calico_node_image"
+    CalicoCNIImage = "calico_cni_image"
+    CalicoControllersImages = "calico_controllers_image"
+
+    CanalNetworkPlugin = "canal"
+    CanalNodeImage = "canal_node_image"
+    CanalCNIImage = "canal_cni_image"
+    CanalFlannelImage = "canal_flannel_image"
 )

 func (c *Cluster) DeployNetworkPlugin() error {
@@ -31,30 +42,62 @@ func (c *Cluster) DeployNetworkPlugin() error {
 }

 func (c *Cluster) doFlannelDeploy() error {
-    pluginYaml := network.GetFlannelManifest(c.ClusterCIDR)
+    pluginYaml := network.GetFlannelManifest(c.ClusterCIDR, c.Network.Options[FlannelImage], c.Network.Options[FlannelCNIImage])
     return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
 }

 func (c *Cluster) doCalicoDeploy() error {
-    calicoConfig := make(map[string]string)
-    calicoConfig["etcdEndpoints"] = services.GetEtcdConnString(c.EtcdHosts)
-    calicoConfig["apiRoot"] = "https://127.0.0.1:6443"
-    calicoConfig["clientCrt"] = pki.KubeNodeCertPath
-    calicoConfig["clientKey"] = pki.KubeNodeKeyPath
-    calicoConfig["clientCA"] = pki.CACertPath
-    calicoConfig["kubeCfg"] = pki.KubeNodeConfigPath
-    calicoConfig["clusterCIDR"] = c.ClusterCIDR
+    calicoConfig := map[string]string{
+        network.EtcdEndpoints: services.GetEtcdConnString(c.EtcdHosts),
+        network.APIRoot: "https://127.0.0.1:6443",
+        network.ClientCert: pki.KubeNodeCertPath,
+        network.ClientKey: pki.KubeNodeKeyPath,
+        network.ClientCA: pki.CACertPath,
+        network.KubeCfg: pki.KubeNodeConfigPath,
+        network.ClusterCIDR: c.ClusterCIDR,
+        network.CNIImage: c.Network.Options[CalicoCNIImage],
+        network.NodeImage: c.Network.Options[CalicoNodeImage],
+        network.ControllersImage: c.Network.Options[CalicoControllersImages],
+    }
     pluginYaml := network.GetCalicoManifest(calicoConfig)
     return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
 }

 func (c *Cluster) doCanalDeploy() error {
-    canalConfig := make(map[string]string)
-    canalConfig["clientCrt"] = pki.KubeNodeCertPath
-    canalConfig["clientKey"] = pki.KubeNodeKeyPath
-    canalConfig["clientCA"] = pki.CACertPath
-    canalConfig["kubeCfg"] = pki.KubeNodeConfigPath
-    canalConfig["clusterCIDR"] = c.ClusterCIDR
+    canalConfig := map[string]string{
+        network.ClientCert: pki.KubeNodeCertPath,
+        network.ClientKey: pki.KubeNodeKeyPath,
+        network.ClientCA: pki.CACertPath,
+        network.KubeCfg: pki.KubeNodeConfigPath,
+        network.ClusterCIDR: c.ClusterCIDR,
+        network.NodeImage: c.Network.Options[CanalNodeImage],
+        network.CNIImage: c.Network.Options[CanalCNIImage],
+        network.FlannelImage: c.Network.Options[CanalFlannelImage],
+    }
     pluginYaml := network.GetCanalManifest(canalConfig)
     return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
 }
+
+func (c *Cluster) setClusterNetworkDefaults() {
+    setDefaultIfEmpty(&c.Network.Plugin, DefaultNetworkPlugin)
+    if c.Network.Options == nil {
+        // don't break if the user didn't define options
+        c.Network.Options = make(map[string]string)
+    }
+    switch {
+    case c.Network.Plugin == FlannelNetworkPlugin:
+        setDefaultIfEmptyMapValue(c.Network.Options, FlannelImage, DefaultFlannelImage)
+        setDefaultIfEmptyMapValue(c.Network.Options, FlannelCNIImage, DefaultFlannelCNIImage)
+    case c.Network.Plugin == CalicoNetworkPlugin:
+        setDefaultIfEmptyMapValue(c.Network.Options, CalicoCNIImage, DefaultCalicoCNIImage)
+        setDefaultIfEmptyMapValue(c.Network.Options, CalicoNodeImage, DefaultCalicoNodeImage)
+        setDefaultIfEmptyMapValue(c.Network.Options, CalicoControllersImages, DefaultCalicoControllersImage)
+    case c.Network.Plugin == CanalNetworkPlugin:
+        setDefaultIfEmptyMapValue(c.Network.Options, CanalCNIImage, DefaultCanalCNIImage)
+        setDefaultIfEmptyMapValue(c.Network.Options, CanalNodeImage, DefaultCanalNodeImage)
+        setDefaultIfEmptyMapValue(c.Network.Options, CanalFlannelImage, DefaultCanalFlannelImage)
+    }
+}
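The per-plugin defaulting above only touches the option keys that belong to the selected plugin, and only when the user left them unset. A standalone sketch of that idea; the nested map is purely an illustration device (the real code switches on c.Network.Plugin and calls setDefaultIfEmptyMapValue per key), and myregistry.local is a made-up override:

package main

import "fmt"

// Option keys and default images per plugin, as introduced in this commit.
var pluginImageDefaults = map[string]map[string]string{
    "flannel": {
        "flannel_image":     "quay.io/coreos/flannel:v0.9.1",
        "flannel_cni_image": "quay.io/coreos/flannel-cni:v0.2.0",
    },
    "calico": {
        "calico_node_image":        "quay.io/calico/node:v2.6.2",
        "calico_cni_image":         "quay.io/calico/cni:v1.11.0",
        "calico_controllers_image": "quay.io/calico/kube-controllers:v1.0.0",
    },
    "canal": {
        "canal_node_image":    "quay.io/calico/node:v2.6.2",
        "canal_cni_image":     "quay.io/calico/cni:v1.11.0",
        "canal_flannel_image": "quay.io/coreos/flannel:v0.9.1",
    },
}

func main() {
    // network.options from cluster.yml: the user pinned only the flannel image.
    options := map[string]string{"flannel_image": "myregistry.local/flannel:v0.9.1"}
    for key, def := range pluginImageDefaults["flannel"] {
        if _, ok := options[key]; !ok {
            options[key] = def
        }
    }
    fmt.Println(options) // override kept, flannel_cni_image gets the default
}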


@@ -50,7 +50,7 @@ func reconcileWorker(currentCluster, kubeCluster *Cluster, kubeClient *kubernete
             return fmt.Errorf("Failed to delete worker node %s from cluster", toDeleteHost.Address)
         }
         // attempting to clean services/files on the host
-        if err := reconcileHost(toDeleteHost, true); err != nil {
+        if err := reconcileHost(toDeleteHost, true, currentCluster.SystemImages[AplineImage]); err != nil {
             logrus.Warnf("[reconcile] Couldn't clean up worker node [%s]: %v", toDeleteHost.Address, err)
             continue
         }
@@ -82,7 +82,7 @@ func reconcileControl(currentCluster, kubeCluster *Cluster, kubeClient *kubernet
             return fmt.Errorf("Failed to delete controlplane node %s from cluster", toDeleteHost.Address)
         }
         // attempting to clean services/files on the host
-        if err := reconcileHost(toDeleteHost, false); err != nil {
+        if err := reconcileHost(toDeleteHost, false, currentCluster.SystemImages[AplineImage]); err != nil {
             logrus.Warnf("[reconcile] Couldn't clean up controlplane node [%s]: %v", toDeleteHost.Address, err)
             continue
         }
@@ -95,7 +95,7 @@ func reconcileControl(currentCluster, kubeCluster *Cluster, kubeClient *kubernet
     cpChanged := hosts.IsHostListChanged(currentCluster.ControlPlaneHosts, kubeCluster.ControlPlaneHosts)
     if cpChanged {
         logrus.Infof("[reconcile] Rolling update nginx hosts with new list of control plane hosts")
-        err := services.RollingUpdateNginxProxy(kubeCluster.ControlPlaneHosts, kubeCluster.WorkerHosts)
+        err := services.RollingUpdateNginxProxy(kubeCluster.ControlPlaneHosts, kubeCluster.WorkerHosts, currentCluster.SystemImages[NginxProxyImage])
         if err != nil {
             return fmt.Errorf("Failed to rolling update Nginx hosts with new control plane hosts")
         }
@@ -103,7 +103,7 @@ func reconcileControl(currentCluster, kubeCluster *Cluster, kubeClient *kubernet
     return nil
 }

-func reconcileHost(toDeleteHost *hosts.Host, worker bool) error {
+func reconcileHost(toDeleteHost *hosts.Host, worker bool, cleanerImage string) error {
     if err := toDeleteHost.TunnelUp(); err != nil {
         return fmt.Errorf("Not able to reach the host: %v", err)
     }
@@ -111,14 +111,14 @@ func reconcileHost(toDeleteHost *hosts.Host, worker bool) error {
         if err := services.RemoveWorkerPlane([]*hosts.Host{toDeleteHost}, false); err != nil {
             return fmt.Errorf("Couldn't remove worker plane: %v", err)
         }
-        if err := toDeleteHost.CleanUpWorkerHost(services.ControlRole); err != nil {
+        if err := toDeleteHost.CleanUpWorkerHost(services.ControlRole, cleanerImage); err != nil {
             return fmt.Errorf("Not able to clean the host: %v", err)
         }
     } else {
         if err := services.RemoveControlPlane([]*hosts.Host{toDeleteHost}, false); err != nil {
             return fmt.Errorf("Couldn't remove control plane: %v", err)
         }
-        if err := toDeleteHost.CleanUpControlHost(services.WorkerRole); err != nil {
+        if err := toDeleteHost.CleanUpControlHost(services.WorkerRole, cleanerImage); err != nil {
             return fmt.Errorf("Not able to clean the host: %v", err)
         }
     }


@@ -23,21 +23,21 @@ func (c *Cluster) ClusterRemove() error {
     }

     // Clean up all hosts
-    if err := cleanUpHosts(c.ControlPlaneHosts, c.WorkerHosts, c.EtcdHosts); err != nil {
+    if err := cleanUpHosts(c.ControlPlaneHosts, c.WorkerHosts, c.EtcdHosts, c.SystemImages[AplineImage]); err != nil {
         return err
     }

     return pki.RemoveAdminConfig(c.LocalKubeConfigPath)
 }

-func cleanUpHosts(cpHosts, workerHosts, etcdHosts []*hosts.Host) error {
+func cleanUpHosts(cpHosts, workerHosts, etcdHosts []*hosts.Host, cleanerImage string) error {
     allHosts := []*hosts.Host{}
     allHosts = append(allHosts, cpHosts...)
     allHosts = append(allHosts, workerHosts...)
     allHosts = append(allHosts, etcdHosts...)
     for _, host := range allHosts {
-        if err := host.CleanUpAll(); err != nil {
+        if err := host.CleanUpAll(cleanerImage); err != nil {
             return err
         }
     }


@@ -16,6 +16,11 @@ import (
     "gopkg.in/yaml.v2"
 )

+const (
+    comments = `# If you intend to deploy Kubernetes in an air-gapped environment,
+# please consult the documentation on how to configure custom RKE images.`
+)
+
 func ConfigCommand() cli.Command {
     return cli.Command{
         Name: "config",
@@ -67,12 +72,12 @@ func writeConfig(cluster *v3.RancherKubernetesEngineConfig, configFile string, p
     }

     logrus.Debugf("Deploying cluster configuration file: %s", configFile)
+    configString := fmt.Sprintf("%s\n%s", comments, string(yamlConfig))
     if print {
-        fmt.Printf("Configuration File: \n%s", string(yamlConfig))
+        fmt.Printf("Configuration File: \n%s", configString)
         return nil
     }
-    return ioutil.WriteFile(configFile, yamlConfig, 0640)
+    return ioutil.WriteFile(configFile, []byte(configString), 0640)
 }

 func clusterConfig(ctx *cli.Context) error {


@@ -27,11 +27,10 @@ const (
     ToCleanCNIBin = "/opt/cni"
     ToCleanCalicoRun = "/var/run/calico"
     CleanerContainerName = "kube-cleaner"
-    CleanerImage = "alpine:latest"
 )

-func (h *Host) CleanUpAll() error {
-    // the only supported removal for etcd dir is in rke remove
+func (h *Host) CleanUpAll(cleanerImage string) error {
+    logrus.Infof("[hosts] Cleaning up host [%s]", h.Address)
     toCleanPaths := []string{
         ToCleanEtcdDir,
         ToCleanSSLDir,
@@ -39,10 +38,10 @@ func (h *Host) CleanUpAll() error {
         ToCleanCNIBin,
         ToCleanCalicoRun,
     }
-    return h.CleanUp(toCleanPaths)
+    return h.CleanUp(toCleanPaths, cleanerImage)
 }

-func (h *Host) CleanUpWorkerHost(controlRole string) error {
+func (h *Host) CleanUpWorkerHost(controlRole, cleanerImage string) error {
     if h.IsControl {
         logrus.Infof("[hosts] Host [%s] is already a controlplane host, skipping cleanup.", h.Address)
         return nil
@@ -53,10 +52,10 @@ func (h *Host) CleanUpWorkerHost(controlRole string) error {
         ToCleanCNIBin,
         ToCleanCalicoRun,
     }
-    return h.CleanUp(toCleanPaths)
+    return h.CleanUp(toCleanPaths, cleanerImage)
 }

-func (h *Host) CleanUpControlHost(workerRole string) error {
+func (h *Host) CleanUpControlHost(workerRole, cleanerImage string) error {
     if h.IsWorker {
         logrus.Infof("[hosts] Host [%s] is already a worker host, skipping cleanup.", h.Address)
         return nil
@@ -67,12 +66,12 @@ func (h *Host) CleanUpControlHost(workerRole string) error {
         ToCleanCNIBin,
         ToCleanCalicoRun,
     }
-    return h.CleanUp(toCleanPaths)
+    return h.CleanUp(toCleanPaths, cleanerImage)
 }

-func (h *Host) CleanUp(toCleanPaths []string) error {
+func (h *Host) CleanUp(toCleanPaths []string, cleanerImage string) error {
     logrus.Infof("[hosts] Cleaning up host [%s]", h.Address)
-    imageCfg, hostCfg := buildCleanerConfig(h, toCleanPaths)
+    imageCfg, hostCfg := buildCleanerConfig(h, toCleanPaths, cleanerImage)
     logrus.Infof("[hosts] Running cleaner container on host [%s]", h.Address)
     if err := docker.DoRunContainer(h.DClient, imageCfg, hostCfg, CleanerContainerName, h.Address, CleanerContainerName); err != nil {
         return err
@@ -160,10 +159,10 @@ func IsHostListChanged(currentHosts, configHosts []*Host) bool {
     return changed
 }

-func buildCleanerConfig(host *Host, toCleanDirs []string) (*container.Config, *container.HostConfig) {
+func buildCleanerConfig(host *Host, toCleanDirs []string, cleanerImage string) (*container.Config, *container.HostConfig) {
     cmd := append([]string{"rm", "-rf"}, toCleanDirs...)
     imageCfg := &container.Config{
-        Image: CleanerImage,
+        Image: cleanerImage,
         Cmd: cmd,
     }
     bindMounts := []string{}


@@ -17,7 +17,7 @@ metadata:
 namespace: kube-system
 data:
 # Configure this with the location of your etcd cluster.
-etcd_endpoints: "` + calicoConfig["etcdEndpoints"] + `"
+etcd_endpoints: "` + calicoConfig[EtcdEndpoints] + `"

 # Configure the Calico backend to use.
 calico_backend: "bird"
@@ -30,7 +30,7 @@ data:
 "plugins": [
 {
 "type": "calico",
-"etcd_endpoints": "` + calicoConfig["etcdEndpoints"] + `",
+"etcd_endpoints": "` + calicoConfig[EtcdEndpoints] + `",
 "etcd_key_file": "",
 "etcd_cert_file": "",
 "etcd_ca_cert_file": "",
@@ -41,13 +41,13 @@ data:
 },
 "policy": {
 "type": "k8s",
-"k8s_api_root": "` + calicoConfig["apiRoot"] + `",
-"k8s_client_certificate": "` + calicoConfig["clientCrt"] + `",
-"k8s_client_key": "` + calicoConfig["clientKey"] + `",
-"k8s_certificate_authority": "` + calicoConfig["clientCA"] + `"
+"k8s_api_root": "` + calicoConfig[APIRoot] + `",
+"k8s_client_certificate": "` + calicoConfig[ClientCert] + `",
+"k8s_client_key": "` + calicoConfig[ClientKey] + `",
+"k8s_certificate_authority": "` + calicoConfig[ClientCA] + `"
 },
 "kubernetes": {
-"kubeconfig": "` + calicoConfig["kubeCfg"] + `"
+"kubeconfig": "` + calicoConfig[KubeCfg] + `"
 }
 },
 {
@@ -124,7 +124,7 @@ spec:
 # container programs network policy and routes on each
 # host.
 - name: calico-node
-image: quay.io/calico/node:v2.6.2
+image: ` + calicoConfig[NodeImage] + `
 env:
 # The location of the Calico etcd cluster.
 - name: ETCD_ENDPOINTS
@@ -149,7 +149,7 @@ spec:
 value: "ACCEPT"
 # Configure the IP Pool from which Pod IPs will be chosen.
 - name: CALICO_IPV4POOL_CIDR
-value: "` + calicoConfig["clusterCIDR"] + `"
+value: "` + calicoConfig[ClusterCIDR] + `"
 - name: CALICO_IPV4POOL_IPIP
 value: "Always"
 # Disable IPv6 on Kubernetes.
@@ -215,7 +215,7 @@ spec:
 # This container installs the Calico CNI binaries
 # and CNI network config file on each node.
 - name: install-cni
-image: quay.io/calico/cni:v1.11.0
+image: ` + calicoConfig[CNIImage] + `
 command: ["/install-cni.sh"]
 env:
 # Name of the CNI config file to create.
@@ -304,7 +304,7 @@ spec:
 operator: "Exists"
 containers:
 - name: calico-kube-controllers
-image: quay.io/calico/kube-controllers:v1.0.0
+image: ` + calicoConfig[ControllersImage] + `
 env:
 # The location of the Calico etcd cluster.
 - name: ETCD_ENDPOINTS
@@ -371,7 +371,7 @@ spec:
 serviceAccountName: calico-kube-controllers
 containers:
 - name: calico-policy-controller
-image: quay.io/calico/kube-controllers:v1.0.0
+image: ` + calicoConfig[ControllersImage] + `
 env:
 # The location of the Calico etcd cluster.
 - name: ETCD_ENDPOINTS


@@ -34,13 +34,13 @@ data:
 },
 "policy": {
 "type": "k8s",
-"k8s_api_root": "` + canalConfig["apiRoot"] + `",
-"k8s_client_certificate": "` + canalConfig["clientCrt"] + `",
-"k8s_client_key": "` + canalConfig["clientKey"] + `",
-"k8s_certificate_authority": "` + canalConfig["clientCA"] + `"
+"k8s_api_root": "` + canalConfig[APIRoot] + `",
+"k8s_client_certificate": "` + canalConfig[ClientCert] + `",
+"k8s_client_key": "` + canalConfig[ClientKey] + `",
+"k8s_certificate_authority": "` + canalConfig[ClientCA] + `"
 },
 "kubernetes": {
-"kubeconfig": "` + canalConfig["kubeCfg"] + `"
+"kubeconfig": "` + canalConfig[KubeCfg] + `"
 }
 },
 {
@@ -54,7 +54,7 @@ data:
 # Flannel network configuration. Mounted into the flannel container.
 net-conf.json: |
 {
-"Network": "` + canalConfig["clusterCIDR"] + `",
+"Network": "` + canalConfig[ClusterCIDR] + `",
 "Backend": {
 "Type": "vxlan"
 }
@@ -106,7 +106,7 @@ spec:
 # container programs network policy and routes on each
 # host.
 - name: calico-node
-image: quay.io/calico/node:v2.6.2
+image: ` + canalConfig[NodeImage] + `
 env:
 # Use Kubernetes API as the backing datastore.
 - name: DATASTORE_TYPE
@@ -173,7 +173,7 @@ spec:
 # This container installs the Calico CNI binaries
 # and CNI network config file on each node.
 - name: install-cni
-image: quay.io/calico/cni:v1.11.0
+image: ` + canalConfig[CNIImage] + `
 command: ["/install-cni.sh"]
 env:
 - name: CNI_CONF_NAME
@@ -198,7 +198,7 @@ spec:
 # This container runs flannel using the kube-subnet-mgr backend
 # for allocating subnets.
 - name: kube-flannel
-image: quay.io/coreos/flannel:v0.9.1
+image: ` + canalConfig[FlannelImage] + `
 command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
 securityContext:
 privileged: true


@@ -1,6 +1,6 @@
 package network

-func GetFlannelManifest(clusterCIDR string) string {
+func GetFlannelManifest(clusterCIDR, image, cniImage string) string {
     return `
 ---
 kind: ConfigMap
@@ -57,7 +57,7 @@ spec:
 spec:
 containers:
 - name: kube-flannel
-image: quay.io/coreos/flannel:v0.8.0
+image: ` + image + `
 imagePullPolicy: IfNotPresent
 resources:
 limits:
@@ -86,7 +86,7 @@ spec:
 - name: flannel-cfg
 mountPath: /etc/kube-flannel/
 - name: install-cni
-image: quay.io/coreos/flannel-cni:v0.2.0
+image: ` + cniImage + `
 command: ["/install-cni.sh"]
 env:
 # The CNI network config to install on each node.

network/network.go (new file)

@@ -0,0 +1,16 @@
+package network
+
+const (
+    EtcdEndpoints = "etcdEndpoints"
+    APIRoot = "apiRoot"
+    ClientCert = "clientCert"
+    ClientKey = "clientKey"
+    ClientCA = "clientCA"
+    KubeCfg = "kubeCfg"
+    ClusterCIDR = "clusterCIDR"
+    CNIImage = "cniImage"
+    NodeImage = "nodeImage"
+    ControllersImage = "controllersImage"
+    FlannelImage = "flannelImage"
+    FlannelCNIImage = "flannelCNIImage"
+)


@@ -1,7 +1,6 @@
 package pki

 const (
-    CrtDownloaderImage = "rancher/rke-cert-deployer:0.1.0"
     CrtDownloaderContainer = "cert-deployer"
     CertificatesSecretName = "k8s-certs"


@@ -14,7 +14,7 @@ import (
     "github.com/sirupsen/logrus"
 )

-func DeployCertificatesOnMasters(cpHosts []*hosts.Host, crtMap map[string]CertificatePKI) error {
+func DeployCertificatesOnMasters(cpHosts []*hosts.Host, crtMap map[string]CertificatePKI, certDownloaderImage string) error {
     // list of certificates that should be deployed on the masters
     crtList := []string{
         CACertName,
@@ -31,7 +31,7 @@ func DeployCertificatesOnMasters(cpHosts []*hosts.Host, crtMap map[string]Certif
     }

     for i := range cpHosts {
-        err := doRunDeployer(cpHosts[i], env)
+        err := doRunDeployer(cpHosts[i], env, certDownloaderImage)
         if err != nil {
             return err
         }
@@ -39,7 +39,7 @@ func DeployCertificatesOnMasters(cpHosts []*hosts.Host, crtMap map[string]Certif
     return nil
 }

-func DeployCertificatesOnWorkers(workerHosts []*hosts.Host, crtMap map[string]CertificatePKI) error {
+func DeployCertificatesOnWorkers(workerHosts []*hosts.Host, crtMap map[string]CertificatePKI, certDownloaderImage string) error {
     // list of certificates that should be deployed on the workers
     crtList := []string{
         CACertName,
@@ -53,7 +53,7 @@ func DeployCertificatesOnWorkers(workerHosts []*hosts.Host, crtMap map[string]Ce
     }

     for i := range workerHosts {
-        err := doRunDeployer(workerHosts[i], env)
+        err := doRunDeployer(workerHosts[i], env, certDownloaderImage)
         if err != nil {
             return err
         }
@@ -61,14 +61,13 @@ func DeployCertificatesOnWorkers(workerHosts []*hosts.Host, crtMap map[string]Ce
     return nil
 }

-func doRunDeployer(host *hosts.Host, containerEnv []string) error {
+func doRunDeployer(host *hosts.Host, containerEnv []string, certDownloaderImage string) error {
     logrus.Debugf("[certificates] Pulling Certificate downloader Image on host [%s]", host.Address)
-    err := docker.PullImage(host.DClient, host.Address, CrtDownloaderImage)
-    if err != nil {
+    if err := docker.PullImage(host.DClient, host.Address, certDownloaderImage); err != nil {
         return err
     }
     imageCfg := &container.Config{
-        Image: CrtDownloaderImage,
+        Image: certDownloaderImage,
         Env: containerEnv,
     }
     hostCfg := &container.HostConfig{


@@ -13,10 +13,10 @@ const (
     NginxProxyEnvName = "CP_HOSTS"
 )

-func RollingUpdateNginxProxy(cpHosts []*hosts.Host, workerHosts []*hosts.Host) error {
+func RollingUpdateNginxProxy(cpHosts []*hosts.Host, workerHosts []*hosts.Host, nginxProxyImage string) error {
     nginxProxyEnv := buildProxyEnv(cpHosts)
     for _, host := range workerHosts {
-        imageCfg, hostCfg := buildNginxProxyConfig(host, nginxProxyEnv)
+        imageCfg, hostCfg := buildNginxProxyConfig(host, nginxProxyEnv, nginxProxyImage)
         if err := docker.DoRollingUpdateContainer(host.DClient, imageCfg, hostCfg, NginxProxyContainerName, host.Address, WorkerRole); err != nil {
             return err
         }
@@ -24,9 +24,9 @@ func RollingUpdateNginxProxy(cpHosts []*hosts.Host, workerHosts []*hosts.Host) e
     return nil
 }

-func runNginxProxy(host *hosts.Host, cpHosts []*hosts.Host) error {
+func runNginxProxy(host *hosts.Host, cpHosts []*hosts.Host, nginxProxyImage string) error {
     nginxProxyEnv := buildProxyEnv(cpHosts)
-    imageCfg, hostCfg := buildNginxProxyConfig(host, nginxProxyEnv)
+    imageCfg, hostCfg := buildNginxProxyConfig(host, nginxProxyEnv, nginxProxyImage)
     return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, NginxProxyContainerName, host.Address, WorkerRole)
 }

@@ -34,9 +34,9 @@ func removeNginxProxy(host *hosts.Host) error {
     return docker.DoRemoveContainer(host.DClient, NginxProxyContainerName, host.Address)
 }

-func buildNginxProxyConfig(host *hosts.Host, nginxProxyEnv string) (*container.Config, *container.HostConfig) {
+func buildNginxProxyConfig(host *hosts.Host, nginxProxyEnv, nginxProxyImage string) (*container.Config, *container.HostConfig) {
     imageCfg := &container.Config{
-        Image: NginxProxyImage,
+        Image: nginxProxyImage,
         Env: []string{fmt.Sprintf("%s=%s", NginxProxyEnvName, nginxProxyEnv)},
     }
     hostCfg := &container.HostConfig{


@@ -6,7 +6,7 @@ import (
     "github.com/sirupsen/logrus"
 )

-func RunWorkerPlane(controlHosts []*hosts.Host, workerHosts []*hosts.Host, workerServices v3.RKEConfigServices) error {
+func RunWorkerPlane(controlHosts []*hosts.Host, workerHosts []*hosts.Host, workerServices v3.RKEConfigServices, nginxProxyImage string) error {
     logrus.Infof("[%s] Building up Worker Plane..", WorkerRole)
     for _, host := range controlHosts {
         // only one master for now
@@ -20,7 +20,7 @@ func RunWorkerPlane(controlHosts []*hosts.Host, workerHosts []*hosts.Host, worke
     for _, host := range workerHosts {
         // run nginx proxy
         if !host.IsControl {
-            if err := runNginxProxy(host, controlHosts); err != nil {
+            if err := runNginxProxy(host, controlHosts, nginxProxyImage); err != nil {
                 return err
             }
         }