Mirror of https://github.com/rancher/rke.git (synced 2025-05-11 18:04:35 +00:00)

Add user-configurable images

- Refactor configuration defaults
- Add comments to config
- Add configurable utility images
- Add configurable network plugin images
- Add configurable kubedns images

parent 89ec803428
commit 0ee002b2cf
@@ -1,6 +1,15 @@
package addons

func GetKubeDNSManifest(clusterDNSServer, clusterDomain string) string {
const (
KubeDNSImage = "kubeDNSImage"
DNSMasqImage = "DNSMasqImage"
KubeDNSSidecarImage = "kubednsSidecarImage"
KubeDNSAutoScalerImage = "kubeDNSAutoScalerImage"
KubeDNSServer = "clusterDNSServer"
KubeDNSClusterDomain = "clusterDomain"
)

func GetKubeDNSManifest(kubeDNSConfig map[string]string) string {
return `---
apiVersion: apps/v1beta1
kind: Deployment
@@ -17,7 +26,7 @@ spec:
spec:
containers:
- name: autoscaler
image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0
image: ` + kubeDNSConfig[KubeDNSAutoScalerImage] + `
resources:
requests:
cpu: "20m"
@@ -81,7 +90,7 @@ spec:
optional: true
containers:
- name: kubedns
image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5
image: ` + kubeDNSConfig[KubeDNSImage] + `
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
@@ -111,7 +120,7 @@ spec:
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain=` + clusterDomain + `.
- --domain=` + kubeDNSConfig[KubeDNSClusterDomain] + `.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
@@ -132,7 +141,7 @@ spec:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5
image: ` + kubeDNSConfig[DNSMasqImage] + `
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
@@ -151,7 +160,7 @@ spec:
- -k
- --cache-size=1000
- --log-facility=-
- --server=/` + clusterDomain + `/127.0.0.1#10053
- --server=/` + kubeDNSConfig[KubeDNSClusterDomain] + `/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
ports:
@@ -170,7 +179,7 @@ spec:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
image: ` + kubeDNSConfig[KubeDNSSidecarImage] + `
livenessProbe:
httpGet:
path: /metrics
@@ -183,8 +192,8 @@ spec:
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.` + clusterDomain + `,5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.` + clusterDomain + `,5,A
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.` + kubeDNSConfig[KubeDNSClusterDomain] + `,5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.` + kubeDNSConfig[KubeDNSClusterDomain] + `,5,A
ports:
- containerPort: 10054
name: metrics
@@ -209,7 +218,7 @@ metadata:
spec:
selector:
k8s-app: kube-dns
clusterIP: ` + clusterDNSServer + `
clusterIP: ` + kubeDNSConfig[KubeDNSServer] + `
ports:
- name: dns
port: 53
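With this change the kube-dns manifest is rendered from a string map keyed by the exported constants above instead of two positional arguments. A minimal sketch of rendering it with custom images (a hypothetical standalone program; it assumes the package lives at github.com/rancher/rke/addons, and the image references are only illustrative):

```go
package main

import (
	"fmt"

	"github.com/rancher/rke/addons" // assumed import path for the addons package shown above
)

func main() {
	// Keys are the exported constants from the addons package; the values are
	// illustrative and would normally come from cluster.yml / SystemImages.
	kubeDNSConfig := map[string]string{
		addons.KubeDNSServer:          "10.233.0.3",
		addons.KubeDNSClusterDomain:   "cluster.local",
		addons.KubeDNSImage:           "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5",
		addons.DNSMasqImage:           "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5",
		addons.KubeDNSSidecarImage:    "gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5",
		addons.KubeDNSAutoScalerImage: "gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0",
	}

	// Print the rendered kube-dns manifest with the images substituted in.
	fmt.Println(addons.GetKubeDNSManifest(kubeDNSConfig))
}
```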
13 cluster.yml
@@ -12,7 +12,8 @@ auth:
network:
plugin: flannel
options:
foo: bar
flannel_image: quay.io/coreos/flannel:v0.9.1
flannel_cni_image: quay.io/coreos/flannel-cni:v0.2.0

ssh_key_path: ~/.ssh/test

@@ -56,6 +57,16 @@ services:
kubeproxy:
image: rancher/k8s:v1.8.3-rancher2

system_images:
alpine: alpine:latest
nginx_proxy: rancher/rke-nginx-proxy:0.1.0
cert_downloader: rancher/rke-cert-deployer:0.1.0
kubedns_image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5
dnsmasq_image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5
kubedns_sidecar_image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
kubedns_autoscaler_image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0

# all addon manifests MUST specify a namespace
addons: |-
---
@@ -36,9 +36,15 @@ func (c *Cluster) DeployUserAddOns() error {
func (c *Cluster) deployKubeDNS() error {
logrus.Infof("[addons] Setting up KubeDNS")
kubeDNSYaml := addons.GetKubeDNSManifest(c.ClusterDNSServer, c.ClusterDomain)
kubeDNSConfig := map[string]string{
addons.KubeDNSServer: c.ClusterDNSServer,
addons.KubeDNSClusterDomain: c.ClusterDomain,
addons.KubeDNSImage: c.SystemImages[KubeDNSImage],
addons.DNSMasqImage: c.SystemImages[DNSMasqImage],
addons.KubeDNSSidecarImage: c.SystemImages[KubeDNSSidecarImage],
addons.KubeDNSAutoScalerImage: c.SystemImages[KubeDNSAutoScalerImage],
}
kubeDNSYaml := addons.GetKubeDNSManifest(kubeDNSConfig)
if err := c.doAddonDeploy(kubeDNSYaml, KubeDNSAddonResourceName); err != nil {
return err
}
@@ -33,20 +33,18 @@ type Cluster struct {
}

const (
X509AuthenticationProvider = "x509"
DefaultClusterConfig = "cluster.yml"
DefaultServiceClusterIPRange = "10.233.0.0/18"
DefaultClusterCIDR = "10.233.64.0/18"
DefaultClusterDNSService = "10.233.0.3"
DefaultClusterDomain = "cluster.local"
DefaultInfraContainerImage = "gcr.io/google_containers/pause-amd64:3.0"
DefaultAuthStrategy = "x509"
DefaultNetworkPlugin = "flannel"
DefaultClusterSSHKeyPath = "~/.ssh/id_rsa"
StateConfigMapName = "cluster-state"
UpdateStateTimeout = 30
GetStateTimeout = 30
KubernetesClientTimeOut = 30
X509AuthenticationProvider = "x509"
StateConfigMapName = "cluster-state"
UpdateStateTimeout = 30
GetStateTimeout = 30
KubernetesClientTimeOut = 30
AplineImage = "alpine"
NginxProxyImage = "nginx_proxy"
CertDownloaderImage = "cert_downloader"
KubeDNSImage = "kubedns_image"
DNSMasqImage = "dnsmasq_image"
KubeDNSSidecarImage = "kubedns_sidecar_image"
KubeDNSAutoScalerImage = "kubedns_autoscaler_image"
)

func (c *Cluster) DeployClusterPlanes() error {
@@ -59,7 +57,7 @@ func (c *Cluster) DeployClusterPlanes() error {
if err != nil {
return fmt.Errorf("[controlPlane] Failed to bring up Control Plane: %v", err)
}
err = services.RunWorkerPlane(c.ControlPlaneHosts, c.WorkerHosts, c.Services)
err = services.RunWorkerPlane(c.ControlPlaneHosts, c.WorkerHosts, c.Services, c.SystemImages[NginxProxyImage])
if err != nil {
return fmt.Errorf("[workerPlane] Failed to bring up Worker Plane: %v", err)
}
@@ -126,29 +124,42 @@ func (c *Cluster) setClusterDefaults() {
c.Nodes[i].SSHKeyPath = c.SSHKeyPath
}
}
if len(c.Services.KubeAPI.ServiceClusterIPRange) == 0 {
c.Services.KubeAPI.ServiceClusterIPRange = DefaultServiceClusterIPRange
c.setClusterServicesDefaults()
c.setClusterNetworkDefaults()
c.setClusterImageDefaults()
}

func (c *Cluster) setClusterServicesDefaults() {
serviceConfigDefaultsMap := map[string]string{
c.Services.KubeAPI.ServiceClusterIPRange: DefaultServiceClusterIPRange,
c.Services.KubeController.ServiceClusterIPRange: DefaultServiceClusterIPRange,
c.Services.KubeController.ClusterCIDR: DefaultClusterCIDR,
c.Services.Kubelet.ClusterDNSServer: DefaultClusterDNSService,
c.Services.Kubelet.ClusterDomain: DefaultClusterDomain,
c.Services.Kubelet.InfraContainerImage: DefaultInfraContainerImage,
c.Authentication.Strategy: DefaultAuthStrategy,
}
if len(c.Services.KubeController.ServiceClusterIPRange) == 0 {
c.Services.KubeController.ServiceClusterIPRange = DefaultServiceClusterIPRange
for k, v := range serviceConfigDefaultsMap {
setDefaultIfEmpty(&k, v)
}
if len(c.Services.KubeController.ClusterCIDR) == 0 {
c.Services.KubeController.ClusterCIDR = DefaultClusterCIDR
}

func (c *Cluster) setClusterImageDefaults() {
if c.SystemImages == nil {
// don't break if the user didn't define rke_images
c.SystemImages = make(map[string]string)
}
if len(c.Services.Kubelet.ClusterDNSServer) == 0 {
c.Services.Kubelet.ClusterDNSServer = DefaultClusterDNSService
systemImagesDefaultsMap := map[string]string{
AplineImage: DefaultAplineImage,
NginxProxyImage: DefaultNginxProxyImage,
CertDownloaderImage: DefaultCertDownloaderImage,
KubeDNSImage: DefaultKubeDNSImage,
DNSMasqImage: DefaultDNSMasqImage,
KubeDNSSidecarImage: DefaultKubeDNSSidecarImage,
KubeDNSAutoScalerImage: DefaultKubeDNSAutoScalerImage,
}
if len(c.Services.Kubelet.ClusterDomain) == 0 {
c.Services.Kubelet.ClusterDomain = DefaultClusterDomain
}
if len(c.Services.Kubelet.InfraContainerImage) == 0 {
c.Services.Kubelet.InfraContainerImage = DefaultInfraContainerImage
}
if len(c.Authentication.Strategy) == 0 {
c.Authentication.Strategy = DefaultAuthStrategy
}
if len(c.Network.Plugin) == 0 {
c.Network.Plugin = DefaultNetworkPlugin
for k, v := range systemImagesDefaultsMap {
setDefaultIfEmptyMapValue(c.SystemImages, k, v)
}
}
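One detail worth noting about setClusterServicesDefaults above: the defaults map is keyed by the current field values, and in Go both map keys and the range variable k are copies, so setDefaultIfEmpty(&k, v) writes into the loop variable rather than back into the c.Services fields. A self-contained sketch of that behavior (hypothetical variable names; the helper is copied from cluster/defaults.go below):

```go
package main

import "fmt"

// Copied from cluster/defaults.go in this commit.
func setDefaultIfEmpty(varName *string, defaultValue string) {
	if len(*varName) == 0 {
		*varName = defaultValue
	}
}

func main() {
	clusterDomain := "" // stands in for a field such as c.Services.Kubelet.ClusterDomain
	defaults := map[string]string{
		clusterDomain: "cluster.local", // the key is a copy of clusterDomain's current value ("")
	}
	for k, v := range defaults {
		setDefaultIfEmpty(&k, v) // updates the loop variable k, not clusterDomain
	}
	fmt.Printf("clusterDomain after applying defaults: %q\n", clusterDomain) // prints ""
}
```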
45 cluster/defaults.go Normal file
@@ -0,0 +1,45 @@
package cluster

const (
DefaultClusterConfig = "cluster.yml"

DefaultServiceClusterIPRange = "10.233.0.0/18"
DefaultClusterCIDR = "10.233.64.0/18"
DefaultClusterDNSService = "10.233.0.3"
DefaultClusterDomain = "cluster.local"
DefaultClusterSSHKeyPath = "~/.ssh/id_rsa"

DefaultAuthStrategy = "x509"

DefaultNetworkPlugin = "flannel"

DefaultInfraContainerImage = "gcr.io/google_containers/pause-amd64:3.0"
DefaultAplineImage = "alpine:latest"
DefaultNginxProxyImage = "rancher/rke-nginx-proxy:0.1.0"
DefaultCertDownloaderImage = "rancher/rke-cert-deployer:0.1.0"

DefaultFlannelImage = "quay.io/coreos/flannel:v0.9.1"
DefaultFlannelCNIImage = "quay.io/coreos/flannel-cni:v0.2.0"
DefaultCalicoNodeImage = "quay.io/calico/node:v2.6.2"
DefaultCalicoCNIImage = "quay.io/calico/cni:v1.11.0"
DefaultCalicoControllersImage = "quay.io/calico/kube-controllers:v1.0.0"
DefaultCanalNodeImage = "quay.io/calico/node:v2.6.2"
DefaultCanalCNIImage = "quay.io/calico/cni:v1.11.0"
DefaultCanalFlannelImage = "quay.io/coreos/flannel:v0.9.1"

DefaultKubeDNSImage = "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5"
DefaultDNSMasqImage = "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5"
DefaultKubeDNSSidecarImage = "gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5"
DefaultKubeDNSAutoScalerImage = "gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0"
)

func setDefaultIfEmptyMapValue(configMap map[string]string, key string, value string) {
if _, ok := configMap[key]; !ok {
configMap[key] = value
}
}
func setDefaultIfEmpty(varName *string, defaultValue string) {
if len(*varName) == 0 {
*varName = defaultValue
}
}
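The two helpers above drive all of the new defaulting: any image the user leaves out of system_images (or the network options) is filled in, while explicit overrides are kept. A minimal sketch of that fill-in behavior (the helper is copied from the file above; the private registry name is purely illustrative):

```go
package main

import "fmt"

// Copied from cluster/defaults.go above.
func setDefaultIfEmptyMapValue(configMap map[string]string, key string, value string) {
	if _, ok := configMap[key]; !ok {
		configMap[key] = value
	}
}

func main() {
	// The user overrides only the kubedns image in cluster.yml; the rest is absent.
	systemImages := map[string]string{
		"kubedns_image": "registry.example.com/k8s-dns-kube-dns-amd64:1.14.5", // illustrative private registry
	}

	defaults := map[string]string{
		"alpine":          "alpine:latest",
		"nginx_proxy":     "rancher/rke-nginx-proxy:0.1.0",
		"cert_downloader": "rancher/rke-cert-deployer:0.1.0",
		"kubedns_image":   "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5",
	}
	for k, v := range defaults {
		setDefaultIfEmptyMapValue(systemImages, k, v)
	}

	fmt.Println(systemImages["kubedns_image"]) // keeps the user override
	fmt.Println(systemImages["nginx_proxy"])   // falls back to the default
}
```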
@@ -59,11 +59,11 @@ func (c *Cluster) InvertIndexHosts() error {
func (c *Cluster) SetUpHosts() error {
if c.Authentication.Strategy == X509AuthenticationProvider {
logrus.Infof("[certificates] Deploying kubernetes certificates to Cluster nodes")
err := pki.DeployCertificatesOnMasters(c.ControlPlaneHosts, c.Certificates)
err := pki.DeployCertificatesOnMasters(c.ControlPlaneHosts, c.Certificates, c.SystemImages[CertDownloaderImage])
if err != nil {
return err
}
err = pki.DeployCertificatesOnWorkers(c.WorkerHosts, c.Certificates)
err = pki.DeployCertificatesOnWorkers(c.WorkerHosts, c.Certificates, c.SystemImages[CertDownloaderImage])
if err != nil {
return err
}
@@ -11,9 +11,20 @@ import (
const (
NetworkPluginResourceName = "rke-network-plugin"
FlannelNetworkPlugin = "flannel"
CalicoNetworkPlugin = "calico"
CanalNetworkPlugin = "canal"

FlannelNetworkPlugin = "flannel"
FlannelImage = "flannel_image"
FlannelCNIImage = "flannel_cni_image"

CalicoNetworkPlugin = "calico"
CalicoNodeImage = "calico_node_image"
CalicoCNIImage = "calico_cni_image"
CalicoControllersImages = "calico_controllers_image"

CanalNetworkPlugin = "canal"
CanalNodeImage = "canal_node_image"
CanalCNIImage = "canal_cni_image"
CanalFlannelImage = "canal_flannel_image"
)

func (c *Cluster) DeployNetworkPlugin() error {
@@ -31,30 +42,62 @@ func (c *Cluster) DeployNetworkPlugin() error {
}

func (c *Cluster) doFlannelDeploy() error {
pluginYaml := network.GetFlannelManifest(c.ClusterCIDR)
pluginYaml := network.GetFlannelManifest(c.ClusterCIDR, c.Network.Options[FlannelImage], c.Network.Options[FlannelCNIImage])
return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
}

func (c *Cluster) doCalicoDeploy() error {
calicoConfig := make(map[string]string)
calicoConfig["etcdEndpoints"] = services.GetEtcdConnString(c.EtcdHosts)
calicoConfig["apiRoot"] = "https://127.0.0.1:6443"
calicoConfig["clientCrt"] = pki.KubeNodeCertPath
calicoConfig["clientKey"] = pki.KubeNodeKeyPath
calicoConfig["clientCA"] = pki.CACertPath
calicoConfig["kubeCfg"] = pki.KubeNodeConfigPath
calicoConfig["clusterCIDR"] = c.ClusterCIDR
calicoConfig := map[string]string{
network.EtcdEndpoints: services.GetEtcdConnString(c.EtcdHosts),
network.APIRoot: "https://127.0.0.1:6443",
network.ClientCert: pki.KubeNodeCertPath,
network.ClientKey: pki.KubeNodeKeyPath,
network.ClientCA: pki.CACertPath,
network.KubeCfg: pki.KubeNodeConfigPath,
network.ClusterCIDR: c.ClusterCIDR,
network.CNIImage: c.Network.Options[CalicoCNIImage],
network.NodeImage: c.Network.Options[CalicoNodeImage],
network.ControllersImage: c.Network.Options[CalicoControllersImages],
}
pluginYaml := network.GetCalicoManifest(calicoConfig)
return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
}

func (c *Cluster) doCanalDeploy() error {
canalConfig := make(map[string]string)
canalConfig["clientCrt"] = pki.KubeNodeCertPath
canalConfig["clientKey"] = pki.KubeNodeKeyPath
canalConfig["clientCA"] = pki.CACertPath
canalConfig["kubeCfg"] = pki.KubeNodeConfigPath
canalConfig["clusterCIDR"] = c.ClusterCIDR
canalConfig := map[string]string{
network.ClientCert: pki.KubeNodeCertPath,
network.ClientKey: pki.KubeNodeKeyPath,
network.ClientCA: pki.CACertPath,
network.KubeCfg: pki.KubeNodeConfigPath,
network.ClusterCIDR: c.ClusterCIDR,
network.NodeImage: c.Network.Options[CanalNodeImage],
network.CNIImage: c.Network.Options[CanalCNIImage],
network.FlannelImage: c.Network.Options[CanalFlannelImage],
}
pluginYaml := network.GetCanalManifest(canalConfig)
return c.doAddonDeploy(pluginYaml, NetworkPluginResourceName)
}

func (c *Cluster) setClusterNetworkDefaults() {
setDefaultIfEmpty(&c.Network.Plugin, DefaultNetworkPlugin)

if c.Network.Options == nil {
// don't break if the user didn't define options
c.Network.Options = make(map[string]string)
}
switch {
case c.Network.Plugin == FlannelNetworkPlugin:
setDefaultIfEmptyMapValue(c.Network.Options, FlannelImage, DefaultFlannelImage)
setDefaultIfEmptyMapValue(c.Network.Options, FlannelCNIImage, DefaultFlannelCNIImage)

case c.Network.Plugin == CalicoNetworkPlugin:
setDefaultIfEmptyMapValue(c.Network.Options, CalicoCNIImage, DefaultCalicoCNIImage)
setDefaultIfEmptyMapValue(c.Network.Options, CalicoNodeImage, DefaultCalicoNodeImage)
setDefaultIfEmptyMapValue(c.Network.Options, CalicoControllersImages, DefaultCalicoControllersImage)

case c.Network.Plugin == CanalNetworkPlugin:
setDefaultIfEmptyMapValue(c.Network.Options, CanalCNIImage, DefaultCanalCNIImage)
setDefaultIfEmptyMapValue(c.Network.Options, CanalNodeImage, DefaultCanalNodeImage)
setDefaultIfEmptyMapValue(c.Network.Options, CanalFlannelImage, DefaultCanalFlannelImage)
}
}
@@ -50,7 +50,7 @@ func reconcileWorker(currentCluster, kubeCluster *Cluster, kubeClient *kubernete
return fmt.Errorf("Failed to delete worker node %s from cluster", toDeleteHost.Address)
}
// attempting to clean services/files on the host
if err := reconcileHost(toDeleteHost, true); err != nil {
if err := reconcileHost(toDeleteHost, true, currentCluster.SystemImages[AplineImage]); err != nil {
logrus.Warnf("[reconcile] Couldn't clean up worker node [%s]: %v", toDeleteHost.Address, err)
continue
}
@@ -82,7 +82,7 @@ func reconcileControl(currentCluster, kubeCluster *Cluster, kubeClient *kubernet
return fmt.Errorf("Failed to delete controlplane node %s from cluster", toDeleteHost.Address)
}
// attempting to clean services/files on the host
if err := reconcileHost(toDeleteHost, false); err != nil {
if err := reconcileHost(toDeleteHost, false, currentCluster.SystemImages[AplineImage]); err != nil {
logrus.Warnf("[reconcile] Couldn't clean up controlplane node [%s]: %v", toDeleteHost.Address, err)
continue
}
@@ -95,7 +95,7 @@ func reconcileControl(currentCluster, kubeCluster *Cluster, kubeClient *kubernet
cpChanged := hosts.IsHostListChanged(currentCluster.ControlPlaneHosts, kubeCluster.ControlPlaneHosts)
if cpChanged {
logrus.Infof("[reconcile] Rolling update nginx hosts with new list of control plane hosts")
err := services.RollingUpdateNginxProxy(kubeCluster.ControlPlaneHosts, kubeCluster.WorkerHosts)
err := services.RollingUpdateNginxProxy(kubeCluster.ControlPlaneHosts, kubeCluster.WorkerHosts, currentCluster.SystemImages[NginxProxyImage])
if err != nil {
return fmt.Errorf("Failed to rolling update Nginx hosts with new control plane hosts")
}
@@ -103,7 +103,7 @@ func reconcileControl(currentCluster, kubeCluster *Cluster, kubeClient *kubernet
return nil
}

func reconcileHost(toDeleteHost *hosts.Host, worker bool) error {
func reconcileHost(toDeleteHost *hosts.Host, worker bool, cleanerImage string) error {
if err := toDeleteHost.TunnelUp(); err != nil {
return fmt.Errorf("Not able to reach the host: %v", err)
}
@@ -111,14 +111,14 @@ func reconcileHost(toDeleteHost *hosts.Host, worker bool) error {
if err := services.RemoveWorkerPlane([]*hosts.Host{toDeleteHost}, false); err != nil {
return fmt.Errorf("Couldn't remove worker plane: %v", err)
}
if err := toDeleteHost.CleanUpWorkerHost(services.ControlRole); err != nil {
if err := toDeleteHost.CleanUpWorkerHost(services.ControlRole, cleanerImage); err != nil {
return fmt.Errorf("Not able to clean the host: %v", err)
}
} else {
if err := services.RemoveControlPlane([]*hosts.Host{toDeleteHost}, false); err != nil {
return fmt.Errorf("Couldn't remove control plane: %v", err)
}
if err := toDeleteHost.CleanUpControlHost(services.WorkerRole); err != nil {
if err := toDeleteHost.CleanUpControlHost(services.WorkerRole, cleanerImage); err != nil {
return fmt.Errorf("Not able to clean the host: %v", err)
}
}
@@ -23,21 +23,21 @@ func (c *Cluster) ClusterRemove() error {
}

// Clean up all hosts
if err := cleanUpHosts(c.ControlPlaneHosts, c.WorkerHosts, c.EtcdHosts); err != nil {
if err := cleanUpHosts(c.ControlPlaneHosts, c.WorkerHosts, c.EtcdHosts, c.SystemImages[AplineImage]); err != nil {
return err
}

return pki.RemoveAdminConfig(c.LocalKubeConfigPath)
}

func cleanUpHosts(cpHosts, workerHosts, etcdHosts []*hosts.Host) error {
func cleanUpHosts(cpHosts, workerHosts, etcdHosts []*hosts.Host, cleanerImage string) error {
allHosts := []*hosts.Host{}
allHosts = append(allHosts, cpHosts...)
allHosts = append(allHosts, workerHosts...)
allHosts = append(allHosts, etcdHosts...)

for _, host := range allHosts {
if err := host.CleanUpAll(); err != nil {
if err := host.CleanUpAll(cleanerImage); err != nil {
return err
}
}
@@ -16,6 +16,11 @@ import (
"gopkg.in/yaml.v2"
)

const (
comments = `# If you intened to deploy Kubernetes in an air-gapped envrionment,
# please consult the documentation on how to configure custom RKE images.`
)

func ConfigCommand() cli.Command {
return cli.Command{
Name: "config",
@@ -67,12 +72,12 @@ func writeConfig(cluster *v3.RancherKubernetesEngineConfig, configFile string, p
}
logrus.Debugf("Deploying cluster configuration file: %s", configFile)

configString := fmt.Sprintf("%s\n%s", comments, string(yamlConfig))
if print {
fmt.Printf("Configuration File: \n%s", string(yamlConfig))
fmt.Printf("Configuration File: \n%s", configString)
return nil
}
return ioutil.WriteFile(configFile, yamlConfig, 0640)
return ioutil.WriteFile(configFile, []byte(configString), 0640)
}

func clusterConfig(ctx *cli.Context) error {
@@ -27,11 +27,10 @@ const (
ToCleanCNIBin = "/opt/cni"
ToCleanCalicoRun = "/var/run/calico"
CleanerContainerName = "kube-cleaner"
CleanerImage = "alpine:latest"
)

func (h *Host) CleanUpAll() error {
// the only supported removal for etcd dir is in rke remove
func (h *Host) CleanUpAll(cleanerImage string) error {
logrus.Infof("[hosts] Cleaning up host [%s]", h.Address)
toCleanPaths := []string{
ToCleanEtcdDir,
ToCleanSSLDir,
@@ -39,10 +38,10 @@ func (h *Host) CleanUpAll() error {
ToCleanCNIBin,
ToCleanCalicoRun,
}
return h.CleanUp(toCleanPaths)
return h.CleanUp(toCleanPaths, cleanerImage)
}

func (h *Host) CleanUpWorkerHost(controlRole string) error {
func (h *Host) CleanUpWorkerHost(controlRole, cleanerImage string) error {
if h.IsControl {
logrus.Infof("[hosts] Host [%s] is already a controlplane host, skipping cleanup.", h.Address)
return nil
@@ -53,10 +52,10 @@ func (h *Host) CleanUpWorkerHost(controlRole string) error {
ToCleanCNIBin,
ToCleanCalicoRun,
}
return h.CleanUp(toCleanPaths)
return h.CleanUp(toCleanPaths, cleanerImage)
}

func (h *Host) CleanUpControlHost(workerRole string) error {
func (h *Host) CleanUpControlHost(workerRole, cleanerImage string) error {
if h.IsWorker {
logrus.Infof("[hosts] Host [%s] is already a worker host, skipping cleanup.", h.Address)
return nil
@@ -67,12 +66,12 @@ func (h *Host) CleanUpControlHost(workerRole string) error {
ToCleanCNIBin,
ToCleanCalicoRun,
}
return h.CleanUp(toCleanPaths)
return h.CleanUp(toCleanPaths, cleanerImage)
}

func (h *Host) CleanUp(toCleanPaths []string) error {
func (h *Host) CleanUp(toCleanPaths []string, cleanerImage string) error {
logrus.Infof("[hosts] Cleaning up host [%s]", h.Address)
imageCfg, hostCfg := buildCleanerConfig(h, toCleanPaths)
imageCfg, hostCfg := buildCleanerConfig(h, toCleanPaths, cleanerImage)
logrus.Infof("[hosts] Running cleaner container on host [%s]", h.Address)
if err := docker.DoRunContainer(h.DClient, imageCfg, hostCfg, CleanerContainerName, h.Address, CleanerContainerName); err != nil {
return err
@@ -160,10 +159,10 @@ func IsHostListChanged(currentHosts, configHosts []*Host) bool {
return changed
}

func buildCleanerConfig(host *Host, toCleanDirs []string) (*container.Config, *container.HostConfig) {
func buildCleanerConfig(host *Host, toCleanDirs []string, cleanerImage string) (*container.Config, *container.HostConfig) {
cmd := append([]string{"rm", "-rf"}, toCleanDirs...)
imageCfg := &container.Config{
Image: CleanerImage,
Image: cleanerImage,
Cmd: cmd,
}
bindMounts := []string{}
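For reference, the cleaner container is built from the same container.Config shape used throughout this file, with the image now passed in instead of the removed CleanerImage constant. A minimal standalone sketch (assuming the upstream github.com/docker/docker/api/types/container package; paths and image are illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container" // assumed import providing container.Config
)

// Mirrors the shape of buildCleanerConfig above: the cleaner image is a parameter
// instead of the hard-coded alpine:latest.
func buildCleanerConfig(toCleanDirs []string, cleanerImage string) *container.Config {
	cmd := append([]string{"rm", "-rf"}, toCleanDirs...)
	return &container.Config{
		Image: cleanerImage,
		Cmd:   cmd,
	}
}

func main() {
	cfg := buildCleanerConfig([]string{"/opt/cni", "/var/run/calico"}, "alpine:latest")
	fmt.Println(cfg.Image, cfg.Cmd)
}
```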
@@ -17,7 +17,7 @@ metadata:
namespace: kube-system
data:
# Configure this with the location of your etcd cluster.
etcd_endpoints: "` + calicoConfig["etcdEndpoints"] + `"
etcd_endpoints: "` + calicoConfig[EtcdEndpoints] + `"

# Configure the Calico backend to use.
calico_backend: "bird"
@@ -30,7 +30,7 @@ data:
"plugins": [
{
"type": "calico",
"etcd_endpoints": "` + calicoConfig["etcdEndpoints"] + `",
"etcd_endpoints": "` + calicoConfig[EtcdEndpoints] + `",
"etcd_key_file": "",
"etcd_cert_file": "",
"etcd_ca_cert_file": "",
@@ -41,13 +41,13 @@ data:
},
"policy": {
"type": "k8s",
"k8s_api_root": "` + calicoConfig["apiRoot"] + `",
"k8s_client_certificate": "` + calicoConfig["clientCrt"] + `",
"k8s_client_key": "` + calicoConfig["clientKey"] + `",
"k8s_certificate_authority": "` + calicoConfig["clientCA"] + `"
"k8s_api_root": "` + calicoConfig[APIRoot] + `",
"k8s_client_certificate": "` + calicoConfig[ClientCert] + `",
"k8s_client_key": "` + calicoConfig[ClientKey] + `",
"k8s_certificate_authority": "` + calicoConfig[ClientCA] + `"
},
"kubernetes": {
"kubeconfig": "` + calicoConfig["kubeCfg"] + `"
"kubeconfig": "` + calicoConfig[KubeCfg] + `"
}
},
{
@@ -124,7 +124,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.6.2
image: ` + calicoConfig[NodeImage] + `
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
@@ -149,7 +149,7 @@ spec:
value: "ACCEPT"
# Configure the IP Pool from which Pod IPs will be chosen.
- name: CALICO_IPV4POOL_CIDR
value: "` + calicoConfig["clusterCIDR"] + `"
value: "` + calicoConfig[ClusterCIDR] + `"
- name: CALICO_IPV4POOL_IPIP
value: "Always"
# Disable IPv6 on Kubernetes.
@@ -215,7 +215,7 @@ spec:
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.11.0
image: ` + calicoConfig[CNIImage] + `
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
@@ -304,7 +304,7 @@ spec:
operator: "Exists"
containers:
- name: calico-kube-controllers
image: quay.io/calico/kube-controllers:v1.0.0
image: ` + calicoConfig[ControllersImage] + `
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
@@ -371,7 +371,7 @@ spec:
serviceAccountName: calico-kube-controllers
containers:
- name: calico-policy-controller
image: quay.io/calico/kube-controllers:v1.0.0
image: ` + calicoConfig[ControllersImage] + `
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
@@ -34,13 +34,13 @@ data:
},
"policy": {
"type": "k8s",
"k8s_api_root": "` + canalConfig["apiRoot"] + `",
"k8s_client_certificate": "` + canalConfig["clientCrt"] + `",
"k8s_client_key": "` + canalConfig["clientKey"] + `",
"k8s_certificate_authority": "` + canalConfig["clientCA"] + `"
"k8s_api_root": "` + canalConfig[APIRoot] + `",
"k8s_client_certificate": "` + canalConfig[ClientCert] + `",
"k8s_client_key": "` + canalConfig[ClientKey] + `",
"k8s_certificate_authority": "` + canalConfig[ClientCA] + `"
},
"kubernetes": {
"kubeconfig": "` + canalConfig["kubeCfg"] + `"
"kubeconfig": "` + canalConfig[KubeCfg] + `"
}
},
{
@@ -54,7 +54,7 @@ data:
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "` + canalConfig["clusterCIDR"] + `",
"Network": "` + canalConfig[ClusterCIDR] + `",
"Backend": {
"Type": "vxlan"
}
@@ -106,7 +106,7 @@ spec:
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v2.6.2
image: ` + canalConfig[NodeImage] + `
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
@@ -173,7 +173,7 @@ spec:
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.11.0
image: ` + canalConfig[CNIImage] + `
command: ["/install-cni.sh"]
env:
- name: CNI_CONF_NAME
@@ -198,7 +198,7 @@ spec:
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: quay.io/coreos/flannel:v0.9.1
image: ` + canalConfig[FlannelImage] + `
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
@@ -1,6 +1,6 @@
package network

func GetFlannelManifest(clusterCIDR string) string {
func GetFlannelManifest(clusterCIDR, image, cniImage string) string {
return `
---
kind: ConfigMap
@@ -57,7 +57,7 @@ spec:
spec:
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.8.0
image: ` + image + `
imagePullPolicy: IfNotPresent
resources:
limits:
@@ -86,7 +86,7 @@ spec:
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: install-cni
image: quay.io/coreos/flannel-cni:v0.2.0
image: ` + cniImage + `
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
16 network/network.go Normal file
@@ -0,0 +1,16 @@
package network

const (
EtcdEndpoints = "etcdEndpoints"
APIRoot = "apiRoot"
ClientCert = "clientCert"
ClientKey = "clientKey"
ClientCA = "clientCA"
KubeCfg = "kubeCfg"
ClusterCIDR = "clusterCIDR"
CNIImage = "cniImage"
NodeImage = "nodeImage"
ControllersImage = "controllersImage"
FlannelImage = "flannelImage"
FlannelCNIImage = "flannelCNIImage"
)
@@ -1,7 +1,6 @@
package pki

const (
CrtDownloaderImage = "rancher/rke-cert-deployer:0.1.0"
CrtDownloaderContainer = "cert-deployer"
CertificatesSecretName = "k8s-certs"
@@ -14,7 +14,7 @@ import (
"github.com/sirupsen/logrus"
)

func DeployCertificatesOnMasters(cpHosts []*hosts.Host, crtMap map[string]CertificatePKI) error {
func DeployCertificatesOnMasters(cpHosts []*hosts.Host, crtMap map[string]CertificatePKI, certDownloaderImage string) error {
// list of certificates that should be deployed on the masters
crtList := []string{
CACertName,
@@ -31,7 +31,7 @@ func DeployCertificatesOnMasters(cpHosts []*hosts.Host, crtMap map[string]Certif
}

for i := range cpHosts {
err := doRunDeployer(cpHosts[i], env)
err := doRunDeployer(cpHosts[i], env, certDownloaderImage)
if err != nil {
return err
}
@@ -39,7 +39,7 @@ func DeployCertificatesOnMasters(cpHosts []*hosts.Host, crtMap map[string]Certif
return nil
}

func DeployCertificatesOnWorkers(workerHosts []*hosts.Host, crtMap map[string]CertificatePKI) error {
func DeployCertificatesOnWorkers(workerHosts []*hosts.Host, crtMap map[string]CertificatePKI, certDownloaderImage string) error {
// list of certificates that should be deployed on the workers
crtList := []string{
CACertName,
@@ -53,7 +53,7 @@ func DeployCertificatesOnWorkers(workerHosts []*hosts.Host, crtMap map[string]Ce
}

for i := range workerHosts {
err := doRunDeployer(workerHosts[i], env)
err := doRunDeployer(workerHosts[i], env, certDownloaderImage)
if err != nil {
return err
}
@@ -61,14 +61,13 @@ func DeployCertificatesOnWorkers(workerHosts []*hosts.Host, crtMap map[string]Ce
return nil
}

func doRunDeployer(host *hosts.Host, containerEnv []string) error {
func doRunDeployer(host *hosts.Host, containerEnv []string, certDownloaderImage string) error {
logrus.Debugf("[certificates] Pulling Certificate downloader Image on host [%s]", host.Address)
err := docker.PullImage(host.DClient, host.Address, CrtDownloaderImage)
if err != nil {
if err := docker.PullImage(host.DClient, host.Address, certDownloaderImage); err != nil {
return err
}
imageCfg := &container.Config{
Image: CrtDownloaderImage,
Image: certDownloaderImage,
Env: containerEnv,
}
hostCfg := &container.HostConfig{
@@ -13,10 +13,10 @@ const (
NginxProxyEnvName = "CP_HOSTS"
)

func RollingUpdateNginxProxy(cpHosts []*hosts.Host, workerHosts []*hosts.Host) error {
func RollingUpdateNginxProxy(cpHosts []*hosts.Host, workerHosts []*hosts.Host, nginxProxyImage string) error {
nginxProxyEnv := buildProxyEnv(cpHosts)
for _, host := range workerHosts {
imageCfg, hostCfg := buildNginxProxyConfig(host, nginxProxyEnv)
imageCfg, hostCfg := buildNginxProxyConfig(host, nginxProxyEnv, nginxProxyImage)
if err := docker.DoRollingUpdateContainer(host.DClient, imageCfg, hostCfg, NginxProxyContainerName, host.Address, WorkerRole); err != nil {
return err
}
@@ -24,9 +24,9 @@ func RollingUpdateNginxProxy(cpHosts []*hosts.Host, workerHosts []*hosts.Host) e
return nil
}

func runNginxProxy(host *hosts.Host, cpHosts []*hosts.Host) error {
func runNginxProxy(host *hosts.Host, cpHosts []*hosts.Host, nginxProxyImage string) error {
nginxProxyEnv := buildProxyEnv(cpHosts)
imageCfg, hostCfg := buildNginxProxyConfig(host, nginxProxyEnv)
imageCfg, hostCfg := buildNginxProxyConfig(host, nginxProxyEnv, nginxProxyImage)
return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, NginxProxyContainerName, host.Address, WorkerRole)
}

@@ -34,9 +34,9 @@ func removeNginxProxy(host *hosts.Host) error {
return docker.DoRemoveContainer(host.DClient, NginxProxyContainerName, host.Address)
}

func buildNginxProxyConfig(host *hosts.Host, nginxProxyEnv string) (*container.Config, *container.HostConfig) {
func buildNginxProxyConfig(host *hosts.Host, nginxProxyEnv, nginxProxyImage string) (*container.Config, *container.HostConfig) {
imageCfg := &container.Config{
Image: NginxProxyImage,
Image: nginxProxyImage,
Env: []string{fmt.Sprintf("%s=%s", NginxProxyEnvName, nginxProxyEnv)},
}
hostCfg := &container.HostConfig{
@@ -6,7 +6,7 @@ import (
"github.com/sirupsen/logrus"
)

func RunWorkerPlane(controlHosts []*hosts.Host, workerHosts []*hosts.Host, workerServices v3.RKEConfigServices) error {
func RunWorkerPlane(controlHosts []*hosts.Host, workerHosts []*hosts.Host, workerServices v3.RKEConfigServices, nginxProxyImage string) error {
logrus.Infof("[%s] Building up Worker Plane..", WorkerRole)
for _, host := range controlHosts {
// only one master for now
@@ -20,7 +20,7 @@ func RunWorkerPlane(controlHosts []*hosts.Host, workerHosts []*hosts.Host, worke
for _, host := range workerHosts {
// run nginx proxy
if !host.IsControl {
if err := runNginxProxy(host, controlHosts); err != nil {
if err := runNginxProxy(host, controlHosts, nginxProxyImage); err != nil {
return err
}
}