diff --git a/cluster/cluster.go b/cluster/cluster.go
index 76f5e320..8be4a1ef 100644
--- a/cluster/cluster.go
+++ b/cluster/cluster.go
@@ -4,16 +4,16 @@ import (
 	"fmt"
 	"net"
 	"path/filepath"
+	"strings"
 
 	"github.com/rancher/rke/hosts"
-	"github.com/rancher/rke/k8s"
 	"github.com/rancher/rke/pki"
 	"github.com/rancher/rke/services"
 	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 	"github.com/sirupsen/logrus"
-	"golang.org/x/crypto/ssh"
 	yaml "gopkg.in/yaml.v2"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/client-go/util/cert"
 )
 
@@ -21,9 +21,9 @@ type Cluster struct {
 	v1.RancherKubernetesEngineConfig `yaml:",inline"`
 	ConfigPath                       string `yaml:"config_path"`
 	LocalKubeConfigPath              string
-	EtcdHosts                        []hosts.Host
-	WorkerHosts                      []hosts.Host
-	ControlPlaneHosts                []hosts.Host
+	EtcdHosts                        []*hosts.Host
+	WorkerHosts                      []*hosts.Host
+	ControlPlaneHosts                []*hosts.Host
 	KubeClient                       *kubernetes.Clientset
 	KubernetesServiceIP              net.IP
 	Certificates                     map[string]pki.CertificatePKI
@@ -152,92 +152,49 @@ func GetLocalKubeConfig(configPath string) string {
 	return fmt.Sprintf("%s%s%s", baseDir, pki.KubeAdminConfigPrefix, fileName)
 }
 
-func ReconcileCluster(kubeCluster, currentCluster *Cluster) error {
-	logrus.Infof("[reconcile] Reconciling cluster state")
-	if currentCluster == nil {
-		logrus.Infof("[reconcile] This is newly generated cluster")
-		return nil
-	}
-	if err := rebuildLocalAdminConfig(kubeCluster); err != nil {
-		return err
-	}
-	kubeClient, err := k8s.NewClient(kubeCluster.LocalKubeConfigPath)
-	if err != nil {
-		return fmt.Errorf("Failed to initialize new kubernetes client: %v", err)
-	}
-	key, _ := checkEncryptedKey(kubeCluster.SSHKeyPath)
-
-	logrus.Infof("[reconcile] Check Control plane hosts to be deleted")
-	cpToDelete := hosts.GetToDeleteHosts(currentCluster.ControlPlaneHosts, kubeCluster.ControlPlaneHosts)
-	for _, toDeleteHost := range cpToDelete {
-		if err := hosts.DeleteNode(&toDeleteHost, kubeClient); err != nil {
-			return fmt.Errorf("Failed to delete controlplane node %s from cluster", toDeleteHost.Address)
-		}
-		// attempting to clean up the host
-		if err := reconcileHostCleaner(toDeleteHost, key, false); err != nil {
-			logrus.Warnf("[reconcile] Couldn't clean up controlplane node [%s]: %v", toDeleteHost.Address, err)
-			continue
-		}
-	}
-
-	logrus.Infof("[reconcile] Check worker hosts to be deleted")
-	wpToDelete := hosts.GetToDeleteHosts(currentCluster.WorkerHosts, kubeCluster.WorkerHosts)
-	for _, toDeleteHost := range wpToDelete {
-		if err := hosts.DeleteNode(&toDeleteHost, kubeClient); err != nil {
-			return fmt.Errorf("Failed to delete worker node %s from cluster", toDeleteHost.Address)
-		}
-		// attempting to clean up the host
-		if err := reconcileHostCleaner(toDeleteHost, key, true); err != nil {
-			logrus.Warnf("[reconcile] Couldn't clean up worker node [%s]: %v", toDeleteHost.Address, err)
-			continue
-		}
-	}
-
-	// Rolling update on change for nginx Proxy
-	cpChanged := hosts.IsHostListChanged(currentCluster.ControlPlaneHosts, kubeCluster.ControlPlaneHosts)
-	if cpChanged {
-		logrus.Infof("[reconcile] Rolling update nginx hosts with new list of control plane hosts")
-		err = services.RollingUpdateNginxProxy(kubeCluster.ControlPlaneHosts, kubeCluster.WorkerHosts)
-		if err != nil {
-			return fmt.Errorf("Failed to rolling update Nginx hosts with new control plane hosts")
-		}
-	}
-	logrus.Infof("[reconcile] Reconciled cluster state successfully")
-	return nil
-}
-
-func reconcileHostCleaner(toDeleteHost hosts.Host, key ssh.Signer, worker bool) error {
-	if err := toDeleteHost.TunnelUp(key); err != nil {
-		return fmt.Errorf("Not able to reach the host: %v", err)
-	}
-	if err := services.RemoveControlPlane([]hosts.Host{toDeleteHost}); err != nil {
-		return fmt.Errorf("Couldn't remove control plane: %v", err)
-	}
-
-	if err := services.RemoveWorkerPlane(nil, []hosts.Host{toDeleteHost}); err != nil {
-		return fmt.Errorf("Couldn't remove worker plane: %v", err)
-	}
-	if err := toDeleteHost.CleanUp(); err != nil {
-		return fmt.Errorf("Not able to clean the host: %v", err)
-	}
-	return nil
-}
-
 func rebuildLocalAdminConfig(kubeCluster *Cluster) error {
 	logrus.Infof("[reconcile] Rebuilding and update local kube config")
+	var workingConfig string
 	currentKubeConfig := kubeCluster.Certificates[pki.KubeAdminCommonName]
 	caCrt := kubeCluster.Certificates[pki.CACertName].Certificate
-	newConfig := pki.GetKubeConfigX509WithData(
-		"https://"+kubeCluster.ControlPlaneHosts[0].Address+":6443",
-		pki.KubeAdminCommonName,
-		string(cert.EncodeCertPEM(caCrt)),
-		string(cert.EncodeCertPEM(currentKubeConfig.Certificate)),
-		string(cert.EncodePrivateKeyPEM(currentKubeConfig.Key)))
-	err := pki.DeployAdminConfig(newConfig, kubeCluster.LocalKubeConfigPath)
-	if err != nil {
-		return fmt.Errorf("Failed to redeploy local admin config with new host")
+	for _, cpHost := range kubeCluster.ControlPlaneHosts {
+		newConfig := pki.GetKubeConfigX509WithData(
+			"https://"+cpHost.Address+":6443",
+			pki.KubeAdminCommonName,
+			string(cert.EncodeCertPEM(caCrt)),
+			string(cert.EncodeCertPEM(currentKubeConfig.Certificate)),
+			string(cert.EncodePrivateKeyPEM(currentKubeConfig.Key)))
+
+		if err := pki.DeployAdminConfig(newConfig, kubeCluster.LocalKubeConfigPath); err != nil {
+			return fmt.Errorf("Failed to redeploy local admin config with new host")
+		}
+		workingConfig = newConfig
+		if _, err := GetK8sVersion(kubeCluster.LocalKubeConfigPath); err != nil {
+			logrus.Infof("[reconcile] host [%s] is not active master on the cluster", cpHost.Address)
+			continue
+		} else {
+			break
+		}
 	}
-	currentKubeConfig.Config = newConfig
+	currentKubeConfig.Config = workingConfig
 	kubeCluster.Certificates[pki.KubeAdminCommonName] = currentKubeConfig
 	return nil
 }
+
+func isLocalConfigWorking(localKubeConfigPath string) bool {
+	if _, err := GetK8sVersion(localKubeConfigPath); err != nil {
+		logrus.Infof("[reconcile] Local config is not valid, rebuilding admin config")
+		return false
+	}
+	return true
+}
+
+func getLocalConfigAddress(localConfigPath string) (string, error) {
+	config, err := clientcmd.BuildConfigFromFlags("", localConfigPath)
+	if err != nil {
+		return "", err
+	}
+	splitAddress := strings.Split(config.Host, ":")
+	address := splitAddress[1]
+	return address[2:], nil
+}
diff --git a/cluster/hosts.go b/cluster/hosts.go
index 3d1a2c49..5f45c24d 100644
--- a/cluster/hosts.go
+++ b/cluster/hosts.go
@@ -46,22 +46,24 @@ func (c *Cluster) TunnelHosts() error {
 }
 
 func (c *Cluster) InvertIndexHosts() error {
-	c.EtcdHosts = make([]hosts.Host, 0)
-	c.WorkerHosts = make([]hosts.Host, 0)
-	c.ControlPlaneHosts = make([]hosts.Host, 0)
+	c.EtcdHosts = make([]*hosts.Host, 0)
+	c.WorkerHosts = make([]*hosts.Host, 0)
+	c.ControlPlaneHosts = make([]*hosts.Host, 0)
 	for _, host := range c.Nodes {
+		newHost := hosts.Host{
+			RKEConfigNode: host,
+		}
 		for _, role := range host.Role {
 			logrus.Debugf("Host: " + host.Address + " has role: " + role)
-			newHost := hosts.Host{
-				RKEConfigNode: host,
-			}
 			switch role {
 			case services.ETCDRole:
-				c.EtcdHosts = append(c.EtcdHosts, newHost)
+				c.EtcdHosts = append(c.EtcdHosts, &newHost)
 			case services.ControlRole:
-				c.ControlPlaneHosts = append(c.ControlPlaneHosts, newHost)
+				newHost.IsControl = true
+				c.ControlPlaneHosts = append(c.ControlPlaneHosts, &newHost)
 			case services.WorkerRole:
-				c.WorkerHosts = append(c.WorkerHosts, newHost)
+				newHost.IsWorker = true
+				c.WorkerHosts = append(c.WorkerHosts, &newHost)
 			default:
 				return fmt.Errorf("Failed to recognize host [%s] role %s", host.Address, role)
 			}
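The reworked `rebuildLocalAdminConfig` above no longer pins the admin kubeconfig to `ControlPlaneHosts[0]`: it deploys a config per control-plane host and keeps the first one whose API server answers `GetK8sVersion`. A minimal standalone sketch of that probe-and-fall-through loop; the `probe` callback and addresses are hypothetical stand-ins, not rke APIs:

```go
package main

import "fmt"

// firstWorkingConfig mirrors the loop in rebuildLocalAdminConfig: build a
// config per control-plane address, remember the last one written, and stop
// at the first endpoint that answers. probe stands in for GetK8sVersion.
func firstWorkingConfig(addresses []string, probe func(endpoint string) error) string {
	var workingConfig string
	for _, addr := range addresses {
		endpoint := "https://" + addr + ":6443"
		workingConfig = endpoint // kept even when the probe fails, as in the diff
		if err := probe(endpoint); err != nil {
			fmt.Printf("[reconcile] host [%s] is not active master on the cluster\n", addr)
			continue
		}
		break
	}
	return workingConfig
}

func main() {
	cfg := firstWorkingConfig([]string{"10.0.0.1", "10.0.0.2"}, func(endpoint string) error {
		if endpoint == "https://10.0.0.2:6443" {
			return nil // pretend only the second master answers
		}
		return fmt.Errorf("connection refused")
	})
	fmt.Println(cfg) // https://10.0.0.2:6443
}
```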
diff --git a/cluster/reconcile.go b/cluster/reconcile.go
new file mode 100644
index 00000000..61cf5d82
--- /dev/null
+++ b/cluster/reconcile.go
@@ -0,0 +1,129 @@
+package cluster
+
+import (
+	"fmt"
+
+	"github.com/rancher/rke/hosts"
+	"github.com/rancher/rke/k8s"
+	"github.com/rancher/rke/services"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/crypto/ssh"
+	"k8s.io/client-go/kubernetes"
+)
+
+func ReconcileCluster(kubeCluster, currentCluster *Cluster) error {
+	logrus.Infof("[reconcile] Reconciling cluster state")
+	if currentCluster == nil {
+		logrus.Infof("[reconcile] This is newly generated cluster")
+
+		return nil
+	}
+	// to handle if current local admin is down and we need to use new cp from the list
+	if !isLocalConfigWorking(kubeCluster.LocalKubeConfigPath) {
+		if err := rebuildLocalAdminConfig(kubeCluster); err != nil {
+			return err
+		}
+	}
+
+	kubeClient, err := k8s.NewClient(kubeCluster.LocalKubeConfigPath)
+	if err != nil {
+		return fmt.Errorf("Failed to initialize new kubernetes client: %v", err)
+	}
+
+	key, _ := checkEncryptedKey(kubeCluster.SSHKeyPath)
+
+	if err := reconcileWorker(currentCluster, kubeCluster, key, kubeClient); err != nil {
+		return err
+	}
+
+	if err := reconcileControl(currentCluster, kubeCluster, key, kubeClient); err != nil {
+		return err
+	}
+	logrus.Infof("[reconcile] Reconciled cluster state successfully")
+	return nil
+}
+
+func reconcileWorker(currentCluster, kubeCluster *Cluster, key ssh.Signer, kubeClient *kubernetes.Clientset) error {
+	// worker deleted first to avoid issues when worker+controller on same host
+	logrus.Debugf("[reconcile] Check worker hosts to be deleted")
+	wpToDelete := hosts.GetToDeleteHosts(currentCluster.WorkerHosts, kubeCluster.WorkerHosts)
+	for _, toDeleteHost := range wpToDelete {
+		toDeleteHost.IsWorker = false
+		if err := hosts.DeleteNode(toDeleteHost, kubeClient, toDeleteHost.IsControl); err != nil {
+			return fmt.Errorf("Failed to delete worker node %s from cluster", toDeleteHost.Address)
+		}
+		// attempting to clean services/files on the host
+		if err := reconcileHost(toDeleteHost, key, true); err != nil {
+			logrus.Warnf("[reconcile] Couldn't clean up worker node [%s]: %v", toDeleteHost.Address, err)
+			continue
+		}
+	}
+	return nil
+}
+
+func reconcileControl(currentCluster, kubeCluster *Cluster, key ssh.Signer, kubeClient *kubernetes.Clientset) error {
+	logrus.Debugf("[reconcile] Check Control plane hosts to be deleted")
+	selfDeleteAddress, err := getLocalConfigAddress(kubeCluster.LocalKubeConfigPath)
+	if err != nil {
+		return err
+	}
+	cpToDelete := hosts.GetToDeleteHosts(currentCluster.ControlPlaneHosts, kubeCluster.ControlPlaneHosts)
+	// move the current host in local kubeconfig to the end of the list
+	for i, toDeleteHost := range cpToDelete {
+		if toDeleteHost.Address == selfDeleteAddress {
+			cpToDelete = append(cpToDelete[:i], cpToDelete[i+1:]...)
+			cpToDelete = append(cpToDelete, toDeleteHost)
+		}
+	}
+
+	for _, toDeleteHost := range cpToDelete {
+		kubeClient, err := k8s.NewClient(kubeCluster.LocalKubeConfigPath)
+		if err != nil {
+			return fmt.Errorf("Failed to initialize new kubernetes client: %v", err)
+		}
+		if err := hosts.DeleteNode(toDeleteHost, kubeClient, toDeleteHost.IsWorker); err != nil {
+			return fmt.Errorf("Failed to delete controlplane node %s from cluster", toDeleteHost.Address)
+		}
+		// attempting to clean services/files on the host
+		if err := reconcileHost(toDeleteHost, key, false); err != nil {
+			logrus.Warnf("[reconcile] Couldn't clean up controlplane node [%s]: %v", toDeleteHost.Address, err)
+			continue
+		}
+	}
+	// rebuilding local admin config to enable saving cluster state
+	if err := rebuildLocalAdminConfig(kubeCluster); err != nil {
+		return err
+	}
+	// Rolling update on change for nginx Proxy
+	cpChanged := hosts.IsHostListChanged(currentCluster.ControlPlaneHosts, kubeCluster.ControlPlaneHosts)
+	if cpChanged {
+		logrus.Infof("[reconcile] Rolling update nginx hosts with new list of control plane hosts")
+		err := services.RollingUpdateNginxProxy(kubeCluster.ControlPlaneHosts, kubeCluster.WorkerHosts)
+		if err != nil {
+			return fmt.Errorf("Failed to rolling update Nginx hosts with new control plane hosts")
+		}
+	}
+	return nil
+}
+
+func reconcileHost(toDeleteHost *hosts.Host, key ssh.Signer, worker bool) error {
+	if err := toDeleteHost.TunnelUp(key); err != nil {
+		return fmt.Errorf("Not able to reach the host: %v", err)
+	}
+	if worker {
+		if err := services.RemoveWorkerPlane([]*hosts.Host{toDeleteHost}, false); err != nil {
+			return fmt.Errorf("Couldn't remove worker plane: %v", err)
+		}
+		if err := toDeleteHost.CleanUpWorkerHost(services.ControlRole); err != nil {
+			return fmt.Errorf("Not able to clean the host: %v", err)
+		}
+	} else {
+		if err := services.RemoveControlPlane([]*hosts.Host{toDeleteHost}, false); err != nil {
+			return fmt.Errorf("Couldn't remove control plane: %v", err)
+		}
+		if err := toDeleteHost.CleanUpControlHost(services.WorkerRole); err != nil {
+			return fmt.Errorf("Not able to clean the host: %v", err)
+		}
+	}
+	return nil
+}
diff --git a/cluster/remove.go b/cluster/remove.go
index f3a79a43..693e0a07 100644
--- a/cluster/remove.go
+++ b/cluster/remove.go
@@ -8,12 +8,12 @@ import (
 
 func (c *Cluster) ClusterRemove() error {
 	// Remove Worker Plane
-	if err := services.RemoveWorkerPlane(c.ControlPlaneHosts, c.WorkerHosts); err != nil {
+	if err := services.RemoveWorkerPlane(c.WorkerHosts, true); err != nil {
 		return err
 	}
 
 	// Remove Contol Plane
-	if err := services.RemoveControlPlane(c.ControlPlaneHosts); err != nil {
+	if err := services.RemoveControlPlane(c.ControlPlaneHosts, true); err != nil {
 		return err
 	}
 
@@ -30,14 +30,14 @@ func (c *Cluster) ClusterRemove() error {
 	return pki.RemoveAdminConfig(c.LocalKubeConfigPath)
 }
 
-func cleanUpHosts(cpHosts, workerHosts, etcdHosts []hosts.Host) error {
-	allHosts := []hosts.Host{}
+func cleanUpHosts(cpHosts, workerHosts, etcdHosts []*hosts.Host) error {
+	allHosts := []*hosts.Host{}
 	allHosts = append(allHosts, cpHosts...)
 	allHosts = append(allHosts, workerHosts...)
 	allHosts = append(allHosts, etcdHosts...)
 	for _, host := range allHosts {
-		if err := host.CleanUp(); err != nil {
+		if err := host.CleanUpAll(); err != nil {
 			return err
 		}
 	}
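`reconcileControl` reorders `cpToDelete` so that the host named in the local kubeconfig is deleted last, keeping a live API endpoint while the other control-plane nodes go away. A self-contained sketch of that reordering on a plain slice; the types are illustrative, and a `break` is added here since an address can match at most once:

```go
package main

import "fmt"

type host struct{ Address string }

// moveSelfLast reorders the deletion list the way reconcileControl does:
// the host backing the local kubeconfig goes to the end of the list.
func moveSelfLast(cpToDelete []*host, selfAddress string) []*host {
	for i, h := range cpToDelete {
		if h.Address == selfAddress {
			cpToDelete = append(cpToDelete[:i], cpToDelete[i+1:]...)
			cpToDelete = append(cpToDelete, h)
			break
		}
	}
	return cpToDelete
}

func main() {
	list := []*host{{"1.1.1.1"}, {"2.2.2.2"}, {"3.3.3.3"}}
	for _, h := range moveSelfLast(list, "2.2.2.2") {
		fmt.Println(h.Address) // 1.1.1.1, 3.3.3.3, 2.2.2.2
	}
}
```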
diff --git a/cluster/state.go b/cluster/state.go
index 69680b85..94b44c81 100644
--- a/cluster/state.go
+++ b/cluster/state.go
@@ -123,7 +123,7 @@ func getStateFromKubernetes(kubeClient *kubernetes.Clientset, kubeConfigPath str
 }
 
 func GetK8sVersion(localConfigPath string) (string, error) {
-	logrus.Debugf("[version] Using admin.config to connect to Kubernetes cluster..")
+	logrus.Debugf("[version] Using %s to connect to Kubernetes cluster..", localConfigPath)
 	k8sClient, err := k8s.NewClient(localConfigPath)
 	if err != nil {
 		return "", fmt.Errorf("Failed to create Kubernetes Client: %v", err)
diff --git a/cmd/up.go b/cmd/up.go
index 61489b26..61579617 100644
--- a/cmd/up.go
+++ b/cmd/up.go
@@ -54,6 +54,11 @@ func ClusterUp(clusterFile string) (string, string, string, string, error) {
 		return APIURL, caCrt, clientCert, clientKey, err
 	}
 
+	err = cluster.ReconcileCluster(kubeCluster, currentCluster)
+	if err != nil {
+		return APIURL, caCrt, clientCert, clientKey, err
+	}
+
 	err = kubeCluster.SetUpHosts()
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, err
@@ -64,11 +69,6 @@ func ClusterUp(clusterFile string) (string, string, string, string, error) {
 		return APIURL, caCrt, clientCert, clientKey, err
 	}
 
-	err = cluster.ReconcileCluster(kubeCluster, currentCluster)
-	if err != nil {
-		return APIURL, caCrt, clientCert, clientKey, err
-	}
-
 	err = kubeCluster.SaveClusterState(clusterFile)
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, err
diff --git a/docker/docker.go b/docker/docker.go
index 486f9e3f..f6288350 100644
--- a/docker/docker.go
+++ b/docker/docker.go
@@ -6,6 +6,7 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"reflect"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
@@ -19,7 +20,7 @@ func DoRunContainer(dClient *client.Client, imageCfg *container.Config, hostCfg
 		return err
 	}
 	if isRunning {
-		logrus.Infof("[%s] Container %s is already running on host [%s]", plane, containerName, hostname)
+		logrus.Infof("[%s] Container [%s] is already running on host [%s]", plane, containerName, hostname)
 		isUpgradable, err := IsContainerUpgradable(dClient, imageCfg, containerName, hostname, plane)
 		if err != nil {
 			return err
@@ -35,21 +36,21 @@ func DoRunContainer(dClient *client.Client, imageCfg *container.Config, hostCfg
 	if err != nil {
 		return err
 	}
-	logrus.Infof("[%s] Successfully pulled %s image on host [%s]", plane, containerName, hostname)
+	logrus.Infof("[%s] Successfully pulled [%s] image on host [%s]", plane, containerName, hostname)
 	resp, err := dClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, containerName)
 	if err != nil {
-		return fmt.Errorf("Failed to create %s container on host [%s]: %v", containerName, hostname, err)
+		return fmt.Errorf("Failed to create [%s] container on host [%s]: %v", containerName, hostname, err)
 	}
 	if err := dClient.ContainerStart(context.Background(), resp.ID, types.ContainerStartOptions{}); err != nil {
-		return fmt.Errorf("Failed to start %s container on host [%s]: %v", containerName, hostname, err)
+		return fmt.Errorf("Failed to start [%s] container on host [%s]: %v", containerName, hostname, err)
 	}
-	logrus.Debugf("[%s] Successfully started %s container: %s", plane, containerName, resp.ID)
-	logrus.Infof("[%s] Successfully started %s container on host [%s]", plane, containerName, hostname)
+	logrus.Debugf("[%s] Successfully started [%s] container: [%s]", plane, containerName, resp.ID)
+	logrus.Infof("[%s] Successfully started [%s] container on host [%s]", plane, containerName, hostname)
 	return nil
 }
 
 func DoRollingUpdateContainer(dClient *client.Client, imageCfg *container.Config, hostCfg *container.HostConfig, containerName, hostname, plane string) error {
-	logrus.Debugf("[%s] Checking for deployed %s", plane, containerName)
+	logrus.Debugf("[%s] Checking for deployed [%s]", plane, containerName)
 	isRunning, err := IsContainerRunning(dClient, hostname, containerName)
 	if err != nil {
 		return err
@@ -63,7 +64,7 @@ func DoRollingUpdateContainer(dClient *client.Client, imageCfg *container.Config
 	if err != nil {
 		return err
 	}
-	logrus.Infof("[%s] Successfully pulled %s image on host [%s]", plane, containerName, hostname)
+	logrus.Infof("[%s] Successfully pulled [%s] image on host [%s]", plane, containerName, hostname)
 	logrus.Debugf("[%s] Stopping old container", plane)
 	oldContainerName := "old-" + containerName
 	if err := StopRenameContainer(dClient, hostname, containerName, oldContainerName); err != nil {
@@ -72,12 +73,12 @@ func DoRollingUpdateContainer(dClient *client.Client, imageCfg *container.Config
 	logrus.Infof("[%s] Successfully stopped old container %s on host [%s]", plane, containerName, hostname)
 	_, err = CreateContiner(dClient, hostname, containerName, imageCfg, hostCfg)
 	if err != nil {
-		return fmt.Errorf("Failed to create %s container on host [%s]: %v", containerName, hostname, err)
+		return fmt.Errorf("Failed to create [%s] container on host [%s]: %v", containerName, hostname, err)
 	}
 	if err := StartContainer(dClient, hostname, containerName); err != nil {
-		return fmt.Errorf("Failed to start %s container on host [%s]: %v", containerName, hostname, err)
+		return fmt.Errorf("Failed to start [%s] container on host [%s]: %v", containerName, hostname, err)
 	}
-	logrus.Infof("[%s] Successfully updated %s container on host [%s]", plane, containerName, hostname)
+	logrus.Infof("[%s] Successfully updated [%s] container on host [%s]", plane, containerName, hostname)
 	logrus.Debugf("[%s] Removing old container", plane)
 	err = RemoveContainer(dClient, hostname, oldContainerName)
 	return err
@@ -110,7 +111,7 @@ func DoRemoveContainer(dClient *client.Client, containerName, hostname string) e
 }
 
 func IsContainerRunning(dClient *client.Client, hostname string, containerName string) (bool, error) {
-	logrus.Debugf("Checking if container %s is running on host [%s]", containerName, hostname)
+	logrus.Debugf("Checking if container [%s] is running on host [%s]", containerName, hostname)
 	containers, err := dClient.ContainerList(context.Background(), types.ContainerListOptions{})
 	if err != nil {
 		return false, fmt.Errorf("Can't get Docker containers for host [%s]: %v", hostname, err)
@@ -127,7 +128,7 @@ func IsContainerRunning(dClient *client.Client, hostname string, containerName s
 func PullImage(dClient *client.Client, hostname string, containerImage string) error {
 	out, err := dClient.ImagePull(context.Background(), containerImage, types.ImagePullOptions{})
 	if err != nil {
-		return fmt.Errorf("Can't pull Docker image %s for host [%s]: %v", containerImage, hostname, err)
+		return fmt.Errorf("Can't pull Docker image [%s] for host [%s]: %v", containerImage, hostname, err)
 	}
 	defer out.Close()
 	if logrus.GetLevel() == logrus.DebugLevel {
@@ -142,7 +143,7 @@ func PullImage(dClient *client.Client, hostname string, containerImage string) e
 func RemoveContainer(dClient *client.Client, hostname string, containerName string) error {
 	err := dClient.ContainerRemove(context.Background(), containerName, types.ContainerRemoveOptions{})
 	if err != nil {
-		return fmt.Errorf("Can't remove Docker container %s for host [%s]: %v", containerName, hostname, err)
+		return fmt.Errorf("Can't remove Docker container [%s] for host [%s]: %v", containerName, hostname, err)
 	}
 	return nil
 }
@@ -150,7 +151,7 @@ func RemoveContainer(dClient *client.Client, hostname string, containerName stri
 func StopContainer(dClient *client.Client, hostname string, containerName string) error {
 	err := dClient.ContainerStop(context.Background(), containerName, nil)
 	if err != nil {
-		return fmt.Errorf("Can't stop Docker container %s for host [%s]: %v", containerName, hostname, err)
+		return fmt.Errorf("Can't stop Docker container [%s] for host [%s]: %v", containerName, hostname, err)
 	}
 	return nil
 }
@@ -158,14 +159,14 @@ func StopContainer(dClient *client.Client, hostname string, containerName string
 func RenameContainer(dClient *client.Client, hostname string, oldContainerName string, newContainerName string) error {
 	err := dClient.ContainerRename(context.Background(), oldContainerName, newContainerName)
 	if err != nil {
-		return fmt.Errorf("Can't rename Docker container %s for host [%s]: %v", oldContainerName, hostname, err)
+		return fmt.Errorf("Can't rename Docker container [%s] for host [%s]: %v", oldContainerName, hostname, err)
 	}
 	return nil
 }
 
 func StartContainer(dClient *client.Client, hostname string, containerName string) error {
 	if err := dClient.ContainerStart(context.Background(), containerName, types.ContainerStartOptions{}); err != nil {
-		return fmt.Errorf("Failed to start %s container on host [%s]: %v", containerName, hostname, err)
+		return fmt.Errorf("Failed to start [%s] container on host [%s]: %v", containerName, hostname, err)
 	}
 	return nil
 }
@@ -173,7 +174,7 @@ func StartContainer(dClient *client.Client, hostname string, containerName strin
 func CreateContiner(dClient *client.Client, hostname string, containerName string, imageCfg *container.Config, hostCfg *container.HostConfig) (container.ContainerCreateCreatedBody, error) {
 	created, err := dClient.ContainerCreate(context.Background(), imageCfg, hostCfg, nil, containerName)
 	if err != nil {
-		return container.ContainerCreateCreatedBody{}, fmt.Errorf("Failed to create %s container on host [%s]: %v", containerName, hostname, err)
+		return container.ContainerCreateCreatedBody{}, fmt.Errorf("Failed to create [%s] container on host [%s]: %v", containerName, hostname, err)
 	}
 	return created, nil
 }
@@ -181,7 +182,7 @@ func CreateContiner(dClient *client.Client, hostname string, containerName strin
 func InspectContainer(dClient *client.Client, hostname string, containerName string) (types.ContainerJSON, error) {
 	inspection, err := dClient.ContainerInspect(context.Background(), containerName)
 	if err != nil {
-		return types.ContainerJSON{}, fmt.Errorf("Failed to inspect %s container on host [%s]: %v", containerName, hostname, err)
+		return types.ContainerJSON{}, fmt.Errorf("Failed to inspect [%s] container on host [%s]: %v", containerName, hostname, err)
 	}
 	return inspection, nil
 }
@@ -210,17 +211,17 @@ func WaitForContainer(dClient *client.Client, containerName string) error {
 }
 
 func IsContainerUpgradable(dClient *client.Client, imageCfg *container.Config, containerName string, hostname string, plane string) (bool, error) {
-	logrus.Debugf("[%s] Checking if container %s is eligible for upgrade on host [%s]", plane, containerName, hostname)
+	logrus.Debugf("[%s] Checking if container [%s] is eligible for upgrade on host [%s]", plane, containerName, hostname)
 	// this should be moved to a higher layer.
 	containerInspect, err := InspectContainer(dClient, hostname, containerName)
 	if err != nil {
 		return false, err
 	}
-	if containerInspect.Config.Image == imageCfg.Image {
-		logrus.Debugf("[%s] Container %s is not eligible for updgrade on host [%s]", plane, containerName, hostname)
-		return false, nil
+	if containerInspect.Config.Image != imageCfg.Image || !reflect.DeepEqual(containerInspect.Config.Cmd, imageCfg.Cmd) {
+		logrus.Debugf("[%s] Container [%s] is eligible for upgrade on host [%s]", plane, containerName, hostname)
+		return true, nil
 	}
-	logrus.Debugf("[%s] Container %s is eligible for updgrade on host [%s]", plane, containerName, hostname)
-	return true, nil
+	logrus.Debugf("[%s] Container [%s] is not eligible for upgrade on host [%s]", plane, containerName, hostname)
+	return false, nil
 }
diff --git a/hosts/hosts.go b/hosts/hosts.go
index 952f417e..b46edff3 100644
--- a/hosts/hosts.go
+++ b/hosts/hosts.go
@@ -15,7 +15,9 @@ import (
 
 type Host struct {
 	v1.RKEConfigNode
-	DClient *client.Client
+	DClient   *client.Client
+	IsControl bool
+	IsWorker  bool
 }
 
 const (
@@ -28,17 +30,50 @@ const (
 	CleanerImage        = "alpine:latest"
 )
 
-func (h *Host) CleanUp() error {
-	logrus.Infof("[hosts] Cleaning up host [%s]", h.Address)
-	toCleanDirs := []string{
+func (h *Host) CleanUpAll() error {
+	// the only supported removal for etcd dir is in rke remove
+	toCleanPaths := []string{
 		ToCleanEtcdDir,
 		ToCleanSSLDir,
 		ToCleanCNIConf,
 		ToCleanCNIBin,
 		ToCleanCalicoRun,
 	}
+	return h.CleanUp(toCleanPaths)
+}
+
+func (h *Host) CleanUpWorkerHost(controlRole string) error {
+	if h.IsControl {
+		logrus.Infof("[hosts] Host [%s] is already a controlplane host, skipping cleanup.", h.Address)
+		return nil
+	}
+	toCleanPaths := []string{
+		ToCleanSSLDir,
+		ToCleanCNIConf,
+		ToCleanCNIBin,
+		ToCleanCalicoRun,
+	}
+	return h.CleanUp(toCleanPaths)
+}
+
+func (h *Host) CleanUpControlHost(workerRole string) error {
+	if h.IsWorker {
+		logrus.Infof("[hosts] Host [%s] is already a worker host, skipping cleanup.", h.Address)
+		return nil
+	}
+	toCleanPaths := []string{
+		ToCleanSSLDir,
+		ToCleanCNIConf,
+		ToCleanCNIBin,
+		ToCleanCalicoRun,
+	}
+	return h.CleanUp(toCleanPaths)
+}
+
+func (h *Host) CleanUp(toCleanPaths []string) error {
+	logrus.Infof("[hosts] Cleaning up host [%s]", h.Address)
+	imageCfg, hostCfg := buildCleanerConfig(h, toCleanPaths)
 	logrus.Infof("[hosts] Running cleaner container on host [%s]", h.Address)
-	imageCfg, hostCfg := buildCleanerConfig(h, toCleanDirs)
 	if err := docker.DoRunContainer(h.DClient, imageCfg, hostCfg, CleanerContainerName, h.Address, CleanerContainerName); err != nil {
 		return err
 	}
@@ -55,7 +90,11 @@ func (h *Host) CleanUp() error {
 	return nil
 }
 
-func DeleteNode(toDeleteHost *Host, kubeClient *kubernetes.Clientset) error {
+func DeleteNode(toDeleteHost *Host, kubeClient *kubernetes.Clientset, hasAnotherRole bool) error {
+	if hasAnotherRole {
+		logrus.Infof("[hosts] host [%s] has another role, skipping delete from kubernetes cluster", toDeleteHost.Address)
+		return nil
+	}
 	logrus.Infof("[hosts] Cordoning host [%s]", toDeleteHost.Address)
 	if _, err := k8s.GetNode(kubeClient, toDeleteHost.HostnameOverride); err != nil {
 		if apierrors.IsNotFound(err) {
@@ -76,8 +115,8 @@ func DeleteNode(toDeleteHost *Host, kubeClient *kubernetes.Clientset) error {
 	return nil
 }
 
-func GetToDeleteHosts(currentHosts, configHosts []Host) []Host {
-	toDeleteHosts := []Host{}
+func GetToDeleteHosts(currentHosts, configHosts []*Host) []*Host {
+	toDeleteHosts := []*Host{}
 	for _, currentHost := range currentHosts {
 		found := false
 		for _, newHost := range configHosts {
@@ -92,7 +131,7 @@ func GetToDeleteHosts(currentHosts, configHosts []Host) []Host {
 	return toDeleteHosts
 }
 
-func IsHostListChanged(currentHosts, configHosts []Host) bool {
+func IsHostListChanged(currentHosts, configHosts []*Host) bool {
 	changed := false
 	for _, host := range currentHosts {
 		found := false
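The new `hasAnotherRole` guard in `DeleteNode` is what lets a combined control/worker host lose one role without being drained out of Kubernetes. A sketch of the call pattern, assuming the same flag semantics `reconcileWorker` uses above; the types and print statements are illustrative only:

```go
package main

import "fmt"

type host struct {
	Address   string
	IsControl bool
	IsWorker  bool
}

// deleteNode mirrors the guard in hosts.DeleteNode: a host that still holds
// another role stays registered in Kubernetes and only loses the services
// of the role being removed.
func deleteNode(h *host, hasAnotherRole bool) {
	if hasAnotherRole {
		fmt.Printf("[hosts] host [%s] has another role, skipping delete from kubernetes cluster\n", h.Address)
		return
	}
	fmt.Printf("[hosts] Cordoning, draining and deleting host [%s]\n", h.Address)
}

func main() {
	combined := &host{Address: "1.1.1.1", IsControl: true, IsWorker: true}
	// Removing the worker role from a combined host: pass IsControl as
	// hasAnotherRole, exactly as reconcileWorker does.
	deleteNode(combined, combined.IsControl)
}
```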
diff --git a/pki/deploy.go b/pki/deploy.go
index 30797bf5..cf2a8bc8 100644
--- a/pki/deploy.go
+++ b/pki/deploy.go
@@ -14,7 +14,7 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
-func DeployCertificatesOnMasters(cpHosts []hosts.Host, crtMap map[string]CertificatePKI) error {
+func DeployCertificatesOnMasters(cpHosts []*hosts.Host, crtMap map[string]CertificatePKI) error {
 	// list of certificates that should be deployed on the masters
 	crtList := []string{
 		CACertName,
@@ -31,7 +31,7 @@ func DeployCertificatesOnMasters(cpHosts []hosts.Host, crtMap map[string]Certifi
 	}
 
 	for i := range cpHosts {
-		err := doRunDeployer(&cpHosts[i], env)
+		err := doRunDeployer(cpHosts[i], env)
 		if err != nil {
 			return err
 		}
@@ -39,7 +39,7 @@ func DeployCertificatesOnMasters(cpHosts []hosts.Host, crtMap map[string]Certifi
 	return nil
 }
 
-func DeployCertificatesOnWorkers(workerHosts []hosts.Host, crtMap map[string]CertificatePKI) error {
+func DeployCertificatesOnWorkers(workerHosts []*hosts.Host, crtMap map[string]CertificatePKI) error {
 	// list of certificates that should be deployed on the workers
 	crtList := []string{
 		CACertName,
@@ -53,7 +53,7 @@ func DeployCertificatesOnWorkers(workerHosts []hosts.Host, crtMap map[string]Cer
 	}
 
 	for i := range workerHosts {
-		err := doRunDeployer(&workerHosts[i], env)
+		err := doRunDeployer(workerHosts[i], env)
 		if err != nil {
 			return err
 		}
diff --git a/pki/pki.go b/pki/pki.go
index ad7b17b3..bff8af14 100644
--- a/pki/pki.go
+++ b/pki/pki.go
@@ -27,7 +27,7 @@ type CertificatePKI struct {
 }
 
 // StartCertificatesGeneration ...
-func StartCertificatesGeneration(cpHosts []hosts.Host, workerHosts []hosts.Host, clusterDomain, localConfigPath string, KubernetesServiceIP net.IP) (map[string]CertificatePKI, error) {
+func StartCertificatesGeneration(cpHosts []*hosts.Host, workerHosts []*hosts.Host, clusterDomain, localConfigPath string, KubernetesServiceIP net.IP) (map[string]CertificatePKI, error) {
 	logrus.Infof("[certificates] Generating kubernetes certificates")
 	certs, err := generateCerts(cpHosts, clusterDomain, localConfigPath, KubernetesServiceIP)
 	if err != nil {
@@ -36,7 +36,7 @@ func StartCertificatesGeneration(cpHosts []hosts.Host, workerHosts []hosts.Host,
 	return certs, nil
 }
 
-func generateCerts(cpHosts []hosts.Host, clusterDomain, localConfigPath string, KubernetesServiceIP net.IP) (map[string]CertificatePKI, error) {
+func generateCerts(cpHosts []*hosts.Host, clusterDomain, localConfigPath string, KubernetesServiceIP net.IP) (map[string]CertificatePKI, error) {
 	certs := make(map[string]CertificatePKI)
 	// generate CA certificate and key
 	logrus.Infof("[certificates] Generating CA kubernetes certificates")
@@ -246,7 +246,7 @@ func generateCACertAndKey() (*x509.Certificate, *rsa.PrivateKey, error) {
 	return kubeCACert, rootKey, nil
 }
 
-func GetAltNames(cpHosts []hosts.Host, clusterDomain string, KubernetesServiceIP net.IP) *cert.AltNames {
+func GetAltNames(cpHosts []*hosts.Host, clusterDomain string, KubernetesServiceIP net.IP) *cert.AltNames {
 	ips := []net.IP{}
 	dnsNames := []string{}
 	for _, host := range cpHosts {
diff --git a/pki/pki_test.go b/pki/pki_test.go
index fe4b4a08..85780093 100644
--- a/pki/pki_test.go
+++ b/pki/pki_test.go
@@ -16,8 +16,8 @@ const (
 )
 
 func TestPKI(t *testing.T) {
-	cpHosts := []hosts.Host{
-		hosts.Host{
+	cpHosts := []*hosts.Host{
+		&hosts.Host{
 			RKEConfigNode: v1.RKEConfigNode{
 				Address:         "1.1.1.1",
 				InternalAddress: "192.168.1.5",
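The mechanical `[]hosts.Host` to `[]*hosts.Host` conversion running through pki and services is what makes the `IsControl`/`IsWorker` flags set once in `InvertIndexHosts` visible everywhere; with value slices each append takes a copy and the flags diverge. A self-contained illustration of the difference:

```go
package main

import "fmt"

type Host struct {
	Address   string
	IsControl bool
	IsWorker  bool
}

func main() {
	// One Host value appended to two role lists as a pointer, the way
	// InvertIndexHosts builds them after this change.
	h := &Host{Address: "1.1.1.1"}
	control := []*Host{h}
	workers := []*Host{h}

	control[0].IsControl = true // a flag set through one list...
	workers[0].IsWorker = true

	// ...is visible through the other: prints "true true"
	fmt.Println(workers[0].IsControl, control[0].IsWorker)
	// With []Host value slices each append copies the element, so the
	// flags would silently diverge between the lists.
}
```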
diff --git a/services/controlplane.go b/services/controlplane.go
index 3655ba5a..76d3f257 100644
--- a/services/controlplane.go
+++ b/services/controlplane.go
@@ -6,9 +6,15 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
-func RunControlPlane(controlHosts []hosts.Host, etcdHosts []hosts.Host, controlServices v1.RKEConfigServices) error {
+func RunControlPlane(controlHosts []*hosts.Host, etcdHosts []*hosts.Host, controlServices v1.RKEConfigServices) error {
 	logrus.Infof("[%s] Building up Controller Plane..", ControlRole)
 	for _, host := range controlHosts {
+
+		if host.IsWorker {
+			if err := removeNginxProxy(host); err != nil {
+				return err
+			}
+		}
 		// run kubeapi
 		err := runKubeAPI(host, etcdHosts, controlServices.KubeAPI)
 		if err != nil {
@@ -29,7 +35,7 @@ func RunControlPlane(controlHosts []hosts.Host, etcdHosts []hosts.Host, controlS
 	return nil
 }
 
-func RemoveControlPlane(controlHosts []hosts.Host) error {
+func RemoveControlPlane(controlHosts []*hosts.Host, force bool) error {
 	logrus.Infof("[%s] Tearing down the Controller Plane..", ControlRole)
 	for _, host := range controlHosts {
 		// remove KubeAPI
@@ -47,6 +53,20 @@ func RemoveControlPlane(controlHosts []hosts.Host) error {
 		if err != nil {
 			return err
 		}
+
+		// check if the host already is a worker
+		if host.IsWorker {
+			logrus.Infof("[%s] Host [%s] is already a worker host, skipping delete kubelet and kubeproxy.", ControlRole, host.Address)
+		} else {
+			// remove kubelet
+			if err := removeKubelet(host); err != nil {
+				return err
+			}
+			// remove kubeproxy
+			if err := removeKubeproxy(host); err != nil {
+				return err
+			}
+		}
 	}
 	logrus.Infof("[%s] Successfully teared down Controller Plane..", ControlRole)
 	return nil
diff --git a/services/etcd.go b/services/etcd.go
index bbc64e02..dce1394b 100644
--- a/services/etcd.go
+++ b/services/etcd.go
@@ -11,7 +11,7 @@ import (
 	"github.com/sirupsen/logrus"
 )
 
-func RunEtcdPlane(etcdHosts []hosts.Host, etcdService v1.ETCDService) error {
+func RunEtcdPlane(etcdHosts []*hosts.Host, etcdService v1.ETCDService) error {
 	logrus.Infof("[%s] Building up Etcd Plane..", ETCDRole)
 	initCluster := getEtcdInitialCluster(etcdHosts)
 	for _, host := range etcdHosts {
@@ -25,7 +25,7 @@ func RunEtcdPlane(etcdHosts []hosts.Host, etcdService v1.ETCDService) error {
 	return nil
 }
 
-func RemoveEtcdPlane(etcdHosts []hosts.Host) error {
+func RemoveEtcdPlane(etcdHosts []*hosts.Host) error {
 	logrus.Infof("[%s] Tearing down Etcd Plane..", ETCDRole)
 	for _, host := range etcdHosts {
 		err := docker.DoRemoveContainer(host.DClient, EtcdContainerName, host.Address)
@@ -37,7 +37,7 @@ func RemoveEtcdPlane(etcdHosts []hosts.Host) error {
 	return nil
 }
 
-func buildEtcdConfig(host hosts.Host, etcdService v1.ETCDService, initCluster string) (*container.Config, *container.HostConfig) {
+func buildEtcdConfig(host *hosts.Host, etcdService v1.ETCDService, initCluster string) (*container.Config, *container.HostConfig) {
 	imageCfg := &container.Config{
 		Image: etcdService.Image,
 		Cmd: []string{"/usr/local/bin/etcd",
@@ -72,13 +72,13 @@ func buildEtcdConfig(host hosts.Host, etcdService v1.ETCDService, initCluster st
 	}
 	for arg, value := range etcdService.ExtraArgs {
 		cmd := fmt.Sprintf("--%s=%s", arg, value)
-		imageCfg.Cmd = append(imageCfg.Cmd, cmd)
+		imageCfg.Entrypoint = append(imageCfg.Entrypoint, cmd)
 	}
 	return imageCfg, hostCfg
 }
 
-func GetEtcdConnString(hosts []hosts.Host) string {
+func GetEtcdConnString(hosts []*hosts.Host) string {
 	connString := ""
 	for i, host := range hosts {
 		connString += "http://" + host.InternalAddress + ":2379"
@@ -89,7 +89,7 @@ func GetEtcdConnString(hosts []hosts.Host) string {
 	return connString
 }
 
-func getEtcdInitialCluster(hosts []hosts.Host) string {
+func getEtcdInitialCluster(hosts []*hosts.Host) string {
 	initialCluster := ""
 	for i, host := range hosts {
 		initialCluster += fmt.Sprintf("etcd-%s=http://%s:2380", host.HostnameOverride, host.InternalAddress)
diff --git a/services/kubeapi.go b/services/kubeapi.go
index 8b1e241b..3d5aa6d6 100644
--- a/services/kubeapi.go
+++ b/services/kubeapi.go
@@ -11,17 +11,17 @@ import (
 	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 )
 
-func runKubeAPI(host hosts.Host, etcdHosts []hosts.Host, kubeAPIService v1.KubeAPIService) error {
+func runKubeAPI(host *hosts.Host, etcdHosts []*hosts.Host, kubeAPIService v1.KubeAPIService) error {
 	etcdConnString := GetEtcdConnString(etcdHosts)
 	imageCfg, hostCfg := buildKubeAPIConfig(host, kubeAPIService, etcdConnString)
 	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeAPIContainerName, host.Address, ControlRole)
 }
 
-func removeKubeAPI(host hosts.Host) error {
+func removeKubeAPI(host *hosts.Host) error {
 	return docker.DoRemoveContainer(host.DClient, KubeAPIContainerName, host.Address)
 }
 
-func buildKubeAPIConfig(host hosts.Host, kubeAPIService v1.KubeAPIService, etcdConnString string) (*container.Config, *container.HostConfig) {
+func buildKubeAPIConfig(host *hosts.Host, kubeAPIService v1.KubeAPIService, etcdConnString string) (*container.Config, *container.HostConfig) {
 	imageCfg := &container.Config{
 		Image: kubeAPIService.Image,
 		Entrypoint: []string{"kube-apiserver",
@@ -61,7 +61,7 @@ func buildKubeAPIConfig(host hosts.Host, kubeAPIService v1.KubeAPIService, etcdC
 	for arg, value := range kubeAPIService.ExtraArgs {
 		cmd := fmt.Sprintf("--%s=%s", arg, value)
-		imageCfg.Cmd = append(imageCfg.Cmd, cmd)
+		imageCfg.Entrypoint = append(imageCfg.Entrypoint, cmd)
 	}
 	return imageCfg, hostCfg
 }
diff --git a/services/kubecontroller.go b/services/kubecontroller.go
index a8d30afd..3873851f 100644
--- a/services/kubecontroller.go
+++ b/services/kubecontroller.go
@@ -10,12 +10,12 @@ import (
 	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 )
 
-func runKubeController(host hosts.Host, kubeControllerService v1.KubeControllerService) error {
+func runKubeController(host *hosts.Host, kubeControllerService v1.KubeControllerService) error {
 	imageCfg, hostCfg := buildKubeControllerConfig(kubeControllerService)
 	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeControllerContainerName, host.Address, ControlRole)
 }
 
-func removeKubeController(host hosts.Host) error {
+func removeKubeController(host *hosts.Host) error {
 	return docker.DoRemoveContainer(host.DClient, KubeControllerContainerName, host.Address)
 }
 
@@ -47,7 +47,7 @@ func buildKubeControllerConfig(kubeControllerService v1.KubeControllerService) (
 	}
 	for arg, value := range kubeControllerService.ExtraArgs {
 		cmd := fmt.Sprintf("--%s=%s", arg, value)
-		imageCfg.Cmd = append(imageCfg.Cmd, cmd)
+		imageCfg.Entrypoint = append(imageCfg.Entrypoint, cmd)
 	}
 	return imageCfg, hostCfg
 }
diff --git a/services/kubelet.go b/services/kubelet.go
index 842883a0..d65802cd 100644
--- a/services/kubelet.go
+++ b/services/kubelet.go
@@ -11,16 +11,16 @@ import (
 	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 )
 
-func runKubelet(host hosts.Host, kubeletService v1.KubeletService) error {
+func runKubelet(host *hosts.Host, kubeletService v1.KubeletService) error {
 	imageCfg, hostCfg := buildKubeletConfig(host, kubeletService)
 	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeletContainerName, host.Address, WorkerRole)
 }
 
-func removeKubelet(host hosts.Host) error {
+func removeKubelet(host *hosts.Host) error {
 	return docker.DoRemoveContainer(host.DClient, KubeletContainerName, host.Address)
 }
 
-func buildKubeletConfig(host hosts.Host, kubeletService v1.KubeletService) (*container.Config, *container.HostConfig) {
+func buildKubeletConfig(host *hosts.Host, kubeletService v1.KubeletService) (*container.Config, *container.HostConfig) {
 	imageCfg := &container.Config{
 		Image: kubeletService.Image,
 		Entrypoint: []string{"kubelet",
@@ -80,7 +80,7 @@ func buildKubeletConfig(host hosts.Host, kubeletService v1.KubeletService) (*con
 	}
 	for arg, value := range kubeletService.ExtraArgs {
 		cmd := fmt.Sprintf("--%s=%s", arg, value)
-		imageCfg.Cmd = append(imageCfg.Cmd, cmd)
+		imageCfg.Entrypoint = append(imageCfg.Entrypoint, cmd)
 	}
 	return imageCfg, hostCfg
 }
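The `ExtraArgs` loops now append to `Entrypoint` instead of `Cmd`. Docker assembles a container's argv as `Entrypoint` followed by `Cmd`, and these service containers already carry their base command line in `Entrypoint` (see `buildKubeAPIConfig` above), so the change keeps every flag in one list. A toy showing only the combination rule, not Docker's implementation:

```go
package main

import "fmt"

// argv shows Docker's combination rule: the process command line is
// Entrypoint followed by Cmd.
func argv(entrypoint, cmd []string) []string {
	return append(append([]string{}, entrypoint...), cmd...)
}

func main() {
	entrypoint := []string{"kube-apiserver", "--insecure-port=8080"}
	extraArgs := map[string]string{"v": "4"}
	for arg, value := range extraArgs {
		// same formatting as the ExtraArgs loops in the diff
		entrypoint = append(entrypoint, fmt.Sprintf("--%s=%s", arg, value))
	}
	fmt.Println(argv(entrypoint, nil))
	// [kube-apiserver --insecure-port=8080 --v=4]
}
```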
diff --git a/services/kubeproxy.go b/services/kubeproxy.go
index fc1f67dd..91b64d86 100644
--- a/services/kubeproxy.go
+++ b/services/kubeproxy.go
@@ -10,16 +10,16 @@ import (
 	"github.com/rancher/types/apis/cluster.cattle.io/v1"
 )
 
-func runKubeproxy(host hosts.Host, kubeproxyService v1.KubeproxyService) error {
+func runKubeproxy(host *hosts.Host, kubeproxyService v1.KubeproxyService) error {
 	imageCfg, hostCfg := buildKubeproxyConfig(host, kubeproxyService)
 	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, KubeproxyContainerName, host.Address, WorkerRole)
 }
 
-func removeKubeproxy(host hosts.Host) error {
+func removeKubeproxy(host *hosts.Host) error {
 	return docker.DoRemoveContainer(host.DClient, KubeproxyContainerName, host.Address)
 }
 
-func buildKubeproxyConfig(host hosts.Host, kubeproxyService v1.KubeproxyService) (*container.Config, *container.HostConfig) {
+func buildKubeproxyConfig(host *hosts.Host, kubeproxyService v1.KubeproxyService) (*container.Config, *container.HostConfig) {
 	imageCfg := &container.Config{
 		Image: kubeproxyService.Image,
 		Entrypoint: []string{"kube-proxy",
@@ -38,7 +38,7 @@ func buildKubeproxyConfig(host hosts.Host, kubeproxyService v1.KubeproxyService)
 	}
 	for arg, value := range kubeproxyService.ExtraArgs {
 		cmd := fmt.Sprintf("--%s=%s", arg, value)
-		imageCfg.Cmd = append(imageCfg.Cmd, cmd)
+		imageCfg.Entrypoint = append(imageCfg.Entrypoint, cmd)
 	}
 	return imageCfg, hostCfg
 }
diff --git a/services/proxy.go b/services/proxy.go
index 47fc9cb8..9ba6f012 100644
--- a/services/proxy.go
+++ b/services/proxy.go
@@ -13,7 +13,7 @@ const (
 	NginxProxyEnvName = "CP_HOSTS"
 )
 
-func RollingUpdateNginxProxy(cpHosts []hosts.Host, workerHosts []hosts.Host) error {
+func RollingUpdateNginxProxy(cpHosts []*hosts.Host, workerHosts []*hosts.Host) error {
 	nginxProxyEnv := buildProxyEnv(cpHosts)
 	for _, host := range workerHosts {
 		imageCfg, hostCfg := buildNginxProxyConfig(host, nginxProxyEnv)
@@ -24,17 +24,17 @@ func RollingUpdateNginxProxy(cpHosts []hosts.Host, workerHosts []hosts.Host) err
 	return nil
 }
 
-func runNginxProxy(host hosts.Host, cpHosts []hosts.Host) error {
+func runNginxProxy(host *hosts.Host, cpHosts []*hosts.Host) error {
 	nginxProxyEnv := buildProxyEnv(cpHosts)
 	imageCfg, hostCfg := buildNginxProxyConfig(host, nginxProxyEnv)
 	return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, NginxProxyContainerName, host.Address, WorkerRole)
 }
 
-func removeNginxProxy(host hosts.Host) error {
+func removeNginxProxy(host *hosts.Host) error {
 	return docker.DoRemoveContainer(host.DClient, NginxProxyContainerName, host.Address)
 }
 
-func buildNginxProxyConfig(host hosts.Host, nginxProxyEnv string) (*container.Config, *container.HostConfig) {
+func buildNginxProxyConfig(host *hosts.Host, nginxProxyEnv string) (*container.Config, *container.HostConfig) {
 	imageCfg := &container.Config{
 		Image: NginxProxyImage,
 		Env:   []string{fmt.Sprintf("%s=%s", NginxProxyEnvName, nginxProxyEnv)},
@@ -47,7 +47,7 @@ func buildNginxProxyConfig(host hosts.Host, nginxProxyEnv string) (*container.Co
 	return imageCfg, hostCfg
 }
 
-func buildProxyEnv(cpHosts []hosts.Host) string {
+func buildProxyEnv(cpHosts []*hosts.Host) string {
 	proxyEnv := ""
 	for i, cpHost := range cpHosts {
 		proxyEnv += fmt.Sprintf("%s", cpHost.InternalAddress)
Entrypoint: []string{"kube-scheduler", @@ -38,7 +38,7 @@ func buildSchedulerConfig(host hosts.Host, schedulerService v1.SchedulerService) } for arg, value := range schedulerService.ExtraArgs { cmd := fmt.Sprintf("--%s=%s", arg, value) - imageCfg.Cmd = append(imageCfg.Cmd, cmd) + imageCfg.Entrypoint = append(imageCfg.Entrypoint, cmd) } return imageCfg, hostCfg } diff --git a/services/workerplane.go b/services/workerplane.go index af039ef1..c62a1a93 100644 --- a/services/workerplane.go +++ b/services/workerplane.go @@ -6,7 +6,7 @@ import ( "github.com/sirupsen/logrus" ) -func RunWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host, workerServices v1.RKEConfigServices) error { +func RunWorkerPlane(controlHosts []*hosts.Host, workerHosts []*hosts.Host, workerServices v1.RKEConfigServices) error { logrus.Infof("[%s] Building up Worker Plane..", WorkerRole) for _, host := range controlHosts { // only one master for now @@ -19,14 +19,7 @@ func RunWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host, workerS } for _, host := range workerHosts { // run nginx proxy - isControlPlaneHost := false - for _, role := range host.Role { - if role == ControlRole { - isControlPlaneHost = true - break - } - } - if !isControlPlaneHost { + if !host.IsControl { if err := runNginxProxy(host, controlHosts); err != nil { return err } @@ -44,18 +37,15 @@ func RunWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host, workerS return nil } -func RemoveWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host) error { +func RemoveWorkerPlane(workerHosts []*hosts.Host, force bool) error { logrus.Infof("[%s] Tearing down Worker Plane..", WorkerRole) - for _, host := range controlHosts { - if err := removeKubelet(host); err != nil { - return err - } - if err := removeKubeproxy(host); err != nil { - return err - } - } - for _, host := range workerHosts { + // check if the host already is a controlplane + if host.IsControl && !force { + logrus.Infof("[%s] Host [%s] is already a controlplane host, nothing to do.", WorkerRole, host.Address) + return nil + } + if err := removeKubelet(host); err != nil { return err } @@ -65,7 +55,8 @@ func RemoveWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host) erro if err := removeNginxProxy(host); err != nil { return err } + logrus.Infof("[%s] Successfully teared down Worker Plane..", WorkerRole) } - logrus.Infof("[%s] Successfully teared down Worker Plane..", WorkerRole) + return nil }