Mirror of https://github.com/rancher/rke.git
Cluster Down/Remove
Reverse order and add force flag
This commit is contained in:
parent e53f7adf02
commit 892e9ab5d9

cluster/down.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package cluster

import (
    "github.com/rancher/rke/hosts"
    "github.com/rancher/rke/services"
)

func (c *Cluster) ClusterDown() error {
    // Remove Worker Plane
    if err := services.RemoveWorkerPlane(c.ControlPlaneHosts, c.WorkerHosts); err != nil {
        return err
    }

    // Remove Control Plane
    if err := services.RemoveControlPlane(c.ControlPlaneHosts); err != nil {
        return err
    }

    // Remove Etcd Plane
    if err := services.RemoveEtcdPlane(c.EtcdHosts); err != nil {
        return err
    }

    // Clean up all hosts
    if err := cleanUpHosts(c.ControlPlaneHosts, c.WorkerHosts, c.EtcdHosts); err != nil {
        return err
    }
    return nil
}

func cleanUpHosts(cpHosts, workerHosts, etcdHosts []hosts.Host) error {
    allHosts := []hosts.Host{}
    allHosts = append(allHosts, cpHosts...)
    allHosts = append(allHosts, workerHosts...)
    allHosts = append(allHosts, etcdHosts...)

    for _, host := range allHosts {
        if err := host.CleanUp(); err != nil {
            return err
        }
    }
    return nil
}
cmd/cluster.go (180 lines)
@@ -1,6 +1,7 @@
package cmd

import (
    "bufio"
    "fmt"
    "io/ioutil"
    "os"
@@ -14,7 +15,7 @@ import (
)

func ClusterCommand() cli.Command {
    clusterUpFlags := []cli.Flag{
    clusterUpDownUpgradeFlags := []cli.Flag{
        cli.StringFlag{
            Name:  "cluster-file",
            Usage: "Specify an alternate cluster YAML file",
@@ -22,37 +23,41 @@ func ClusterCommand() {
            EnvVar: "CLUSTER_FILE",
        },
    }
    clusterUpgradeFlags := []cli.Flag{
        cli.StringFlag{
            Name:   "cluster-file",
            Usage:  "Specify an upgraded cluster YAML file",
            Value:  "cluster.yml",
            EnvVar: "CLUSTER_FILE",
    clusterForceRemove := []cli.Flag{
        cli.BoolFlag{
            Name:  "force",
            Usage: "Force removal of the cluster",
        },
    }
    return cli.Command{
        Name:      "cluster",
        ShortName: "cluster",
        Usage:     "Operations on the cluster",
        Flags:     clusterUpFlags,
        Flags:     clusterUpDownUpgradeFlags,
        Subcommands: []cli.Command{
            cli.Command{
                Name:   "up",
                Usage:  "Bring the cluster up",
                Action: clusterUpFromCli,
                Flags:  clusterUpFlags,
                Flags:  clusterUpDownUpgradeFlags,
            },
            cli.Command{
                Name:   "down",
                Usage:  "Tear down the cluster and clean cluster nodes",
                Action: clusterDownFromCli,
                Flags:  append(clusterUpDownUpgradeFlags, clusterForceRemove...),
            },
            cli.Command{
                Name:   "version",
                Usage:  "Show Cluster Kubernetes version",
                Action: getClusterVersion,
                Flags:  clusterUpFlags,
                Flags:  clusterUpDownUpgradeFlags,
            },
            cli.Command{
                Name:   "upgrade",
                Usage:  "Upgrade Cluster Kubernetes version",
                Action: clusterUpgradeFromCli,
                Flags:  clusterUpgradeFlags,
                Flags:  clusterUpDownUpgradeFlags,
            },
        },
    }
@@ -125,61 +130,6 @@ func ClusterUp(clusterFile string) (string, string, string, string, error) {
    return APIURL, caCrt, clientCert, clientKey, nil
}

func clusterUpFromCli(ctx *cli.Context) error {
    clusterFile, err := resolveClusterFile(ctx)
    if err != nil {
        return fmt.Errorf("Failed to resolve cluster file: %v", err)
    }
    _, _, _, _, err = ClusterUp(clusterFile)
    return err
}

func resolveClusterFile(ctx *cli.Context) (string, error) {
    clusterFile := ctx.String("cluster-file")
    fp, err := filepath.Abs(clusterFile)
    if err != nil {
        return "", fmt.Errorf("failed to lookup current directory name: %v", err)
    }
    file, err := os.Open(fp)
    if err != nil {
        return "", fmt.Errorf("Can not find cluster configuration file: %v", err)
    }
    defer file.Close()
    buf, err := ioutil.ReadAll(file)
    if err != nil {
        return "", fmt.Errorf("failed to read file: %v", err)
    }
    clusterFileBuff := string(buf)

    /*
        This is a hacky way to add the config path to the cluster object without messing with
        the ClusterUp function and to avoid conflicts with calls from kontainer-engine. Basically,
        the config path (cluster.yml by default) is appended as a field to the config buffer,
        to be parsed later and set as the ConfigPath field on the cluster object.
    */
    clusterFileBuff = fmt.Sprintf("%s\nconfig_path: %s\n", clusterFileBuff, clusterFile)
    return clusterFileBuff, nil
}

func getClusterVersion(ctx *cli.Context) error {
    localKubeConfig := cluster.GetLocalKubeConfig(ctx.String("cluster-file"))
    serverVersion, err := cluster.GetK8sVersion(localKubeConfig)
    if err != nil {
        return err
    }
    fmt.Printf("Server Version: %s\n", serverVersion)
    return nil
}

func clusterUpgradeFromCli(ctx *cli.Context) error {
    clusterFile, err := resolveClusterFile(ctx)
    if err != nil {
        return fmt.Errorf("Failed to resolve cluster file: %v", err)
    }
    _, _, _, _, err = ClusterUpgrade(clusterFile)
    return err
}

func ClusterUpgrade(clusterFile string) (string, string, string, string, error) {
    logrus.Infof("Upgrading Kubernetes cluster")
    var APIURL, caCrt, clientCert, clientKey string
@@ -222,5 +172,101 @@ func ClusterUpgrade(clusterFile string) (string, string, string, string, error)
    clientCert = string(cert.EncodeCertPEM(kubeCluster.Certificates[pki.KubeAdminCommonName].Certificate))
    clientKey = string(cert.EncodePrivateKeyPEM(kubeCluster.Certificates[pki.KubeAdminCommonName].Key))
    return APIURL, caCrt, clientCert, clientKey, nil

}

func ClusterDown(clusterFile string) error {
    logrus.Infof("Tearing down Kubernetes cluster")
    kubeCluster, err := cluster.ParseConfig(clusterFile)
    if err != nil {
        return err
    }

    err = kubeCluster.TunnelHosts()
    if err != nil {
        return err
    }

    logrus.Debugf("Starting Cluster removal")
    err = kubeCluster.ClusterDown()
    if err != nil {
        return err
    }

    logrus.Infof("Cluster removed successfully")
    return nil
}

func clusterUpFromCli(ctx *cli.Context) error {
    clusterFile, err := resolveClusterFile(ctx)
    if err != nil {
        return fmt.Errorf("Failed to resolve cluster file: %v", err)
    }
    _, _, _, _, err = ClusterUp(clusterFile)
    return err
}

func clusterUpgradeFromCli(ctx *cli.Context) error {
    clusterFile, err := resolveClusterFile(ctx)
    if err != nil {
        return fmt.Errorf("Failed to resolve cluster file: %v", err)
    }
    _, _, _, _, err = ClusterUpgrade(clusterFile)
    return err
}

func clusterDownFromCli(ctx *cli.Context) error {
    force := ctx.Bool("force")
    if !force {
        reader := bufio.NewReader(os.Stdin)
        fmt.Printf("Are you sure you want to remove Kubernetes cluster [y/n]: ")
        input, err := reader.ReadString('\n')
        if err != nil {
            return err
        }
        if input != "y" && input != "Y" {
            return nil
        }
    }
    clusterFile, err := resolveClusterFile(ctx)
    if err != nil {
        return fmt.Errorf("Failed to resolve cluster file: %v", err)
    }
    return ClusterDown(clusterFile)
}

func resolveClusterFile(ctx *cli.Context) (string, error) {
    clusterFile := ctx.String("cluster-file")
    fp, err := filepath.Abs(clusterFile)
    if err != nil {
        return "", fmt.Errorf("failed to lookup current directory name: %v", err)
    }
    file, err := os.Open(fp)
    if err != nil {
        return "", fmt.Errorf("Can not find cluster configuration file: %v", err)
    }
    defer file.Close()
    buf, err := ioutil.ReadAll(file)
    if err != nil {
        return "", fmt.Errorf("failed to read file: %v", err)
    }
    clusterFileBuff := string(buf)

    /*
        This is a hacky way to add the config path to the cluster object without messing with
        the ClusterUp function and to avoid conflicts with calls from kontainer-engine. Basically,
        the config path (cluster.yml by default) is appended as a field to the config buffer,
        to be parsed later and set as the ConfigPath field on the cluster object.
    */
    clusterFileBuff = fmt.Sprintf("%s\nconfig_path: %s\n", clusterFileBuff, clusterFile)
    return clusterFileBuff, nil
}

func getClusterVersion(ctx *cli.Context) error {
    localKubeConfig := cluster.GetLocalKubeConfig(ctx.String("cluster-file"))
    serverVersion, err := cluster.GetK8sVersion(localKubeConfig)
    if err != nil {
        return err
    }
    fmt.Printf("Server Version: %s\n", serverVersion)
    return nil
}
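With the wiring above, a user would presumably run "rke cluster down" and answer the y/n prompt, or add "--force" to skip it (the rke binary name is assumed from the repository; the subcommand and flag names come from this diff). The exported ClusterDown can also be driven from Go, much as the comment in resolveClusterFile describes kontainer-engine consuming these entry points. A minimal caller sketch, assuming the github.com/rancher/rke/cmd import path and a cluster.yml in the working directory:

package main

import (
    "fmt"
    "io/ioutil"
    "log"

    "github.com/rancher/rke/cmd" // assumed import path for the cmd package shown above
)

func main() {
    // Read the cluster definition, as resolveClusterFile does.
    buf, err := ioutil.ReadFile("cluster.yml")
    if err != nil {
        log.Fatal(err)
    }
    // Append config_path so the parsed cluster keeps a reference to its config file,
    // mirroring the append performed in resolveClusterFile.
    clusterFileBuff := fmt.Sprintf("%s\nconfig_path: %s\n", string(buf), "cluster.yml")

    // Tear the cluster down: worker plane, control plane, etcd, then host cleanup.
    if err := cmd.ClusterDown(clusterFileBuff); err != nil {
        log.Fatal(err)
    }
}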
@@ -69,6 +69,32 @@ func DoRollingUpdateContainer(dClient *client.Client, imageCfg *container.Config
    return err
}

func DoRemoveContainer(dClient *client.Client, containerName, hostname string) error {
    logrus.Infof("[down/%s] Checking if container is running on host [%s]", containerName, hostname)
    // not using the wrapper to check if the error is a NotFound error
    _, err := dClient.ContainerInspect(context.Background(), containerName)
    if err != nil {
        if client.IsErrNotFound(err) {
            logrus.Infof("[down/%s] Container doesn't exist on host [%s]", containerName, hostname)
            return nil
        }
        return err
    }
    logrus.Infof("[down/%s] Stopping container on host [%s]", containerName, hostname)
    err = StopContainer(dClient, hostname, containerName)
    if err != nil {
        return err
    }

    logrus.Infof("[down/%s] Removing container on host [%s]", containerName, hostname)
    err = RemoveContainer(dClient, hostname, containerName)
    if err != nil {
        return err
    }
    logrus.Infof("[down/%s] Successfully removed container on host [%s]", containerName, hostname)
    return nil
}

func IsContainerRunning(dClient *client.Client, hostname string, containerName string) (bool, error) {
    logrus.Debugf("Checking if container %s is running on host [%s]", containerName, hostname)
    containers, err := dClient.ContainerList(context.Background(), types.ContainerListOptions{})
@@ -1,7 +1,11 @@
package hosts

import (
    "fmt"

    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/client"
    "github.com/rancher/rke/docker"
    "github.com/rancher/rke/k8s"
    "github.com/rancher/types/apis/cluster.cattle.io/v1"
    "github.com/sirupsen/logrus"
@@ -13,6 +17,41 @@ type Host struct {
    DClient *client.Client
}

const (
    ToCleanEtcdDir       = "/var/lib/etcd"
    ToCleanSSLDir        = "/etc/kubernetes/ssl"
    ToCleanCNIConf       = "/etc/cni"
    ToCleanCNIBin        = "/opt/cni"
    CleanerContainerName = "kube-cleaner"
    CleanerImage         = "alpine:latest"
)

func (h *Host) CleanUp() error {
    logrus.Infof("[down] Cleaning up host [%s]", h.AdvertisedHostname)
    toCleanDirs := []string{
        ToCleanEtcdDir,
        ToCleanSSLDir,
        ToCleanCNIConf,
        ToCleanCNIBin,
    }
    logrus.Infof("[down] Running cleaner container on host [%s]", h.AdvertisedHostname)
    imageCfg, hostCfg := buildCleanerConfig(h, toCleanDirs)
    if err := docker.DoRunContainer(h.DClient, imageCfg, hostCfg, CleanerContainerName, h.AdvertisedHostname, CleanerContainerName); err != nil {
        return err
    }

    if err := docker.WaitForContainer(h.DClient, CleanerContainerName); err != nil {
        return err
    }

    logrus.Infof("[down] Removing cleaner container on host [%s]", h.AdvertisedHostname)
    if err := docker.RemoveContainer(h.DClient, h.AdvertisedHostname, CleanerContainerName); err != nil {
        return err
    }
    logrus.Infof("[down] Successfully cleaned up host [%s]", h.AdvertisedHostname)
    return nil
}

func DeleteNode(toDeleteHost *Host, kubeClient *kubernetes.Clientset) error {
    logrus.Infof("[hosts] Cordoning host [%s]", toDeleteHost.AdvertisedHostname)
    err := k8s.CordonUncordon(kubeClient, toDeleteHost.AdvertisedHostname, true)
@@ -70,3 +109,19 @@ func IsHostListChanged(currentHosts, configHosts []Host) bool {
    }
    return changed
}

func buildCleanerConfig(host *Host, toCleanDirs []string) (*container.Config, *container.HostConfig) {
    cmd := append([]string{"rm", "-rf"}, toCleanDirs...)
    imageCfg := &container.Config{
        Image: CleanerImage,
        Cmd:   cmd,
    }
    bindMounts := []string{}
    for _, vol := range toCleanDirs {
        bindMounts = append(bindMounts, fmt.Sprintf("%s:%s", vol, vol))
    }
    hostCfg := &container.HostConfig{
        Binds: bindMounts,
    }
    return imageCfg, hostCfg
}
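For illustration only (not part of the diff): on each host, the cleaner container built above is roughly equivalent to running
docker run -v /var/lib/etcd:/var/lib/etcd -v /etc/kubernetes/ssl:/etc/kubernetes/ssl -v /etc/cni:/etc/cni -v /opt/cni:/opt/cni alpine:latest rm -rf /var/lib/etcd /etc/kubernetes/ssl /etc/cni /opt/cni
except that it is driven through the Docker API rather than the CLI.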
@@ -51,3 +51,26 @@ func UpgradeControlPlane(controlHosts []hosts.Host, etcdHosts []hosts.Host, cont
    logrus.Infof("[%s] Successfully upgraded Controller Plane..", ControlRole)
    return nil
}

func RemoveControlPlane(controlHosts []hosts.Host) error {
    logrus.Infof("[%s] Tearing down the Controller Plane..", ControlRole)
    for _, host := range controlHosts {
        // remove KubeAPI
        if err := removeKubeAPI(host); err != nil {
            return err
        }

        // remove KubeController
        if err := removeKubeController(host); err != nil {
            return err
        }

        // remove scheduler
        err := removeScheduler(host)
        if err != nil {
            return err
        }
    }
    logrus.Infof("[%s] Successfully tore down Controller Plane..", ControlRole)
    return nil
}
@@ -25,6 +25,18 @@ func RunEtcdPlane(etcdHosts []hosts.Host, etcdService v1.ETCDService) error {
    return nil
}

func RemoveEtcdPlane(etcdHosts []hosts.Host) error {
    logrus.Infof("[%s] Tearing down Etcd Plane..", ETCDRole)
    for _, host := range etcdHosts {
        err := docker.DoRemoveContainer(host.DClient, EtcdContainerName, host.AdvertisedHostname)
        if err != nil {
            return err
        }
    }
    logrus.Infof("[%s] Successfully tore down Etcd Plane..", ETCDRole)
    return nil
}

func buildEtcdConfig(host hosts.Host, etcdService v1.ETCDService, initCluster string) (*container.Config, *container.HostConfig) {
    imageCfg := &container.Config{
        Image: etcdService.Image,
@@ -43,6 +43,10 @@ func upgradeKubeAPI(host hosts.Host, etcdHosts []hosts.Host, kubeAPIService v1.K
    return err
}

func removeKubeAPI(host hosts.Host) error {
    return docker.DoRemoveContainer(host.DClient, KubeAPIContainerName, host.AdvertisedHostname)
}

func buildKubeAPIConfig(host hosts.Host, kubeAPIService v1.KubeAPIService, etcdConnString string) (*container.Config, *container.HostConfig) {
    imageCfg := &container.Config{
        Image: kubeAPIService.Image,
@@ -42,6 +42,10 @@ func upgradeKubeController(host hosts.Host, kubeControllerService v1.KubeControl

}

func removeKubeController(host hosts.Host) error {
    return docker.DoRemoveContainer(host.DClient, KubeControllerContainerName, host.AdvertisedHostname)
}

func buildKubeControllerConfig(kubeControllerService v1.KubeControllerService) (*container.Config, *container.HostConfig) {
    imageCfg := &container.Config{
        Image: kubeControllerService.Image,
@@ -42,6 +42,10 @@ func upgradeKubelet(host hosts.Host, kubeletService v1.KubeletService, isMaster
    return err
}

func removeKubelet(host hosts.Host) error {
    return docker.DoRemoveContainer(host.DClient, KubeletContainerName, host.AdvertisedHostname)
}

func buildKubeletConfig(host hosts.Host, kubeletService v1.KubeletService, isMaster bool) (*container.Config, *container.HostConfig) {
    imageCfg := &container.Config{
        Image: kubeletService.Image,
@@ -40,6 +40,11 @@ func upgradeKubeproxy(host hosts.Host, kubeproxyService v1.KubeproxyService) err
    err = docker.RemoveContainer(host.DClient, host.AdvertisedHostname, oldContainerName)
    return err
}

func removeKubeproxy(host hosts.Host) error {
    return docker.DoRemoveContainer(host.DClient, KubeproxyContainerName, host.AdvertisedHostname)
}

func buildKubeproxyConfig(host hosts.Host, kubeproxyService v1.KubeproxyService) (*container.Config, *container.HostConfig) {
    imageCfg := &container.Config{
        Image: kubeproxyService.Image,
@@ -28,6 +28,10 @@ func runNginxProxy(host hosts.Host, cpHosts []hosts.Host) error {
    return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, NginxProxyContainerName, host.AdvertisedHostname, WorkerRole)
}

func removeNginxProxy(host hosts.Host) error {
    return docker.DoRemoveContainer(host.DClient, NginxProxyContainerName, host.AdvertisedHostname)
}

func buildNginxProxyConfig(host hosts.Host, nginxProxyEnv string) (*container.Config, *container.HostConfig) {
    imageCfg := &container.Config{
        Image: NginxProxyImage,
@@ -15,6 +15,7 @@ func runScheduler(host hosts.Host, schedulerService v1.SchedulerService) error {
    imageCfg, hostCfg := buildSchedulerConfig(host, schedulerService)
    return docker.DoRunContainer(host.DClient, imageCfg, hostCfg, SchedulerContainerName, host.AdvertisedHostname, ControlRole)
}

func upgradeScheduler(host hosts.Host, schedulerService v1.SchedulerService) error {
    logrus.Debugf("[upgrade/Scheduler] Checking for deployed version")
    containerInspect, err := docker.InspectContainer(host.DClient, host.AdvertisedHostname, SchedulerContainerName)
@@ -40,6 +41,10 @@ func upgradeScheduler(host hosts.Host, schedulerService v1.SchedulerService) err
    return err
}

func removeScheduler(host hosts.Host) error {
    return docker.DoRemoveContainer(host.DClient, SchedulerContainerName, host.AdvertisedHostname)
}

func buildSchedulerConfig(host hosts.Host, schedulerService v1.SchedulerService) (*container.Config, *container.HostConfig) {
    imageCfg := &container.Config{
        Image: schedulerService.Image,
@@ -92,3 +92,34 @@ func UpgradeWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host, wor
    logrus.Infof("[%s] Successfully upgraded Worker Plane..", WorkerRole)
    return nil
}

func RemoveWorkerPlane(controlHosts []hosts.Host, workerHosts []hosts.Host) error {
    logrus.Infof("[%s] Tearing down Worker Plane..", WorkerRole)
    for _, host := range controlHosts {
        err := removeKubelet(host)
        if err != nil {
            return err
        }
        err = removeKubeproxy(host)
        if err != nil {
            return err
        }
    }

    for _, host := range workerHosts {
        err := removeKubelet(host)
        if err != nil {
            return err
        }
        err = removeKubeproxy(host)
        if err != nil {
            return err
        }
        err = removeNginxProxy(host)
        if err != nil {
            return err
        }
    }
    logrus.Infof("[%s] Successfully tore down Worker Plane..", WorkerRole)
    return nil
}