diff --git a/cluster/cluster.go b/cluster/cluster.go
index 792b2e15..f582d398 100644
--- a/cluster/cluster.go
+++ b/cluster/cluster.go
@@ -370,6 +370,10 @@ func (c *Cluster) parseCloudConfig(ctx context.Context) (string, error) {
 		}
 		return string(jsonString), nil
 	}
+	if c.CloudProvider.AWSCloudProvider != nil {
+		c.CloudProvider.Name = AWSCloudProvider
+		return "", nil
+	}
 	if len(c.CloudProvider.CloudConfig) == 0 {
 		return "", nil
 	}
diff --git a/cluster/reconcile.go b/cluster/reconcile.go
index a628a049..f2a15032 100644
--- a/cluster/reconcile.go
+++ b/cluster/reconcile.go
@@ -56,7 +56,7 @@ func reconcileWorker(ctx context.Context, currentCluster, kubeCluster *Cluster, 
 	wpToDelete := hosts.GetToDeleteHosts(currentCluster.WorkerHosts, kubeCluster.WorkerHosts, kubeCluster.InactiveHosts)
 	for _, toDeleteHost := range wpToDelete {
 		toDeleteHost.IsWorker = false
-		if err := hosts.DeleteNode(ctx, toDeleteHost, kubeClient, toDeleteHost.IsControl); err != nil {
+		if err := hosts.DeleteNode(ctx, toDeleteHost, kubeClient, toDeleteHost.IsControl, kubeCluster.CloudProvider.Name); err != nil {
 			return fmt.Errorf("Failed to delete worker node %s from cluster", toDeleteHost.Address)
 		}
 		// attempting to clean services/files on the host
@@ -96,7 +96,7 @@ func reconcileControl(ctx context.Context, currentCluster, kubeCluster *Cluster,
 		if err != nil {
 			return fmt.Errorf("Failed to initialize new kubernetes client: %v", err)
 		}
-		if err := hosts.DeleteNode(ctx, toDeleteHost, kubeClient, toDeleteHost.IsWorker); err != nil {
+		if err := hosts.DeleteNode(ctx, toDeleteHost, kubeClient, toDeleteHost.IsWorker, kubeCluster.CloudProvider.Name); err != nil {
 			return fmt.Errorf("Failed to delete controlplane node %s from cluster", toDeleteHost.Address)
 		}
 		// attempting to clean services/files on the host
@@ -161,7 +161,7 @@ func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, ku
 			log.Warnf(ctx, "[reconcile] %v", err)
 			continue
 		}
-		if err := hosts.DeleteNode(ctx, etcdHost, kubeClient, etcdHost.IsControl); err != nil {
+		if err := hosts.DeleteNode(ctx, etcdHost, kubeClient, etcdHost.IsControl, kubeCluster.CloudProvider.Name); err != nil {
 			log.Warnf(ctx, "Failed to delete etcd node %s from cluster", etcdHost.Address)
 			continue
 		}
diff --git a/hosts/hosts.go b/hosts/hosts.go
index 0b168da0..a5c1c466 100644
--- a/hosts/hosts.go
+++ b/hosts/hosts.go
@@ -126,7 +126,7 @@ func (h *Host) CleanUp(ctx context.Context, toCleanPaths []string, cleanerImage
 	return nil
 }
 
-func DeleteNode(ctx context.Context, toDeleteHost *Host, kubeClient *kubernetes.Clientset, hasAnotherRole bool) error {
+func DeleteNode(ctx context.Context, toDeleteHost *Host, kubeClient *kubernetes.Clientset, hasAnotherRole bool, cloudProvider string) error {
 	if hasAnotherRole {
 		log.Infof(ctx, "[hosts] host [%s] has another role, skipping delete from kubernetes cluster", toDeleteHost.Address)
 		return nil
@@ -144,7 +144,7 @@ func DeleteNode(ctx context.Context, toDeleteHost *Host, kubeClient *kubernetes.
 		return err
 	}
 	log.Infof(ctx, "[hosts] Deleting host [%s] from the cluster", toDeleteHost.Address)
-	if err := k8s.DeleteNode(kubeClient, toDeleteHost.HostnameOverride); err != nil {
+	if err := k8s.DeleteNode(kubeClient, toDeleteHost.HostnameOverride, cloudProvider); err != nil {
 		return err
 	}
 	log.Infof(ctx, "[hosts] Successfully deleted host [%s] from the cluster", toDeleteHost.Address)
diff --git a/k8s/node.go b/k8s/node.go
index b0057be2..5e47d876 100644
--- a/k8s/node.go
+++ b/k8s/node.go
@@ -19,9 +19,18 @@
 	HostnameLabel             = "kubernetes.io/hostname"
 	InternalAddressAnnotation = "rke.io/internal-ip"
 	ExternalAddressAnnotation = "rke.io/external-ip"
+	AWSCloudProvider          = "aws"
 )
 
-func DeleteNode(k8sClient *kubernetes.Clientset, nodeName string) error {
+func DeleteNode(k8sClient *kubernetes.Clientset, nodeName, cloudProvider string) error {
+
+	if cloudProvider == AWSCloudProvider {
+		node, err := GetNode(k8sClient, nodeName)
+		if err != nil {
+			return err
+		}
+		nodeName = node.Name
+	}
 	return k8sClient.CoreV1().Nodes().Delete(nodeName, &metav1.DeleteOptions{})
 }
 
@@ -45,7 +54,7 @@ func GetNode(k8sClient *kubernetes.Clientset, nodeName string) (*v1.Node, error)
 func CordonUncordon(k8sClient *kubernetes.Clientset, nodeName string, cordoned bool) error {
 	updated := false
 	for retries := 0; retries <= 5; retries++ {
-		node, err := k8sClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+		node, err := GetNode(k8sClient, nodeName)
 		if err != nil {
 			logrus.Debugf("Error getting node %s: %v", nodeName, err)
 			time.Sleep(time.Second * 5)