mirror of https://github.com/rancher/rke.git (synced 2025-09-02 07:24:20 +00:00)
Add get node for delete and cordon
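This change threads the cluster's cloud provider name from the reconcile paths through hosts.DeleteNode into k8s.DeleteNode, and resolves the node through GetNode before deleting or cordoning it. The likely motivation: with the AWS cloud provider the Kubernetes node name is normally the EC2 private DNS name, which can differ from the hostname override RKE uses, so the registered node object has to be looked up before it can be deleted or cordoned.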
@@ -370,6 +370,10 @@ func (c *Cluster) parseCloudConfig(ctx context.Context) (string, error) {
 		}
 		return string(jsonString), nil
 	}
+	if c.CloudProvider.AWSCloudProvider != nil {
+		c.CloudProvider.Name = AWSCloudProvider
+		return "", nil
+	}
 	if len(c.CloudProvider.CloudConfig) == 0 {
 		return "", nil
 	}
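For context, the added branch only records the provider name for AWS and returns an empty cloud config, whereas the preceding provider branch (visible only in the hunk's trailing context) returns a marshalled JSON config. A minimal, self-contained sketch of that selection pattern, using hypothetical stand-in types rather than RKE's real ones:

package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical stand-ins for RKE's cloud provider structs; illustrative only.
type awsCloudProvider struct{}
type azureCloudProvider struct {
	TenantID string `json:"tenantId"`
}

type cloudProvider struct {
	Name  string
	AWS   *awsCloudProvider
	Azure *azureCloudProvider
}

// parseCloudConfig mirrors the shape of the patched logic: one provider
// produces a JSON config file, AWS only sets the name and emits no config.
func parseCloudConfig(p *cloudProvider) (string, error) {
	if p.Azure != nil {
		p.Name = "azure"
		b, err := json.MarshalIndent(p.Azure, "", "  ")
		if err != nil {
			return "", err
		}
		return string(b), nil
	}
	if p.AWS != nil {
		p.Name = "aws"
		return "", nil
	}
	return "", nil
}

func main() {
	p := &cloudProvider{AWS: &awsCloudProvider{}}
	cfg, _ := parseCloudConfig(p)
	fmt.Printf("provider=%q config=%q\n", p.Name, cfg)
}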
@@ -56,7 +56,7 @@ func reconcileWorker(ctx context.Context, currentCluster, kubeCluster *Cluster,
 	wpToDelete := hosts.GetToDeleteHosts(currentCluster.WorkerHosts, kubeCluster.WorkerHosts, kubeCluster.InactiveHosts)
 	for _, toDeleteHost := range wpToDelete {
 		toDeleteHost.IsWorker = false
-		if err := hosts.DeleteNode(ctx, toDeleteHost, kubeClient, toDeleteHost.IsControl); err != nil {
+		if err := hosts.DeleteNode(ctx, toDeleteHost, kubeClient, toDeleteHost.IsControl, kubeCluster.CloudProvider.Name); err != nil {
 			return fmt.Errorf("Failed to delete worker node %s from cluster", toDeleteHost.Address)
 		}
 		// attempting to clean services/files on the host
@@ -96,7 +96,7 @@ func reconcileControl(ctx context.Context, currentCluster, kubeCluster *Cluster,
 		if err != nil {
 			return fmt.Errorf("Failed to initialize new kubernetes client: %v", err)
 		}
-		if err := hosts.DeleteNode(ctx, toDeleteHost, kubeClient, toDeleteHost.IsWorker); err != nil {
+		if err := hosts.DeleteNode(ctx, toDeleteHost, kubeClient, toDeleteHost.IsWorker, kubeCluster.CloudProvider.Name); err != nil {
 			return fmt.Errorf("Failed to delete controlplane node %s from cluster", toDeleteHost.Address)
 		}
 		// attempting to clean services/files on the host
@@ -161,7 +161,7 @@ func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, ku
 			log.Warnf(ctx, "[reconcile] %v", err)
 			continue
 		}
-		if err := hosts.DeleteNode(ctx, etcdHost, kubeClient, etcdHost.IsControl); err != nil {
+		if err := hosts.DeleteNode(ctx, etcdHost, kubeClient, etcdHost.IsControl, kubeCluster.CloudProvider.Name); err != nil {
 			log.Warnf(ctx, "Failed to delete etcd node %s from cluster", etcdHost.Address)
 			continue
 		}
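All three reconcile paths (worker, control plane, etcd) now pass kubeCluster.CloudProvider.Name as the new final argument to hosts.DeleteNode. Judging from the k8s.DeleteNode change further down, only the value "aws" triggers the extra node lookup; for clusters without a cloud provider the name is an empty string, so the delete path behaves exactly as before.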
@@ -126,7 +126,7 @@ func (h *Host) CleanUp(ctx context.Context, toCleanPaths []string, cleanerImage
 	return nil
 }
 
-func DeleteNode(ctx context.Context, toDeleteHost *Host, kubeClient *kubernetes.Clientset, hasAnotherRole bool) error {
+func DeleteNode(ctx context.Context, toDeleteHost *Host, kubeClient *kubernetes.Clientset, hasAnotherRole bool, cloudProvider string) error {
 	if hasAnotherRole {
 		log.Infof(ctx, "[hosts] host [%s] has another role, skipping delete from kubernetes cluster", toDeleteHost.Address)
 		return nil
@@ -144,7 +144,7 @@ func DeleteNode(ctx context.Context, toDeleteHost *Host, kubeClient *kubernetes.
 		return err
 	}
 	log.Infof(ctx, "[hosts] Deleting host [%s] from the cluster", toDeleteHost.Address)
-	if err := k8s.DeleteNode(kubeClient, toDeleteHost.HostnameOverride); err != nil {
+	if err := k8s.DeleteNode(kubeClient, toDeleteHost.HostnameOverride, cloudProvider); err != nil {
 		return err
 	}
 	log.Infof(ctx, "[hosts] Successfully deleted host [%s] from the cluster", toDeleteHost.Address)
k8s/node.go (13 changed lines)
@@ -19,9 +19,18 @@ const (
 	HostnameLabel             = "kubernetes.io/hostname"
 	InternalAddressAnnotation = "rke.io/internal-ip"
 	ExternalAddressAnnotation = "rke.io/external-ip"
+	AWSCloudProvider          = "aws"
 )
 
-func DeleteNode(k8sClient *kubernetes.Clientset, nodeName string) error {
+func DeleteNode(k8sClient *kubernetes.Clientset, nodeName, cloudProvider string) error {
+	if cloudProvider == AWSCloudProvider {
+		node, err := GetNode(k8sClient, nodeName)
+		if err != nil {
+			return err
+		}
+		nodeName = node.Name
+	}
 	return k8sClient.CoreV1().Nodes().Delete(nodeName, &metav1.DeleteOptions{})
 }
 
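The new branch relies on the existing GetNode helper to map the name RKE knows (the hostname override) to the node object the AWS cloud provider actually registered. The HostnameLabel constant declared at the top of this file suggests the lookup matches on the kubernetes.io/hostname label, but that is an assumption here: the sketch below is a rough illustration of such a helper, not the actual rke implementation, and its import paths assume the pre-context client-go API used elsewhere in this file.

package k8s

import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// getNodeByHostnameLabel is a hypothetical helper: it resolves a hostname
// override to the registered node by comparing the kubernetes.io/hostname label.
func getNodeByHostnameLabel(k8sClient *kubernetes.Clientset, hostname string) (*v1.Node, error) {
	nodes, err := k8sClient.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	for i := range nodes.Items {
		node := &nodes.Items[i]
		if strings.EqualFold(node.Labels["kubernetes.io/hostname"], hostname) {
			return node, nil
		}
	}
	return nil, fmt.Errorf("node %s not found", hostname)
}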
@@ -45,7 +54,7 @@ func GetNode(k8sClient *kubernetes.Clientset, nodeName string) (*v1.Node, error)
 func CordonUncordon(k8sClient *kubernetes.Clientset, nodeName string, cordoned bool) error {
 	updated := false
 	for retries := 0; retries <= 5; retries++ {
-		node, err := k8sClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
+		node, err := GetNode(k8sClient, nodeName)
 		if err != nil {
 			logrus.Debugf("Error getting node %s: %v", nodeName, err)
 			time.Sleep(time.Second * 5)
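The hunk cuts off inside the retry loop; per the change above, CordonUncordon now fetches the node through GetNode instead of a direct Get by name, so cordoning also works when the registered node name differs from the hostname override. A cordon/uncordon loop of this shape typically goes on to toggle node.Spec.Unschedulable and update the node, retrying on errors. The sketch below is that generic pattern under the same old, non-context client-go API, not the actual rke code.

package k8s

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// cordonUncordon is an illustrative sketch of a cordon/uncordon retry loop.
func cordonUncordon(k8sClient *kubernetes.Clientset, nodeName string, cordoned bool) error {
	for retries := 0; retries <= 5; retries++ {
		node, err := k8sClient.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
		if err != nil {
			time.Sleep(5 * time.Second)
			continue
		}
		if node.Spec.Unschedulable == cordoned {
			return nil // already in the desired state
		}
		node.Spec.Unschedulable = cordoned
		if _, err := k8sClient.CoreV1().Nodes().Update(node); err != nil {
			// most likely a conflict with a concurrent update; re-read and retry
			time.Sleep(5 * time.Second)
			continue
		}
		return nil
	}
	return fmt.Errorf("failed to set unschedulable=%t on node %s", cordoned, nodeName)
}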