mirror of https://github.com/rancher/rke.git
add logic for external aws cloud provider
@@ -52,14 +52,17 @@ func RunWorkerPlane(ctx context.Context, allHosts []*hosts.Host, localConnDialer
 	return nil
 }
 
-func UpgradeWorkerPlaneForWorkerAndEtcdNodes(ctx context.Context, kubeClient *kubernetes.Clientset, mixedRolesHosts []*hosts.Host, workerOnlyHosts []*hosts.Host, inactiveHosts map[string]bool, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, workerNodePlanMap map[string]v3.RKEConfigNodePlan, certMap map[string]pki.CertificatePKI, updateWorkersOnly bool, alpineImage string, upgradeStrategy *v3.NodeUpgradeStrategy, newHosts map[string]bool, maxUnavailable int, k8sVersion string) (string, error) {
+func UpgradeWorkerPlaneForWorkerAndEtcdNodes(ctx context.Context, kubeClient *kubernetes.Clientset, mixedRolesHosts []*hosts.Host, workerOnlyHosts []*hosts.Host, inactiveHosts map[string]bool, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, workerNodePlanMap map[string]v3.RKEConfigNodePlan, certMap map[string]pki.CertificatePKI,
+	updateWorkersOnly bool, alpineImage string, upgradeStrategy *v3.NodeUpgradeStrategy,
+	newHosts map[string]bool, maxUnavailable int, k8sVersion, cloudProviderName string) (string, error) {
 	log.Infof(ctx, "[%s] Upgrading Worker Plane..", WorkerRole)
 	var errMsgMaxUnavailableNotFailed string
-	updateNewHostsList(kubeClient, append(mixedRolesHosts, workerOnlyHosts...), newHosts)
+	updateNewHostsList(kubeClient, append(mixedRolesHosts, workerOnlyHosts...), newHosts, cloudProviderName)
 	if len(mixedRolesHosts) > 0 {
 		log.Infof(ctx, "First checking and processing worker components for upgrades on nodes with etcd role one at a time")
 	}
-	multipleRolesHostsFailedToUpgrade, err := processWorkerPlaneForUpgrade(ctx, kubeClient, mixedRolesHosts, localConnDialerFactory, prsMap, workerNodePlanMap, certMap, updateWorkersOnly, alpineImage, 1, upgradeStrategy, newHosts, inactiveHosts, k8sVersion)
+	multipleRolesHostsFailedToUpgrade, err := processWorkerPlaneForUpgrade(ctx, kubeClient, mixedRolesHosts, localConnDialerFactory, prsMap, workerNodePlanMap, certMap, updateWorkersOnly, alpineImage,
+		1, upgradeStrategy, newHosts, inactiveHosts, k8sVersion, cloudProviderName)
 	if err != nil {
 		logrus.Errorf("Failed to upgrade hosts: %v with error %v", strings.Join(multipleRolesHostsFailedToUpgrade, ","), err)
 		return errMsgMaxUnavailableNotFailed, err
@@ -68,7 +71,8 @@ func UpgradeWorkerPlaneForWorkerAndEtcdNodes(ctx context.Context, kubeClient *ku
 	if len(workerOnlyHosts) > 0 {
 		log.Infof(ctx, "Now checking and upgrading worker components on nodes with only worker role %v at a time", maxUnavailable)
 	}
-	workerOnlyHostsFailedToUpgrade, err := processWorkerPlaneForUpgrade(ctx, kubeClient, workerOnlyHosts, localConnDialerFactory, prsMap, workerNodePlanMap, certMap, updateWorkersOnly, alpineImage, maxUnavailable, upgradeStrategy, newHosts, inactiveHosts, k8sVersion)
+	workerOnlyHostsFailedToUpgrade, err := processWorkerPlaneForUpgrade(ctx, kubeClient, workerOnlyHosts, localConnDialerFactory, prsMap, workerNodePlanMap, certMap, updateWorkersOnly, alpineImage,
+		maxUnavailable, upgradeStrategy, newHosts, inactiveHosts, k8sVersion, cloudProviderName)
 	if err != nil {
 		logrus.Errorf("Failed to upgrade hosts: %v with error %v", strings.Join(workerOnlyHostsFailedToUpgrade, ","), err)
 		if len(workerOnlyHostsFailedToUpgrade) >= maxUnavailable {
@@ -81,9 +85,9 @@ func UpgradeWorkerPlaneForWorkerAndEtcdNodes(ctx context.Context, kubeClient *ku
 	return errMsgMaxUnavailableNotFailed, nil
 }
 
-func updateNewHostsList(kubeClient *kubernetes.Clientset, allHosts []*hosts.Host, newHosts map[string]bool) {
+func updateNewHostsList(kubeClient *kubernetes.Clientset, allHosts []*hosts.Host, newHosts map[string]bool, cloudProviderName string) {
 	for _, h := range allHosts {
-		_, err := k8s.GetNode(kubeClient, h.HostnameOverride)
+		_, err := k8s.GetNode(kubeClient, h.HostnameOverride, cloudProviderName)
 		if err != nil && apierrors.IsNotFound(err) {
 			// this host could have been added to cluster state upon successful controlplane upgrade but isn't a node yet.
 			newHosts[h.HostnameOverride] = true
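
The `k8s.GetNode` signature change above is the heart of the commit: with the external AWS cloud provider, a node may register under its EC2 private DNS name rather than RKE's hostname override, so a lookup by one name alone can miss it. A minimal sketch of what a provider-aware lookup could look like (the `external-aws` constant, the hostname-label fallback, and the helper name are assumptions for illustration, not necessarily RKE's actual implementation):

package k8s

import (
	"context"
	"strings"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// externalAWSProvider is an assumed name for the external AWS provider.
const externalAWSProvider = "external-aws"

// getNodeSketch looks a node up by name, falling back to the
// kubernetes.io/hostname label when the external AWS provider is in use,
// since the registered node name may then be the EC2 private DNS name.
func getNodeSketch(client kubernetes.Interface, nodeName, cloudProviderName string) (*v1.Node, error) {
	nodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	for i := range nodes.Items {
		node := &nodes.Items[i]
		// Exact name match covers the common (in-tree provider) case.
		if strings.EqualFold(node.Name, nodeName) {
			return node, nil
		}
		// With external-aws, fall back to the hostname label set by the kubelet.
		if cloudProviderName == externalAWSProvider &&
			strings.EqualFold(node.Labels["kubernetes.io/hostname"], nodeName) {
			return node, nil
		}
	}
	return nil, apierrors.NewNotFound(v1.Resource("nodes"), nodeName)
}

A NotFound error from this lookup is what lets the caller above treat the host as new, which is why `updateNewHostsList` needs the provider name at all.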
@@ -93,7 +97,7 @@ func updateNewHostsList(kubeClient *kubernetes.Clientset, allHosts []*hosts.Host
 
 func processWorkerPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.Clientset, allHosts []*hosts.Host, localConnDialerFactory hosts.DialerFactory,
 	prsMap map[string]v3.PrivateRegistry, workerNodePlanMap map[string]v3.RKEConfigNodePlan, certMap map[string]pki.CertificatePKI, updateWorkersOnly bool, alpineImage string,
-	maxUnavailable int, upgradeStrategy *v3.NodeUpgradeStrategy, newHosts, inactiveHosts map[string]bool, k8sVersion string) ([]string, error) {
+	maxUnavailable int, upgradeStrategy *v3.NodeUpgradeStrategy, newHosts, inactiveHosts map[string]bool, k8sVersion, cloudProviderName string) ([]string, error) {
 	var errgrp errgroup.Group
 	var drainHelper drain.Helper
 	var failedHosts []string
@@ -128,7 +132,7 @@ func processWorkerPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.Cl
 			}
 			continue
 		}
-		if err := CheckNodeReady(kubeClient, runHost, WorkerRole); err != nil {
+		if err := CheckNodeReady(kubeClient, runHost, WorkerRole, cloudProviderName); err != nil {
 			errList = append(errList, err)
 			hostsFailed.Store(runHost.HostnameOverride, true)
 			hostsFailedToUpgrade <- runHost.HostnameOverride
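
`CheckNodeReady` gains the same parameter for the same reason: the readiness poll has to find the node under whatever name it registered with. A sketch of such a check, reusing `getNodeSketch` from the previous sketch (the retry count and interval are illustrative assumptions; add `fmt` and `time` to the imports):

// checkNodeReadySketch polls until the node reports a Ready condition,
// using the provider-aware lookup so external-aws node names resolve.
func checkNodeReadySketch(client kubernetes.Interface, nodeName, cloudProviderName string) error {
	for retries := 0; retries < 5; retries++ {
		node, err := getNodeSketch(client, nodeName, cloudProviderName)
		if err == nil {
			for _, cond := range node.Status.Conditions {
				if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue {
					return nil
				}
			}
		}
		time.Sleep(5 * time.Second)
	}
	return fmt.Errorf("host %s not ready", nodeName)
}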
@@ -163,13 +167,14 @@ func processWorkerPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.Cl
 		}
 		if !upgradable {
 			logrus.Infof("[workerplane] Upgrade not required for worker components of host %v", runHost.HostnameOverride)
-			if err := k8s.CordonUncordon(kubeClient, runHost.HostnameOverride, false); err != nil {
+			if err := k8s.CordonUncordon(kubeClient, runHost.HostnameOverride, cloudProviderName, false); err != nil {
 				// This node didn't undergo an upgrade, so RKE will only log any error after uncordoning it and won't count this in maxUnavailable
 				logrus.Errorf("[workerplane] Failed to uncordon node %v, error: %v", runHost.HostnameOverride, err)
 			}
 			continue
 		}
-		if err := upgradeWorkerHost(ctx, kubeClient, runHost, upgradeStrategy.Drain != nil && *upgradeStrategy.Drain, drainHelper, localConnDialerFactory, prsMap, workerNodePlanMap, certMap, updateWorkersOnly, alpineImage, k8sVersion); err != nil {
+		if err := upgradeWorkerHost(ctx, kubeClient, runHost, upgradeStrategy.Drain != nil && *upgradeStrategy.Drain, drainHelper, localConnDialerFactory, prsMap, workerNodePlanMap, certMap,
+			updateWorkersOnly, alpineImage, k8sVersion, cloudProviderName); err != nil {
 			errList = append(errList, err)
 			hostsFailed.Store(runHost.HostnameOverride, true)
 			hostsFailedToUpgrade <- runHost.HostnameOverride
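
`k8s.CordonUncordon` is threaded through as well, because cordoning must first fetch the node object it is about to mark. A hedged sketch in the same vein, again reusing `getNodeSketch` (the helper name and shape are assumptions):

// cordonUncordonSketch marks a node (un)schedulable after resolving it
// with the provider-aware lookup.
func cordonUncordonSketch(client kubernetes.Interface, nodeName, cloudProviderName string, cordoned bool) error {
	node, err := getNodeSketch(client, nodeName, cloudProviderName)
	if err != nil {
		return err
	}
	node.Spec.Unschedulable = cordoned
	_, err = client.CoreV1().Nodes().Update(context.TODO(), node, metav1.UpdateOptions{})
	return err
}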
@@ -192,9 +197,9 @@ func processWorkerPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.Cl
 
 func upgradeWorkerHost(ctx context.Context, kubeClient *kubernetes.Clientset, runHost *hosts.Host, drainFlag bool, drainHelper drain.Helper,
 	localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, workerNodePlanMap map[string]v3.RKEConfigNodePlan, certMap map[string]pki.CertificatePKI, updateWorkersOnly bool,
-	alpineImage, k8sVersion string) error {
+	alpineImage, k8sVersion, cloudProviderName string) error {
 	// cordon and drain
-	if err := cordonAndDrainNode(kubeClient, runHost, drainFlag, drainHelper, WorkerRole); err != nil {
+	if err := cordonAndDrainNode(kubeClient, runHost, drainFlag, drainHelper, WorkerRole, cloudProviderName); err != nil {
 		return err
 	}
 	logrus.Debugf("[workerplane] upgrading host %v", runHost.HostnameOverride)
@@ -202,11 +207,11 @@ func upgradeWorkerHost(ctx context.Context, kubeClient *kubernetes.Clientset, ru
 		return err
 	}
 	// consider upgrade done when kubeclient lists node as ready
-	if err := CheckNodeReady(kubeClient, runHost, WorkerRole); err != nil {
+	if err := CheckNodeReady(kubeClient, runHost, WorkerRole, cloudProviderName); err != nil {
 		return err
 	}
 	// uncordon node
-	return k8s.CordonUncordon(kubeClient, runHost.HostnameOverride, false)
+	return k8s.CordonUncordon(kubeClient, runHost.HostnameOverride, cloudProviderName, false)
 }
 
 func doDeployWorkerPlaneHost(ctx context.Context, host *hosts.Host, localConnDialerFactory hosts.DialerFactory, prsMap map[string]v3.PrivateRegistry, processMap map[string]v3.Process, certMap map[string]pki.CertificatePKI, updateWorkersOnly bool, alpineImage, k8sVersion string) error {
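
On the calling side, the new `cloudProviderName` argument would presumably be derived from the cluster's configured cloud provider. A hypothetical call site (RKE's cluster config does carry a `CloudProvider.Name` field, but this exact wiring is an assumption):

// "aws" selects the in-tree provider; "external-aws" the external one.
cloudProviderName := rkeConfig.CloudProvider.Name

errMsg, err := UpgradeWorkerPlaneForWorkerAndEtcdNodes(ctx, kubeClient, mixedRolesHosts, workerOnlyHosts,
	inactiveHosts, dialerFactory, prsMap, workerNodePlanMap, certMap,
	updateWorkersOnly, alpineImage, upgradeStrategy,
	newHosts, maxUnavailable, k8sVersion, cloudProviderName)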