diff --git a/cluster/defaults.go b/cluster/defaults.go index 54bf5e5c..2bab5860 100644 --- a/cluster/defaults.go +++ b/cluster/defaults.go @@ -241,7 +241,7 @@ func (c *Cluster) setNodeUpgradeStrategy() { } setDefaultIfEmpty(&c.UpgradeStrategy.MaxUnavailableWorker, DefaultMaxUnavailableWorker) setDefaultIfEmpty(&c.UpgradeStrategy.MaxUnavailableControlplane, DefaultMaxUnavailableControlplane) - if !c.UpgradeStrategy.Drain { + if c.UpgradeStrategy.Drain != nil && !*c.UpgradeStrategy.Drain { return } if c.UpgradeStrategy.DrainInput == nil { diff --git a/services/controlplane.go b/services/controlplane.go index 4f61ea61..3123ba5f 100644 --- a/services/controlplane.go +++ b/services/controlplane.go @@ -68,7 +68,7 @@ func UpgradeControlPlaneNodes(ctx context.Context, kubeClient *kubernetes.Client log.Infof(ctx, "[%s] Adding controlplane nodes %v to the cluster", ControlRole, strings.Join(nodes, ",")) } } - if upgradeStrategy.Drain { + if upgradeStrategy.Drain != nil && *upgradeStrategy.Drain { drainHelper = getDrainHelper(kubeClient, *upgradeStrategy) log.Infof(ctx, "[%s] Parameters provided to drain command: %#v", ControlRole, fmt.Sprintf("Force: %v, IgnoreAllDaemonSets: %v, DeleteLocalData: %v, Timeout: %v, GracePeriodSeconds: %v", drainHelper.Force, drainHelper.IgnoreAllDaemonSets, drainHelper.DeleteLocalData, drainHelper.Timeout, drainHelper.GracePeriodSeconds)) } @@ -169,7 +169,9 @@ func processControlPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.C } continue } - if err := upgradeControlHost(ctx, kubeClient, runHost, upgradeStrategy.Drain, drainHelper, localConnDialerFactory, prsMap, cpNodePlanMap, updateWorkersOnly, alpineImage, certMap, controlPlaneUpgradable, workerPlaneUpgradable); err != nil { + + shouldDrain := upgradeStrategy.Drain != nil && *upgradeStrategy.Drain + if err := upgradeControlHost(ctx, kubeClient, runHost, shouldDrain, drainHelper, localConnDialerFactory, prsMap, cpNodePlanMap, updateWorkersOnly, alpineImage, certMap, 
controlPlaneUpgradable, workerPlaneUpgradable); err != nil { errList = append(errList, err) hostsFailedToUpgrade <- runHost.HostnameOverride hostsFailed.Store(runHost.HostnameOverride, true) diff --git a/services/workerplane.go b/services/workerplane.go index 8fb6db10..d89bfe82 100644 --- a/services/workerplane.go +++ b/services/workerplane.go @@ -101,7 +101,7 @@ func processWorkerPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.Cl var hostsFailed sync.Map hostsQueue := util.GetObjectQueue(allHosts) - if upgradeStrategy.Drain { + if upgradeStrategy.Drain != nil && *upgradeStrategy.Drain { drainHelper = getDrainHelper(kubeClient, *upgradeStrategy) log.Infof(ctx, "[%s] Parameters provided to drain command: %#v", WorkerRole, fmt.Sprintf("Force: %v, IgnoreAllDaemonSets: %v, DeleteLocalData: %v, Timeout: %v, GracePeriodSeconds: %v", drainHelper.Force, drainHelper.IgnoreAllDaemonSets, drainHelper.DeleteLocalData, drainHelper.Timeout, drainHelper.GracePeriodSeconds)) @@ -169,7 +169,7 @@ func processWorkerPlaneForUpgrade(ctx context.Context, kubeClient *kubernetes.Cl } continue } - if err := upgradeWorkerHost(ctx, kubeClient, runHost, upgradeStrategy.Drain, drainHelper, localConnDialerFactory, prsMap, workerNodePlanMap, certMap, updateWorkersOnly, alpineImage); err != nil { + if err := upgradeWorkerHost(ctx, kubeClient, runHost, upgradeStrategy.Drain != nil && *upgradeStrategy.Drain, drainHelper, localConnDialerFactory, prsMap, workerNodePlanMap, certMap, updateWorkersOnly, alpineImage); err != nil { errList = append(errList, err) hostsFailed.Store(runHost.HostnameOverride, true) hostsFailedToUpgrade <- runHost.HostnameOverride diff --git a/types/rke_types.go b/types/rke_types.go index 69cd05bf..9b718b91 100644 --- a/types/rke_types.go +++ b/types/rke_types.go @@ -72,7 +72,7 @@ type NodeUpgradeStrategy struct { MaxUnavailableWorker string `yaml:"max_unavailable_worker" json:"maxUnavailableWorker,omitempty" norman:"min=1,default=10%"` // 
MaxUnavailableControlplane input can be a number of nodes or a percentage of nodes MaxUnavailableControlplane string `yaml:"max_unavailable_controlplane" json:"maxUnavailableControlplane,omitempty" norman:"min=1,default=1"` - Drain bool `yaml:"drain" json:"drain,omitempty"` + Drain *bool `yaml:"drain" json:"drain,omitempty"` DrainInput *NodeDrainInput `yaml:"node_drain_input" json:"nodeDrainInput,omitempty"` }