Mirror of https://github.com/rancher/rke.git (synced 2025-08-01 15:19:09 +00:00)
Remove ignore-upgrade label from zero downtime upgrade
commit 6b25bcf3e0
parent 259bafc27d
@@ -70,7 +70,6 @@ type Cluster struct {
 	NewHosts map[string]bool
 	MaxUnavailableForWorkerNodes int
 	MaxUnavailableForControlNodes int
-	HostsLabeledToIgnoreUpgrade map[string]bool
 }

 type encryptionConfig struct {
@@ -170,7 +169,7 @@ func (c *Cluster) UpgradeControlPlane(ctx context.Context, kubeClient *kubernete

 	for _, host := range c.InactiveHosts {
 		// include only hosts with controlplane role
-		if host.IsControl && !c.HostsLabeledToIgnoreUpgrade[host.Address] {
+		if host.IsControl {
 			inactiveHosts[host.HostnameOverride] = true
 		}
 	}
@@ -179,9 +178,7 @@ func (c *Cluster) UpgradeControlPlane(ctx context.Context, kubeClient *kubernete
 		return "", err
 	}
 	for _, host := range c.ControlPlaneHosts {
-		if !c.HostsLabeledToIgnoreUpgrade[host.Address] {
-			controlPlaneHosts = append(controlPlaneHosts, host)
-		}
+		controlPlaneHosts = append(controlPlaneHosts, host)
 		if c.NewHosts[host.HostnameOverride] {
 			continue
 		}
@@ -237,7 +234,7 @@ func (c *Cluster) DeployWorkerPlane(ctx context.Context, svcOptionData map[strin
 			return "", err
 		}
 		workerNodePlanMap[host.Address] = BuildRKEConfigNodePlan(ctx, c, host, host.DockerInfo, svcOptions)
-		if host.IsControl || c.HostsLabeledToIgnoreUpgrade[host.Address] {
+		if host.IsControl {
 			continue
 		}
 		if !host.IsEtcd {
@@ -274,7 +271,7 @@ func (c *Cluster) UpgradeWorkerPlane(ctx context.Context, kubeClient *kubernetes

 	for _, host := range c.InactiveHosts {
 		// if host has controlplane role, it already has worker components upgraded
-		if !host.IsControl && !c.HostsLabeledToIgnoreUpgrade[host.Address] {
+		if !host.IsControl {
 			inactiveHosts[host.HostnameOverride] = true
 		}
 	}
@@ -681,7 +678,6 @@ func InitClusterObject(ctx context.Context, rkeConfig *v3.RancherKubernetesEngin
 		EncryptionConfig: encryptionConfig{
 			EncryptionProviderFile: encryptConfig,
 		},
-		HostsLabeledToIgnoreUpgrade: make(map[string]bool),
 	}
 	if metadata.K8sVersionToRKESystemImages == nil {
 		if err := metadata.InitMetadata(ctx); err != nil {
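Note: the make call removed above existed because the deleted FindHostsLabeledToIgnoreUpgrade function (removed later in this commit) wrote into the map. In Go, reading from a nil map or a missing key safely yields the zero value, but writing to a nil map panics, which is why the field had to be initialized up front. A two-minute illustration of that semantics, unrelated to the RKE API itself:

package main

import "fmt"

func main() {
	var m map[string]bool      // nil map: reads are safe and yield the zero value
	fmt.Println(m["10.0.0.1"]) // false
	// m["10.0.0.1"] = true would panic: assignment to entry in nil map
	m = make(map[string]bool) // the removed line did this for HostsLabeledToIgnoreUpgrade
	m["10.0.0.1"] = true
	fmt.Println(m["10.0.0.1"]) // true
}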
@@ -4,11 +4,9 @@ import (
 	"context"
 	"fmt"
 	"strings"
-	"time"

 	"github.com/docker/docker/api/types"
 	"github.com/rancher/rke/hosts"
-	"github.com/rancher/rke/k8s"
 	"github.com/rancher/rke/log"
 	"github.com/rancher/rke/pki"
 	"github.com/rancher/rke/services"
@@ -16,8 +14,6 @@ import (
 	v3 "github.com/rancher/types/apis/management.cattle.io/v3"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/errgroup"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apiserver/pkg/apis/apiserver/v1alpha1"
 	"sigs.k8s.io/yaml"
 )
@@ -67,37 +63,6 @@ func (c *Cluster) TunnelHosts(ctx context.Context, flags ExternalFlags) error {
 	return ValidateHostCount(c)
 }

-func (c *Cluster) FindHostsLabeledToIgnoreUpgrade(ctx context.Context) {
-	kubeClient, err := k8s.NewClient(c.LocalKubeConfigPath, c.K8sWrapTransport)
-	if err != nil {
-		logrus.Errorf("Error generating kube client in FindHostsLabeledToIgnoreUpgrade: %v", err)
-		return
-	}
-	var nodes *v1.NodeList
-	for retries := 0; retries < k8s.MaxRetries; retries++ {
-		nodes, err = kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
-		if err == nil {
-			break
-		}
-		time.Sleep(time.Second * k8s.RetryInterval)
-	}
-	if err != nil {
-		log.Infof(ctx, "Error listing nodes but continuing upgrade: %v", err)
-		return
-	}
-	if nodes == nil {
-		return
-	}
-	for _, node := range nodes.Items {
-		if val, ok := node.Labels[k8s.IgnoreHostDuringUpgradeLabel]; ok && val == k8s.IgnoreLabelValue {
-			host := hosts.Host{RKEConfigNode: v3.RKEConfigNode{Address: node.Annotations[k8s.ExternalAddressAnnotation]}}
-			logrus.Infof("Host %v is labeled to ignore upgrade", host.Address)
-			c.HostsLabeledToIgnoreUpgrade[host.Address] = true
-		}
-	}
-	return
-}
-
 func (c *Cluster) InvertIndexHosts() error {
 	c.EtcdHosts = make([]*hosts.Host, 0)
 	c.WorkerHosts = make([]*hosts.Host, 0)
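Note: the deleted FindHostsLabeledToIgnoreUpgrade retried the node listing up to k8s.MaxRetries times, sleeping k8s.RetryInterval seconds between attempts, and deliberately continued the upgrade if listing still failed. A generic sketch of that retry shape, with hypothetical names and a non-RKE stand-in for the listing call:

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryList is a hypothetical helper showing the loop shape the removed
// function used: up to maxRetries attempts with a fixed sleep in between,
// returning the last error so the caller can log it and continue anyway.
func retryList(maxRetries int, interval time.Duration, fn func() error) error {
	var err error
	for i := 0; i < maxRetries; i++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(interval)
	}
	return err
}

func main() {
	err := retryList(3, 10*time.Millisecond, func() error { return errors.New("node list failed") })
	fmt.Println(err) // the removed code logged this as "Error listing nodes but continuing upgrade"
}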
@@ -158,34 +123,22 @@ func (c *Cluster) CalculateMaxUnavailable() (int, int, error) {
 	var workerHosts, controlHosts, maxUnavailableWorker, maxUnavailableControl int

 	for _, host := range c.InactiveHosts {
-		if host.IsControl && !c.HostsLabeledToIgnoreUpgrade[host.Address] {
+		if host.IsControl {
 			inactiveControlPlaneHosts = append(inactiveControlPlaneHosts, host.HostnameOverride)
 		}
-		if !host.IsWorker && !c.HostsLabeledToIgnoreUpgrade[host.Address] {
+		if !host.IsWorker {
 			inactiveWorkerHosts = append(inactiveWorkerHosts, host.HostnameOverride)
 		}
 		// not breaking out of the loop so we can log all of the inactive hosts
 	}

-	for _, host := range c.WorkerHosts {
-		if c.HostsLabeledToIgnoreUpgrade[host.Address] {
-			continue
-		}
-		workerHosts++
-	}
-	// maxUnavailable should be calculated against all hosts provided in cluster.yml except the ones labelled to be ignored for upgrade
-	workerHosts += len(inactiveWorkerHosts)
+	// maxUnavailable should be calculated against all hosts provided in cluster.yml
+	workerHosts = len(c.WorkerHosts) + len(inactiveWorkerHosts)
 	maxUnavailableWorker, err := services.CalculateMaxUnavailable(c.UpgradeStrategy.MaxUnavailableWorker, workerHosts, services.WorkerRole)
 	if err != nil {
 		return maxUnavailableWorker, maxUnavailableControl, err
 	}
-	for _, host := range c.ControlPlaneHosts {
-		if c.HostsLabeledToIgnoreUpgrade[host.Address] {
-			continue
-		}
-		controlHosts++
-	}
-	controlHosts += len(inactiveControlPlaneHosts)
+	controlHosts = len(c.ControlPlaneHosts) + len(inactiveControlPlaneHosts)
 	maxUnavailableControl, err = services.CalculateMaxUnavailable(c.UpgradeStrategy.MaxUnavailableControlplane, controlHosts, services.ControlRole)
 	if err != nil {
 		return maxUnavailableWorker, maxUnavailableControl, err
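Note: with this change the max-unavailable figures are resolved against every host listed in cluster.yml (active plus inactive), with no carve-out for labeled hosts. A minimal sketch of how such a setting typically resolves, assuming it may be an absolute count or a percentage string; the helper below is illustrative and is not the services.CalculateMaxUnavailable API:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// resolveMaxUnavailable is a hypothetical stand-in: it turns a count ("2")
// or a percentage ("10%") into a concrete number of hosts, never below 1.
func resolveMaxUnavailable(setting string, totalHosts int) (int, error) {
	if strings.HasSuffix(setting, "%") {
		pct, err := strconv.Atoi(strings.TrimSuffix(setting, "%"))
		if err != nil {
			return 0, err
		}
		n := totalHosts * pct / 100 // round down
		if n < 1 {
			n = 1 // always allow at least one host to be unavailable
		}
		return n, nil
	}
	return strconv.Atoi(setting)
}

func main() {
	// After this commit the total includes inactive hosts as well:
	// e.g. 8 reachable workers + 2 unreachable ones.
	total := 8 + 2
	n, _ := resolveMaxUnavailable("10%", total)
	fmt.Println(n) // 1
}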
@@ -163,7 +163,6 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
 	kubeCluster.NewHosts = newNodes
 	reconcileCluster = true

-	kubeCluster.FindHostsLabeledToIgnoreUpgrade(ctx)
 	maxUnavailableWorker, maxUnavailableControl, err := kubeCluster.CalculateMaxUnavailable()
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, nil, err
@@ -247,9 +246,6 @@ func checkAllIncluded(cluster *cluster.Cluster) error {

 	var names []string
 	for _, host := range cluster.InactiveHosts {
-		if cluster.HostsLabeledToIgnoreUpgrade[host.Address] {
-			continue
-		}
 		names = append(names, host.Address)
 	}

k8s/node.go (14 changed lines)
@@ -14,14 +14,12 @@ import (
 )

 const (
-	HostnameLabel                = "kubernetes.io/hostname"
-	InternalAddressAnnotation    = "rke.cattle.io/internal-ip"
-	ExternalAddressAnnotation    = "rke.cattle.io/external-ip"
-	IgnoreHostDuringUpgradeLabel = "user.cattle.io/upgrade-policy"
-	IgnoreLabelValue             = "prevent"
-	AWSCloudProvider             = "aws"
-	MaxRetries                   = 5
-	RetryInterval                = 5
+	HostnameLabel             = "kubernetes.io/hostname"
+	InternalAddressAnnotation = "rke.cattle.io/internal-ip"
+	ExternalAddressAnnotation = "rke.cattle.io/external-ip"
+	AWSCloudProvider          = "aws"
+	MaxRetries                = 5
+	RetryInterval             = 5
 )

 func DeleteNode(k8sClient *kubernetes.Clientset, nodeName, cloudProvider string) error {
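Note: the two constants deleted from this block defined the opt-out label. A minimal sketch of the predicate they backed, mirroring the checks removed elsewhere in this commit (this helper is illustrative and not part of the remaining code):

package main

import "fmt"

// labeledToIgnoreUpgrade mirrors the check removed by this commit: a node
// opted out of the zero-downtime upgrade by carrying the label
// user.cattle.io/upgrade-policy=prevent.
func labeledToIgnoreUpgrade(labels map[string]string) bool {
	val, ok := labels["user.cattle.io/upgrade-policy"] // was k8s.IgnoreHostDuringUpgradeLabel
	return ok && val == "prevent"                      // was k8s.IgnoreLabelValue
}

func main() {
	fmt.Println(labeledToIgnoreUpgrade(map[string]string{"user.cattle.io/upgrade-policy": "prevent"})) // true
}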
@@ -80,9 +80,6 @@ func getNodeListForUpgrade(kubeClient *kubernetes.Clientset, hostsFailed *sync.M
 		if inactiveHosts[node.Labels[k8s.HostnameLabel]] {
 			continue
 		}
-		if val, ok := node.Labels[k8s.IgnoreHostDuringUpgradeLabel]; ok && val == k8s.IgnoreLabelValue {
-			continue
-		}
 		nodeList = append(nodeList, node)
 	}
 	return nodeList, nil