Mirror of https://github.com/rancher/rke.git, synced 2025-08-08 02:14:30 +00:00

Commit 6b25bcf3e0 (parent 259bafc27d): Remove ignore-upgrade label from zero downtime upgrade
@@ -70,7 +70,6 @@ type Cluster struct {
 	NewHosts                      map[string]bool
 	MaxUnavailableForWorkerNodes  int
 	MaxUnavailableForControlNodes int
-	HostsLabeledToIgnoreUpgrade   map[string]bool
 }

 type encryptionConfig struct {
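Aside (not part of the diff): before this change, hosts whose Kubernetes node carried the ignore-upgrade label were collected into this map, keyed by host address, and the upgrade loops below consulted it to skip those hosts. A minimal runnable sketch of that skip pattern, with hypothetical addresses standing in for real hosts:

package main

import "fmt"

func main() {
	// Hypothetical stand-in for c.HostsLabeledToIgnoreUpgrade.
	hostsLabeledToIgnoreUpgrade := map[string]bool{"10.0.0.5": true}
	for _, addr := range []string{"10.0.0.4", "10.0.0.5"} {
		if hostsLabeledToIgnoreUpgrade[addr] {
			fmt.Printf("skipping %s: labeled to ignore upgrade\n", addr)
			continue
		}
		fmt.Printf("upgrading %s\n", addr)
	}
}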
@@ -170,7 +169,7 @@ func (c *Cluster) UpgradeControlPlane(ctx context.Context, kubeClient *kubernete

 	for _, host := range c.InactiveHosts {
 		// include only hosts with controlplane role
-		if host.IsControl && !c.HostsLabeledToIgnoreUpgrade[host.Address] {
+		if host.IsControl {
 			inactiveHosts[host.HostnameOverride] = true
 		}
 	}
@@ -179,9 +178,7 @@ func (c *Cluster) UpgradeControlPlane(ctx context.Context, kubeClient *kubernete
 		return "", err
 	}
 	for _, host := range c.ControlPlaneHosts {
-		if !c.HostsLabeledToIgnoreUpgrade[host.Address] {
-			controlPlaneHosts = append(controlPlaneHosts, host)
-		}
+		controlPlaneHosts = append(controlPlaneHosts, host)
 		if c.NewHosts[host.HostnameOverride] {
 			continue
 		}
@@ -237,7 +234,7 @@ func (c *Cluster) DeployWorkerPlane(ctx context.Context, svcOptionData map[strin
 			return "", err
 		}
 		workerNodePlanMap[host.Address] = BuildRKEConfigNodePlan(ctx, c, host, host.DockerInfo, svcOptions)
-		if host.IsControl || c.HostsLabeledToIgnoreUpgrade[host.Address] {
+		if host.IsControl {
 			continue
 		}
 		if !host.IsEtcd {
@@ -274,7 +271,7 @@ func (c *Cluster) UpgradeWorkerPlane(ctx context.Context, kubeClient *kubernetes

 	for _, host := range c.InactiveHosts {
 		// if host has controlplane role, it already has worker components upgraded
-		if !host.IsControl && !c.HostsLabeledToIgnoreUpgrade[host.Address] {
+		if !host.IsControl {
 			inactiveHosts[host.HostnameOverride] = true
 		}
 	}
@@ -681,7 +678,6 @@ func InitClusterObject(ctx context.Context, rkeConfig *v3.RancherKubernetesEngin
 		EncryptionConfig: encryptionConfig{
 			EncryptionProviderFile: encryptConfig,
 		},
-		HostsLabeledToIgnoreUpgrade: make(map[string]bool),
 	}
 	if metadata.K8sVersionToRKESystemImages == nil {
 		if err := metadata.InitMetadata(ctx); err != nil {

@@ -4,11 +4,9 @@ import (
 	"context"
 	"fmt"
 	"strings"
-	"time"

 	"github.com/docker/docker/api/types"
 	"github.com/rancher/rke/hosts"
-	"github.com/rancher/rke/k8s"
 	"github.com/rancher/rke/log"
 	"github.com/rancher/rke/pki"
 	"github.com/rancher/rke/services"
@@ -16,8 +14,6 @@ import (
 	v3 "github.com/rancher/types/apis/management.cattle.io/v3"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/errgroup"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apiserver/pkg/apis/apiserver/v1alpha1"
 	"sigs.k8s.io/yaml"
 )
@@ -67,37 +63,6 @@ func (c *Cluster) TunnelHosts(ctx context.Context, flags ExternalFlags) error {
 	return ValidateHostCount(c)
 }

-func (c *Cluster) FindHostsLabeledToIgnoreUpgrade(ctx context.Context) {
-	kubeClient, err := k8s.NewClient(c.LocalKubeConfigPath, c.K8sWrapTransport)
-	if err != nil {
-		logrus.Errorf("Error generating kube client in FindHostsLabeledToIgnoreUpgrade: %v", err)
-		return
-	}
-	var nodes *v1.NodeList
-	for retries := 0; retries < k8s.MaxRetries; retries++ {
-		nodes, err = kubeClient.CoreV1().Nodes().List(metav1.ListOptions{})
-		if err == nil {
-			break
-		}
-		time.Sleep(time.Second * k8s.RetryInterval)
-	}
-	if err != nil {
-		log.Infof(ctx, "Error listing nodes but continuing upgrade: %v", err)
-		return
-	}
-	if nodes == nil {
-		return
-	}
-	for _, node := range nodes.Items {
-		if val, ok := node.Labels[k8s.IgnoreHostDuringUpgradeLabel]; ok && val == k8s.IgnoreLabelValue {
-			host := hosts.Host{RKEConfigNode: v3.RKEConfigNode{Address: node.Annotations[k8s.ExternalAddressAnnotation]}}
-			logrus.Infof("Host %v is labeled to ignore upgrade", host.Address)
-			c.HostsLabeledToIgnoreUpgrade[host.Address] = true
-		}
-	}
-	return
-}
-
 func (c *Cluster) InvertIndexHosts() error {
 	c.EtcdHosts = make([]*hosts.Host, 0)
 	c.WorkerHosts = make([]*hosts.Host, 0)
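Aside (not part of the diff): the deleted FindHostsLabeledToIgnoreUpgrade wrapped its node listing in a bounded retry loop, capped at k8s.MaxRetries attempts with a fixed sleep between them, and proceeded with the last error if every attempt failed. A self-contained sketch of that pattern, with illustrative names and a fake listing call in place of the real API request:

package main

import (
	"errors"
	"fmt"
	"time"
)

// listWithRetry retries list() up to maxRetries times, sleeping between
// attempts, and returns the last error if all attempts fail.
func listWithRetry(list func() error, maxRetries int, interval time.Duration) error {
	var err error
	for retries := 0; retries < maxRetries; retries++ {
		if err = list(); err == nil {
			return nil
		}
		time.Sleep(interval)
	}
	return err
}

func main() {
	attempts := 0
	err := listWithRetry(func() error {
		attempts++
		if attempts < 3 {
			return errors.New("apiserver not ready")
		}
		return nil
	}, 5, 10*time.Millisecond)
	fmt.Println(attempts, err) // 3 <nil>
}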
@@ -158,34 +123,22 @@ func (c *Cluster) CalculateMaxUnavailable() (int, int, error) {
 	var workerHosts, controlHosts, maxUnavailableWorker, maxUnavailableControl int

 	for _, host := range c.InactiveHosts {
-		if host.IsControl && !c.HostsLabeledToIgnoreUpgrade[host.Address] {
+		if host.IsControl {
 			inactiveControlPlaneHosts = append(inactiveControlPlaneHosts, host.HostnameOverride)
 		}
-		if !host.IsWorker && !c.HostsLabeledToIgnoreUpgrade[host.Address] {
+		if !host.IsWorker {
 			inactiveWorkerHosts = append(inactiveWorkerHosts, host.HostnameOverride)
 		}
 		// not breaking out of the loop so we can log all of the inactive hosts
 	}

-	for _, host := range c.WorkerHosts {
-		if c.HostsLabeledToIgnoreUpgrade[host.Address] {
-			continue
-		}
-		workerHosts++
-	}
-	// maxUnavailable should be calculated against all hosts provided in cluster.yml except the ones labelled to be ignored for upgrade
-	workerHosts += len(inactiveWorkerHosts)
+	// maxUnavailable should be calculated against all hosts provided in cluster.yml
+	workerHosts = len(c.WorkerHosts) + len(inactiveWorkerHosts)
 	maxUnavailableWorker, err := services.CalculateMaxUnavailable(c.UpgradeStrategy.MaxUnavailableWorker, workerHosts, services.WorkerRole)
 	if err != nil {
 		return maxUnavailableWorker, maxUnavailableControl, err
 	}
-	for _, host := range c.ControlPlaneHosts {
-		if c.HostsLabeledToIgnoreUpgrade[host.Address] {
-			continue
-		}
-		controlHosts++
-	}
-	controlHosts += len(inactiveControlPlaneHosts)
+	controlHosts = len(c.ControlPlaneHosts) + len(inactiveControlPlaneHosts)
 	maxUnavailableControl, err = services.CalculateMaxUnavailable(c.UpgradeStrategy.MaxUnavailableControlplane, controlHosts, services.ControlRole)
 	if err != nil {
 		return maxUnavailableWorker, maxUnavailableControl, err
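Aside (not part of the diff): after this change the denominators are simply all hosts declared in cluster.yml for each role, active plus inactive, with no exclusion for labeled hosts. A hypothetical stand-in for services.CalculateMaxUnavailable, assuming the configured value is either a plain count or a percentage of the total and that the result is floored at 1; the real helper in rke/services may differ:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// calcMaxUnavailable is an illustrative stand-in, not the RKE implementation.
// It accepts either an integer ("2") or a percentage ("10%") of totalHosts,
// truncates the percentage result, and never returns less than 1.
func calcMaxUnavailable(configured string, totalHosts int) (int, error) {
	var n int
	if strings.HasSuffix(configured, "%") {
		pct, err := strconv.Atoi(strings.TrimSuffix(configured, "%"))
		if err != nil {
			return 0, fmt.Errorf("invalid percentage %q: %v", configured, err)
		}
		n = totalHosts * pct / 100
	} else {
		var err error
		if n, err = strconv.Atoi(configured); err != nil {
			return 0, fmt.Errorf("invalid count %q: %v", configured, err)
		}
	}
	if n < 1 {
		n = 1 // always allow at least one host to be upgraded at a time
	}
	return n, nil
}

func main() {
	max, _ := calcMaxUnavailable("10%", 25)
	fmt.Println(max) // 2
}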

@@ -163,7 +163,6 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
 	kubeCluster.NewHosts = newNodes
 	reconcileCluster = true

-	kubeCluster.FindHostsLabeledToIgnoreUpgrade(ctx)
 	maxUnavailableWorker, maxUnavailableControl, err := kubeCluster.CalculateMaxUnavailable()
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, nil, err
@@ -247,9 +246,6 @@ func checkAllIncluded(cluster *cluster.Cluster) error {

 	var names []string
 	for _, host := range cluster.InactiveHosts {
-		if cluster.HostsLabeledToIgnoreUpgrade[host.Address] {
-			continue
-		}
 		names = append(names, host.Address)
 	}


@@ -17,8 +17,6 @@ const (
 	HostnameLabel                = "kubernetes.io/hostname"
 	InternalAddressAnnotation    = "rke.cattle.io/internal-ip"
 	ExternalAddressAnnotation    = "rke.cattle.io/external-ip"
-	IgnoreHostDuringUpgradeLabel = "user.cattle.io/upgrade-policy"
-	IgnoreLabelValue             = "prevent"
 	AWSCloudProvider             = "aws"
 	MaxRetries                   = 5
 	RetryInterval                = 5
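Aside (not part of the diff): these two constants encoded the opt-out this commit retires: a node labeled user.cattle.io/upgrade-policy=prevent was excluded from the zero-downtime upgrade. A minimal sketch of the removed check, with a plain map standing in for node.Labels rather than the real client-go node type:

package main

import "fmt"

// The two constants removed above, reproduced here for illustration only.
const (
	ignoreHostDuringUpgradeLabel = "user.cattle.io/upgrade-policy"
	ignoreLabelValue             = "prevent"
)

func main() {
	// Stand-in for node.Labels on a listed Kubernetes node.
	labels := map[string]string{ignoreHostDuringUpgradeLabel: ignoreLabelValue}
	if val, ok := labels[ignoreHostDuringUpgradeLabel]; ok && val == ignoreLabelValue {
		fmt.Println("before this commit: node skipped during upgrade")
	}
}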

@@ -80,9 +80,6 @@ func getNodeListForUpgrade(kubeClient *kubernetes.Clientset, hostsFailed *sync.M
 		if inactiveHosts[node.Labels[k8s.HostnameLabel]] {
 			continue
 		}
-		if val, ok := node.Labels[k8s.IgnoreHostDuringUpgradeLabel]; ok && val == k8s.IgnoreLabelValue {
-			continue
-		}
 		nodeList = append(nodeList, node)
 	}
 	return nodeList, nil
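Aside (not part of the diff): after this change getNodeListForUpgrade filters only on host inactivity; the label-based opt-out is gone. A self-contained sketch of the remaining filter, using a simplified node type rather than the real client-go types:

package main

import "fmt"

type node struct{ hostname string }

// nodesForUpgrade keeps only nodes whose host is reachable, mirroring the
// post-commit behavior of getNodeListForUpgrade.
func nodesForUpgrade(nodes []node, inactive map[string]bool) []node {
	var out []node
	for _, n := range nodes {
		if inactive[n.hostname] {
			continue // unreachable host: leave it out of the upgrade list
		}
		out = append(out, n)
	}
	return out
}

func main() {
	nodes := []node{{"node-1"}, {"node-2"}, {"node-3"}}
	inactive := map[string]bool{"node-2": true}
	fmt.Println(nodesForUpgrade(nodes, inactive)) // [{node-1} {node-3}]
}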