mirror of https://github.com/rancher/rke.git synced 2025-04-28 03:31:24 +00:00

Force deploy certificates if kubeapi cert got changed

galal-hussein 2019-04-23 23:42:10 +02:00 committed by Alena Prokharchyk
parent 765746fc77
commit 7744f18d6e
5 changed files with 27 additions and 11 deletions

View File

@@ -17,6 +17,7 @@ import (
 func SetUpAuthentication(ctx context.Context, kubeCluster, currentCluster *Cluster, fullState *FullState) error {
     if kubeCluster.AuthnStrategies[AuthnX509Provider] {
+        compareKubeAPICerts(ctx, kubeCluster, currentCluster)
         kubeCluster.Certificates = fullState.DesiredState.CertificatesBundle
         return nil
     }
@@ -223,3 +224,16 @@ func GetClusterCertsFromNodes(ctx context.Context, kubeCluster *Cluster) (map[st
     // reporting the last error only.
     return nil, err
 }
+
+func compareKubeAPICerts(ctx context.Context, kubeCluster, currentCluster *Cluster) {
+    // check whether the kubeapi cert changed; if it did, set force deploy
+    // to true so the kubeapi cert is redeployed with its new SANs
+    if currentCluster != nil {
+        currentKubeAPICert := currentCluster.Certificates[pki.KubeAPICertName]
+        desiredKubeAPICert := kubeCluster.Certificates[pki.KubeAPICertName]
+        if desiredKubeAPICert.CertificatePEM != currentKubeAPICert.CertificatePEM {
+            log.Infof(ctx, "[certificates] KubeAPI certificate changed, force deploying certs")
+            kubeCluster.ForceDeployCerts = true
+        }
+    }
+}
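The detection itself is just a string comparison of the two PEM blobs: re-issuing the kube-apiserver certificate (for example with a different SAN list) re-encodes it, so the PEM text changes. A minimal standalone sketch of the same idea using only the Go standard library (the package and helper names here are illustrative, not rke code):

```go
package certwatch

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// certChanged reports whether two PEM-encoded certificates differ.
// A byte-for-byte comparison is sufficient: re-issuing a certificate
// with new SANs re-encodes it, so the PEM text changes too.
func certChanged(currentPEM, desiredPEM string) bool {
	return currentPEM != desiredPEM
}

// dnsSANs parses a PEM certificate and returns its DNS SANs, which is
// useful for logging what actually changed before forcing a redeploy.
func dnsSANs(certPEM string) ([]string, error) {
	block, _ := pem.Decode([]byte(certPEM))
	if block == nil {
		return nil, fmt.Errorf("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		return nil, fmt.Errorf("parse certificate: %w", err)
	}
	return cert.DNSNames, nil
}
```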

View File

@@ -42,6 +42,7 @@ type Cluster struct {
     DockerDialerFactory hosts.DialerFactory
     EtcdHosts           []*hosts.Host
     EtcdReadyHosts      []*hosts.Host
+    ForceDeployCerts    bool
     InactiveHosts       []*hosts.Host
     K8sWrapTransport    k8s.WrapTransport
     KubeClient          *kubernetes.Clientset
@@ -170,7 +171,7 @@ func InitClusterObject(ctx context.Context, rkeConfig *v3.RancherKubernetesEngin
     }
     // Setting cluster Defaults
-    err := c.setClusterDefaults(ctx)
+    err := c.setClusterDefaults(ctx, flags)
     if err != nil {
         return nil, err
     }

View File

@@ -76,7 +76,7 @@ func setDefaultIfEmpty(varName *string, defaultValue string) {
     }
 }
-func (c *Cluster) setClusterDefaults(ctx context.Context) error {
+func (c *Cluster) setClusterDefaults(ctx context.Context, flags ExternalFlags) error {
     if len(c.SSHKeyPath) == 0 {
         c.SSHKeyPath = DefaultClusterSSHKeyPath
     }
@@ -155,6 +155,11 @@ func (c *Cluster) setClusterDefaults(ctx context.Context) error {
         c.DNS.Provider = DefaultDNSProvider
     }
+
+    if c.RancherKubernetesEngineConfig.RotateCertificates != nil ||
+        flags.CustomCerts {
+        c.ForceDeployCerts = true
+    }
     c.setClusterServicesDefaults()
     c.setClusterNetworkDefaults()
     c.setClusterAuthnDefaults()
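With this hunk the force-deploy decision is computed once and stored on the Cluster object rather than recomputed at each call site: setClusterDefaults seeds it from the rotation and custom-cert flags, and compareKubeAPICerts can later flip it when the kube-apiserver certificate differs. Condensed into a single predicate (a sketch of the three writers in this commit, not rke code):

```go
package cluster

// forceDeploy mirrors the three conditions under which ForceDeployCerts
// ends up true in this commit: certificate rotation was requested, custom
// certs were supplied, or a changed kube-apiserver cert was detected
// during authentication setup.
func forceDeploy(rotateRequested, customCerts, kubeAPICertChanged bool) bool {
	return rotateRequested || customCerts || kubeAPICertChanged
}
```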

View File

@@ -121,10 +121,6 @@ func (c *Cluster) InvertIndexHosts() error {
 func (c *Cluster) SetUpHosts(ctx context.Context, flags ExternalFlags) error {
     if c.AuthnStrategies[AuthnX509Provider] {
         log.Infof(ctx, "[certificates] Deploying kubernetes certificates to Cluster nodes")
-        forceDeploy := false
-        if flags.CustomCerts || c.RancherKubernetesEngineConfig.RotateCertificates != nil {
-            forceDeploy = true
-        }
         hostList := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
         var errgrp errgroup.Group
         hostsQueue := util.GetObjectQueue(hostList)
@@ -132,7 +128,7 @@ func (c *Cluster) SetUpHosts(ctx context.Context, flags ExternalFlags) error {
         errgrp.Go(func() error {
             var errList []error
             for host := range hostsQueue {
-                err := pki.DeployCertificatesOnPlaneHost(ctx, host.(*hosts.Host), c.RancherKubernetesEngineConfig, c.Certificates, c.SystemImages.CertDownloader, c.PrivateRegistriesMap, forceDeploy)
+                err := pki.DeployCertificatesOnPlaneHost(ctx, host.(*hosts.Host), c.RancherKubernetesEngineConfig, c.Certificates, c.SystemImages.CertDownloader, c.PrivateRegistriesMap, c.ForceDeployCerts)
                 if err != nil {
                     errList = append(errList, err)
                 }
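For context, the changed call sits inside a worker-pool fan-out: the unique host list is pushed onto a queue and a fixed number of errgroup goroutines drain it, each collecting its own per-host errors. A runnable sketch of that pattern, assuming plain string hosts and an illustrative deploy callback (util.GetObjectQueue is not reproduced here):

```go
package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

// deployAll fans work out to a fixed number of goroutines draining a
// closed channel, mirroring the hostsQueue/errgroup pattern in SetUpHosts.
func deployAll(hostList []string, workers int, deploy func(string) error) error {
	queue := make(chan string, len(hostList))
	for _, h := range hostList {
		queue <- h
	}
	close(queue)

	var errgrp errgroup.Group
	for i := 0; i < workers; i++ {
		errgrp.Go(func() error {
			var errList []error
			for host := range queue {
				if err := deploy(host); err != nil {
					errList = append(errList, err)
				}
			}
			if len(errList) > 0 {
				// like the original, report a combined failure per worker
				return fmt.Errorf("cert deploy failed on %d host(s): %v", len(errList), errList)
			}
			return nil
		})
	}
	return errgrp.Wait()
}

func main() {
	hostList := []string{"10.0.0.1", "10.0.0.2", "10.0.0.3"}
	err := deployAll(hostList, 2, func(h string) error {
		fmt.Println("deploying certificates to", h)
		return nil
	})
	fmt.Println("done, err =", err)
}
```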

View File

@@ -51,7 +51,7 @@ func ReconcileCluster(ctx context.Context, kubeCluster, currentCluster *Cluster,
     if err := reconcileControl(ctx, currentCluster, kubeCluster, kubeClient); err != nil {
         return err
     }
-    if flags.CustomCerts {
+    if kubeCluster.ForceDeployCerts {
         if err := restartComponentsWhenCertChanges(ctx, currentCluster, kubeCluster); err != nil {
             return err
         }
@@ -67,7 +67,7 @@ func reconcileWorker(ctx context.Context, currentCluster, kubeCluster *Cluster,
     wpToDelete := hosts.GetToDeleteHosts(currentCluster.WorkerHosts, kubeCluster.WorkerHosts, kubeCluster.InactiveHosts, false)
     for _, toDeleteHost := range wpToDelete {
         toDeleteHost.IsWorker = false
-        if err := hosts.DeleteNode(ctx, toDeleteHost, kubeClient, toDeleteHost.IsControl, kubeCluster.CloudProvider.Name); err != nil {
+        if err := hosts.DeleteNode(ctx, toDeleteHost, kubeClient, toDeleteHost.IsControl || toDeleteHost.IsEtcd, kubeCluster.CloudProvider.Name); err != nil {
             return fmt.Errorf("Failed to delete worker node [%s] from cluster: %v", toDeleteHost.Address, err)
         }
         // attempting to clean services/files on the host
@@ -181,7 +181,7 @@ func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, ku
             log.Warnf(ctx, "[reconcile] %v", err)
             continue
         }
         if err := hosts.DeleteNode(ctx, etcdHost, kubeClient, etcdHost.IsControl, kubeCluster.CloudProvider.Name); err != nil {
+        if err := hosts.DeleteNode(ctx, etcdHost, kubeClient, etcdHost.IsControl || etcdHost.IsWorker, kubeCluster.CloudProvider.Name); err != nil {
             log.Warnf(ctx, "Failed to delete etcd node [%s] from cluster: %v", etcdHost.Address, err)
             continue
         }
@@ -259,7 +259,7 @@ func cleanControlNode(ctx context.Context, kubeCluster, currentCluster *Cluster,
     // if I am deleting a node that's already in the config, it's probably being replaced and I shouldn't remove it from k8s
     if !hosts.IsNodeInList(toDeleteHost, kubeCluster.ControlPlaneHosts) {
-        if err := hosts.DeleteNode(ctx, toDeleteHost, kubeClient, toDeleteHost.IsWorker, kubeCluster.CloudProvider.Name); err != nil {
+        if err := hosts.DeleteNode(ctx, toDeleteHost, kubeClient, toDeleteHost.IsWorker || toDeleteHost.IsEtcd, kubeCluster.CloudProvider.Name); err != nil {
             return fmt.Errorf("Failed to delete controlplane node [%s] from cluster: %v", toDeleteHost.Address, err)
         }
     }
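The remaining hunks are a related fix to node deletion: each DeleteNode call now passes the OR of the node's other roles, so a host removed from one plane stays registered in Kubernetes while it still serves another (for example, a combined etcd/worker node losing only its worker role). The predicate, extracted into a hypothetical helper for illustration (not rke code):

```go
package main

import "fmt"

// Host mirrors the role flags used by the reconcile code.
type Host struct {
	Address   string
	IsControl bool
	IsWorker  bool
	IsEtcd    bool
}

// keepInCluster reports whether a host losing the given role should stay
// registered in Kubernetes because it still holds another role, matching
// the boolean now passed to hosts.DeleteNode at each call site.
func keepInCluster(h Host, losing string) bool {
	switch losing {
	case "worker":
		return h.IsControl || h.IsEtcd
	case "etcd":
		return h.IsControl || h.IsWorker
	case "controlplane":
		return h.IsWorker || h.IsEtcd
	}
	return false
}

func main() {
	h := Host{Address: "10.0.0.5", IsWorker: true, IsEtcd: true}
	fmt.Println(keepInCluster(h, "worker")) // true: still an etcd member
}
```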