Make etcd provisioning a public API
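This change splits etcd provisioning out of the combined deploy path so it can be driven on its own: the etcd portion of (*Cluster).DeployControlPlane moves into a new public (*Cluster).DeployETCD method, the private reconcileEtcd is exported as ReconcileEtcd with an added handleCerts flag and tolerance for a nil kubeClient, and cmd/up.go gains an EtcdUp entry point that tunnels to the hosts, optionally checks ports, reconciles the etcd plane, and deploys it. A usage sketch follows the cmd/up.go hunk at the end.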
@@ -62,7 +62,7 @@ const (
 	AWSCloudProvider = "aws"
 )
 
-func (c *Cluster) DeployControlPlane(ctx context.Context) error {
+func (c *Cluster) DeployETCD(ctx context.Context) error {
 	// Deploy Etcd Plane
 	etcdProcessHostMap := c.getEtcdProcessHostMap(nil)
 	if len(c.Services.Etcd.ExternalURLs) > 0 {
@@ -73,6 +73,15 @@ func (c *Cluster) DeployControlPlane(ctx context.Context) error {
 		}
 	}
 
+	return nil
+}
+
+func (c *Cluster) DeployControlPlane(ctx context.Context) error {
+	// Deploy Etcd Plane
+	if err := c.DeployETCD(ctx); err != nil {
+		return err
+	}
+
 	// Deploy Control plane
 	processMap := map[string]v3.Process{
 		services.SidekickContainerName: c.BuildSidecarProcess(),
@@ -39,7 +39,7 @@ func BuildRKEConfigNodePlan(ctx context.Context, myCluster *Cluster, host *hosts
 
 	portChecks = append(portChecks, BuildPortChecksFromPortList(host, WorkerPortList, ProtocolTCP)...)
 	// Do we need an nginxProxy for this one ?
-	if host.IsWorker && !host.IsControl {
+	if !host.IsControl {
 		processes[services.NginxProxyContainerName] = myCluster.BuildProxyProcess()
 	}
 	if host.IsControl {
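Beyond the rename work, this hunk widens where the nginx proxy sidecar runs: it is now added to every host that is not a control plane node, not just worker nodes, presumably so that dedicated etcd hosts can also reach the kube-apiserver through the local proxy.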
@@ -35,7 +35,7 @@ func ReconcileCluster(ctx context.Context, kubeCluster, currentCluster *Cluster,
 	// sync node labels to define the toDelete labels
 	syncLabels(ctx, currentCluster, kubeCluster)
 
-	if err := reconcileEtcd(ctx, currentCluster, kubeCluster, kubeClient); err != nil {
+	if err := ReconcileEtcd(ctx, currentCluster, kubeCluster, kubeClient, true); err != nil {
 		return fmt.Errorf("Failed to reconcile etcd plane: %v", err)
 	}
 
@@ -149,7 +149,7 @@ func reconcileHost(ctx context.Context, toDeleteHost *hosts.Host, worker, etcd b
 	return nil
 }
 
-func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, kubeClient *kubernetes.Clientset) error {
+func ReconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, kubeClient *kubernetes.Clientset, handleCerts bool) error {
 	log.Infof(ctx, "[reconcile] Check etcd hosts to be deleted")
 	// get tls for the first current etcd host
 	clientCert := cert.EncodeCertPEM(currentCluster.Certificates[pki.KubeNodeCertName].Certificate)
@@ -161,9 +161,11 @@ func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, ku
 			log.Warnf(ctx, "[reconcile] %v", err)
 			continue
 		}
-		if err := hosts.DeleteNode(ctx, etcdHost, kubeClient, etcdHost.IsControl); err != nil {
-			log.Warnf(ctx, "Failed to delete etcd node %s from cluster", etcdHost.Address)
-			continue
+		if kubeClient != nil {
+			if err := hosts.DeleteNode(ctx, etcdHost, kubeClient, etcdHost.IsControl); err != nil {
+				log.Warnf(ctx, "Failed to delete etcd node %s from cluster", etcdHost.Address)
+				continue
+			}
 		}
 		// attempting to clean services/files on the host
 		if err := reconcileHost(ctx, etcdHost, false, true, currentCluster.SystemImages.Alpine, currentCluster.DockerDialerFactory, currentCluster.PrivateRegistriesMap); err != nil {
@@ -173,28 +175,32 @@ func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, ku
 	}
 	log.Infof(ctx, "[reconcile] Check etcd hosts to be added")
 	etcdToAdd := hosts.GetToAddHosts(currentCluster.EtcdHosts, kubeCluster.EtcdHosts)
-	crtMap := currentCluster.Certificates
-	var err error
-	for _, etcdHost := range etcdToAdd {
-		kubeCluster.UpdateWorkersOnly = false
-		etcdHost.ToAddEtcdMember = true
-		// Generate new certificate for the new etcd member
-		crtMap, err = pki.RegenerateEtcdCertificate(
-			ctx,
-			crtMap,
-			etcdHost,
-			kubeCluster.EtcdHosts,
-			kubeCluster.ClusterDomain,
-			kubeCluster.KubernetesServiceIP)
-		if err != nil {
-			return err
+	if handleCerts {
+		crtMap := currentCluster.Certificates
+		var err error
+		for _, etcdHost := range etcdToAdd {
+			kubeCluster.UpdateWorkersOnly = false
+			etcdHost.ToAddEtcdMember = true
+			// Generate new certificate for the new etcd member
+			crtMap, err = pki.RegenerateEtcdCertificate(
+				ctx,
+				crtMap,
+				etcdHost,
+				kubeCluster.EtcdHosts,
+				kubeCluster.ClusterDomain,
+				kubeCluster.KubernetesServiceIP)
+			if err != nil {
+				return err
+			}
 		}
+		currentCluster.Certificates = crtMap
 	}
-	currentCluster.Certificates = crtMap
 	for _, etcdHost := range etcdToAdd {
-		// deploy certificates on new etcd host
-		if err := pki.DeployCertificatesOnHost(ctx, etcdHost, currentCluster.Certificates, kubeCluster.SystemImages.CertDownloader, pki.CertPathPrefix, kubeCluster.PrivateRegistriesMap); err != nil {
-			return err
+		if handleCerts {
+			// deploy certificates on new etcd host
+			if err := pki.DeployCertificatesOnHost(ctx, etcdHost, currentCluster.Certificates, kubeCluster.SystemImages.CertDownloader, pki.CertPathPrefix, kubeCluster.PrivateRegistriesMap); err != nil {
+				return err
+			}
 		}
 
 		// Check if the host already part of the cluster -- this will cover cluster with lost quorum
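Taken together, the two new knobs let ReconcileEtcd run before any Kubernetes control plane exists: with a nil kubeClient the node deletion through the Kubernetes API is skipped (only the on-host cleanup via reconcileHost runs), and with handleCerts set to false both the etcd certificate regeneration and the certificate deployment to new hosts are skipped. The EtcdUp helper below calls it exactly this way, with kubeClient nil and handleCerts false.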
cmd/up.go | 29 +++++++++++++++++++++++++++++
@@ -48,6 +48,35 @@ func UpCommand() cli.Command {
 	}
 }
 
+func EtcdUp(ctx context.Context, currentCluster, kubeCluster *cluster.Cluster, disablePortCheck bool) error {
+	log.Infof(ctx, "Checking ETCD")
+	if err := kubeCluster.TunnelHosts(ctx, false); err != nil {
+		return err
+	}
+
+	if !disablePortCheck {
+		if err := kubeCluster.CheckClusterPorts(ctx, currentCluster); err != nil {
+			return err
+		}
+	}
+
+	if currentCluster != nil {
+		if err := cluster.ReconcileEtcd(ctx, currentCluster, kubeCluster, nil, false); err != nil {
+			return err
+		}
+	}
+
+	if err := kubeCluster.DeployETCD(ctx); err != nil {
+		return err
+	}
+
+	if len(kubeCluster.InactiveHosts) > 0 {
+		return fmt.Errorf("failed to contact to %s", kubeCluster.InactiveHosts[0].Address)
+	}
+
+	return nil
+}
+
 func ClusterUp(
 	ctx context.Context,
 	rkeConfig *v3.RancherKubernetesEngineConfig,
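For illustration, a minimal sketch of how an external consumer might drive the newly public API. The package name, import layout, and the way the two *cluster.Cluster values are obtained are assumptions; only the EtcdUp signature (and the ReconcileEtcd/DeployETCD calls it wraps) come from this commit.

package etcdprovision // hypothetical consumer package, not part of this commit

import (
	"context"

	"github.com/rancher/rke/cluster"
	"github.com/rancher/rke/cmd"
)

// ProvisionEtcd stands up only the etcd plane of an RKE cluster.
// currentCluster may be nil on a first run; EtcdUp then skips reconciliation.
func ProvisionEtcd(ctx context.Context, currentCluster, kubeCluster *cluster.Cluster) error {
	// disablePortCheck=false keeps the etcd port checks in place.
	return cmd.EtcdUp(ctx, currentCluster, kubeCluster, false)
}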