1
0
mirror of https://github.com/rancher/rke.git synced 2025-09-01 15:06:23 +00:00

Make etcd provisioning a public API

This commit is contained in:
Darren Shepherd
2018-03-31 03:53:43 -07:00
parent 149f866c47
commit d2c58fb8e6
4 changed files with 70 additions and 26 deletions

View File

@@ -62,7 +62,7 @@ const (
AWSCloudProvider = "aws"
)
func (c *Cluster) DeployControlPlane(ctx context.Context) error {
func (c *Cluster) DeployETCD(ctx context.Context) error {
// Deploy Etcd Plane
etcdProcessHostMap := c.getEtcdProcessHostMap(nil)
if len(c.Services.Etcd.ExternalURLs) > 0 {
@@ -73,6 +73,15 @@ func (c *Cluster) DeployControlPlane(ctx context.Context) error {
}
}
return nil
}
func (c *Cluster) DeployControlPlane(ctx context.Context) error {
// Deploy Etcd Plane
if err := c.DeployETCD(ctx); err != nil {
return err
}
// Deploy Control plane
processMap := map[string]v3.Process{
services.SidekickContainerName: c.BuildSidecarProcess(),

View File

@@ -39,7 +39,7 @@ func BuildRKEConfigNodePlan(ctx context.Context, myCluster *Cluster, host *hosts
portChecks = append(portChecks, BuildPortChecksFromPortList(host, WorkerPortList, ProtocolTCP)...)
// Do we need an nginxProxy for this one ?
if host.IsWorker && !host.IsControl {
if !host.IsControl {
processes[services.NginxProxyContainerName] = myCluster.BuildProxyProcess()
}
if host.IsControl {

View File

@@ -35,7 +35,7 @@ func ReconcileCluster(ctx context.Context, kubeCluster, currentCluster *Cluster,
// sync node labels to define the toDelete labels
syncLabels(ctx, currentCluster, kubeCluster)
if err := reconcileEtcd(ctx, currentCluster, kubeCluster, kubeClient); err != nil {
if err := ReconcileEtcd(ctx, currentCluster, kubeCluster, kubeClient, true); err != nil {
return fmt.Errorf("Failed to reconcile etcd plane: %v", err)
}
@@ -149,7 +149,7 @@ func reconcileHost(ctx context.Context, toDeleteHost *hosts.Host, worker, etcd b
return nil
}
func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, kubeClient *kubernetes.Clientset) error {
func ReconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, kubeClient *kubernetes.Clientset, handleCerts bool) error {
log.Infof(ctx, "[reconcile] Check etcd hosts to be deleted")
// get tls for the first current etcd host
clientCert := cert.EncodeCertPEM(currentCluster.Certificates[pki.KubeNodeCertName].Certificate)
@@ -161,10 +161,12 @@ func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, ku
log.Warnf(ctx, "[reconcile] %v", err)
continue
}
if kubeClient != nil {
if err := hosts.DeleteNode(ctx, etcdHost, kubeClient, etcdHost.IsControl); err != nil {
log.Warnf(ctx, "Failed to delete etcd node %s from cluster", etcdHost.Address)
continue
}
}
// attempting to clean services/files on the host
if err := reconcileHost(ctx, etcdHost, false, true, currentCluster.SystemImages.Alpine, currentCluster.DockerDialerFactory, currentCluster.PrivateRegistriesMap); err != nil {
log.Warnf(ctx, "[reconcile] Couldn't clean up etcd node [%s]: %v", etcdHost.Address, err)
@@ -173,6 +175,7 @@ func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, ku
}
log.Infof(ctx, "[reconcile] Check etcd hosts to be added")
etcdToAdd := hosts.GetToAddHosts(currentCluster.EtcdHosts, kubeCluster.EtcdHosts)
if handleCerts {
crtMap := currentCluster.Certificates
var err error
for _, etcdHost := range etcdToAdd {
@@ -191,11 +194,14 @@ func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, ku
}
}
currentCluster.Certificates = crtMap
}
for _, etcdHost := range etcdToAdd {
if handleCerts {
// deploy certificates on new etcd host
if err := pki.DeployCertificatesOnHost(ctx, etcdHost, currentCluster.Certificates, kubeCluster.SystemImages.CertDownloader, pki.CertPathPrefix, kubeCluster.PrivateRegistriesMap); err != nil {
return err
}
}
// Check if the host already part of the cluster -- this will cover cluster with lost quorum
isEtcdMember, err := services.IsEtcdMember(ctx, etcdHost, kubeCluster.EtcdHosts, currentCluster.LocalConnDialerFactory, clientCert, clientkey)

View File

@@ -48,6 +48,35 @@ func UpCommand() cli.Command {
}
}
// EtcdUp provisions only the etcd plane of a cluster, as a standalone public
// entry point (this commit's purpose: "Make etcd provisioning a public API").
// It tunnels to the hosts, optionally verifies ports, reconciles etcd
// membership against currentCluster (when one exists), and deploys the etcd
// containers. currentCluster may be nil on a first-time provision.
// Returns the first error encountered, or an error naming the first
// unreachable host if any hosts were inactive after deployment.
func EtcdUp(ctx context.Context, currentCluster, kubeCluster *cluster.Cluster, disablePortCheck bool) error {
log.Infof(ctx, "Checking ETCD")
// Open tunnels to the cluster hosts. NOTE(review): the second argument is an
// opaque flag here — presumably "local" or "flannel"-style toggle; confirm
// against TunnelHosts' definition before relying on it.
if err := kubeCluster.TunnelHosts(ctx, false); err != nil {
return err
}
// Port connectivity checks are skippable for environments where the probe
// containers can't run or ports are known-good.
if !disablePortCheck {
if err := kubeCluster.CheckClusterPorts(ctx, currentCluster); err != nil {
return err
}
}
// Only reconcile when there is an existing cluster state to reconcile against.
if currentCluster != nil {
// kubeClient is nil: skips deleting removed etcd nodes from the Kubernetes
// API (ReconcileEtcd guards DeleteNode behind a nil check). handleCerts is
// false: skips certificate generation/deployment for added etcd hosts —
// this path manages only the etcd processes themselves.
if err := cluster.ReconcileEtcd(ctx, currentCluster, kubeCluster, nil, false); err != nil {
return err
}
}
// Deploy the etcd plane containers on the target hosts.
if err := kubeCluster.DeployETCD(ctx); err != nil {
return err
}
// Surface the first host that could not be reached; deployment above is
// best-effort on the hosts that were reachable.
if len(kubeCluster.InactiveHosts) > 0 {
return fmt.Errorf("failed to contact to %s", kubeCluster.InactiveHosts[0].Address)
}
return nil
}
func ClusterUp(
ctx context.Context,
rkeConfig *v3.RancherKubernetesEngineConfig,