
consider service options based on hostOS info

kinarashah 2019-09-06 15:53:14 -07:00 committed by Alena Prokharchyk
parent d6b29c9ccc
commit 9411027476
5 changed files with 65 additions and 73 deletions

View File

@@ -89,12 +89,12 @@ const (
 	SystemNamespace = "kube-system"
 )
-func (c *Cluster) DeployControlPlane(ctx context.Context, svcOptions *v3.KubernetesServicesOptions) error {
+func (c *Cluster) DeployControlPlane(ctx context.Context, svcOptionData map[string]*v3.KubernetesServicesOptions) error {
 	// Deploy Etcd Plane
 	etcdNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
 	// Build etcd node plan map
 	for _, etcdHost := range c.EtcdHosts {
-		etcdNodePlanMap[etcdHost.Address] = BuildRKEConfigNodePlan(ctx, c, etcdHost, etcdHost.DockerInfo, svcOptions)
+		etcdNodePlanMap[etcdHost.Address] = BuildRKEConfigNodePlan(ctx, c, etcdHost, etcdHost.DockerInfo, svcOptionData)
 	}
 	if len(c.Services.Etcd.ExternalURLs) > 0 {
@@ -109,7 +109,7 @@ func (c *Cluster) DeployControlPlane(ctx context.Context, svcOptions *v3.Kuberne
 	cpNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
 	// Build cp node plan map
 	for _, cpHost := range c.ControlPlaneHosts {
-		cpNodePlanMap[cpHost.Address] = BuildRKEConfigNodePlan(ctx, c, cpHost, cpHost.DockerInfo, svcOptions)
+		cpNodePlanMap[cpHost.Address] = BuildRKEConfigNodePlan(ctx, c, cpHost, cpHost.DockerInfo, svcOptionData)
 	}
 	if err := services.RunControlPlane(ctx, c.ControlPlaneHosts,
 		c.LocalConnDialerFactory,
@@ -124,17 +124,13 @@ func (c *Cluster) DeployControlPlane(ctx context.Context, svcOptions *v3.Kuberne
 	return nil
 }
-func (c *Cluster) DeployWorkerPlane(ctx context.Context, linuxSvcOptions *v3.KubernetesServicesOptions, windowsSvcOptions *v3.KubernetesServicesOptions) error {
+func (c *Cluster) DeployWorkerPlane(ctx context.Context, svcOptionData map[string]*v3.KubernetesServicesOptions) error {
 	// Deploy Worker plane
 	workerNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
 	// Build cp node plan map
 	allHosts := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
 	for _, workerHost := range allHosts {
-		svcOptions := linuxSvcOptions
-		if workerHost.DockerInfo.OSType == "windows" { // compatible with Windows
-			svcOptions = windowsSvcOptions
-		}
-		workerNodePlanMap[workerHost.Address] = BuildRKEConfigNodePlan(ctx, c, workerHost, workerHost.DockerInfo, svcOptions)
+		workerNodePlanMap[workerHost.Address] = BuildRKEConfigNodePlan(ctx, c, workerHost, workerHost.DockerInfo, svcOptionData)
 	}
 	if err := services.RunWorkerPlane(ctx, allHosts,
 		c.LocalConnDialerFactory,

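With this change the Windows/Linux branching that used to live in DeployWorkerPlane happens per host inside the plan builder instead, so callers hand the same option map to both planes. A minimal caller sketch of that convention, using the GetServiceOptionData helper added in the plan.go changes below (deployPlanes and metadata are hypothetical names, not part of this commit):

package example

import (
	"context"

	"github.com/rancher/rke/cluster"
)

// deployPlanes illustrates the new calling convention: both planes receive
// the same service-option map, and the windows/linux choice is made per host
// inside BuildRKEConfigNodePlan.
func deployPlanes(ctx context.Context, c *cluster.Cluster, metadata map[string]interface{}) error {
	// Pull the "k8s-service-options" / "k8s-windows-service-options" entries
	// (if present) into one map keyed by those same strings.
	svcOptionData := cluster.GetServiceOptionData(metadata)
	if err := c.DeployControlPlane(ctx, svcOptionData); err != nil {
		return err
	}
	return c.DeployWorkerPlane(ctx, svcOptionData)
}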
View File

@@ -19,7 +19,7 @@ import (
 	"github.com/rancher/rke/pki"
 	"github.com/rancher/rke/services"
 	"github.com/rancher/rke/util"
-	v3 "github.com/rancher/types/apis/management.cattle.io/v3"
+	"github.com/rancher/types/apis/management.cattle.io/v3"
 	"github.com/sirupsen/logrus"
 )
@@ -54,7 +54,20 @@ const (
 var admissionControlOptionNames = []string{"enable-admission-plugins", "admission-control"}
-func GeneratePlan(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfig, hostsInfoMap map[string]types.Info) (v3.RKEPlan, error) {
+func GetServiceOptionData(data map[string]interface{}) map[string]*v3.KubernetesServicesOptions {
+	svcOptionsData := map[string]*v3.KubernetesServicesOptions{}
+	k8sServiceOptions, _ := data["k8s-service-options"].(*v3.KubernetesServicesOptions)
+	if k8sServiceOptions != nil {
+		svcOptionsData["k8s-service-options"] = k8sServiceOptions
+	}
+	k8sWServiceOptions, _ := data["k8s-windows-service-options"].(*v3.KubernetesServicesOptions)
+	if k8sWServiceOptions != nil {
+		svcOptionsData["k8s-windows-service-options"] = k8sWServiceOptions
+	}
+	return svcOptionsData
+}
+func GeneratePlan(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfig, hostsInfoMap map[string]types.Info, data map[string]interface{}) (v3.RKEPlan, error) {
 	clusterPlan := v3.RKEPlan{}
 	myCluster, err := InitClusterObject(ctx, rkeConfig, ExternalFlags{})
 	if err != nil {
@@ -62,21 +75,22 @@ func GeneratePlan(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConf
 	}
 	// rkeConfig.Nodes are already unique. But they don't have role flags. So I will use the parsed cluster.Hosts to make use of the role flags.
 	uniqHosts := hosts.GetUniqueHostList(myCluster.EtcdHosts, myCluster.ControlPlaneHosts, myCluster.WorkerHosts)
+	svcOptionData := GetServiceOptionData(data)
 	for _, host := range uniqHosts {
 		host.DockerInfo = hostsInfoMap[host.Address]
-		clusterPlan.Nodes = append(clusterPlan.Nodes, BuildRKEConfigNodePlan(ctx, myCluster, host, hostsInfoMap[host.Address], nil))
+		clusterPlan.Nodes = append(clusterPlan.Nodes, BuildRKEConfigNodePlan(ctx, myCluster, host, hostsInfoMap[host.Address], svcOptionData))
 	}
 	return clusterPlan, nil
 }
-func BuildRKEConfigNodePlan(ctx context.Context, myCluster *Cluster, host *hosts.Host, hostDockerInfo types.Info, svcOptions *v3.KubernetesServicesOptions) v3.RKEConfigNodePlan {
+func BuildRKEConfigNodePlan(ctx context.Context, myCluster *Cluster, host *hosts.Host, hostDockerInfo types.Info, svcOptionData map[string]*v3.KubernetesServicesOptions) v3.RKEConfigNodePlan {
 	prefixPath := hosts.GetPrefixPath(hostDockerInfo.OperatingSystem, myCluster.PrefixPath)
 	processes := map[string]v3.Process{}
 	portChecks := []v3.PortCheck{}
 	// Everybody gets a sidecar and a kubelet..
 	processes[services.SidekickContainerName] = myCluster.BuildSidecarProcess(host, prefixPath)
-	processes[services.KubeletContainerName] = myCluster.BuildKubeletProcess(host, prefixPath, svcOptions)
-	processes[services.KubeproxyContainerName] = myCluster.BuildKubeProxyProcess(host, prefixPath, svcOptions)
+	processes[services.KubeletContainerName] = myCluster.BuildKubeletProcess(host, prefixPath, svcOptionData)
+	processes[services.KubeproxyContainerName] = myCluster.BuildKubeProxyProcess(host, prefixPath, svcOptionData)
 	portChecks = append(portChecks, BuildPortChecksFromPortList(host, WorkerPortList, ProtocolTCP)...)
 	// Do we need an nginxProxy for this one ?
@@ -84,9 +98,9 @@ func BuildRKEConfigNodePlan(ctx context.Context, myCluster *Cluster, host *hosts
 		processes[services.NginxProxyContainerName] = myCluster.BuildProxyProcess(host, prefixPath)
 	}
 	if host.IsControl {
-		processes[services.KubeAPIContainerName] = myCluster.BuildKubeAPIProcess(host, prefixPath, svcOptions)
-		processes[services.KubeControllerContainerName] = myCluster.BuildKubeControllerProcess(host, prefixPath, svcOptions)
-		processes[services.SchedulerContainerName] = myCluster.BuildSchedulerProcess(host, prefixPath, svcOptions)
+		processes[services.KubeAPIContainerName] = myCluster.BuildKubeAPIProcess(host, prefixPath, svcOptionData)
+		processes[services.KubeControllerContainerName] = myCluster.BuildKubeControllerProcess(host, prefixPath, svcOptionData)
+		processes[services.SchedulerContainerName] = myCluster.BuildSchedulerProcess(host, prefixPath, svcOptionData)
 		portChecks = append(portChecks, BuildPortChecksFromPortList(host, ControlPlanePortList, ProtocolTCP)...)
 	}
@@ -143,7 +157,7 @@ func osLimitationFilter(osType string, processes map[string]v3.Process) map[stri
 	return processes
 }
-func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string, svcOptions *v3.KubernetesServicesOptions) v3.Process {
+func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string, svcOptionData map[string]*v3.KubernetesServicesOptions) v3.Process {
 	// check if external etcd is used
 	etcdConnectionString := services.GetEtcdConnString(c.EtcdHosts, host.InternalAddress)
 	etcdPathPrefix := EtcdPathPrefix
@@ -196,13 +210,7 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string, svcOp
 			c.Services.KubeAPI.ExtraEnv,
 			fmt.Sprintf("%s=%s", CloudConfigSumEnv, getCloudConfigChecksum(c.CloudConfigFile)))
 	}
-	var serviceOptions v3.KubernetesServicesOptions
-	if svcOptions == nil {
-		// check if our version has specific options for this component
-		serviceOptions = c.GetKubernetesServicesOptions(host.DockerInfo.OSType)
-	} else {
-		serviceOptions = *svcOptions
-	}
+	serviceOptions := c.GetKubernetesServicesOptions(host.DockerInfo.OSType, svcOptionData)
 	if serviceOptions.KubeAPI != nil {
 		for k, v := range serviceOptions.KubeAPI {
 			// if the value is empty, we remove that option
@@ -283,7 +291,7 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string, svcOp
 		}
 	}
-func (c *Cluster) BuildKubeControllerProcess(host *hosts.Host, prefixPath string, svcOptions *v3.KubernetesServicesOptions) v3.Process {
+func (c *Cluster) BuildKubeControllerProcess(host *hosts.Host, prefixPath string, svcOptionData map[string]*v3.KubernetesServicesOptions) v3.Process {
 	Command := []string{
 		c.getRKEToolsEntryPoint(),
 		"kube-controller-manager",
@@ -309,13 +317,7 @@ func (c *Cluster) BuildKubeControllerProcess(host *hosts.Host, prefixPath string
 			c.Services.KubeController.ExtraEnv,
 			fmt.Sprintf("%s=%s", CloudConfigSumEnv, getCloudConfigChecksum(c.CloudConfigFile)))
 	}
-	var serviceOptions v3.KubernetesServicesOptions
-	if svcOptions == nil {
-		// check if our version has specific options for this component
-		serviceOptions = c.GetKubernetesServicesOptions(host.DockerInfo.OSType)
-	} else {
-		serviceOptions = *svcOptions
-	}
+	serviceOptions := c.GetKubernetesServicesOptions(host.DockerInfo.OSType, svcOptionData)
 	if serviceOptions.KubeController != nil {
 		for k, v := range serviceOptions.KubeController {
 			// if the value is empty, we remove that option
@@ -374,7 +376,7 @@ func (c *Cluster) BuildKubeControllerProcess(host *hosts.Host, prefixPath string
 		}
 	}
-func (c *Cluster) BuildKubeletProcess(host *hosts.Host, prefixPath string, svcOptions *v3.KubernetesServicesOptions) v3.Process {
+func (c *Cluster) BuildKubeletProcess(host *hosts.Host, prefixPath string, svcOptionData map[string]*v3.KubernetesServicesOptions) v3.Process {
 	Command := []string{
 		c.getRKEToolsEntryPoint(),
 		"kubelet",
@@ -436,13 +438,7 @@ func (c *Cluster) BuildKubeletProcess(host *hosts.Host, prefixPath string, svcOp
 			c.Services.Kubelet.ExtraEnv,
 			fmt.Sprintf("%s=%s", KubeletDockerConfigFileEnv, path.Join(prefixPath, KubeletDockerConfigPath)))
 	}
-	var serviceOptions v3.KubernetesServicesOptions
-	if svcOptions == nil {
-		// check if our version has specific options for this component
-		serviceOptions = c.GetKubernetesServicesOptions(host.DockerInfo.OSType)
-	} else {
-		serviceOptions = *svcOptions
-	}
+	serviceOptions := c.GetKubernetesServicesOptions(host.DockerInfo.OSType, svcOptionData)
 	if serviceOptions.Kubelet != nil {
 		for k, v := range serviceOptions.Kubelet {
 			// if the value is empty, we remove that option
@@ -557,7 +553,7 @@ func (c *Cluster) BuildKubeletProcess(host *hosts.Host, prefixPath string, svcOp
 		}
 	}
-func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, prefixPath string, svcOptions *v3.KubernetesServicesOptions) v3.Process {
+func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, prefixPath string, svcOptionData map[string]*v3.KubernetesServicesOptions) v3.Process {
 	Command := []string{
 		c.getRKEToolsEntryPoint(),
 		"kube-proxy",
@@ -578,13 +574,7 @@ func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, prefixPath string, svc
 		CommandArgs["kubeconfig"] = path.Join(prefixPath, pki.GetConfigPath(pki.KubeProxyCertName))
 	}
-	var serviceOptions v3.KubernetesServicesOptions
-	if svcOptions == nil {
-		// check if our version has specific options for this component
-		serviceOptions = c.GetKubernetesServicesOptions(host.DockerInfo.OSType)
-	} else {
-		serviceOptions = *svcOptions
-	}
+	serviceOptions := c.GetKubernetesServicesOptions(host.DockerInfo.OSType, svcOptionData)
 	if serviceOptions.Kubeproxy != nil {
 		for k, v := range serviceOptions.Kubeproxy {
 			// if the value is empty, we remove that option
@@ -722,7 +712,7 @@ func (c *Cluster) BuildProxyProcess(host *hosts.Host, prefixPath string) v3.Proc
 		}
 	}
-func (c *Cluster) BuildSchedulerProcess(host *hosts.Host, prefixPath string, svcOptions *v3.KubernetesServicesOptions) v3.Process {
+func (c *Cluster) BuildSchedulerProcess(host *hosts.Host, prefixPath string, svcOptionData map[string]*v3.KubernetesServicesOptions) v3.Process {
 	Command := []string{
 		c.getRKEToolsEntryPoint(),
 		"kube-scheduler",
@@ -736,13 +726,7 @@ func (c *Cluster) BuildSchedulerProcess(host *hosts.Host, prefixPath string, svc
 	if c.DinD {
 		CommandArgs["address"] = "0.0.0.0"
 	}
-	var serviceOptions v3.KubernetesServicesOptions
-	if svcOptions == nil {
-		// check if our version has specific options for this component
-		serviceOptions = c.GetKubernetesServicesOptions(host.DockerInfo.OSType)
-	} else {
-		serviceOptions = *svcOptions
-	}
+	serviceOptions := c.GetKubernetesServicesOptions(host.DockerInfo.OSType, svcOptionData)
 	if serviceOptions.Scheduler != nil {
 		for k, v := range serviceOptions.Scheduler {
 			// if the value is empty, we remove that option
@@ -1007,7 +991,20 @@ func BuildPortChecksFromPortList(host *hosts.Host, portList []string, proto stri
 	return portChecks
 }
-func (c *Cluster) GetKubernetesServicesOptions(osType string) v3.KubernetesServicesOptions {
+func (c *Cluster) GetKubernetesServicesOptions(osType string, data map[string]*v3.KubernetesServicesOptions) v3.KubernetesServicesOptions {
+	if osType == "windows" {
+		if svcOption, ok := data["k8s-windows-service-options"]; ok {
+			return *svcOption
+		}
+	} else {
+		if svcOption, ok := data["k8s-service-options"]; ok {
+			return *svcOption
+		}
+	}
+	return c.getDefaultKubernetesServicesOptions(osType)
+}
+func (c *Cluster) getDefaultKubernetesServicesOptions(osType string) v3.KubernetesServicesOptions {
 	var serviceOptionsTemplate map[string]v3.KubernetesServicesOptions
 	switch osType {
 	case "windows":

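To make the lookup order concrete, here is a small sketch (assumed, not part of the commit) of how the reworked GetKubernetesServicesOptions resolves options per host OS; resolveOptions and the empty option structs are placeholders:

package example

import (
	"fmt"

	"github.com/rancher/rke/cluster"
	v3 "github.com/rancher/types/apis/management.cattle.io/v3"
)

// resolveOptions demonstrates the lookup order only: Windows hosts read the
// "k8s-windows-service-options" entry, every other OS type reads
// "k8s-service-options", and a missing entry falls back to the built-in
// per-version defaults via getDefaultKubernetesServicesOptions.
func resolveOptions(c *cluster.Cluster) {
	data := map[string]interface{}{
		"k8s-service-options":         &v3.KubernetesServicesOptions{},
		"k8s-windows-service-options": &v3.KubernetesServicesOptions{},
	}
	// Same helper the CLI uses to narrow the generic metadata map.
	svcOptionData := cluster.GetServiceOptionData(data)

	linuxOpts := c.GetKubernetesServicesOptions("linux", svcOptionData)
	winOpts := c.GetKubernetesServicesOptions("windows", svcOptionData)
	fmt.Printf("linux: %+v\nwindows: %+v\n", linuxOpts, winOpts)
}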
View File

@@ -27,7 +27,7 @@ const (
 	EtcdPlaneNodesReplacedErr = "Etcd plane nodes are replaced. Stopping provisioning. Please restore your cluster from backup."
 )
-func ReconcileCluster(ctx context.Context, kubeCluster, currentCluster *Cluster, flags ExternalFlags, svcOptions *v3.KubernetesServicesOptions) error {
+func ReconcileCluster(ctx context.Context, kubeCluster, currentCluster *Cluster, flags ExternalFlags, svcOptionData map[string]*v3.KubernetesServicesOptions) error {
 	logrus.Debugf("[reconcile] currentCluster: %+v\n", currentCluster)
 	log.Infof(ctx, "[reconcile] Reconciling cluster state")
 	kubeCluster.UpdateWorkersOnly = flags.UpdateOnly
@@ -49,7 +49,7 @@ func ReconcileCluster(ctx context.Context, kubeCluster, currentCluster *Cluster,
 	syncLabels(ctx, currentCluster, kubeCluster)
 	syncNodeRoles(ctx, currentCluster, kubeCluster)
-	if err := reconcileEtcd(ctx, currentCluster, kubeCluster, kubeClient, svcOptions); err != nil {
+	if err := reconcileEtcd(ctx, currentCluster, kubeCluster, kubeClient, svcOptionData); err != nil {
 		return fmt.Errorf("Failed to reconcile etcd plane: %v", err)
 	}
@@ -175,7 +175,7 @@ func reconcileHost(ctx context.Context, toDeleteHost *hosts.Host, worker, etcd b
 	return nil
 }
-func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, kubeClient *kubernetes.Clientset, svcOptions *v3.KubernetesServicesOptions) error {
+func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, kubeClient *kubernetes.Clientset, svcOptionData map[string]*v3.KubernetesServicesOptions) error {
 	etcdToDelete := hosts.GetToDeleteHosts(currentCluster.EtcdHosts, kubeCluster.EtcdHosts, kubeCluster.InactiveHosts, false)
 	etcdToAdd := hosts.GetToAddHosts(currentCluster.EtcdHosts, kubeCluster.EtcdHosts)
 	clientCert := cert.EncodeCertPEM(currentCluster.Certificates[pki.KubeNodeCertName].Certificate)
@@ -199,10 +199,10 @@ func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, ku
 		return err
 	}
 	// handle etcd member add
-	return addEtcdMembers(ctx, currentCluster, kubeCluster, kubeClient, svcOptions, clientCert, clientKey, etcdToAdd)
+	return addEtcdMembers(ctx, currentCluster, kubeCluster, kubeClient, svcOptionData, clientCert, clientKey, etcdToAdd)
 }
-func addEtcdMembers(ctx context.Context, currentCluster, kubeCluster *Cluster, kubeClient *kubernetes.Clientset, svcOptions *v3.KubernetesServicesOptions, clientCert, clientKey []byte, etcdToAdd []*hosts.Host) error {
+func addEtcdMembers(ctx context.Context, currentCluster, kubeCluster *Cluster, kubeClient *kubernetes.Clientset, svcOptionData map[string]*v3.KubernetesServicesOptions, clientCert, clientKey []byte, etcdToAdd []*hosts.Host) error {
 	log.Infof(ctx, "[reconcile] Check etcd hosts to be added")
 	for _, etcdHost := range etcdToAdd {
 		kubeCluster.UpdateWorkersOnly = false
@@ -224,7 +224,7 @@ func addEtcdMembers(ctx context.Context, currentCluster, kubeCluster *Cluster, k
 	etcdNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
 	for _, etcdReadyHost := range kubeCluster.EtcdReadyHosts {
-		etcdNodePlanMap[etcdReadyHost.Address] = BuildRKEConfigNodePlan(ctx, kubeCluster, etcdReadyHost, etcdReadyHost.DockerInfo, svcOptions)
+		etcdNodePlanMap[etcdReadyHost.Address] = BuildRKEConfigNodePlan(ctx, kubeCluster, etcdReadyHost, etcdReadyHost.DockerInfo, svcOptionData)
 	}
 	// this will start the newly added etcd node and make sure it started correctly before restarting other node
 	// https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/runtime-configuration.md#add-a-new-member

View File

@@ -132,7 +132,7 @@ func showRKECertificatesFromCli(ctx *cli.Context) error {
 func rebuildClusterWithRotatedCertificates(ctx context.Context,
 	dialersOptions hosts.DialersOptions,
-	flags cluster.ExternalFlags, svcOptions *v3.KubernetesServicesOptions) (string, string, string, string, map[string]pki.CertificatePKI, error) {
+	flags cluster.ExternalFlags, svcOptionData map[string]*v3.KubernetesServicesOptions) (string, string, string, string, map[string]pki.CertificatePKI, error) {
 	var APIURL, caCrt, clientCert, clientKey string
 	log.Infof(ctx, "Rebuilding Kubernetes cluster with rotated certificates")
 	clusterState, err := cluster.ReadStateFile(ctx, cluster.GetStateFilePath(flags.ClusterFilePath, flags.ConfigDir))
@@ -185,7 +185,7 @@ func rebuildClusterWithRotatedCertificates(ctx context.Context,
 	}
 	if isLegacyKubeAPI {
 		log.Infof(ctx, "[controlplane] Redeploying controlplane to update kubeapi parameters")
-		if err := kubeCluster.DeployControlPlane(ctx, svcOptions); err != nil {
+		if err := kubeCluster.DeployControlPlane(ctx, svcOptionData); err != nil {
 			return APIURL, caCrt, clientCert, clientKey, nil, err
 		}
 	}

View File

@@ -88,10 +88,10 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
-	svcOptions, _ := data["k8s-service-options"].(*v3.KubernetesServicesOptions)
+	svcOptionsData := cluster.GetServiceOptionData(data)
 	// check if rotate certificates is triggered
 	if kubeCluster.RancherKubernetesEngineConfig.RotateCertificates != nil {
-		return rebuildClusterWithRotatedCertificates(ctx, dialersOptions, flags, svcOptions)
+		return rebuildClusterWithRotatedCertificates(ctx, dialersOptions, flags, svcOptionsData)
 	}
 	log.Infof(ctx, "Building Kubernetes cluster")
@@ -132,7 +132,7 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
-	err = cluster.ReconcileCluster(ctx, kubeCluster, currentCluster, flags, svcOptions)
+	err = cluster.ReconcileCluster(ctx, kubeCluster, currentCluster, flags, svcOptionsData)
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
@@ -145,7 +145,7 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
-	err = kubeCluster.DeployControlPlane(ctx, svcOptions)
+	err = kubeCluster.DeployControlPlane(ctx, svcOptionsData)
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
@@ -166,8 +166,7 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
-	windowsSvcOptions, _ := data["k8s-windows-service-options"].(*v3.KubernetesServicesOptions)
-	err = kubeCluster.DeployWorkerPlane(ctx, svcOptions, windowsSvcOptions)
+	err = kubeCluster.DeployWorkerPlane(ctx, svcOptionsData)
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}