consider service options based on hostOS info

commit 9411027476 (parent d6b29c9ccc)
@@ -89,12 +89,12 @@ const (
     SystemNamespace = "kube-system"
 )
 
-func (c *Cluster) DeployControlPlane(ctx context.Context, svcOptions *v3.KubernetesServicesOptions) error {
+func (c *Cluster) DeployControlPlane(ctx context.Context, svcOptionData map[string]*v3.KubernetesServicesOptions) error {
     // Deploy Etcd Plane
     etcdNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
     // Build etcd node plan map
     for _, etcdHost := range c.EtcdHosts {
-        etcdNodePlanMap[etcdHost.Address] = BuildRKEConfigNodePlan(ctx, c, etcdHost, etcdHost.DockerInfo, svcOptions)
+        etcdNodePlanMap[etcdHost.Address] = BuildRKEConfigNodePlan(ctx, c, etcdHost, etcdHost.DockerInfo, svcOptionData)
     }
 
     if len(c.Services.Etcd.ExternalURLs) > 0 {
@@ -109,7 +109,7 @@ func (c *Cluster) DeployControlPlane(ctx context.Context, svcOptions *v3.Kuberne
     cpNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
     // Build cp node plan map
     for _, cpHost := range c.ControlPlaneHosts {
-        cpNodePlanMap[cpHost.Address] = BuildRKEConfigNodePlan(ctx, c, cpHost, cpHost.DockerInfo, svcOptions)
+        cpNodePlanMap[cpHost.Address] = BuildRKEConfigNodePlan(ctx, c, cpHost, cpHost.DockerInfo, svcOptionData)
     }
     if err := services.RunControlPlane(ctx, c.ControlPlaneHosts,
         c.LocalConnDialerFactory,
@@ -124,17 +124,13 @@ func (c *Cluster) DeployControlPlane(ctx context.Context, svcOptions *v3.Kuberne
     return nil
 }
 
-func (c *Cluster) DeployWorkerPlane(ctx context.Context, linuxSvcOptions *v3.KubernetesServicesOptions, windowsSvcOptions *v3.KubernetesServicesOptions) error {
+func (c *Cluster) DeployWorkerPlane(ctx context.Context, svcOptionData map[string]*v3.KubernetesServicesOptions) error {
     // Deploy Worker plane
     workerNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
     // Build cp node plan map
     allHosts := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
     for _, workerHost := range allHosts {
-        svcOptions := linuxSvcOptions
-        if workerHost.DockerInfo.OSType == "windows" { // compatible with Windows
-            svcOptions = windowsSvcOptions
-        }
-        workerNodePlanMap[workerHost.Address] = BuildRKEConfigNodePlan(ctx, c, workerHost, workerHost.DockerInfo, svcOptions)
+        workerNodePlanMap[workerHost.Address] = BuildRKEConfigNodePlan(ctx, c, workerHost, workerHost.DockerInfo, svcOptionData)
     }
     if err := services.RunWorkerPlane(ctx, allHosts,
         c.LocalConnDialerFactory,
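The per-host Windows/Linux branch that DeployWorkerPlane used to carry now happens once, behind the option map, inside GetKubernetesServicesOptions (see the plan.go hunks below). A minimal runnable sketch of that selection rule, using a simplified stand-in type rather than the real v3.KubernetesServicesOptions:

package main

import "fmt"

// svcOptions is a simplified stand-in for v3.KubernetesServicesOptions.
type svcOptions struct{ KubeletFlags map[string]string }

// pickOptions mirrors the keying convention used by this commit:
// "k8s-windows-service-options" for Windows hosts, "k8s-service-options" otherwise.
func pickOptions(osType string, data map[string]*svcOptions) *svcOptions {
	if osType == "windows" {
		return data["k8s-windows-service-options"]
	}
	return data["k8s-service-options"]
}

func main() {
	data := map[string]*svcOptions{
		"k8s-service-options":         {KubeletFlags: map[string]string{"cgroup-driver": "cgroupfs"}},
		"k8s-windows-service-options": {KubeletFlags: map[string]string{"windows-service": ""}},
	}
	for _, osType := range []string{"linux", "windows"} {
		fmt.Println(osType, pickOptions(osType, data).KubeletFlags)
	}
}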
@@ -19,7 +19,7 @@ import (
     "github.com/rancher/rke/pki"
     "github.com/rancher/rke/services"
     "github.com/rancher/rke/util"
-    v3 "github.com/rancher/types/apis/management.cattle.io/v3"
+    "github.com/rancher/types/apis/management.cattle.io/v3"
     "github.com/sirupsen/logrus"
 )
 
@@ -54,7 +54,20 @@ const (
 
 var admissionControlOptionNames = []string{"enable-admission-plugins", "admission-control"}
 
-func GeneratePlan(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfig, hostsInfoMap map[string]types.Info) (v3.RKEPlan, error) {
+func GetServiceOptionData(data map[string]interface{}) map[string]*v3.KubernetesServicesOptions {
+    svcOptionsData := map[string]*v3.KubernetesServicesOptions{}
+    k8sServiceOptions, _ := data["k8s-service-options"].(*v3.KubernetesServicesOptions)
+    if k8sServiceOptions != nil {
+        svcOptionsData["k8s-service-options"] = k8sServiceOptions
+    }
+    k8sWServiceOptions, _ := data["k8s-windows-service-options"].(*v3.KubernetesServicesOptions)
+    if k8sWServiceOptions != nil {
+        svcOptionsData["k8s-windows-service-options"] = k8sWServiceOptions
+    }
+    return svcOptionsData
+}
+
+func GeneratePlan(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfig, hostsInfoMap map[string]types.Info, data map[string]interface{}) (v3.RKEPlan, error) {
     clusterPlan := v3.RKEPlan{}
     myCluster, err := InitClusterObject(ctx, rkeConfig, ExternalFlags{})
     if err != nil {
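GetServiceOptionData leans on Go's comma-ok type assertion: a key that is absent from the loosely typed data map, or present with the wrong dynamic type, yields a nil pointer instead of panicking, so the entry is simply skipped. A self-contained sketch of that behavior, with svcOptions as a stand-in for the real struct:

package main

import "fmt"

type svcOptions struct{}

func main() {
	data := map[string]interface{}{
		"k8s-service-options": &svcOptions{},
		"unrelated":           42, // wrong type on purpose
	}
	// comma-ok assertions: failure gives the zero value (nil), never a panic
	got, _ := data["k8s-service-options"].(*svcOptions)
	missing, _ := data["k8s-windows-service-options"].(*svcOptions)
	wrongType, _ := data["unrelated"].(*svcOptions)
	fmt.Println(got != nil, missing == nil, wrongType == nil) // true true true
}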
@@ -62,21 +75,22 @@ func GeneratePlan(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConf
     }
     // rkeConfig.Nodes are already unique. But they don't have role flags. So I will use the parsed cluster.Hosts to make use of the role flags.
     uniqHosts := hosts.GetUniqueHostList(myCluster.EtcdHosts, myCluster.ControlPlaneHosts, myCluster.WorkerHosts)
+    svcOptionData := GetServiceOptionData(data)
     for _, host := range uniqHosts {
         host.DockerInfo = hostsInfoMap[host.Address]
-        clusterPlan.Nodes = append(clusterPlan.Nodes, BuildRKEConfigNodePlan(ctx, myCluster, host, hostsInfoMap[host.Address], nil))
+        clusterPlan.Nodes = append(clusterPlan.Nodes, BuildRKEConfigNodePlan(ctx, myCluster, host, hostsInfoMap[host.Address], svcOptionData))
     }
     return clusterPlan, nil
 }
 
-func BuildRKEConfigNodePlan(ctx context.Context, myCluster *Cluster, host *hosts.Host, hostDockerInfo types.Info, svcOptions *v3.KubernetesServicesOptions) v3.RKEConfigNodePlan {
+func BuildRKEConfigNodePlan(ctx context.Context, myCluster *Cluster, host *hosts.Host, hostDockerInfo types.Info, svcOptionData map[string]*v3.KubernetesServicesOptions) v3.RKEConfigNodePlan {
     prefixPath := hosts.GetPrefixPath(hostDockerInfo.OperatingSystem, myCluster.PrefixPath)
     processes := map[string]v3.Process{}
     portChecks := []v3.PortCheck{}
     // Everybody gets a sidecar and a kubelet..
     processes[services.SidekickContainerName] = myCluster.BuildSidecarProcess(host, prefixPath)
-    processes[services.KubeletContainerName] = myCluster.BuildKubeletProcess(host, prefixPath, svcOptions)
-    processes[services.KubeproxyContainerName] = myCluster.BuildKubeProxyProcess(host, prefixPath, svcOptions)
+    processes[services.KubeletContainerName] = myCluster.BuildKubeletProcess(host, prefixPath, svcOptionData)
+    processes[services.KubeproxyContainerName] = myCluster.BuildKubeProxyProcess(host, prefixPath, svcOptionData)
 
     portChecks = append(portChecks, BuildPortChecksFromPortList(host, WorkerPortList, ProtocolTCP)...)
     // Do we need an nginxProxy for this one ?
@@ -84,9 +98,9 @@ func BuildRKEConfigNodePlan(ctx context.Context, myCluster *Cluster, host *hosts
         processes[services.NginxProxyContainerName] = myCluster.BuildProxyProcess(host, prefixPath)
     }
     if host.IsControl {
-        processes[services.KubeAPIContainerName] = myCluster.BuildKubeAPIProcess(host, prefixPath, svcOptions)
-        processes[services.KubeControllerContainerName] = myCluster.BuildKubeControllerProcess(host, prefixPath, svcOptions)
-        processes[services.SchedulerContainerName] = myCluster.BuildSchedulerProcess(host, prefixPath, svcOptions)
+        processes[services.KubeAPIContainerName] = myCluster.BuildKubeAPIProcess(host, prefixPath, svcOptionData)
+        processes[services.KubeControllerContainerName] = myCluster.BuildKubeControllerProcess(host, prefixPath, svcOptionData)
+        processes[services.SchedulerContainerName] = myCluster.BuildSchedulerProcess(host, prefixPath, svcOptionData)
 
         portChecks = append(portChecks, BuildPortChecksFromPortList(host, ControlPlanePortList, ProtocolTCP)...)
     }
@@ -143,7 +157,7 @@ func osLimitationFilter(osType string, processes map[string]v3.Process) map[stri
     return processes
 }
 
-func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string, svcOptions *v3.KubernetesServicesOptions) v3.Process {
+func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string, svcOptionData map[string]*v3.KubernetesServicesOptions) v3.Process {
     // check if external etcd is used
     etcdConnectionString := services.GetEtcdConnString(c.EtcdHosts, host.InternalAddress)
     etcdPathPrefix := EtcdPathPrefix
@@ -196,13 +210,7 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string, svcOp
             c.Services.KubeAPI.ExtraEnv,
             fmt.Sprintf("%s=%s", CloudConfigSumEnv, getCloudConfigChecksum(c.CloudConfigFile)))
     }
-    var serviceOptions v3.KubernetesServicesOptions
-    if svcOptions == nil {
-        // check if our version has specific options for this component
-        serviceOptions = c.GetKubernetesServicesOptions(host.DockerInfo.OSType)
-    } else {
-        serviceOptions = *svcOptions
-    }
+    serviceOptions := c.GetKubernetesServicesOptions(host.DockerInfo.OSType, svcOptionData)
     if serviceOptions.KubeAPI != nil {
         for k, v := range serviceOptions.KubeAPI {
             // if the value is empty, we remove that option
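The context below this hunk applies the resolved per-component options over the default command arguments, and the comment spells out the rule: an empty value removes the flag rather than setting it. A hedged sketch of that merge rule, with illustrative flag names not taken from the real defaults:

package main

import "fmt"

// applyServiceOptions merges version-specific overrides over default args;
// an empty override value deletes the flag entirely.
func applyServiceOptions(commandArgs, overrides map[string]string) {
	for k, v := range overrides {
		if v == "" {
			delete(commandArgs, k) // empty value removes the option
			continue
		}
		commandArgs[k] = v
	}
}

func main() {
	args := map[string]string{"insecure-port": "0", "profiling": "false"}
	applyServiceOptions(args, map[string]string{"profiling": "", "v": "2"})
	fmt.Println(args) // map[insecure-port:0 v:2]
}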
@@ -283,7 +291,7 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string, svcOp
         }
     }
 
-func (c *Cluster) BuildKubeControllerProcess(host *hosts.Host, prefixPath string, svcOptions *v3.KubernetesServicesOptions) v3.Process {
+func (c *Cluster) BuildKubeControllerProcess(host *hosts.Host, prefixPath string, svcOptionData map[string]*v3.KubernetesServicesOptions) v3.Process {
     Command := []string{
         c.getRKEToolsEntryPoint(),
         "kube-controller-manager",
@@ -309,13 +317,7 @@ func (c *Cluster) BuildKubeControllerProcess(host *hosts.Host, prefixPath string
             c.Services.KubeController.ExtraEnv,
             fmt.Sprintf("%s=%s", CloudConfigSumEnv, getCloudConfigChecksum(c.CloudConfigFile)))
     }
-    var serviceOptions v3.KubernetesServicesOptions
-    if svcOptions == nil {
-        // check if our version has specific options for this component
-        serviceOptions = c.GetKubernetesServicesOptions(host.DockerInfo.OSType)
-    } else {
-        serviceOptions = *svcOptions
-    }
+    serviceOptions := c.GetKubernetesServicesOptions(host.DockerInfo.OSType, svcOptionData)
     if serviceOptions.KubeController != nil {
         for k, v := range serviceOptions.KubeController {
             // if the value is empty, we remove that option
@@ -374,7 +376,7 @@ func (c *Cluster) BuildKubeControllerProcess(host *hosts.Host, prefixPath string
         }
     }
 
-func (c *Cluster) BuildKubeletProcess(host *hosts.Host, prefixPath string, svcOptions *v3.KubernetesServicesOptions) v3.Process {
+func (c *Cluster) BuildKubeletProcess(host *hosts.Host, prefixPath string, svcOptionData map[string]*v3.KubernetesServicesOptions) v3.Process {
     Command := []string{
         c.getRKEToolsEntryPoint(),
         "kubelet",
@@ -436,13 +438,7 @@ func (c *Cluster) BuildKubeletProcess(host *hosts.Host, prefixPath string, svcOp
             c.Services.Kubelet.ExtraEnv,
             fmt.Sprintf("%s=%s", KubeletDockerConfigFileEnv, path.Join(prefixPath, KubeletDockerConfigPath)))
     }
-    var serviceOptions v3.KubernetesServicesOptions
-    if svcOptions == nil {
-        // check if our version has specific options for this component
-        serviceOptions = c.GetKubernetesServicesOptions(host.DockerInfo.OSType)
-    } else {
-        serviceOptions = *svcOptions
-    }
+    serviceOptions := c.GetKubernetesServicesOptions(host.DockerInfo.OSType, svcOptionData)
     if serviceOptions.Kubelet != nil {
         for k, v := range serviceOptions.Kubelet {
             // if the value is empty, we remove that option
@@ -557,7 +553,7 @@ func (c *Cluster) BuildKubeletProcess(host *hosts.Host, prefixPath string, svcOp
         }
     }
 
-func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, prefixPath string, svcOptions *v3.KubernetesServicesOptions) v3.Process {
+func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, prefixPath string, svcOptionData map[string]*v3.KubernetesServicesOptions) v3.Process {
     Command := []string{
         c.getRKEToolsEntryPoint(),
         "kube-proxy",
@@ -578,13 +574,7 @@ func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, prefixPath string, svc
         CommandArgs["kubeconfig"] = path.Join(prefixPath, pki.GetConfigPath(pki.KubeProxyCertName))
     }
 
-    var serviceOptions v3.KubernetesServicesOptions
-    if svcOptions == nil {
-        // check if our version has specific options for this component
-        serviceOptions = c.GetKubernetesServicesOptions(host.DockerInfo.OSType)
-    } else {
-        serviceOptions = *svcOptions
-    }
+    serviceOptions := c.GetKubernetesServicesOptions(host.DockerInfo.OSType, svcOptionData)
     if serviceOptions.Kubeproxy != nil {
         for k, v := range serviceOptions.Kubeproxy {
             // if the value is empty, we remove that option
@@ -722,7 +712,7 @@ func (c *Cluster) BuildProxyProcess(host *hosts.Host, prefixPath string) v3.Proc
         }
     }
 
-func (c *Cluster) BuildSchedulerProcess(host *hosts.Host, prefixPath string, svcOptions *v3.KubernetesServicesOptions) v3.Process {
+func (c *Cluster) BuildSchedulerProcess(host *hosts.Host, prefixPath string, svcOptionData map[string]*v3.KubernetesServicesOptions) v3.Process {
     Command := []string{
         c.getRKEToolsEntryPoint(),
         "kube-scheduler",
@@ -736,13 +726,7 @@ func (c *Cluster) BuildSchedulerProcess(host *hosts.Host, prefixPath string, svc
     if c.DinD {
         CommandArgs["address"] = "0.0.0.0"
     }
-    var serviceOptions v3.KubernetesServicesOptions
-    if svcOptions == nil {
-        // check if our version has specific options for this component
-        serviceOptions = c.GetKubernetesServicesOptions(host.DockerInfo.OSType)
-    } else {
-        serviceOptions = *svcOptions
-    }
+    serviceOptions := c.GetKubernetesServicesOptions(host.DockerInfo.OSType, svcOptionData)
     if serviceOptions.Scheduler != nil {
         for k, v := range serviceOptions.Scheduler {
             // if the value is empty, we remove that option
@@ -1007,7 +991,20 @@ func BuildPortChecksFromPortList(host *hosts.Host, portList []string, proto stri
     return portChecks
 }
 
-func (c *Cluster) GetKubernetesServicesOptions(osType string) v3.KubernetesServicesOptions {
+func (c *Cluster) GetKubernetesServicesOptions(osType string, data map[string]*v3.KubernetesServicesOptions) v3.KubernetesServicesOptions {
+    if osType == "windows" {
+        if svcOption, ok := data["k8s-windows-service-options"]; ok {
+            return *svcOption
+        }
+    } else {
+        if svcOption, ok := data["k8s-service-options"]; ok {
+            return *svcOption
+        }
+    }
+    return c.getDefaultKubernetesServicesOptions(osType)
+}
+
+func (c *Cluster) getDefaultKubernetesServicesOptions(osType string) v3.KubernetesServicesOptions {
     var serviceOptionsTemplate map[string]v3.KubernetesServicesOptions
     switch osType {
     case "windows":
@@ -27,7 +27,7 @@ const (
     EtcdPlaneNodesReplacedErr = "Etcd plane nodes are replaced. Stopping provisioning. Please restore your cluster from backup."
 )
 
-func ReconcileCluster(ctx context.Context, kubeCluster, currentCluster *Cluster, flags ExternalFlags, svcOptions *v3.KubernetesServicesOptions) error {
+func ReconcileCluster(ctx context.Context, kubeCluster, currentCluster *Cluster, flags ExternalFlags, svcOptionData map[string]*v3.KubernetesServicesOptions) error {
     logrus.Debugf("[reconcile] currentCluster: %+v\n", currentCluster)
     log.Infof(ctx, "[reconcile] Reconciling cluster state")
     kubeCluster.UpdateWorkersOnly = flags.UpdateOnly
@@ -49,7 +49,7 @@ func ReconcileCluster(ctx context.Context, kubeCluster, currentCluster *Cluster,
     syncLabels(ctx, currentCluster, kubeCluster)
     syncNodeRoles(ctx, currentCluster, kubeCluster)
 
-    if err := reconcileEtcd(ctx, currentCluster, kubeCluster, kubeClient, svcOptions); err != nil {
+    if err := reconcileEtcd(ctx, currentCluster, kubeCluster, kubeClient, svcOptionData); err != nil {
         return fmt.Errorf("Failed to reconcile etcd plane: %v", err)
     }
 
@@ -175,7 +175,7 @@ func reconcileHost(ctx context.Context, toDeleteHost *hosts.Host, worker, etcd b
     return nil
 }
 
-func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, kubeClient *kubernetes.Clientset, svcOptions *v3.KubernetesServicesOptions) error {
+func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, kubeClient *kubernetes.Clientset, svcOptionData map[string]*v3.KubernetesServicesOptions) error {
     etcdToDelete := hosts.GetToDeleteHosts(currentCluster.EtcdHosts, kubeCluster.EtcdHosts, kubeCluster.InactiveHosts, false)
     etcdToAdd := hosts.GetToAddHosts(currentCluster.EtcdHosts, kubeCluster.EtcdHosts)
     clientCert := cert.EncodeCertPEM(currentCluster.Certificates[pki.KubeNodeCertName].Certificate)
@@ -199,10 +199,10 @@ func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, ku
         return err
     }
     // handle etcd member add
-    return addEtcdMembers(ctx, currentCluster, kubeCluster, kubeClient, svcOptions, clientCert, clientKey, etcdToAdd)
+    return addEtcdMembers(ctx, currentCluster, kubeCluster, kubeClient, svcOptionData, clientCert, clientKey, etcdToAdd)
 }
 
-func addEtcdMembers(ctx context.Context, currentCluster, kubeCluster *Cluster, kubeClient *kubernetes.Clientset, svcOptions *v3.KubernetesServicesOptions, clientCert, clientKey []byte, etcdToAdd []*hosts.Host) error {
+func addEtcdMembers(ctx context.Context, currentCluster, kubeCluster *Cluster, kubeClient *kubernetes.Clientset, svcOptionData map[string]*v3.KubernetesServicesOptions, clientCert, clientKey []byte, etcdToAdd []*hosts.Host) error {
     log.Infof(ctx, "[reconcile] Check etcd hosts to be added")
     for _, etcdHost := range etcdToAdd {
         kubeCluster.UpdateWorkersOnly = false
@@ -224,7 +224,7 @@ func addEtcdMembers(ctx context.Context, currentCluster, kubeCluster *Cluster, k
 
     etcdNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
     for _, etcdReadyHost := range kubeCluster.EtcdReadyHosts {
-        etcdNodePlanMap[etcdReadyHost.Address] = BuildRKEConfigNodePlan(ctx, kubeCluster, etcdReadyHost, etcdReadyHost.DockerInfo, svcOptions)
+        etcdNodePlanMap[etcdReadyHost.Address] = BuildRKEConfigNodePlan(ctx, kubeCluster, etcdReadyHost, etcdReadyHost.DockerInfo, svcOptionData)
     }
     // this will start the newly added etcd node and make sure it started correctly before restarting other node
     // https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/runtime-configuration.md#add-a-new-member
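The two comments above, and the linked etcd runtime-configuration document, describe the constraint this code honors: grow the etcd cluster one member at a time, confirming each new member started correctly before restarting any other node. A hedged sketch of that gate, with a hypothetical checkHealth standing in for RKE's real health probes:

package main

import (
	"errors"
	"fmt"
)

// checkHealth is hypothetical; the real checks probe the etcd client port.
func checkHealth(member string) error {
	if member == "" {
		return errors.New("member did not come up")
	}
	return nil
}

// addMembersOneByOne gates on each member's health before proceeding,
// per the add-a-new-member guidance linked above.
func addMembersOneByOne(members []string) error {
	for _, m := range members {
		// (add m to the cluster here, then verify it started correctly)
		if err := checkHealth(m); err != nil {
			return fmt.Errorf("stopping etcd reconcile at %q: %w", m, err)
		}
	}
	return nil
}

func main() {
	fmt.Println(addMembersOneByOne([]string{"etcd-1", "etcd-2"}))
}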
@@ -132,7 +132,7 @@ func showRKECertificatesFromCli(ctx *cli.Context) error {
 
 func rebuildClusterWithRotatedCertificates(ctx context.Context,
     dialersOptions hosts.DialersOptions,
-    flags cluster.ExternalFlags, svcOptions *v3.KubernetesServicesOptions) (string, string, string, string, map[string]pki.CertificatePKI, error) {
+    flags cluster.ExternalFlags, svcOptionData map[string]*v3.KubernetesServicesOptions) (string, string, string, string, map[string]pki.CertificatePKI, error) {
     var APIURL, caCrt, clientCert, clientKey string
     log.Infof(ctx, "Rebuilding Kubernetes cluster with rotated certificates")
     clusterState, err := cluster.ReadStateFile(ctx, cluster.GetStateFilePath(flags.ClusterFilePath, flags.ConfigDir))
@@ -185,7 +185,7 @@ func rebuildClusterWithRotatedCertificates(ctx context.Context,
     }
     if isLegacyKubeAPI {
         log.Infof(ctx, "[controlplane] Redeploying controlplane to update kubeapi parameters")
-        if err := kubeCluster.DeployControlPlane(ctx, svcOptions); err != nil {
+        if err := kubeCluster.DeployControlPlane(ctx, svcOptionData); err != nil {
             return APIURL, caCrt, clientCert, clientKey, nil, err
         }
     }
cmd/up.go
@@ -88,10 +88,10 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
     if err != nil {
         return APIURL, caCrt, clientCert, clientKey, nil, err
     }
-    svcOptions, _ := data["k8s-service-options"].(*v3.KubernetesServicesOptions)
+    svcOptionsData := cluster.GetServiceOptionData(data)
     // check if rotate certificates is triggered
     if kubeCluster.RancherKubernetesEngineConfig.RotateCertificates != nil {
-        return rebuildClusterWithRotatedCertificates(ctx, dialersOptions, flags, svcOptions)
+        return rebuildClusterWithRotatedCertificates(ctx, dialersOptions, flags, svcOptionsData)
     }
 
     log.Infof(ctx, "Building Kubernetes cluster")
@@ -132,7 +132,7 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
         return APIURL, caCrt, clientCert, clientKey, nil, err
     }
 
-    err = cluster.ReconcileCluster(ctx, kubeCluster, currentCluster, flags, svcOptions)
+    err = cluster.ReconcileCluster(ctx, kubeCluster, currentCluster, flags, svcOptionsData)
     if err != nil {
         return APIURL, caCrt, clientCert, clientKey, nil, err
     }
@@ -145,7 +145,7 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
         return APIURL, caCrt, clientCert, clientKey, nil, err
     }
 
-    err = kubeCluster.DeployControlPlane(ctx, svcOptions)
+    err = kubeCluster.DeployControlPlane(ctx, svcOptionsData)
     if err != nil {
         return APIURL, caCrt, clientCert, clientKey, nil, err
     }
@@ -166,8 +166,7 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
         return APIURL, caCrt, clientCert, clientKey, nil, err
     }
 
-    windowsSvcOptions, _ := data["k8s-windows-service-options"].(*v3.KubernetesServicesOptions)
-    err = kubeCluster.DeployWorkerPlane(ctx, svcOptions, windowsSvcOptions)
+    err = kubeCluster.DeployWorkerPlane(ctx, svcOptionsData)
     if err != nil {
         return APIURL, caCrt, clientCert, clientKey, nil, err
     }