Mirror of https://github.com/rancher/rke.git (synced 2025-05-06 15:27:56 +00:00)
rancher pass serviceoptions and addon templates to rke
This commit is contained in:
parent c191ed6202
commit 116b47b025
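This commit threads two caller-supplied inputs through RKE so that Rancher can drive provisioning with its own metadata: a *v3.KubernetesServicesOptions pointer that overrides the version-derived options for the core Kubernetes services, and a data map[string]interface{} that can carry addon and network template overrides. A minimal sketch of the new call shape, assuming a hypothetical wrapper function; only the "k8s-service-options" key appears in this commit:

    package main

    import (
        "context"

        "github.com/rancher/rke/cluster"
        "github.com/rancher/rke/cmd"
        "github.com/rancher/rke/hosts"
        v3 "github.com/rancher/types/apis/management.cattle.io/v3"
    )

    // upWithInjectedOptions is hypothetical; it shows how an embedding
    // caller such as Rancher could pass service options into ClusterUp.
    func upWithInjectedOptions(flags cluster.ExternalFlags, svcOptions *v3.KubernetesServicesOptions) error {
        data := map[string]interface{}{
            // consumed in cmd/up.go via a type assertion (see below)
            "k8s-service-options": svcOptions,
        }
        _, _, _, _, _, err := cmd.ClusterUp(context.Background(), hosts.DialersOptions{}, flags, data)
        return err
    }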
@@ -5,6 +5,6 @@ import (
 	"github.com/rancher/rke/templates"
 )
 
-func GetCoreDNSManifest(CoreDNSConfig interface{}) (string, error) {
-	return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.CoreDNS), CoreDNSConfig)
+func GetCoreDNSManifest(CoreDNSConfig interface{}, data map[string]interface{}) (string, error) {
+	return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.CoreDNS, data), CoreDNSConfig)
 }
@@ -5,6 +5,6 @@ import (
 	"github.com/rancher/rke/templates"
 )
 
-func GetNginxIngressManifest(IngressConfig interface{}) (string, error) {
-	return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.NginxIngress), IngressConfig)
+func GetNginxIngressManifest(IngressConfig interface{}, data map[string]interface{}) (string, error) {
+	return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.NginxIngress, data), IngressConfig)
 }
@@ -5,7 +5,7 @@ import (
 	"github.com/rancher/rke/templates"
 )
 
-func GetKubeDNSManifest(KubeDNSConfig interface{}) (string, error) {
+func GetKubeDNSManifest(KubeDNSConfig interface{}, data map[string]interface{}) (string, error) {
 
-	return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.KubeDNS), KubeDNSConfig)
+	return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.KubeDNS, data), KubeDNSConfig)
 }
@@ -5,7 +5,7 @@ import (
 	"github.com/rancher/rke/templates"
 )
 
-func GetMetricsServerManifest(MetricsServerConfig interface{}) (string, error) {
+func GetMetricsServerManifest(MetricsServerConfig interface{}, data map[string]interface{}) (string, error) {
 
-	return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.MetricsServer), MetricsServerConfig)
+	return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.MetricsServer, data), MetricsServerConfig)
 }
@@ -97,21 +97,21 @@ func getAddonResourceName(addon string) string {
 	return AddonResourceName
 }
 
-func (c *Cluster) deployK8sAddOns(ctx context.Context) error {
-	if err := c.deployDNS(ctx); err != nil {
+func (c *Cluster) deployK8sAddOns(ctx context.Context, data map[string]interface{}) error {
+	if err := c.deployDNS(ctx, data); err != nil {
 		if err, ok := err.(*addonError); ok && err.isCritical {
 			return err
 		}
 		log.Warnf(ctx, "Failed to deploy DNS addon execute job for provider %s: %v", c.DNS.Provider, err)
 
 	}
-	if err := c.deployMetricServer(ctx); err != nil {
+	if err := c.deployMetricServer(ctx, data); err != nil {
 		if err, ok := err.(*addonError); ok && err.isCritical {
 			return err
 		}
 		log.Warnf(ctx, "Failed to deploy addon execute job [%s]: %v", MetricsServerAddonResourceName, err)
 	}
-	if err := c.deployIngress(ctx); err != nil {
+	if err := c.deployIngress(ctx, data); err != nil {
 		if err, ok := err.(*addonError); ok && err.isCritical {
 			return err
 		}
@@ -246,7 +246,7 @@ func getAddonFromURL(yamlURL string) ([]byte, error) {
 
 }
 
-func (c *Cluster) deployKubeDNS(ctx context.Context) error {
+func (c *Cluster) deployKubeDNS(ctx context.Context, data map[string]interface{}) error {
 	log.Infof(ctx, "[addons] Setting up %s", c.DNS.Provider)
 	KubeDNSConfig := KubeDNSOptions{
 		KubeDNSImage: c.SystemImages.KubeDNS,
@@ -261,7 +261,7 @@ func (c *Cluster) deployKubeDNS(ctx context.Context) error {
 		StubDomains:  c.DNS.StubDomains,
 		NodeSelector: c.DNS.NodeSelector,
 	}
-	kubeDNSYaml, err := addons.GetKubeDNSManifest(KubeDNSConfig)
+	kubeDNSYaml, err := addons.GetKubeDNSManifest(KubeDNSConfig, data)
 	if err != nil {
 		return err
 	}
@@ -272,7 +272,7 @@ func (c *Cluster) deployKubeDNS(ctx context.Context) error {
 	return nil
 }
 
-func (c *Cluster) deployCoreDNS(ctx context.Context) error {
+func (c *Cluster) deployCoreDNS(ctx context.Context, data map[string]interface{}) error {
 	log.Infof(ctx, "[addons] Setting up %s", c.DNS.Provider)
 	CoreDNSConfig := CoreDNSOptions{
 		CoreDNSImage: c.SystemImages.CoreDNS,
@@ -284,7 +284,7 @@ func (c *Cluster) deployCoreDNS(ctx context.Context) error {
 		ReverseCIDRs: c.DNS.ReverseCIDRs,
 		NodeSelector: c.DNS.NodeSelector,
 	}
-	coreDNSYaml, err := addons.GetCoreDNSManifest(CoreDNSConfig)
+	coreDNSYaml, err := addons.GetCoreDNSManifest(CoreDNSConfig, data)
 	if err != nil {
 		return err
 	}
@@ -295,7 +295,7 @@ func (c *Cluster) deployCoreDNS(ctx context.Context) error {
 	return nil
 }
 
-func (c *Cluster) deployMetricServer(ctx context.Context) error {
+func (c *Cluster) deployMetricServer(ctx context.Context, data map[string]interface{}) error {
 	if c.Monitoring.Provider == "none" {
 		addonJobExists, err := addons.AddonJobExists(MetricsServerAddonJobName, c.LocalKubeConfigPath, c.K8sWrapTransport)
 		if err != nil {
@@ -323,7 +323,7 @@ func (c *Cluster) deployMetricServer(ctx context.Context) error {
 		Options: c.Monitoring.Options,
 		Version: util.GetTagMajorVersion(versionTag),
 	}
-	metricsYaml, err := addons.GetMetricsServerManifest(MetricsServerConfig)
+	metricsYaml, err := addons.GetMetricsServerManifest(MetricsServerConfig, data)
 	if err != nil {
 		return err
 	}
@@ -445,7 +445,7 @@ func (c *Cluster) ApplySystemAddonExecuteJob(addonJob string, addonUpdated bool)
 	return nil
 }
 
-func (c *Cluster) deployIngress(ctx context.Context) error {
+func (c *Cluster) deployIngress(ctx context.Context, data map[string]interface{}) error {
 	if c.Ingress.Provider == "none" {
 		addonJobExists, err := addons.AddonJobExists(IngressAddonJobName, c.LocalKubeConfigPath, c.K8sWrapTransport)
 		if err != nil {
@@ -483,7 +483,7 @@ func (c *Cluster) deployIngress(ctx context.Context) error {
 	}
 
 	// Currently only deploying nginx ingress controller
-	ingressYaml, err := addons.GetNginxIngressManifest(ingressConfig)
+	ingressYaml, err := addons.GetNginxIngressManifest(ingressConfig, data)
 	if err != nil {
 		return err
 	}
@@ -510,7 +510,7 @@ func (c *Cluster) removeDNSProvider(ctx context.Context, dnsprovider string) err
 	return nil
 }
 
-func (c *Cluster) deployDNS(ctx context.Context) error {
+func (c *Cluster) deployDNS(ctx context.Context, data map[string]interface{}) error {
 	for _, dnsprovider := range DNSProviders {
 		if strings.EqualFold(dnsprovider, c.DNS.Provider) {
 			continue
@@ -521,7 +521,7 @@ func (c *Cluster) deployDNS(ctx context.Context) error {
 	}
 	switch DNSProvider := c.DNS.Provider; DNSProvider {
 	case DefaultDNSProvider:
-		if err := c.deployKubeDNS(ctx); err != nil {
+		if err := c.deployKubeDNS(ctx, data); err != nil {
 			if err, ok := err.(*addonError); ok && err.isCritical {
 				return err
 			}
@@ -530,7 +530,7 @@ func (c *Cluster) deployDNS(ctx context.Context) error {
 		log.Infof(ctx, "[dns] DNS provider %s deployed successfully", c.DNS.Provider)
 		return nil
 	case CoreDNSProvider:
-		if err := c.deployCoreDNS(ctx); err != nil {
+		if err := c.deployCoreDNS(ctx, data); err != nil {
 			if err, ok := err.(*addonError); ok && err.isCritical {
 				return err
 			}
@@ -84,12 +84,12 @@ const (
 	serviceAccountTokenFileParam = "service-account-key-file"
 )
 
-func (c *Cluster) DeployControlPlane(ctx context.Context) error {
+func (c *Cluster) DeployControlPlane(ctx context.Context, svcOptions *v3.KubernetesServicesOptions) error {
 	// Deploy Etcd Plane
 	etcdNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
 	// Build etcd node plan map
 	for _, etcdHost := range c.EtcdHosts {
-		etcdNodePlanMap[etcdHost.Address] = BuildRKEConfigNodePlan(ctx, c, etcdHost, etcdHost.DockerInfo)
+		etcdNodePlanMap[etcdHost.Address] = BuildRKEConfigNodePlan(ctx, c, etcdHost, etcdHost.DockerInfo, svcOptions)
 	}
 
 	if len(c.Services.Etcd.ExternalURLs) > 0 {
@@ -104,7 +104,7 @@ func (c *Cluster) DeployControlPlane(ctx context.Context) error {
 	cpNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
 	// Build cp node plan map
 	for _, cpHost := range c.ControlPlaneHosts {
-		cpNodePlanMap[cpHost.Address] = BuildRKEConfigNodePlan(ctx, c, cpHost, cpHost.DockerInfo)
+		cpNodePlanMap[cpHost.Address] = BuildRKEConfigNodePlan(ctx, c, cpHost, cpHost.DockerInfo, svcOptions)
 	}
 	if err := services.RunControlPlane(ctx, c.ControlPlaneHosts,
 		c.LocalConnDialerFactory,
@@ -119,13 +119,13 @@ func (c *Cluster) DeployControlPlane(ctx context.Context) error {
 	return nil
 }
 
-func (c *Cluster) DeployWorkerPlane(ctx context.Context) error {
+func (c *Cluster) DeployWorkerPlane(ctx context.Context, svcOptions *v3.KubernetesServicesOptions) error {
 	// Deploy Worker plane
 	workerNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
 	// Build cp node plan map
 	allHosts := hosts.GetUniqueHostList(c.EtcdHosts, c.ControlPlaneHosts, c.WorkerHosts)
 	for _, workerHost := range allHosts {
-		workerNodePlanMap[workerHost.Address] = BuildRKEConfigNodePlan(ctx, c, workerHost, workerHost.DockerInfo)
+		workerNodePlanMap[workerHost.Address] = BuildRKEConfigNodePlan(ctx, c, workerHost, workerHost.DockerInfo, svcOptions)
 	}
 	if err := services.RunWorkerPlane(ctx, allHosts,
 		c.LocalConnDialerFactory,
@@ -324,8 +324,8 @@ func ApplyAuthzResources(ctx context.Context, rkeConfig v3.RancherKubernetesEngi
 	return nil
 }
 
-func (c *Cluster) deployAddons(ctx context.Context) error {
-	if err := c.deployK8sAddOns(ctx); err != nil {
+func (c *Cluster) deployAddons(ctx context.Context, data map[string]interface{}) error {
+	if err := c.deployK8sAddOns(ctx, data); err != nil {
 		return err
 	}
 	if err := c.deployUserAddOns(ctx); err != nil {
@@ -453,6 +453,7 @@ func ConfigureCluster(
 	crtBundle map[string]pki.CertificatePKI,
 	flags ExternalFlags,
 	dailersOptions hosts.DialersOptions,
+	data map[string]interface{},
 	useKubectl bool) error {
 	// dialer factories are not needed here since we are not uses docker only k8s jobs
 	kubeCluster, err := InitClusterObject(ctx, &rkeConfig, flags)
@@ -465,13 +466,13 @@ func ConfigureCluster(
 	kubeCluster.UseKubectlDeploy = useKubectl
 	if len(kubeCluster.ControlPlaneHosts) > 0 {
 		kubeCluster.Certificates = crtBundle
-		if err := kubeCluster.deployNetworkPlugin(ctx); err != nil {
+		if err := kubeCluster.deployNetworkPlugin(ctx, data); err != nil {
 			if err, ok := err.(*addonError); ok && err.isCritical {
 				return err
 			}
 			log.Warnf(ctx, "Failed to deploy addon execute job [%s]: %v", NetworkPluginResourceName, err)
 		}
-		if err := kubeCluster.deployAddons(ctx); err != nil {
+		if err := kubeCluster.deployAddons(ctx, data); err != nil {
 			return err
 		}
 	}
@@ -123,17 +123,17 @@ var EtcdClientPortList = []string{
 	EtcdPort1,
 }
 
-func (c *Cluster) deployNetworkPlugin(ctx context.Context) error {
+func (c *Cluster) deployNetworkPlugin(ctx context.Context, data map[string]interface{}) error {
 	log.Infof(ctx, "[network] Setting up network plugin: %s", c.Network.Plugin)
 	switch c.Network.Plugin {
 	case FlannelNetworkPlugin:
-		return c.doFlannelDeploy(ctx)
+		return c.doFlannelDeploy(ctx, data)
 	case CalicoNetworkPlugin:
-		return c.doCalicoDeploy(ctx)
+		return c.doCalicoDeploy(ctx, data)
 	case CanalNetworkPlugin:
-		return c.doCanalDeploy(ctx)
+		return c.doCanalDeploy(ctx, data)
 	case WeaveNetworkPlugin:
-		return c.doWeaveDeploy(ctx)
+		return c.doWeaveDeploy(ctx, data)
 	case NoNetworkPlugin:
 		log.Infof(ctx, "[network] Not deploying a cluster network, expecting custom CNI")
 		return nil
@@ -142,7 +142,7 @@ func (c *Cluster) deployNetworkPlugin(ctx context.Context) error {
 	}
 }
 
-func (c *Cluster) doFlannelDeploy(ctx context.Context) error {
+func (c *Cluster) doFlannelDeploy(ctx context.Context, data map[string]interface{}) error {
 	vni, err := atoiWithDefault(c.Network.Options[FlannelBackendVxLanNetworkIdentify], FlannelVxLanNetworkIdentify)
 	if err != nil {
 		return err
@@ -165,14 +165,14 @@ func (c *Cluster) doFlannelDeploy(ctx context.Context) error {
 		RBACConfig:     c.Authorization.Mode,
 		ClusterVersion: util.GetTagMajorVersion(c.Version),
 	}
-	pluginYaml, err := c.getNetworkPluginManifest(flannelConfig)
+	pluginYaml, err := c.getNetworkPluginManifest(flannelConfig, data)
 	if err != nil {
 		return err
 	}
 	return c.doAddonDeploy(ctx, pluginYaml, NetworkPluginResourceName, true)
 }
 
-func (c *Cluster) doCalicoDeploy(ctx context.Context) error {
+func (c *Cluster) doCalicoDeploy(ctx context.Context, data map[string]interface{}) error {
 	clientConfig := pki.GetConfigPath(pki.KubeNodeCertName)
 	calicoConfig := map[string]interface{}{
 		KubeCfg: clientConfig,
|
||||
CloudProvider: c.Network.Options[CalicoCloudProvider],
|
||||
RBACConfig: c.Authorization.Mode,
|
||||
}
|
||||
pluginYaml, err := c.getNetworkPluginManifest(calicoConfig)
|
||||
pluginYaml, err := c.getNetworkPluginManifest(calicoConfig, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return c.doAddonDeploy(ctx, pluginYaml, NetworkPluginResourceName, true)
|
||||
}
|
||||
|
||||
func (c *Cluster) doCanalDeploy(ctx context.Context) error {
|
||||
func (c *Cluster) doCanalDeploy(ctx context.Context, data map[string]interface{}) error {
|
||||
flannelVni, err := atoiWithDefault(c.Network.Options[CanalFlannelBackendVxLanNetworkIdentify], FlannelVxLanNetworkIdentify)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -219,14 +219,14 @@ func (c *Cluster) doCanalDeploy(ctx context.Context) error {
 			"Port": flannelPort,
 		},
 	}
-	pluginYaml, err := c.getNetworkPluginManifest(canalConfig)
+	pluginYaml, err := c.getNetworkPluginManifest(canalConfig, data)
 	if err != nil {
 		return err
 	}
 	return c.doAddonDeploy(ctx, pluginYaml, NetworkPluginResourceName, true)
 }
 
-func (c *Cluster) doWeaveDeploy(ctx context.Context) error {
+func (c *Cluster) doWeaveDeploy(ctx context.Context, data map[string]interface{}) error {
 	weaveConfig := map[string]interface{}{
 		ClusterCIDR:   c.ClusterCIDR,
 		WeavePassword: c.Network.Options[WeavePassword],
@@ -235,23 +235,23 @@ func (c *Cluster) doWeaveDeploy(ctx context.Context) error {
 		WeaveLoopbackImage: c.SystemImages.Alpine,
 		RBACConfig:         c.Authorization.Mode,
 	}
-	pluginYaml, err := c.getNetworkPluginManifest(weaveConfig)
+	pluginYaml, err := c.getNetworkPluginManifest(weaveConfig, data)
 	if err != nil {
 		return err
 	}
 	return c.doAddonDeploy(ctx, pluginYaml, NetworkPluginResourceName, true)
 }
 
-func (c *Cluster) getNetworkPluginManifest(pluginConfig map[string]interface{}) (string, error) {
+func (c *Cluster) getNetworkPluginManifest(pluginConfig, data map[string]interface{}) (string, error) {
 	switch c.Network.Plugin {
 	case FlannelNetworkPlugin:
-		return templates.CompileTemplateFromMap(templates.GetVersionedTemplates(FlannelNetworkPlugin, c.Version), pluginConfig)
+		return templates.CompileTemplateFromMap(templates.GetVersionedTemplates(FlannelNetworkPlugin, data, c.Version), pluginConfig)
 	case CalicoNetworkPlugin:
-		return templates.CompileTemplateFromMap(templates.GetVersionedTemplates(CalicoNetworkPlugin, c.Version), pluginConfig)
+		return templates.CompileTemplateFromMap(templates.GetVersionedTemplates(CalicoNetworkPlugin, data, c.Version), pluginConfig)
 	case CanalNetworkPlugin:
-		return templates.CompileTemplateFromMap(templates.GetVersionedTemplates(CanalNetworkPlugin, c.Version), pluginConfig)
+		return templates.CompileTemplateFromMap(templates.GetVersionedTemplates(CanalNetworkPlugin, data, c.Version), pluginConfig)
 	case WeaveNetworkPlugin:
-		return templates.CompileTemplateFromMap(templates.GetVersionedTemplates(WeaveNetworkPlugin, c.Version), pluginConfig)
+		return templates.CompileTemplateFromMap(templates.GetVersionedTemplates(WeaveNetworkPlugin, data, c.Version), pluginConfig)
 	default:
 		return "", fmt.Errorf("[network] Unsupported network plugin: %s", c.Network.Plugin)
 	}
@@ -19,7 +19,7 @@ import (
 	"github.com/rancher/rke/pki"
 	"github.com/rancher/rke/services"
 	"github.com/rancher/rke/util"
-	v3 "github.com/rancher/types/apis/management.cattle.io/v3"
+	"github.com/rancher/types/apis/management.cattle.io/v3"
 	"github.com/sirupsen/logrus"
 )
@@ -53,19 +53,19 @@ func GeneratePlan(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConf
 	uniqHosts := hosts.GetUniqueHostList(myCluster.EtcdHosts, myCluster.ControlPlaneHosts, myCluster.WorkerHosts)
 	for _, host := range uniqHosts {
 		host.DockerInfo = hostsInfoMap[host.Address]
-		clusterPlan.Nodes = append(clusterPlan.Nodes, BuildRKEConfigNodePlan(ctx, myCluster, host, hostsInfoMap[host.Address]))
+		clusterPlan.Nodes = append(clusterPlan.Nodes, BuildRKEConfigNodePlan(ctx, myCluster, host, hostsInfoMap[host.Address], nil))
 	}
 	return clusterPlan, nil
 }
 
-func BuildRKEConfigNodePlan(ctx context.Context, myCluster *Cluster, host *hosts.Host, hostDockerInfo types.Info) v3.RKEConfigNodePlan {
+func BuildRKEConfigNodePlan(ctx context.Context, myCluster *Cluster, host *hosts.Host, hostDockerInfo types.Info, svcOptions *v3.KubernetesServicesOptions) v3.RKEConfigNodePlan {
 	prefixPath := hosts.GetPrefixPath(hostDockerInfo.OperatingSystem, myCluster.PrefixPath)
 	processes := map[string]v3.Process{}
 	portChecks := []v3.PortCheck{}
 	// Everybody gets a sidecar and a kubelet..
 	processes[services.SidekickContainerName] = myCluster.BuildSidecarProcess()
-	processes[services.KubeletContainerName] = myCluster.BuildKubeletProcess(host, prefixPath)
-	processes[services.KubeproxyContainerName] = myCluster.BuildKubeProxyProcess(host, prefixPath)
+	processes[services.KubeletContainerName] = myCluster.BuildKubeletProcess(host, prefixPath, svcOptions)
+	processes[services.KubeproxyContainerName] = myCluster.BuildKubeProxyProcess(host, prefixPath, svcOptions)
 
 	portChecks = append(portChecks, BuildPortChecksFromPortList(host, WorkerPortList, ProtocolTCP)...)
 	// Do we need an nginxProxy for this one ?
@@ -73,9 +73,9 @@ func BuildRKEConfigNodePlan(ctx context.Context, myCluster *Cluster, host *hosts
 		processes[services.NginxProxyContainerName] = myCluster.BuildProxyProcess()
 	}
 	if host.IsControl {
-		processes[services.KubeAPIContainerName] = myCluster.BuildKubeAPIProcess(host, prefixPath)
-		processes[services.KubeControllerContainerName] = myCluster.BuildKubeControllerProcess(prefixPath)
-		processes[services.SchedulerContainerName] = myCluster.BuildSchedulerProcess(prefixPath)
+		processes[services.KubeAPIContainerName] = myCluster.BuildKubeAPIProcess(host, prefixPath, svcOptions)
+		processes[services.KubeControllerContainerName] = myCluster.BuildKubeControllerProcess(prefixPath, svcOptions)
+		processes[services.SchedulerContainerName] = myCluster.BuildSchedulerProcess(prefixPath, svcOptions)
 
 		portChecks = append(portChecks, BuildPortChecksFromPortList(host, ControlPlanePortList, ProtocolTCP)...)
 	}
@@ -101,7 +101,7 @@ func BuildRKEConfigNodePlan(ctx context.Context, myCluster *Cluster, host *hosts
 	}
 }
 
-func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string) v3.Process {
+func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string, svcOptions *v3.KubernetesServicesOptions) v3.Process {
 	// check if external etcd is used
 	etcdConnectionString := services.GetEtcdConnString(c.EtcdHosts, host.InternalAddress)
 	etcdPathPrefix := EtcdPathPrefix
@@ -154,8 +154,13 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string) v3.Pr
 			c.Services.KubeAPI.ExtraEnv,
 			fmt.Sprintf("%s=%s", CloudConfigSumEnv, getCloudConfigChecksum(c.CloudConfigFile)))
 	}
-	// check if our version has specific options for this component
-	serviceOptions := c.GetKubernetesServicesOptions()
+	var serviceOptions v3.KubernetesServicesOptions
+	if svcOptions == nil {
+		// check if our version has specific options for this component
+		serviceOptions = c.GetKubernetesServicesOptions()
+	} else {
+		serviceOptions = *svcOptions
+	}
 	if serviceOptions.KubeAPI != nil {
 		for k, v := range serviceOptions.KubeAPI {
 			// if the value is empty, we remove that option
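The Build*Process methods below (kube-apiserver here, then kube-controller-manager, kubelet, kube-proxy and kube-scheduler) all gain this same fallback block. Isolated from the diff for clarity, the pattern is:

    // Prefer the caller-supplied options; a nil svcOptions means
    // "no override", in which case the options baked into RKE for
    // this Kubernetes version are used instead.
    var serviceOptions v3.KubernetesServicesOptions
    if svcOptions == nil {
        serviceOptions = c.GetKubernetesServicesOptions()
    } else {
        serviceOptions = *svcOptions
    }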
@@ -236,7 +241,7 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string) v3.Pr
 	}
 }
 
-func (c *Cluster) BuildKubeControllerProcess(prefixPath string) v3.Process {
+func (c *Cluster) BuildKubeControllerProcess(prefixPath string, svcOptions *v3.KubernetesServicesOptions) v3.Process {
 	Command := []string{
 		c.getRKEToolsEntryPoint(),
 		"kube-controller-manager",
@@ -262,8 +267,13 @@ func (c *Cluster) BuildKubeControllerProcess(prefixPath string) v3.Process {
 			c.Services.KubeController.ExtraEnv,
 			fmt.Sprintf("%s=%s", CloudConfigSumEnv, getCloudConfigChecksum(c.CloudConfigFile)))
 	}
-	// check if our version has specific options for this component
-	serviceOptions := c.GetKubernetesServicesOptions()
+	var serviceOptions v3.KubernetesServicesOptions
+	if svcOptions == nil {
+		// check if our version has specific options for this component
+		serviceOptions = c.GetKubernetesServicesOptions()
+	} else {
+		serviceOptions = *svcOptions
+	}
 	if serviceOptions.KubeController != nil {
 		for k, v := range serviceOptions.KubeController {
 			// if the value is empty, we remove that option
@@ -322,7 +332,7 @@ func (c *Cluster) BuildKubeControllerProcess(prefixPath string) v3.Process {
 	}
 }
 
-func (c *Cluster) BuildKubeletProcess(host *hosts.Host, prefixPath string) v3.Process {
+func (c *Cluster) BuildKubeletProcess(host *hosts.Host, prefixPath string, svcOptions *v3.KubernetesServicesOptions) v3.Process {
 
 	Command := []string{
 		c.getRKEToolsEntryPoint(),
@@ -365,13 +375,13 @@ func (c *Cluster) BuildKubeletProcess(host *hosts.Host, prefixPath string) v3.Pr
 			c.Services.Kubelet.ExtraEnv,
 			fmt.Sprintf("%s=%s", KubeletDockerConfigFileEnv, path.Join(prefixPath, KubeletDockerConfigPath)))
 	}
-	// allow-privileged is removed in k8s 1.15
-	if c.Version < MaxK8s115Version {
-		CommandArgs["allow-privileged"] = "true"
+	var serviceOptions v3.KubernetesServicesOptions
+	if svcOptions == nil {
+		// check if our version has specific options for this component
+		serviceOptions = c.GetKubernetesServicesOptions()
+	} else {
+		serviceOptions = *svcOptions
 	}
-
-	// check if our version has specific options for this component
-	serviceOptions := c.GetKubernetesServicesOptions()
 	if serviceOptions.Kubelet != nil {
 		for k, v := range serviceOptions.Kubelet {
 			// if the value is empty, we remove that option
@@ -448,7 +458,7 @@ func (c *Cluster) BuildKubeletProcess(host *hosts.Host, prefixPath string) v3.Pr
 	}
 }
 
-func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, prefixPath string) v3.Process {
+func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, prefixPath string, svcOptions *v3.KubernetesServicesOptions) v3.Process {
 	Command := []string{
 		c.getRKEToolsEntryPoint(),
 		"kube-proxy",
@@ -460,8 +470,13 @@ func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, prefixPath string) v3.
 		"kubeconfig": pki.GetConfigPath(pki.KubeProxyCertName),
 	}
 
-	// check if our version has specific options for this component
-	serviceOptions := c.GetKubernetesServicesOptions()
+	var serviceOptions v3.KubernetesServicesOptions
+	if svcOptions == nil {
+		// check if our version has specific options for this component
+		serviceOptions = c.GetKubernetesServicesOptions()
+	} else {
+		serviceOptions = *svcOptions
+	}
 	if serviceOptions.Kubeproxy != nil {
 		for k, v := range serviceOptions.Kubeproxy {
 			// if the value is empty, we remove that option
@@ -550,7 +565,7 @@ func (c *Cluster) BuildProxyProcess() v3.Process {
 	}
 }
 
-func (c *Cluster) BuildSchedulerProcess(prefixPath string) v3.Process {
+func (c *Cluster) BuildSchedulerProcess(prefixPath string, svcOptions *v3.KubernetesServicesOptions) v3.Process {
 	Command := []string{
 		c.getRKEToolsEntryPoint(),
 		"kube-scheduler",
@@ -564,9 +579,13 @@ func (c *Cluster) BuildSchedulerProcess(prefixPath string) v3.Process {
 	if c.DinD {
 		CommandArgs["address"] = "0.0.0.0"
 	}
-
-	// check if our version has specific options for this component
-	serviceOptions := c.GetKubernetesServicesOptions()
+	var serviceOptions v3.KubernetesServicesOptions
+	if svcOptions == nil {
+		// check if our version has specific options for this component
+		serviceOptions = c.GetKubernetesServicesOptions()
+	} else {
+		serviceOptions = *svcOptions
+	}
 	if serviceOptions.Scheduler != nil {
 		for k, v := range serviceOptions.Scheduler {
 			// if the value is empty, we remove that option
@@ -24,7 +24,7 @@ const (
 	EtcdPlaneNodesReplacedErr = "Etcd plane nodes are replaced. Stopping provisioning. Please restore your cluster from backup."
 )
 
-func ReconcileCluster(ctx context.Context, kubeCluster, currentCluster *Cluster, flags ExternalFlags) error {
+func ReconcileCluster(ctx context.Context, kubeCluster, currentCluster *Cluster, flags ExternalFlags, svcOptions *v3.KubernetesServicesOptions) error {
 	log.Infof(ctx, "[reconcile] Reconciling cluster state")
 	kubeCluster.UpdateWorkersOnly = flags.UpdateOnly
 	if currentCluster == nil {
@@ -40,7 +40,7 @@ func ReconcileCluster(ctx context.Context, kubeCluster, currentCluster *Cluster,
 	// sync node labels to define the toDelete labels
 	syncLabels(ctx, currentCluster, kubeCluster)
 
-	if err := reconcileEtcd(ctx, currentCluster, kubeCluster, kubeClient); err != nil {
+	if err := reconcileEtcd(ctx, currentCluster, kubeCluster, kubeClient, svcOptions); err != nil {
 		return fmt.Errorf("Failed to reconcile etcd plane: %v", err)
 	}
@@ -165,7 +165,7 @@ func reconcileHost(ctx context.Context, toDeleteHost *hosts.Host, worker, etcd b
 	return nil
 }
 
-func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, kubeClient *kubernetes.Clientset) error {
+func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, kubeClient *kubernetes.Clientset, svcOptions *v3.KubernetesServicesOptions) error {
 	log.Infof(ctx, "[reconcile] Check etcd hosts to be deleted")
 	if isEtcdPlaneReplaced(ctx, currentCluster, kubeCluster) {
 		logrus.Warnf("%v", EtcdPlaneNodesReplacedErr)
@@ -213,7 +213,7 @@ func reconcileEtcd(ctx context.Context, currentCluster, kubeCluster *Cluster, ku
 
 	etcdNodePlanMap := make(map[string]v3.RKEConfigNodePlan)
 	for _, etcdReadyHost := range kubeCluster.EtcdReadyHosts {
-		etcdNodePlanMap[etcdReadyHost.Address] = BuildRKEConfigNodePlan(ctx, kubeCluster, etcdReadyHost, etcdReadyHost.DockerInfo)
+		etcdNodePlanMap[etcdReadyHost.Address] = BuildRKEConfigNodePlan(ctx, kubeCluster, etcdReadyHost, etcdReadyHost.DockerInfo, svcOptions)
 	}
 	// this will start the newly added etcd node and make sure it started correctly before restarting other node
 	// https://github.com/etcd-io/etcd/blob/master/Documentation/op-guide/runtime-configuration.md#add-a-new-member
@@ -94,7 +94,7 @@ func rotateRKECertificatesFromCli(ctx *cli.Context) error {
 	if err := ClusterInit(context.Background(), rkeConfig, hosts.DialersOptions{}, externalFlags); err != nil {
 		return err
 	}
-	_, _, _, _, _, err = ClusterUp(context.Background(), hosts.DialersOptions{}, externalFlags)
+	_, _, _, _, _, err = ClusterUp(context.Background(), hosts.DialersOptions{}, externalFlags, map[string]interface{}{})
 	return err
 }
@@ -126,7 +126,7 @@ func showRKECertificatesFromCli(ctx *cli.Context) error {
 
 func rebuildClusterWithRotatedCertificates(ctx context.Context,
 	dialersOptions hosts.DialersOptions,
-	flags cluster.ExternalFlags) (string, string, string, string, map[string]pki.CertificatePKI, error) {
+	flags cluster.ExternalFlags, svcOptions *v3.KubernetesServicesOptions) (string, string, string, string, map[string]pki.CertificatePKI, error) {
 	var APIURL, caCrt, clientCert, clientKey string
 	log.Infof(ctx, "Rebuilding Kubernetes cluster with rotated certificates")
 	clusterState, err := cluster.ReadStateFile(ctx, cluster.GetStateFilePath(flags.ClusterFilePath, flags.ConfigDir))
@@ -179,7 +179,7 @@ func rebuildClusterWithRotatedCertificates(ctx context.Context,
 	}
 	if isLegacyKubeAPI {
 		log.Infof(ctx, "[controlplane] Redeploying controlplane to update kubeapi parameters")
-		if err := kubeCluster.DeployControlPlane(ctx); err != nil {
+		if err := kubeCluster.DeployControlPlane(ctx, svcOptions); err != nil {
 			return APIURL, caCrt, clientCert, clientKey, nil, err
 		}
 	}
@@ -75,8 +75,8 @@ func ClusterInit(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfi
 	if len(flags.CertificateDir) == 0 {
 		flags.CertificateDir = cluster.GetCertificateDirPath(flags.ClusterFilePath, flags.ConfigDir)
 	}
-	if err := metadata.InitMetadata(ctx); err != nil {
-		return err
+	if metadata.K8sVersionToRKESystemImages == nil {
+		metadata.InitMetadata(ctx)
 	}
 	rkeFullState, _ := cluster.ReadStateFile(ctx, stateFilePath)
 
@@ -112,7 +112,9 @@ func RestoreEtcdSnapshot(
 	ctx context.Context,
 	rkeConfig *v3.RancherKubernetesEngineConfig,
 	dialersOptions hosts.DialersOptions,
-	flags cluster.ExternalFlags, snapshotName string) (string, string, string, string, map[string]pki.CertificatePKI, error) {
+	flags cluster.ExternalFlags,
+	data map[string]interface{},
+	snapshotName string) (string, string, string, string, map[string]pki.CertificatePKI, error) {
 	var APIURL, caCrt, clientCert, clientKey string
 	log.Infof(ctx, "Restoring etcd snapshot %s", snapshotName)
 	kubeCluster, err := cluster.InitClusterObject(ctx, rkeConfig, flags)
@@ -154,7 +156,7 @@ func RestoreEtcdSnapshot(
 	if err := ClusterInit(ctx, rkeConfig, dialersOptions, flags); err != nil {
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
-	APIURL, caCrt, clientCert, clientKey, certs, err := ClusterUp(ctx, dialersOptions, flags)
+	APIURL, caCrt, clientCert, clientKey, certs, err := ClusterUp(ctx, dialersOptions, flags, data)
 	if err != nil {
 		if !strings.Contains(err.Error(), "Provisioning incomplete") {
 			return APIURL, caCrt, clientCert, clientKey, nil, err
@@ -221,7 +223,7 @@ func RestoreEtcdSnapshotFromCli(ctx *cli.Context) error {
 	// setting up the flags
 	flags := cluster.GetExternalFlags(false, false, false, "", filePath)
 
-	_, _, _, _, _, err = RestoreEtcdSnapshot(context.Background(), rkeConfig, hosts.DialersOptions{}, flags, etcdSnapshotName)
+	_, _, _, _, _, err = RestoreEtcdSnapshot(context.Background(), rkeConfig, hosts.DialersOptions{}, flags, map[string]interface{}{}, etcdSnapshotName)
 	return err
 }
cmd/up.go
@@ -75,7 +75,7 @@ func UpCommand() cli.Command {
 	}
 }
 
-func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags cluster.ExternalFlags) (string, string, string, string, map[string]pki.CertificatePKI, error) {
+func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags cluster.ExternalFlags, data map[string]interface{}) (string, string, string, string, map[string]pki.CertificatePKI, error) {
 	var APIURL, caCrt, clientCert, clientKey string
 
 	clusterState, err := cluster.ReadStateFile(ctx, cluster.GetStateFilePath(flags.ClusterFilePath, flags.ConfigDir))
@@ -86,9 +86,10 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
+	svcOptions, _ := data["k8s-service-options"].(*v3.KubernetesServicesOptions)
 	// check if rotate certificates is triggered
 	if kubeCluster.RancherKubernetesEngineConfig.RotateCertificates != nil {
-		return rebuildClusterWithRotatedCertificates(ctx, dialersOptions, flags)
+		return rebuildClusterWithRotatedCertificates(ctx, dialersOptions, flags, svcOptions)
 	}
 
 	log.Infof(ctx, "Building Kubernetes cluster")
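The comma-ok type assertion makes the override optional: when a caller passes an empty map, as the CLI entry points below do, the assertion yields the zero value (nil) rather than panicking, and every Build*Process method falls back to RKE's built-in options. A small sketch of that behavior:

    data := map[string]interface{}{} // what the CLI code paths pass
    svcOptions, _ := data["k8s-service-options"].(*v3.KubernetesServicesOptions)
    // svcOptions == nil here: the key is absent, so the non-panicking
    // form of the assertion returns the zero value of the pointer type.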
@@ -129,7 +130,7 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
 
-	err = cluster.ReconcileCluster(ctx, kubeCluster, currentCluster, flags)
+	err = cluster.ReconcileCluster(ctx, kubeCluster, currentCluster, flags, svcOptions)
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
@@ -142,7 +143,7 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
 
-	err = kubeCluster.DeployControlPlane(ctx)
+	err = kubeCluster.DeployControlPlane(ctx, svcOptions)
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
@@ -163,7 +164,7 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
 
-	err = kubeCluster.DeployWorkerPlane(ctx)
+	err = kubeCluster.DeployWorkerPlane(ctx, svcOptions)
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
@@ -177,7 +178,7 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
 
-	err = cluster.ConfigureCluster(ctx, kubeCluster.RancherKubernetesEngineConfig, kubeCluster.Certificates, flags, dialersOptions, false)
+	err = cluster.ConfigureCluster(ctx, kubeCluster.RancherKubernetesEngineConfig, kubeCluster.Certificates, flags, dialersOptions, data, false)
 	if err != nil {
 		return APIURL, caCrt, clientCert, clientKey, nil, err
 	}
@@ -238,7 +239,7 @@ func clusterUpFromCli(ctx *cli.Context) error {
 		return err
 	}
 
-	_, _, _, _, _, err = ClusterUp(context.Background(), hosts.DialersOptions{}, flags)
+	_, _, _, _, _, err = ClusterUp(context.Background(), hosts.DialersOptions{}, flags, map[string]interface{}{})
 	return err
 }
@@ -269,7 +270,7 @@ func clusterUpLocal(ctx *cli.Context) error {
 	if err := ClusterInit(context.Background(), rkeConfig, dialers, flags); err != nil {
 		return err
 	}
-	_, _, _, _, _, err = ClusterUp(context.Background(), dialers, flags)
+	_, _, _, _, _, err = ClusterUp(context.Background(), dialers, flags, map[string]interface{}{})
 	return err
 }
@@ -297,7 +298,7 @@ func clusterUpDind(ctx *cli.Context) error {
 		return err
 	}
 	// start cluster
-	_, _, _, _, _, err = ClusterUp(context.Background(), dialers, flags)
+	_, _, _, _, _, err = ClusterUp(context.Background(), dialers, flags, map[string]interface{}{})
 	return err
 }
@@ -5,6 +5,7 @@ import (
 	"encoding/json"
 	"text/template"
 
+	"github.com/rancher/norman/types/convert"
 	"github.com/rancher/rke/metadata"
 
 	"github.com/rancher/rke/util"
@@ -19,8 +20,10 @@ func CompileTemplateFromMap(tmplt string, configMap interface{}) (string, error)
 	return out.String(), nil
 }
 
-func GetVersionedTemplates(templateName string, k8sVersion string) string {
+func GetVersionedTemplates(templateName string, data map[string]interface{}, k8sVersion string) string {
+	if template, ok := data[templateName]; ok {
+		return convert.ToString(template)
+	}
 	versionedTemplate := metadata.K8sVersionToTemplates[templateName]
 	if t, ok := versionedTemplate[util.GetTagMajorVersion(k8sVersion)]; ok {
 		return t
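With this change, a template supplied in the data map under the template's name takes precedence over the templates compiled into RKE; the versioned metadata lookup only runs when no override is present. A usage sketch, assuming a hypothetical key and template value (the real keys are the template-name constants used by the callers above, e.g. in getNetworkPluginManifest):

    // Hypothetical override: the key must equal the templateName argument.
    data := map[string]interface{}{
        "coreDNS": "# custom CoreDNS manifest template ...",
    }
    tmpl := templates.GetVersionedTemplates("coreDNS", data, "v1.14.1")
    // tmpl is the custom template; with an empty map, the entry from
    // metadata.K8sVersionToTemplates for the major version would be used.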