Mirror of https://github.com/rancher/rke.git
Add compatibility with k8s v1.22
@@ -84,6 +84,7 @@ type ingressOptions struct {
 	Tolerations                             []v1.Toleration
 	NginxIngressControllerPriorityClassName string
 	DefaultHTTPBackendPriorityClassName     string
+	DefaultIngressClass                     bool
 }
 
 type MetricsServerOptions struct {
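The new DefaultIngressClass field is how the deploy step decides whether this controller's IngressClass is marked as the cluster default, which Kubernetes signals with the standard ingressclass.kubernetes.io/is-default-class annotation. Below is a minimal sketch of how a bool option like this can drive that annotation from a manifest template; the template is illustrative, not RKE's actual addon template.

// Sketch: render an IngressClass manifest whose default-class annotation
// is controlled by a DefaultIngressClass flag (illustrative, not RKE code).
package main

import (
	"os"
	"text/template"
)

const ingressClassTemplate = `apiVersion: networking.k8s.io/v1
kind: IngressClass
metadata:
  name: nginx
{{- if .DefaultIngressClass }}
  annotations:
    ingressclass.kubernetes.io/is-default-class: "true"
{{- end }}
spec:
  controller: k8s.io/ingress-nginx
`

func main() {
	opts := struct{ DefaultIngressClass bool }{DefaultIngressClass: true}
	tmpl := template.Must(template.New("ingressClass").Parse(ingressClassTemplate))
	if err := tmpl.Execute(os.Stdout, opts); err != nil {
		panic(err)
	}
}

When the annotation is present, Ingress resources that omit ingressClassName are routed to this controller; with the option set to false, the class must be named explicitly.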
@@ -493,7 +494,7 @@ func (c *Cluster) doAddonDeploy(ctx context.Context, addonYaml, resourceName str
 	if err != nil {
 		return &addonError{fmt.Sprintf("Failed to get Node [%s]: %v", c.ControlPlaneHosts[0].HostnameOverride, err), isCritical}
 	}
-	addonJob, err := addons.GetAddonsExecuteJob(resourceName, node.Name, c.Services.KubeAPI.Image)
+	addonJob, err := addons.GetAddonsExecuteJob(resourceName, node.Name, c.Services.KubeAPI.Image, c.Version)
 	if err != nil {
 		return &addonError{fmt.Sprintf("Failed to generate addon execute job: %v", err), isCritical}
 	}
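GetAddonsExecuteJob now also receives c.Version, the cluster's Kubernetes version, and the same parameter is threaded through the delete and cleanup paths below. The likely motivation is that v1.22 removed long-deprecated beta APIs, so a generated job or addon manifest may need to differ by release. A sketch of that version-gating idea; parseMinor, the output strings, and the v1.22 cutoff are illustrative assumptions, not RKE's actual logic.

// Sketch: gate manifest generation on the cluster's minor version.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseMinor extracts the minor version from a tag like "v1.22.4-rancher1-1".
func parseMinor(version string) int {
	parts := strings.Split(strings.TrimPrefix(version, "v"), ".")
	if len(parts) < 2 {
		return 0
	}
	minor, _ := strconv.Atoi(parts[1])
	return minor
}

// getAddonsExecuteJob mimics the new signature: the trailing version argument
// selects which flavor of manifest to render.
func getAddonsExecuteJob(name, node, image, clusterVersion string) string {
	if parseMinor(clusterVersion) >= 22 {
		return fmt.Sprintf("job %s on %s (image %s, v1.22+ manifest)", name, node, image)
	}
	return fmt.Sprintf("job %s on %s (image %s, legacy manifest)", name, node, image)
}

func main() {
	fmt.Println(getAddonsExecuteJob("rke-ingress-controller", "node-1",
		"rancher/hyperkube:v1.22.4", "v1.22.4-rancher1-1"))
}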
@@ -514,7 +515,7 @@ func (c *Cluster) doAddonDelete(ctx context.Context, resourceName string, isCrit
 	if err != nil {
 		return &addonError{fmt.Sprintf("Failed to get Node [%s]: %v", c.ControlPlaneHosts[0].HostnameOverride, err), isCritical}
 	}
-	deleteJob, err := addons.GetAddonsDeleteJob(resourceName, node.Name, c.Services.KubeAPI.Image)
+	deleteJob, err := addons.GetAddonsDeleteJob(resourceName, node.Name, c.Services.KubeAPI.Image, c.Version)
 	if err != nil {
 		return &addonError{fmt.Sprintf("Failed to generate addon delete job: %v", err), isCritical}
 	}
@@ -522,7 +523,7 @@ func (c *Cluster) doAddonDelete(ctx context.Context, resourceName string, isCrit
 		return &addonError{fmt.Sprintf("%v", err), isCritical}
 	}
 	// At this point, the addon should be deleted. We need to clean up by deleting the deploy and delete jobs.
-	tmpJobYaml, err := addons.GetAddonsExecuteJob(resourceName, node.Name, c.Services.KubeAPI.Image)
+	tmpJobYaml, err := addons.GetAddonsExecuteJob(resourceName, node.Name, c.Services.KubeAPI.Image, c.Version)
 	if err != nil {
 		return err
 	}
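Note the cleanup approach: instead of persisting a record of the deploy job, doAddonDelete regenerates its manifest from the same inputs and deletes the result. That only works because generation is deterministic, i.e. identical inputs always describe the identical Job object. A toy illustration of that property; jobManifest is hypothetical.

// Sketch: deterministic generation lets "regenerate, then delete" target
// exactly the object that was deployed (illustrative only).
package main

import "fmt"

// jobManifest stands in for the YAML returned by GetAddonsExecuteJob.
func jobManifest(resourceName, nodeName string) string {
	return fmt.Sprintf("job/%s-deploy-job on %s", resourceName, nodeName)
}

func main() {
	deployed := jobManifest("rke-network-plugin", "node-1")
	// During deletion, the same inputs reproduce the same object reference,
	// so nothing needs to be stored between deploy and delete.
	toDelete := jobManifest("rke-network-plugin", "node-1")
	fmt.Println(deployed == toDelete) // true
}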
@@ -608,6 +609,7 @@ func (c *Cluster) deployIngress(ctx context.Context, data map[string]interface{}
 		Tolerations:                             c.Ingress.Tolerations,
 		NginxIngressControllerPriorityClassName: c.Ingress.NginxIngressControllerPriorityClassName,
 		DefaultHTTPBackendPriorityClassName:     c.Ingress.DefaultHTTPBackendPriorityClassName,
+		DefaultIngressClass:                     *c.Ingress.DefaultIngressClass,
 	}
 	// since nginx ingress controller 0.16.0, it can be run as non-root and doesn't require privileged anymore.
 	// So we can use securityContext instead of setting privileges via initContainer.
@@ -849,6 +849,10 @@ func (c *Cluster) setAddonsDefaults() {
 		}
 		c.Ingress.DefaultBackend = &defaultBackend
 	}
+	if c.Ingress.DefaultIngressClass == nil {
+		defaultIngressClass := true
+		c.Ingress.DefaultIngressClass = &defaultIngressClass
+	}
 }
 
 func setDaemonsetAddonDefaults(updateStrategy *v3.DaemonSetUpdateStrategy) *v3.DaemonSetUpdateStrategy {
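The *bool pattern above distinguishes "explicitly set to false" from "not set at all": the default of true is applied only when the field is nil, which is also what makes the unconditional *c.Ingress.DefaultIngressClass dereference in deployIngress safe. A self-contained sketch of the pattern with simplified types.

// Sketch: pointer-to-bool defaulting, as used by setAddonsDefaults.
package main

import "fmt"

type ingressConfig struct {
	DefaultIngressClass *bool // nil means "not configured by the user"
}

func setDefaults(c *ingressConfig) {
	if c.DefaultIngressClass == nil {
		defaultIngressClass := true
		c.DefaultIngressClass = &defaultIngressClass
	}
}

func main() {
	var unset ingressConfig
	explicitFalse := ingressConfig{DefaultIngressClass: new(bool)} // points at false

	setDefaults(&unset)
	setDefaults(&explicitFalse)

	fmt.Println(*unset.DefaultIngressClass)         // true: default applied
	fmt.Println(*explicitFalse.DefaultIngressClass) // false: user choice preserved
}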
@@ -34,10 +34,7 @@ const (
 	KubeAPIPort    = "6443"
 	EtcdPort1      = "2379"
 	EtcdPort2      = "2380"
-	ScedulerPort   = "10251"
-	ControllerPort = "10252"
 	KubeletPort    = "10250"
 	KubeProxyPort  = "10256"
 	FlannelVxLanPort = 8472
 
 	FlannelVxLanNetworkIdentify = 1
@@ -55,6 +55,7 @@ const (
 	MaxK8s115Version          = "v1.15"
 	MaxEtcdPort4001Version    = "v3.4.3-rancher99"
 	MaxEtcdNoStrictTLSVersion = "v3.4.14-rancher99"
+	MaxK8s121Version          = "v1.21.99-rancher99"
 
 	EncryptionProviderConfigArgument = "encryption-provider-config"
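MaxK8s121Version follows the existing cap-constant convention in this file: the artificial 99 components sort above any real Rancher tag of that minor, so every v1.21.x release compares below the cap and every v1.22.x release compares above it. A sketch of the comparison using github.com/coreos/go-semver, whose Version.LessThan(Version) signature matches the k8sSemVer.LessThan(*maxK8s121Version) call further down; that library choice is an inference from the signature, not confirmed by the diff.

// Sketch: how a "-rancher99" cap constant splits version ranges.
package main

import (
	"fmt"
	"strings"

	"github.com/coreos/go-semver/semver"
)

// mustParse strips the "v" prefix, which go-semver does not accept.
func mustParse(tag string) *semver.Version {
	return semver.New(strings.TrimPrefix(tag, "v"))
}

func main() {
	maxV121 := mustParse("v1.21.99-rancher99") // MaxK8s121Version
	older := mustParse("v1.21.5-rancher1-1")
	newer := mustParse("v1.22.4-rancher1-1")

	fmt.Println(older.LessThan(*maxV121)) // true  -> pre-v1.22 code path
	fmt.Println(newer.LessThan(*maxV121)) // false -> v1.22+ code path
}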
@@ -371,11 +372,26 @@ func (c *Cluster) BuildKubeControllerProcess(host *hosts.Host, serviceOptions v3
 		cmd := fmt.Sprintf("--%s=%s", arg, value)
 		Command = append(Command, cmd)
 	}
+	k8sTag, err := util.GetImageTagFromImage(c.SystemImages.Kubernetes)
+	if err != nil {
+		logrus.Warn(err)
+	}
+	k8sSemVer, err := util.StrToSemVer(k8sTag)
+	if err != nil {
+		logrus.Warn(err)
+	}
+	maxK8s121Version, err := util.StrToSemVer(MaxK8s121Version)
+	if err != nil {
+		logrus.Warn(err)
+	}
 
 	Binds = append(Binds, c.Services.KubeController.ExtraBinds...)
 
-	healthCheck := v3.HealthCheck{
-		URL: services.GetHealthCheckURL(false, services.KubeControllerPort),
-	}
+	healthCheck := v3.HealthCheck{}
+	if k8sSemVer.LessThan(*maxK8s121Version) {
+		healthCheck.URL = services.GetHealthCheckURL(false, services.KubeControllerPortMaxV121)
+	} else {
+		healthCheck.URL = services.GetHealthCheckURL(true, services.KubeControllerPort)
+	}
 
 	registryAuthConfig, _, _ := docker.GetImageRegistryConfig(c.Services.KubeController.Image, c.PrivateRegistriesMap)
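The behavioral change in this hunk: clusters below the v1.21 cap keep the old health check against kube-controller-manager's insecure HTTP endpoint (KubeControllerPortMaxV121, presumably the legacy 10252), while v1.22 and newer clusters, where insecure serving no longer exists, are probed over HTTPS on the secure port (kube-controller-manager's standard secure port is 10257). A sketch of what a helper like services.GetHealthCheckURL plausibly does; the localhost host and /healthz path are assumptions.

// Sketch: build a scheme-aware health check URL (illustrative).
package main

import "fmt"

func getHealthCheckURL(useHTTPS bool, port int) string {
	scheme := "http"
	if useHTTPS {
		scheme = "https"
	}
	return fmt.Sprintf("%s://localhost:%d/healthz", scheme, port)
}

func main() {
	// Pre-v1.22: plain HTTP on the legacy insecure port.
	fmt.Println(getHealthCheckURL(false, 10252)) // http://localhost:10252/healthz
	// v1.22+: HTTPS on the secure port.
	fmt.Println(getHealthCheckURL(true, 10257)) // https://localhost:10257/healthz
}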