Mirror of https://github.com/rancher/rke.git (synced 2025-05-07 15:57:06 +00:00)

Commit 546a61b24a: Add compatibility with k8s v1.22
Parent: b7ef427a9a

Changed directories: addons, cluster, services, templates, types
addons:

```diff
@@ -6,26 +6,45 @@ import (
 	"k8s.io/client-go/transport"

+	"github.com/blang/semver"
 	"github.com/rancher/rke/k8s"
 	"github.com/rancher/rke/templates"
 	"github.com/sirupsen/logrus"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

-func GetAddonsExecuteJob(addonName, nodeName, image string) (string, error) {
-	return getAddonJob(addonName, nodeName, image, false)
+func GetAddonsExecuteJob(addonName, nodeName, image, k8sVersion string) (string, error) {
+	return getAddonJob(addonName, nodeName, image, k8sVersion, false)
 }

-func GetAddonsDeleteJob(addonName, nodeName, image string) (string, error) {
-	return getAddonJob(addonName, nodeName, image, true)
+func GetAddonsDeleteJob(addonName, nodeName, image, k8sVersion string) (string, error) {
+	return getAddonJob(addonName, nodeName, image, k8sVersion, true)
 }

-func getAddonJob(addonName, nodeName, image string, isDelete bool) (string, error) {
+func getAddonJob(addonName, nodeName, image, k8sVersion string, isDelete bool) (string, error) {
+	OSLabel := "beta.kubernetes.io/os"
+	toMatch, err := semver.Make(k8sVersion[1:])
+	if err != nil {
+		return "", fmt.Errorf("Cluster version [%s] can not be parsed as semver: %v", k8sVersion, err)
+	}
+
+	logrus.Debugf("Checking addon job OS label for cluster version [%s]", k8sVersion)
+	// kubernetes.io/os should be used 1.22.0 and up
+	OSLabelRange, err := semver.ParseRange(">=1.22.0-rancher0")
+	if err != nil {
+		return "", fmt.Errorf("Failed to parse semver range for checking OS label for addon job: %v", err)
+	}
+	if OSLabelRange(toMatch) {
+		logrus.Debugf("Cluster version [%s] needs to use new OS label", k8sVersion)
+		OSLabel = "kubernetes.io/os"
+	}
+
 	jobConfig := map[string]string{
 		"AddonName": addonName,
 		"NodeName":  nodeName,
 		"Image":     image,
 		"DeleteJob": strconv.FormatBool(isDelete),
+		"OSLabel":   OSLabel,
 	}
 	template, err := templates.CompileTemplateFromMap(templates.AddonJobTemplate, jobConfig)
 	logrus.Tracef("template for [%s] is: [%s]", addonName, template)
```
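The addon job's node selector keys off the node-OS label, and per the comment in the hunk the beta.kubernetes.io/os key gives way to kubernetes.io/os for clusters at v1.22.0 and up. The gate leans on blang/semver pre-release ordering: "-rancher0" sorts below every real "-rancherN" suffix, so the range catches all 1.22.x Rancher builds. A self-contained sketch of the same check (osLabelForVersion is a hypothetical helper written for this illustration, not a function from the commit):

```go
package main

import (
	"fmt"

	"github.com/blang/semver"
)

// osLabelForVersion mirrors the gate in getAddonJob: strip the leading "v"
// from an RKE version string, then pick the node-OS label key by range check.
func osLabelForVersion(k8sVersion string) (string, error) {
	toMatch, err := semver.Make(k8sVersion[1:]) // "v1.22.0-rancher1-1" -> "1.22.0-rancher1-1"
	if err != nil {
		return "", fmt.Errorf("cluster version [%s] can not be parsed as semver: %v", k8sVersion, err)
	}
	// "-rancher0" acts as a floor: every real rancherN pre-release sorts
	// above it, so any v1.22.x-rancherN build falls inside the range.
	newLabelRange, err := semver.ParseRange(">=1.22.0-rancher0")
	if err != nil {
		return "", err
	}
	if newLabelRange(toMatch) {
		return "kubernetes.io/os", nil
	}
	return "beta.kubernetes.io/os", nil
}

func main() {
	for _, v := range []string{"v1.21.1-rancher1-1", "v1.22.0-rancher1-1"} {
		label, _ := osLabelForVersion(v)
		fmt.Printf("%s -> %s\n", v, label)
	}
	// Output:
	// v1.21.1-rancher1-1 -> beta.kubernetes.io/os
	// v1.22.0-rancher1-1 -> kubernetes.io/os
}
```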
```diff
@@ -14,10 +14,11 @@ const (
 	FakeAddonName  = "example-addon"
 	FakeNodeName   = "node1"
 	FakeAddonImage = "example/example:latest"
+	FakeK8sVersion = "v1.21.1-rancher1-1"
 )

 func TestJobManifest(t *testing.T) {
-	jobYaml, err := GetAddonsExecuteJob(FakeAddonName, FakeNodeName, FakeAddonImage)
+	jobYaml, err := GetAddonsExecuteJob(FakeAddonName, FakeNodeName, FakeAddonImage, FakeK8sVersion)
 	if err != nil {
 		t.Fatalf("Failed to get addon execute job: %v", err)
 	}
```
cluster:

```diff
@@ -84,6 +84,7 @@ type ingressOptions struct {
 	Tolerations                             []v1.Toleration
 	NginxIngressControllerPriorityClassName string
 	DefaultHTTPBackendPriorityClassName     string
+	DefaultIngressClass                     bool
 }

 type MetricsServerOptions struct {
@@ -493,7 +494,7 @@ func (c *Cluster) doAddonDeploy(ctx context.Context, addonYaml, resourceName str
 	if err != nil {
 		return &addonError{fmt.Sprintf("Failed to get Node [%s]: %v", c.ControlPlaneHosts[0].HostnameOverride, err), isCritical}
 	}
-	addonJob, err := addons.GetAddonsExecuteJob(resourceName, node.Name, c.Services.KubeAPI.Image)
+	addonJob, err := addons.GetAddonsExecuteJob(resourceName, node.Name, c.Services.KubeAPI.Image, c.Version)

 	if err != nil {
 		return &addonError{fmt.Sprintf("Failed to generate addon execute job: %v", err), isCritical}
@@ -514,7 +515,7 @@ func (c *Cluster) doAddonDelete(ctx context.Context, resourceName string, isCrit
 	if err != nil {
 		return &addonError{fmt.Sprintf("Failed to get Node [%s]: %v", c.ControlPlaneHosts[0].HostnameOverride, err), isCritical}
 	}
-	deleteJob, err := addons.GetAddonsDeleteJob(resourceName, node.Name, c.Services.KubeAPI.Image)
+	deleteJob, err := addons.GetAddonsDeleteJob(resourceName, node.Name, c.Services.KubeAPI.Image, c.Version)
 	if err != nil {
 		return &addonError{fmt.Sprintf("Failed to generate addon delete job: %v", err), isCritical}
 	}
@@ -522,7 +523,7 @@ func (c *Cluster) doAddonDelete(ctx context.Context, resourceName string, isCrit
 		return &addonError{fmt.Sprintf("%v", err), isCritical}
 	}
 	// At this point, the addon should be deleted. We need to clean up by deleting the deploy and delete jobs.
-	tmpJobYaml, err := addons.GetAddonsExecuteJob(resourceName, node.Name, c.Services.KubeAPI.Image)
+	tmpJobYaml, err := addons.GetAddonsExecuteJob(resourceName, node.Name, c.Services.KubeAPI.Image, c.Version)
 	if err != nil {
 		return err
 	}
@@ -608,6 +609,7 @@ func (c *Cluster) deployIngress(ctx context.Context, data map[string]interface{}
 		Tolerations:                             c.Ingress.Tolerations,
 		NginxIngressControllerPriorityClassName: c.Ingress.NginxIngressControllerPriorityClassName,
 		DefaultHTTPBackendPriorityClassName:     c.Ingress.DefaultHTTPBackendPriorityClassName,
+		DefaultIngressClass:                     *c.Ingress.DefaultIngressClass,
 	}
 	// since nginx ingress controller 0.16.0, it can be run as non-root and doesn't require privileged anymore.
 	// So we can use securityContext instead of setting privileges via initContainer.
```
```diff
@@ -849,6 +849,10 @@ func (c *Cluster) setAddonsDefaults() {
 		}
 		c.Ingress.DefaultBackend = &defaultBackend
 	}
+	if c.Ingress.DefaultIngressClass == nil {
+		defaultIngressClass := true
+		c.Ingress.DefaultIngressClass = &defaultIngressClass
+	}
 }

 func setDaemonsetAddonDefaults(updateStrategy *v3.DaemonSetUpdateStrategy) *v3.DaemonSetUpdateStrategy {
```
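The new DefaultIngressClass setting follows the pointer-to-bool convention used throughout these defaults: a nil pointer means the user never set the option, so setAddonsDefaults can fill in true while an explicit false survives. A minimal illustration of the pattern (defaultBool is a made-up helper for this sketch, not part of the commit):

```go
package main

import "fmt"

// defaultBool returns v unchanged when the caller set it, otherwise a
// pointer to def; nil is the "unset" marker a plain bool cannot express.
func defaultBool(v *bool, def bool) *bool {
	if v != nil {
		return v
	}
	return &def
}

func main() {
	var unset *bool
	explicitFalse := false

	fmt.Println(*defaultBool(unset, true))          // true: option was never set
	fmt.Println(*defaultBool(&explicitFalse, true)) // false: explicit choice wins
}
```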
```diff
@@ -34,10 +34,7 @@ const (
 	KubeAPIPort      = "6443"
 	EtcdPort1        = "2379"
 	EtcdPort2        = "2380"
-	ScedulerPort     = "10251"
-	ControllerPort   = "10252"
 	KubeletPort      = "10250"
-	KubeProxyPort    = "10256"
 	FlannelVxLanPort = 8472

 	FlannelVxLanNetworkIdentify = 1
```
```diff
@@ -55,6 +55,7 @@ const (
 	MaxK8s115Version          = "v1.15"
 	MaxEtcdPort4001Version    = "v3.4.3-rancher99"
 	MaxEtcdNoStrictTLSVersion = "v3.4.14-rancher99"
+	MaxK8s121Version          = "v1.21.99-rancher99"

 	EncryptionProviderConfigArgument = "encryption-provider-config"
```
```diff
@@ -371,11 +372,26 @@ func (c *Cluster) BuildKubeControllerProcess(host *hosts.Host, serviceOptions v3
 		cmd := fmt.Sprintf("--%s=%s", arg, value)
 		Command = append(Command, cmd)
 	}
+	k8sTag, err := util.GetImageTagFromImage(c.SystemImages.Kubernetes)
+	if err != nil {
+		logrus.Warn(err)
+	}
+	k8sSemVer, err := util.StrToSemVer(k8sTag)
+	if err != nil {
+		logrus.Warn(err)
+	}
+	maxK8s121Version, err := util.StrToSemVer(MaxK8s121Version)
+	if err != nil {
+		logrus.Warn(err)
+	}

 	Binds = append(Binds, c.Services.KubeController.ExtraBinds...)
-	healthCheck := v3.HealthCheck{
-		URL: services.GetHealthCheckURL(false, services.KubeControllerPort),
-	}
+	healthCheck := v3.HealthCheck{}
+	if k8sSemVer.LessThan(*maxK8s121Version) {
+		healthCheck.URL = services.GetHealthCheckURL(false, services.KubeControllerPortMaxV121)
+	} else {
+		healthCheck.URL = services.GetHealthCheckURL(true, services.KubeControllerPort)
+	}

 	registryAuthConfig, _, _ := docker.GetImageRegistryConfig(c.Services.KubeController.Image, c.PrivateRegistriesMap)
```
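This switch exists because Kubernetes v1.22 removed kube-controller-manager's insecure serving port (10252), so clusters at v1.22 and up are probed over HTTPS on the secure port 10257 while older clusters keep the plain-HTTP check against 10252, now named KubeControllerPortMaxV121. A sketch of the resulting URLs, assuming GetHealthCheckURL builds a localhost /healthz URL from a TLS flag and a port (healthCheckURL below is a stand-in, not the RKE function):

```go
package main

import "fmt"

const (
	kubeControllerPortMaxV121 = 10252 // insecure /healthz, removed in k8s v1.22
	kubeControllerPort        = 10257 // secure /healthz, HTTPS only
)

// healthCheckURL stands in for services.GetHealthCheckURL under the
// assumption that it formats a localhost /healthz URL from TLS flag + port.
func healthCheckURL(useTLS bool, port int) string {
	scheme := "http"
	if useTLS {
		scheme = "https"
	}
	return fmt.Sprintf("%s://localhost:%d/healthz", scheme, port)
}

func main() {
	// Clusters below v1.22: plain HTTP against the old insecure port.
	fmt.Println(healthCheckURL(false, kubeControllerPortMaxV121)) // http://localhost:10252/healthz
	// v1.22 and up: HTTPS against the secure port.
	fmt.Println(healthCheckURL(true, kubeControllerPort)) // https://localhost:10257/healthz
}
```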
services:

```diff
@@ -42,11 +42,12 @@ const (
 	LogLinkContainerName    = "rke-log-linker"
 	LogCleanerContainerName = "rke-log-cleaner"

-	KubeAPIPort        = 6443
-	SchedulerPort      = 10251
-	KubeControllerPort = 10252
-	KubeletPort        = 10248
-	KubeproxyPort      = 10256
+	KubeAPIPort               = 6443
+	SchedulerPort             = 10251
+	KubeControllerPortMaxV121 = 10252
+	KubeControllerPort        = 10257
+	KubeletPort               = 10248
+	KubeproxyPort             = 10256

 	WorkerThreads = util.WorkerThreads
```
templates:

```diff
@@ -4,6 +4,7 @@ const AddonJobTemplate = `
 {{- $addonName := .AddonName }}
 {{- $nodeName := .NodeName }}
 {{- $image := .Image }}
+{{- $OSLabel := .OSLabel }}
 apiVersion: batch/v1
 kind: Job
 metadata:
@@ -24,7 +25,7 @@ spec:
       requiredDuringSchedulingIgnoredDuringExecution:
         nodeSelectorTerms:
          - matchExpressions:
-            - key: beta.kubernetes.io/os
+            - key: {{$OSLabel}}
              operator: NotIn
              values:
              - windows
```
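To see what the $OSLabel indirection produces, here is a small sketch that renders a trimmed-down copy of the affected nodeAffinity fragment with Go's text/template, assuming CompileTemplateFromMap is backed by that same standard-library package (only the label key is taken from this commit; the fragment and names are illustrative):

```go
package main

import (
	"os"
	"text/template"
)

// A trimmed-down fragment of AddonJobTemplate: just the node-OS selector,
// with the label key injected via $OSLabel instead of being hardcoded.
const fragment = `{{- $OSLabel := .OSLabel }}
affinity:
  nodeAffinity:
    requiredDuringSchedulingIgnoredDuringExecution:
      nodeSelectorTerms:
       - matchExpressions:
          - key: {{$OSLabel}}
            operator: NotIn
            values:
             - windows
`

func main() {
	t := template.Must(template.New("addonJob").Parse(fragment))
	// A v1.22+ cluster passes the GA label; older clusters would pass
	// "beta.kubernetes.io/os" here, keeping addon pods off Windows nodes.
	_ = t.Execute(os.Stdout, map[string]string{"OSLabel": "kubernetes.io/os"})
}
```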
types:

```diff
@@ -460,6 +460,8 @@ type IngressConfig struct {
 	DefaultHTTPBackendPriorityClassName string `yaml:"default_http_backend_priority_class_name" json:"defaultHttpBackendPriorityClassName,omitempty"`
 	// Priority class name for Nginx-Ingress's "nginx-ingress-controller" daemonset
 	NginxIngressControllerPriorityClassName string `yaml:"nginx_ingress_controller_priority_class_name" json:"nginxIngressControllerPriorityClassName,omitempty"`
+	// Enable or disable nginx default-http-backend
+	DefaultIngressClass *bool `yaml:"default_ingress_class" json:"defaultIngressClass,omitempty" norman:"default=true"`
 }

 type ExtraEnv struct {
```