From c191ed62023408e95d7c0ce348675268cfbe0e56 Mon Sep 17 00:00:00 2001 From: kinarashah Date: Tue, 28 May 2019 11:51:53 -0700 Subject: [PATCH] use k8s version info from kontainer-driver-metadata --- addons/coredns.go | 8 +- addons/ingress.go | 8 +- addons/kubedns.go | 7 +- addons/metrics.go | 7 +- cluster/defaults.go | 7 +- cluster/local.go | 3 +- cluster/network.go | 2 +- cluster/plan.go | 161 +- cluster/validation.go | 8 +- cmd/common.go | 4 + cmd/config.go | 23 +- cmd/up.go | 3 +- main.go | 3 +- metadata/metadata.go | 94 + templates/calico.go | 1836 ----------------- templates/canal.go | 1757 ---------------- templates/coredns.go | 290 --- templates/flannel.go | 439 ---- templates/kubedns.go | 314 --- templates/metrics.go | 150 -- templates/nginx-ingress.go | 324 --- templates/templates.go | 30 +- templates/weave.go | 240 --- .../coreos/go-semver/semver/semver.go | 2 + .../rke/k8s_rke_system_images.go | 58 +- 25 files changed, 231 insertions(+), 5547 deletions(-) create mode 100644 metadata/metadata.go delete mode 100644 templates/calico.go delete mode 100644 templates/canal.go delete mode 100644 templates/coredns.go delete mode 100644 templates/flannel.go delete mode 100644 templates/kubedns.go delete mode 100644 templates/metrics.go delete mode 100644 templates/nginx-ingress.go delete mode 100644 templates/weave.go diff --git a/addons/coredns.go b/addons/coredns.go index 5e844e5f..8138def9 100644 --- a/addons/coredns.go +++ b/addons/coredns.go @@ -1,8 +1,10 @@ package addons -import "github.com/rancher/rke/templates" +import ( + rkeData "github.com/rancher/kontainer-driver-metadata/rke/templates" + "github.com/rancher/rke/templates" +) func GetCoreDNSManifest(CoreDNSConfig interface{}) (string, error) { - - return templates.CompileTemplateFromMap(templates.CoreDNSTemplate, CoreDNSConfig) + return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.CoreDNS), CoreDNSConfig) } diff --git a/addons/ingress.go b/addons/ingress.go index 26410b66..e63cbbcb 100644 --- a/addons/ingress.go +++ b/addons/ingress.go @@ -1,8 +1,10 @@ package addons -import "github.com/rancher/rke/templates" +import ( + rkeData "github.com/rancher/kontainer-driver-metadata/rke/templates" + "github.com/rancher/rke/templates" +) func GetNginxIngressManifest(IngressConfig interface{}) (string, error) { - - return templates.CompileTemplateFromMap(templates.NginxIngressTemplate, IngressConfig) + return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.NginxIngress), IngressConfig) } diff --git a/addons/kubedns.go b/addons/kubedns.go index 8556e208..2d34528e 100644 --- a/addons/kubedns.go +++ b/addons/kubedns.go @@ -1,8 +1,11 @@ package addons -import "github.com/rancher/rke/templates" +import ( + rkeData "github.com/rancher/kontainer-driver-metadata/rke/templates" + "github.com/rancher/rke/templates" +) func GetKubeDNSManifest(KubeDNSConfig interface{}) (string, error) { - return templates.CompileTemplateFromMap(templates.KubeDNSTemplate, KubeDNSConfig) + return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.KubeDNS), KubeDNSConfig) } diff --git a/addons/metrics.go b/addons/metrics.go index 12d6e1d2..3c73d522 100644 --- a/addons/metrics.go +++ b/addons/metrics.go @@ -1,8 +1,11 @@ package addons -import "github.com/rancher/rke/templates" +import ( + rkeData "github.com/rancher/kontainer-driver-metadata/rke/templates" + "github.com/rancher/rke/templates" +) func GetMetricsServerManifest(MetricsServerConfig interface{}) (string, error) { 
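Each addon above now resolves its manifest through the versioned template data shipped in kontainer-driver-metadata instead of a compiled-in constant. A minimal, self-contained sketch of that lookup; the map layout and the templateForAddon helper are illustrative assumptions, not the actual kontainer-driver-metadata API:

package main

import "fmt"

// Assumed shape: addon name -> version range key (or "default") -> template body.
var versionedTemplates = map[string]map[string]string{
	"coreDNS": {
		">=1.14":  "# CoreDNS manifest for k8s >= 1.14 ...",
		"default": "# fallback CoreDNS manifest ...",
	},
}

// templateForAddon resolves a template for an addon and falls back to "default",
// roughly the job a GetDefaultVersionedTemplate-style helper has to do.
func templateForAddon(addon, rangeKey string) string {
	entries, ok := versionedTemplates[addon]
	if !ok {
		return ""
	}
	if tmpl, ok := entries[rangeKey]; ok {
		return tmpl
	}
	return entries["default"]
}

func main() {
	fmt.Println(templateForAddon("coreDNS", ">=1.14"))
}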
- return templates.CompileTemplateFromMap(templates.MetricsServerTemplate, MetricsServerConfig) + return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.MetricsServer), MetricsServerConfig) } diff --git a/cluster/defaults.go b/cluster/defaults.go index 84efc030..bbebc313 100644 --- a/cluster/defaults.go +++ b/cluster/defaults.go @@ -3,6 +3,7 @@ package cluster import ( "context" "fmt" + "github.com/rancher/rke/metadata" "strings" "github.com/rancher/rke/cloudprovider" @@ -25,8 +26,6 @@ const ( DefaultClusterName = "local" DefaultClusterSSHKeyPath = "~/.ssh/id_rsa" - DefaultK8sVersion = v3.DefaultK8s - DefaultSSHPort = "22" DefaultDockerSockPath = "/var/run/docker.sock" @@ -137,7 +136,7 @@ func (c *Cluster) setClusterDefaults(ctx context.Context, flags ExternalFlags) e c.ClusterName = DefaultClusterName } if len(c.Version) == 0 { - c.Version = DefaultK8sVersion + c.Version = metadata.DefaultK8sVersion } if c.AddonJobTimeout == 0 { c.AddonJobTimeout = k8s.DefaultTimeout @@ -229,7 +228,7 @@ func (c *Cluster) setClusterServicesDefaults() { func (c *Cluster) setClusterImageDefaults() error { var privRegURL string - imageDefaults, ok := v3.AllK8sVersions[c.Version] + imageDefaults, ok := metadata.K8sVersionToRKESystemImages[c.Version] if !ok { return nil } diff --git a/cluster/local.go b/cluster/local.go index 51764061..88f83278 100644 --- a/cluster/local.go +++ b/cluster/local.go @@ -1,13 +1,14 @@ package cluster import ( + "github.com/rancher/rke/metadata" "github.com/rancher/rke/services" "github.com/rancher/types/apis/management.cattle.io/v3" ) func GetLocalRKEConfig() *v3.RancherKubernetesEngineConfig { rkeLocalNode := GetLocalRKENodeConfig() - imageDefaults := v3.K8sVersionToRKESystemImages[DefaultK8sVersion] + imageDefaults := metadata.K8sVersionToRKESystemImages[metadata.DefaultK8sVersion] rkeServices := v3.RKEConfigServices{ Kubelet: v3.KubeletService{ diff --git a/cluster/network.go b/cluster/network.go index 2f40c166..ca9c0d16 100644 --- a/cluster/network.go +++ b/cluster/network.go @@ -251,7 +251,7 @@ func (c *Cluster) getNetworkPluginManifest(pluginConfig map[string]interface{}) case CanalNetworkPlugin: return templates.CompileTemplateFromMap(templates.GetVersionedTemplates(CanalNetworkPlugin, c.Version), pluginConfig) case WeaveNetworkPlugin: - return templates.CompileTemplateFromMap(templates.WeaveTemplate, pluginConfig) + return templates.CompileTemplateFromMap(templates.GetVersionedTemplates(WeaveNetworkPlugin, c.Version), pluginConfig) default: return "", fmt.Errorf("[network] Unsupported network plugin: %s", c.Network.Plugin) } diff --git a/cluster/plan.go b/cluster/plan.go index 3c0ca202..10f3e5a3 100644 --- a/cluster/plan.go +++ b/cluster/plan.go @@ -15,6 +15,7 @@ import ( "github.com/rancher/rke/docker" "github.com/rancher/rke/hosts" "github.com/rancher/rke/k8s" + "github.com/rancher/rke/metadata" "github.com/rancher/rke/pki" "github.com/rancher/rke/services" "github.com/rancher/rke/util" @@ -120,46 +121,26 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string) v3.Pr c.getRKEToolsEntryPoint(), "kube-apiserver", } - baseEnabledAdmissionPlugins := []string{ - "DefaultStorageClass", - "DefaultTolerationSeconds", - "LimitRanger", - "NamespaceLifecycle", - "NodeRestriction", - "ResourceQuota", - "ServiceAccount", - } + CommandArgs := map[string]string{ - "allow-privileged": "true", - "anonymous-auth": "false", - "bind-address": "0.0.0.0", - "client-ca-file": pki.GetCertPath(pki.CACertName), - "cloud-provider": 
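In cluster/defaults.go and cluster/local.go above, the default Kubernetes version and image set now come from the metadata package rather than constants compiled into rancher/types. A hedged sketch of that lookup-with-fallback pattern, reusing the maps this patch introduces; the resolveSystemImages helper itself is hypothetical:

package cluster

import (
	"fmt"

	"github.com/rancher/rke/metadata"
	"github.com/rancher/types/apis/management.cattle.io/v3"
)

// resolveSystemImages defaults the version from metadata, then fails
// clearly when no system images are known for the requested version.
func resolveSystemImages(version string) (v3.RKESystemImages, error) {
	if version == "" {
		version = metadata.DefaultK8sVersion
	}
	images, ok := metadata.K8sVersionToRKESystemImages[version]
	if !ok {
		return v3.RKESystemImages{}, fmt.Errorf("no system images known for Kubernetes %s", version)
	}
	return images, nil
}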
c.CloudProvider.Name, - "etcd-cafile": etcdCAClientCert, - "etcd-certfile": etcdClientCert, - "etcd-keyfile": etcdClientKey, - "etcd-prefix": etcdPathPrefix, - "etcd-servers": etcdConnectionString, - "insecure-port": "0", - "kubelet-client-certificate": pki.GetCertPath(pki.KubeAPICertName), - "kubelet-client-key": pki.GetKeyPath(pki.KubeAPICertName), - "kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname", - "profiling": "false", - "proxy-client-cert-file": pki.GetCertPath(pki.APIProxyClientCertName), - "proxy-client-key-file": pki.GetKeyPath(pki.APIProxyClientCertName), - "requestheader-allowed-names": pki.APIProxyClientCertName, - "requestheader-client-ca-file": pki.GetCertPath(pki.RequestHeaderCACertName), - "requestheader-extra-headers-prefix": "X-Remote-Extra-", - "requestheader-group-headers": "X-Remote-Group", - "requestheader-username-headers": "X-Remote-User", - "secure-port": "6443", - "service-account-key-file": pki.GetKeyPath(pki.ServiceAccountTokenKeyName), - "service-account-lookup": "true", - "service-cluster-ip-range": c.Services.KubeAPI.ServiceClusterIPRange, - "service-node-port-range": c.Services.KubeAPI.ServiceNodePortRange, - "storage-backend": "etcd3", - "tls-cert-file": pki.GetCertPath(pki.KubeAPICertName), - "tls-private-key-file": pki.GetKeyPath(pki.KubeAPICertName), + "client-ca-file": pki.GetCertPath(pki.CACertName), + "cloud-provider": c.CloudProvider.Name, + "etcd-cafile": etcdCAClientCert, + "etcd-certfile": etcdClientCert, + "etcd-keyfile": etcdClientKey, + "etcd-prefix": etcdPathPrefix, + "etcd-servers": etcdConnectionString, + "kubelet-client-certificate": pki.GetCertPath(pki.KubeAPICertName), + "kubelet-client-key": pki.GetKeyPath(pki.KubeAPICertName), + "proxy-client-cert-file": pki.GetCertPath(pki.APIProxyClientCertName), + "proxy-client-key-file": pki.GetKeyPath(pki.APIProxyClientCertName), + "requestheader-allowed-names": pki.APIProxyClientCertName, + "requestheader-client-ca-file": pki.GetCertPath(pki.RequestHeaderCACertName), + "service-account-key-file": pki.GetKeyPath(pki.ServiceAccountTokenKeyName), + "service-cluster-ip-range": c.Services.KubeAPI.ServiceClusterIPRange, + "service-node-port-range": c.Services.KubeAPI.ServiceNodePortRange, + "tls-cert-file": pki.GetCertPath(pki.KubeAPICertName), + "tls-private-key-file": pki.GetKeyPath(pki.KubeAPICertName), } if len(c.CloudProvider.Name) > 0 { CommandArgs["cloud-config"] = cloudConfigFileName @@ -198,37 +179,15 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string) v3.Pr CommandArgs["advertise-address"] = host.InternalAddress } - // PodSecurityPolicy - if c.Services.KubeAPI.PodSecurityPolicy { - CommandArgs["runtime-config"] = "extensions/v1beta1/podsecuritypolicy=true" - baseEnabledAdmissionPlugins = append(baseEnabledAdmissionPlugins, "PodSecurityPolicy") - } - - // AlwaysPullImages - if c.Services.KubeAPI.AlwaysPullImages { - baseEnabledAdmissionPlugins = append(baseEnabledAdmissionPlugins, "AlwaysPullImages") - } - - // Admission control plugins - // Resolution order: - // k8s_defaults.go K8sVersionServiceOptions - // enabledAdmissionPlugins - // cluster.yml extra_args overwrites it all - for _, optionName := range admissionControlOptionNames { - if _, ok := CommandArgs[optionName]; ok { - enabledAdmissionPlugins := strings.Split(CommandArgs[optionName], ",") - enabledAdmissionPlugins = append(enabledAdmissionPlugins, baseEnabledAdmissionPlugins...) 
- - // Join unique slice as arg - CommandArgs[optionName] = strings.Join(util.UniqueStringSlice(enabledAdmissionPlugins), ",") - break - } - } if c.Services.KubeAPI.PodSecurityPolicy { CommandArgs["runtime-config"] = "extensions/v1beta1/podsecuritypolicy=true" for _, optionName := range admissionControlOptionNames { if _, ok := CommandArgs[optionName]; ok { - CommandArgs[optionName] = CommandArgs[optionName] + ",PodSecurityPolicy" + if c.Services.KubeAPI.AlwaysPullImages { + CommandArgs[optionName] = CommandArgs[optionName] + ",PodSecurityPolicy,AlwaysPullImages" + } else { + CommandArgs[optionName] = CommandArgs[optionName] + ",PodSecurityPolicy" + } break } } @@ -284,23 +243,12 @@ func (c *Cluster) BuildKubeControllerProcess(prefixPath string) v3.Process { } CommandArgs := map[string]string{ - "address": "0.0.0.0", - "allow-untagged-cloud": "true", - "allocate-node-cidrs": "true", "cloud-provider": c.CloudProvider.Name, "cluster-cidr": c.ClusterCIDR, - "configure-cloud-routes": "false", - "enable-hostpath-provisioner": "false", "kubeconfig": pki.GetConfigPath(pki.KubeControllerCertName), - "leader-elect": "true", - "node-monitor-grace-period": "40s", - "pod-eviction-timeout": "5m0s", - "profiling": "false", "root-ca-file": pki.GetCertPath(pki.CACertName), "service-account-private-key-file": pki.GetKeyPath(pki.ServiceAccountTokenKeyName), "service-cluster-ip-range": c.Services.KubeController.ServiceClusterIPRange, - "terminated-pod-gc-threshold": "1000", - "v": "2", } // Best security practice is to listen on localhost, but DinD uses private container network instead of Host. if c.DinD { @@ -382,30 +330,15 @@ func (c *Cluster) BuildKubeletProcess(host *hosts.Host, prefixPath string) v3.Pr } CommandArgs := map[string]string{ - "address": "0.0.0.0", - "anonymous-auth": "false", - "authentication-token-webhook": "true", - "cgroups-per-qos": "True", - "client-ca-file": pki.GetCertPath(pki.CACertName), - "cloud-provider": c.CloudProvider.Name, - "cluster-dns": c.ClusterDNSServer, - "cluster-domain": c.ClusterDomain, - "cni-bin-dir": "/opt/cni/bin", - "cni-conf-dir": "/etc/cni/net.d", - "enforce-node-allocatable": "", - "event-qps": "0", - "fail-swap-on": strconv.FormatBool(c.Services.Kubelet.FailSwapOn), - "hostname-override": host.HostnameOverride, - "kubeconfig": pki.GetConfigPath(pki.KubeNodeCertName), - "make-iptables-util-chains": "true", - "network-plugin": "cni", - "pod-infra-container-image": c.Services.Kubelet.InfraContainerImage, - "read-only-port": "0", - "resolv-conf": "/etc/resolv.conf", - "root-dir": path.Join(prefixPath, "/var/lib/kubelet"), - "streaming-connection-idle-timeout": "30m", - "volume-plugin-dir": "/var/lib/kubelet/volumeplugins", - "v": "2", + "client-ca-file": pki.GetCertPath(pki.CACertName), + "cloud-provider": c.CloudProvider.Name, + "cluster-dns": c.ClusterDNSServer, + "cluster-domain": c.ClusterDomain, + "fail-swap-on": strconv.FormatBool(c.Services.Kubelet.FailSwapOn), + "hostname-override": host.HostnameOverride, + "kubeconfig": pki.GetConfigPath(pki.KubeNodeCertName), + "pod-infra-container-image": c.Services.Kubelet.InfraContainerImage, + "root-dir": path.Join(prefixPath, "/var/lib/kubelet"), } if host.IsControl && !host.IsWorker { CommandArgs["register-with-taints"] = unschedulableControlTaint @@ -522,16 +455,11 @@ func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, prefixPath string) v3. 
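In BuildKubeAPIProcess above, the baseline admission plugins now arrive via the version-specific service options, and the remaining hunk only appends PodSecurityPolicy (plus AlwaysPullImages, when both features are enabled) to whichever admission flag name the version uses. A self-contained sketch of that append-once pattern; the starting plugin list is invented:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// CommandArgs as it might look once version-specific service options are applied.
	commandArgs := map[string]string{
		"enable-admission-plugins": "NamespaceLifecycle,ServiceAccount,NodeRestriction",
	}
	// Probe both flag spellings, mirroring admissionControlOptionNames.
	for _, name := range []string{"enable-admission-plugins", "admission-control"} {
		if val, ok := commandArgs[name]; ok {
			plugins := append(strings.Split(val, ","), "PodSecurityPolicy", "AlwaysPullImages")
			commandArgs[name] = strings.Join(plugins, ",")
			break
		}
	}
	fmt.Println(commandArgs["enable-admission-plugins"])
}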
}  CommandArgs := map[string]string{ - "cluster-cidr": c.ClusterCIDR, - "v": "2", - "healthz-bind-address": "127.0.0.1", - "hostname-override": host.HostnameOverride, - "kubeconfig": pki.GetConfigPath(pki.KubeProxyCertName), - } - // Best security practice is to listen on localhost, but DinD uses private container network instead of Host. - if c.DinD { - CommandArgs["healthz-bind-address"] = "0.0.0.0" + "cluster-cidr": c.ClusterCIDR, + "hostname-override": host.HostnameOverride, + "kubeconfig": pki.GetConfigPath(pki.KubeProxyCertName), } + // check if our version has specific options for this component serviceOptions := c.GetKubernetesServicesOptions() if serviceOptions.Kubeproxy != nil { @@ -545,6 +473,11 @@ func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, prefixPath string) v3. } } + // Best security practice is to listen on localhost, but DinD uses private container network instead of Host. + if c.DinD { + CommandArgs["healthz-bind-address"] = "0.0.0.0" + } + VolumesFrom := []string{ services.SidekickContainerName, } @@ -624,11 +557,7 @@ func (c *Cluster) BuildSchedulerProcess(prefixPath string) v3.Process { } CommandArgs := map[string]string{ - "leader-elect": "true", - "v": "2", - "address": "0.0.0.0", - "profiling": "false", - "kubeconfig": pki.GetConfigPath(pki.KubeSchedulerCertName), + "kubeconfig": pki.GetConfigPath(pki.KubeSchedulerCertName), } // Best security practice is to listen on localhost, but DinD uses private container network instead of Host. @@ -860,7 +789,7 @@ func (c *Cluster) GetKubernetesServicesOptions() v3.KubernetesServicesOptions { clusterMajorVersion = k8sImageMajorVersion } - serviceOptions, ok := v3.K8sVersionServiceOptions[clusterMajorVersion] + serviceOptions, ok := metadata.K8sVersionToServiceOptions[clusterMajorVersion] if ok { return serviceOptions } diff --git a/cluster/validation.go b/cluster/validation.go index 439624a5..0dadc9f8 100644 --- a/cluster/validation.go +++ b/cluster/validation.go @@ -3,13 +3,13 @@ package cluster import ( "context" "fmt" + "github.com/rancher/rke/metadata" "strings" "github.com/rancher/rke/log" "github.com/rancher/rke/pki" "github.com/rancher/rke/services" "github.com/rancher/rke/util" - v3 "github.com/rancher/types/apis/management.cattle.io/v3" "k8s.io/apimachinery/pkg/util/validation" ) @@ -205,7 +205,7 @@ func validateVersion(ctx context.Context, c *Cluster) error { if err != nil { return fmt.Errorf("%s is not valid semver", c.Version) } - _, ok := v3.AllK8sVersions[c.Version] + _, ok := metadata.K8sVersionToRKESystemImages[c.Version] if !ok { if err := validateSystemImages(c); err != nil { return fmt.Errorf("%s is an unsupported Kubernetes version and system images are not populated: %v", c.Version, err) @@ -213,9 +213,9 @@ func validateVersion(ctx context.Context, c *Cluster) error { return nil } - if _, ok := v3.K8sBadVersions[c.Version]; ok { - log.Warnf(ctx, "%s version exists but its recommended to install this version - see 'rke config --system-images --all' for versions supported with this release", c.Version) - return nil + if _, ok := metadata.K8sBadVersions[c.Version]; ok { + log.Warnf(ctx, "%s version exists but it is not recommended to install it - see 'rke config --system-images --all' for versions supported with this release", c.Version) + return fmt.Errorf("%s is an unsupported Kubernetes version - see 'rke config --system-images --all' for versions supported with this release", c.Version) } return nil
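The plan.go hunks above strip the hardcoded per-component flags; those now come from metadata.K8sVersionToServiceOptions, keyed by the cluster's major version, with cluster.yml extra_args still applied last. A runnable sketch of both steps, with invented option values:

package main

import (
	"fmt"
	"strings"
)

// majorVersion trims "v1.14.1-rancher1-1" down to "v1.14", mirroring the
// getTagMajorVersion helper added in metadata/metadata.go.
func majorVersion(tag string) string {
	parts := strings.Split(tag, ".")
	if len(parts) < 2 {
		return ""
	}
	return strings.Join(parts[:2], ".")
}

func main() {
	// Invented per-major-version flags standing in for metadata.K8sVersionToServiceOptions.
	serviceOptions := map[string]map[string]string{
		"v1.14": {"address": "0.0.0.0", "v": "2"},
	}
	commandArgs := map[string]string{"kubeconfig": "/etc/kubernetes/ssl/kubecfg-kube-proxy.yaml"}
	if opts, ok := serviceOptions[majorVersion("v1.14.1-rancher1-1")]; ok {
		for k, v := range opts {
			commandArgs[k] = v // cluster.yml extra_args would still overwrite these afterwards
		}
	}
	fmt.Println(commandArgs)
}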
diff --git a/cmd/common.go b/cmd/common.go index 6fbc5cb8..e1037b94 100644 --- a/cmd/common.go +++ b/cmd/common.go @@ -3,6 +3,7 @@ package cmd import ( "context" "fmt" + "github.com/rancher/rke/metadata" "io/ioutil" "os" "path/filepath" @@ -74,6 +75,9 @@ func ClusterInit(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfi if len(flags.CertificateDir) == 0 { flags.CertificateDir = cluster.GetCertificateDirPath(flags.ClusterFilePath, flags.ConfigDir) } + if err := metadata.InitMetadata(ctx); err != nil { + return err + } rkeFullState, _ := cluster.ReadStateFile(ctx, stateFilePath) kubeCluster, err := cluster.InitClusterObject(ctx, rkeConfig, flags) diff --git a/cmd/config.go b/cmd/config.go index c52d4b3b..dfa3978a 100644 --- a/cmd/config.go +++ b/cmd/config.go @@ -2,7 +2,9 @@ package cmd import ( "bufio" + "context" "fmt" + "github.com/rancher/rke/metadata" "io/ioutil" "os" "reflect" @@ -95,6 +97,11 @@ func writeConfig(cluster *v3.RancherKubernetesEngineConfig, configFile string, p func clusterConfig(ctx *cli.Context) error { if ctx.Bool("system-images") { + if metadata.K8sVersionToRKESystemImages == nil { + if err := metadata.InitMetadata(context.Background()); err != nil { + return err + } + } return generateSystemImagesList(ctx.String("version"), ctx.Bool("all")) } configFile := ctx.String("name") @@ -270,14 +275,14 @@ func getHostConfig(reader *bufio.Reader, index int, clusterSSHKeyPath string) (* } func getSystemImagesConfig(reader *bufio.Reader) (*v3.RKESystemImages, error) { - imageDefaults := v3.K8sVersionToRKESystemImages[cluster.DefaultK8sVersion] + imageDefaults := metadata.K8sVersionToRKESystemImages[metadata.DefaultK8sVersion] kubeImage, err := getConfig(reader, "Kubernetes Docker image", imageDefaults.Kubernetes) if err != nil { return nil, err } - systemImages, ok := v3.K8sVersionToRKESystemImages[kubeImage] + systemImages, ok := metadata.K8sVersionToRKESystemImages[kubeImage] if ok { return &systemImages, nil } @@ -403,10 +408,10 @@ func getAddonManifests(reader *bufio.Reader) ([]string, error) { func generateSystemImagesList(version string, all bool) error { allVersions := []string{} currentVersionImages := make(map[string]v3.RKESystemImages) - for _, version := range v3.K8sVersionsCurrent { - if _, ok := v3.K8sBadVersions[version]; !ok { + for _, version := range metadata.K8sVersionsCurrent { + if _, ok := metadata.K8sBadVersions[version]; !ok { allVersions = append(allVersions, version) - currentVersionImages[version] = v3.AllK8sVersions[version] + currentVersionImages[version] = metadata.K8sVersionToRKESystemImages[version] } } if all { @@ -423,11 +428,11 @@ func generateSystemImagesList(version string, all bool) error { return nil } if len(version) == 0 { - version = v3.DefaultK8s + version = metadata.DefaultK8sVersion } - rkeSystemImages := v3.AllK8sVersions[version] - if _, ok := v3.K8sBadVersions[version]; ok { - return fmt.Errorf("k8s version is not recommended, supported versions are: %v", allVersions) + rkeSystemImages := metadata.K8sVersionToRKESystemImages[version] + if _, ok := metadata.K8sBadVersions[version]; ok { + return fmt.Errorf("k8s version is not supported, supported versions are: %v", allVersions) } if rkeSystemImages == (v3.RKESystemImages{}) { return fmt.Errorf("k8s version is not supported, supported versions are: %v", allVersions) }
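generateSystemImagesList above only advertises versions that are current for the running RKE release and not flagged bad. The same filter in isolation, with invented sample data standing in for metadata.K8sVersionsCurrent and metadata.K8sBadVersions:

package main

import "fmt"

func main() {
	current := []string{"v1.13.5-rancher1-2", "v1.14.1-rancher1-1"}
	bad := map[string]bool{"v1.13.5-rancher1-2": true}
	var supported []string
	for _, v := range current {
		if !bad[v] {
			supported = append(supported, v)
		}
	}
	fmt.Println(supported) // only versions safe to offer with this RKE build
}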
diff --git a/cmd/up.go b/cmd/up.go index 8ba9fcae..b8cf7501 100644 --- a/cmd/up.go +++ b/cmd/up.go @@ -11,7 +11,7 @@ import ( "github.com/rancher/rke/hosts" "github.com/rancher/rke/log" "github.com/rancher/rke/pki" - v3 "github.com/rancher/types/apis/management.cattle.io/v3" + "github.com/rancher/types/apis/management.cattle.io/v3" "github.com/urfave/cli" "k8s.io/client-go/util/cert" ) @@ -82,7 +82,6 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c if err != nil { return APIURL, caCrt, clientCert, clientKey, nil, err } - kubeCluster, err := cluster.InitClusterObject(ctx, clusterState.DesiredState.RancherKubernetesEngineConfig.DeepCopy(), flags) if err != nil { return APIURL, caCrt, clientCert, clientKey, nil, err diff --git a/main.go b/main.go index edf7a7ec..460b1dea 100644 --- a/main.go +++ b/main.go @@ -1,6 +1,7 @@ package main import ( + "github.com/rancher/rke/metadata" "os" "regexp" @@ -30,8 +31,8 @@ func mainErr() error { if ctx.GlobalBool("debug") { logrus.SetLevel(logrus.DebugLevel) } - logrus.Debugf("RKE version %s", app.Version) if released.MatchString(app.Version) { + metadata.RKEVersion = app.Version return nil } logrus.Warnf("This is not an officially supported version (%s) of RKE. Please download the latest official release at https://github.com/rancher/rke/releases/latest", app.Version) diff --git a/metadata/metadata.go b/metadata/metadata.go new file mode 100644 index 00000000..4be73e4f --- /dev/null +++ b/metadata/metadata.go @@ -0,0 +1,94 @@ +package metadata + +import ( + "context" + "strings" + + mVersion "github.com/mcuadros/go-version" + "github.com/rancher/kontainer-driver-metadata/rke" + "github.com/rancher/types/apis/management.cattle.io/v3" + "github.com/sirupsen/logrus" +) + +var ( + RKEVersion string + DefaultK8sVersion string + K8sVersionToTemplates map[string]map[string]string + K8sVersionToRKESystemImages map[string]v3.RKESystemImages + K8sVersionToServiceOptions map[string]v3.KubernetesServicesOptions + K8sVersionsCurrent []string + K8sBadVersions = map[string]bool{} +) + +func InitMetadata(ctx context.Context) error { + logrus.Debugf("initializing RKE metadata") + initK8sRKESystemImages() + initAddonTemplates() + initServiceOptions() + return nil +} + +const RKEVersionDev = "0.2.3" + +func initAddonTemplates() { + K8sVersionToTemplates = rke.DriverData.K8sVersionedTemplates +} + +func initServiceOptions() { + K8sVersionToServiceOptions = interface{}(rke.DriverData.K8sVersionServiceOptions).(map[string]v3.KubernetesServicesOptions) +} + +func initK8sRKESystemImages() { + K8sVersionToRKESystemImages = map[string]v3.RKESystemImages{} + rkeData := rke.DriverData + // non-released RKE builds fall back to the development version + if RKEVersion == "" { + RKEVersion = RKEVersionDev + } + DefaultK8sVersion = rkeData.RKEDefaultK8sVersions["default"] + if defaultK8sVersion, ok := rkeData.RKEDefaultK8sVersions[RKEVersion]; ok { + DefaultK8sVersion = defaultK8sVersion + } + maxVersionForMajorK8sVersion := map[string]string{} + for k8sVersion, systemImages := range rkeData.K8sVersionRKESystemImages { + rkeVersionInfo, ok := rkeData.K8sVersionInfo[k8sVersion] + if ok { + // RKEVersion = 0.2.4, DeprecateRKEVersion = 0.2.2 + if rkeVersionInfo.DeprecateRKEVersion != "" && mVersion.Compare(RKEVersion, rkeVersionInfo.DeprecateRKEVersion, ">=") { + K8sBadVersions[k8sVersion] = true + continue + } + // RKEVersion = 0.2.4, MinVersion = 0.2.5, don't store + lowerThanMin := rkeVersionInfo.MinRKEVersion != "" && mVersion.Compare(RKEVersion, rkeVersionInfo.MinRKEVersion, "<") + if lowerThanMin { + continue + } + } + // store all for upgrades + K8sVersionToRKESystemImages[k8sVersion] = interface{}(systemImages).(v3.RKESystemImages) + + majorVersion := getTagMajorVersion(k8sVersion) + maxVersionInfo, ok := rkeData.K8sVersionInfo[majorVersion] + if ok { + // RKEVersion = 0.2.4, MaxVersion = 0.2.3, don't use in current + greaterThanMax := maxVersionInfo.MaxRKEVersion != "" && mVersion.Compare(RKEVersion, maxVersionInfo.MaxRKEVersion, ">") + if greaterThanMax { + continue + } + } + if curr, ok := maxVersionForMajorK8sVersion[majorVersion]; !ok ||
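The release gating in initK8sRKESystemImages above can be condensed as follows: a Kubernetes version is flagged bad once the running RKE is at or past its deprecation release, and skipped entirely when RKE is older than the version's minimum release. A hedged, self-contained sketch using the same go-version comparisons; the helper and sample versions are invented:

package main

import (
	"fmt"

	mVersion "github.com/mcuadros/go-version"
)

func usable(rkeVersion, deprecateAt, minRKE string) bool {
	if deprecateAt != "" && mVersion.Compare(rkeVersion, deprecateAt, ">=") {
		return false // would land in K8sBadVersions
	}
	if minRKE != "" && mVersion.Compare(rkeVersion, minRKE, "<") {
		return false // this RKE build is too old to run it
	}
	return true
}

func main() {
	fmt.Println(usable("0.2.3", "0.2.2", "")) // false: already deprecated
	fmt.Println(usable("0.2.3", "", "0.2.5")) // false: RKE too old
	fmt.Println(usable("0.2.3", "", "0.2.0")) // true
}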
k8sVersion > curr { + maxVersionForMajorK8sVersion[majorVersion] = k8sVersion + } + } + for _, k8sVersion := range maxVersionForMajorK8sVersion { + K8sVersionsCurrent = append(K8sVersionsCurrent, k8sVersion) + } +} + +func getTagMajorVersion(tag string) string { + splitTag := strings.Split(tag, ".") + if len(splitTag) < 2 { + return "" + } + return strings.Join(splitTag[:2], ".") +} diff --git a/templates/calico.go b/templates/calico.go deleted file mode 100644 index 5313c630..00000000 --- a/templates/calico.go +++ /dev/null @@ -1,1836 +0,0 @@ -package templates - -const CalicoTemplateV112 = ` -{{if eq .RBACConfig "rbac"}} -## start rbac here - -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-node -rules: - - apiGroups: [""] - resources: - - namespaces - verbs: - - get - - list - - watch - - apiGroups: [""] - resources: - - pods/status - verbs: - - update - - apiGroups: [""] - resources: - - pods - verbs: - - get - - list - - watch - - patch - - apiGroups: [""] - resources: - - services - verbs: - - get - - apiGroups: [""] - resources: - - endpoints - verbs: - - get - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - update - - watch - - apiGroups: ["extensions"] - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - watch - - list - - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - felixconfigurations - - bgppeers - - globalbgpconfigs - - bgpconfigurations - - ippools - - globalnetworkpolicies - - globalnetworksets - - networkpolicies - - clusterinformations - - hostendpoints - verbs: - - create - - get - - list - - update - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: calico-node -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-node -subjects: -- kind: ServiceAccount - name: calico-node - namespace: kube-system -- apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:nodes -{{end}} -## end rbac here - ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas - # below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is - # essential. - typha_service_name: "none" - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "log_level": "WARNING", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "mtu": 1500, - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "kubeconfig": "{{.KubeCfg}}" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } - ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. 
-kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-node - annotations: - # This, along with the CriticalAddonsOnly toleration below, - # marks the pod as a critical add-on, ensuring it gets - # priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: beta.kubernetes.io/os - operator: NotIn - values: - - windows - hostNetwork: true - tolerations: - # Make sure calico/node gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - - key: "node-role.kubernetes.io/controlplane" - operator: "Exists" - effect: "NoSchedule" - - key: "node-role.kubernetes.io/etcd" - operator: "Exists" - effect: "NoExecute" - serviceAccountName: calico-node - terminationGracePeriodSeconds: 0 - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: {{.NodeImage}} - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Disable felix logging to file - - name: FELIX_LOGFILEPATH - value: "none" - # Disable felix logging for syslog - - name: FELIX_LOGSEVERITYSYS - value: "" - # Enable felix logging to stdout - - name: FELIX_LOGSEVERITYSCREEN - value: "Warning" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,bgp" - # Disable file logging so kubectl logs works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Disable IPV6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Set MTU for tunnel device used if ipip is enabled - - name: FELIX_IPINIPMTU - value: "1440" - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within --cluster-cidr. - - name: CALICO_IPV4POOL_CIDR - value: "{{.ClusterCIDR}}" - # Enable IPIP - - name: CALICO_IPV4POOL_IPIP - value: "Always" - # Enable IP-in-IP within Felix. - - name: FELIX_IPINIPENABLED - value: "true" - # Typha support: controlled by the ConfigMap. - - name: FELIX_TYPHAK8SSERVICENAME - valueFrom: - configMapKeyRef: - name: calico-config - key: typha_service_name - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Auto-detect the BGP IP address. 
- - name: IP - value: "autodetect" - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - httpGet: - path: /readiness - port: 9099 - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{.CNIImage}} - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - -# Create all the CustomResourceDefinitions needed for -# Calico policy and networking mode. ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgppeers.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPPeer - plural: bgppeers - singular: bgppeer - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: hostendpoints.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: HostEndpoint - plural: hostendpoints - singular: hostendpoint - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: 
- name: globalnetworkpolicies.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworksets.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: networkpolicies.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy - ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-node - namespace: kube-system - - -{{if ne .CloudProvider "none"}} ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: {{.CloudProvider}}-ippool - namespace: kube-system -data: - {{.CloudProvider}}-ippool: |- - apiVersion: projectcalico.org/v3 - kind: IPPool - metadata: - name: ippool-ipip-1 - spec: - cidr: {{.ClusterCIDR}} - ipipMode: Always - natOutgoing: true ---- -apiVersion: v1 -kind: Pod -metadata: - name: calicoctl - namespace: kube-system -spec: - hostNetwork: true - restartPolicy: OnFailure - containers: - - name: calicoctl - image: {{.Calicoctl}} - command: ["/bin/sh", "-c", "calicoctl apply -f {{.CloudProvider}}-ippool.yaml"] - env: - - name: DATASTORE_TYPE - value: kubernetes - volumeMounts: - - name: ippool-config - mountPath: /root/ - volumes: - - name: ippool-config - configMap: - name: {{.CloudProvider}}-ippool - items: - - key: {{.CloudProvider}}-ippool - path: {{.CloudProvider}}-ippool.yaml - # Mount in the etcd TLS secrets. -{{end}} -` - -const CalicoTemplateV113 = ` -{{if eq .RBACConfig "rbac"}} -## start rbac here - -# Include a clusterrole for the calico-node DaemonSet, -# and bind it to the calico-node serviceaccount. -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-node -rules: - # The CNI plugin needs to get pods, nodes, and namespaces. - - apiGroups: [""] - resources: - - pods - - nodes - - namespaces - verbs: - - get - - apiGroups: [""] - resources: - - endpoints - - services - verbs: - # Used to discover service IPs for advertisement. - - watch - - list - # Used to discover Typhas. - - get - - apiGroups: [""] - resources: - - nodes/status - verbs: - # Needed for clearing NodeNetworkUnavailable flag. - - patch - # Calico stores some configuration information in node annotations. - - update - # Watch for changes to Kubernetes NetworkPolicies. - - apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - watch - - list - # Used by Calico for policy information. - - apiGroups: [""] - resources: - - pods - - namespaces - - serviceaccounts - verbs: - - list - - watch - # The CNI plugin patches pods/status. - - apiGroups: [""] - resources: - - pods/status - verbs: - - patch - # Calico monitors various CRDs for config. - - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - felixconfigurations - - bgppeers - - globalbgpconfigs - - bgpconfigurations - - ippools - - globalnetworkpolicies - - globalnetworksets - - networkpolicies - - clusterinformations - - hostendpoints - verbs: - - get - - list - - watch - # Calico must create and update some CRDs on startup. 
- - apiGroups: ["crd.projectcalico.org"] - resources: - - ippools - - felixconfigurations - - clusterinformations - verbs: - - create - - update - # Calico stores some configuration information on the node. - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - watch - # These permissions are only requried for upgrade from v2.6, and can - # be removed after upgrade or on fresh installations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - bgpconfigurations - - bgppeers - verbs: - - create - - update ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: calico-node -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-node -subjects: -- kind: ServiceAccount - name: calico-node - namespace: kube-system -- apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:nodes -{{end}} -## end rbac here - ---- -# This ConfigMap is used to configure a self-hosted Calico installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # To enable Typha, set this to "calico-typha" *and* set a non-zero value for Typha replicas - # below. We recommend using Typha if you have more than 50 nodes. Above 100 nodes it is - # essential. - typha_service_name: "none" - # Configure the Calico backend to use. - calico_backend: "bird" - - # Configure the MTU to use - veth_mtu: "1440" - - # The CNI network configuration to install on each node. The special - # values in this config will be automatically populated. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "log_level": "WARNING", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "mtu": __CNI_MTU__, - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "{{.KubeCfg}}" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-node - annotations: - # This, along with the CriticalAddonsOnly toleration below, - # marks the pod as a critical add-on, ensuring it gets - # priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: beta.kubernetes.io/os - operator: NotIn - values: - - windows - hostNetwork: true - tolerations: - # Make sure calico-node gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - serviceAccountName: calico-node - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. 
- terminationGracePeriodSeconds: 0 - initContainers: - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{.CNIImage}} - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # CNI MTU Config variable - - name: CNI_MTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Prevents the container from sleeping forever. - - name: SLEEP - value: "false" - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: {{.NodeImage}} - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Typha support: controlled by the ConfigMap. - - name: FELIX_TYPHAK8SSERVICENAME - valueFrom: - configMapKeyRef: - name: calico-config - key: typha_service_name - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Choose the backend to use. - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,bgp" - # Auto-detect the BGP IP address. - - name: IP - value: "autodetect" - # Enable IPIP - - name: CALICO_IPV4POOL_IPIP - value: "Always" - # Set MTU for tunnel device used if ipip is enabled - - name: FELIX_IPINIPMTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within --cluster-cidr. - - name: CALICO_IPV4POOL_CIDR - value: "{{.ClusterCIDR}}" - # Disable file logging so kubectl logs works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Disable IPv6 on Kubernetes. 
- - name: FELIX_IPV6SUPPORT - value: "false" - # Disable felix logging to file - - name: FELIX_LOGFILEPATH - value: "none" - # Disable felix logging for syslog - - name: FELIX_LOGSEVERITYSYS - value: "" - # Enable felix logging to stdout - - name: FELIX_LOGSEVERITYSCREEN - value: "Warning" - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - host: localhost - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/calico-node - - -bird-ready - - -felix-ready - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - -# Create all the CustomResourceDefinitions needed for -# Calico policy and networking mode. ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration - ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgppeers.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPPeer - plural: bgppeers - singular: bgppeer - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: hostendpoints.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: HostEndpoint - plural: hostendpoints - singular: hostendpoint - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy - ---- - 
-apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworksets.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: networkpolicies.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy - ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-node - namespace: kube-system - - -{{if ne .CloudProvider "none"}} ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: {{.CloudProvider}}-ippool - namespace: kube-system -data: - {{.CloudProvider}}-ippool: |- - apiVersion: projectcalico.org/v3 - kind: IPPool - metadata: - name: ippool-ipip-1 - spec: - cidr: {{.ClusterCIDR}} - ipipMode: Always - natOutgoing: true ---- -apiVersion: v1 -kind: Pod -metadata: - name: calicoctl - namespace: kube-system -spec: - hostNetwork: true - restartPolicy: OnFailure - containers: - - name: calicoctl - image: {{.Calicoctl}} - command: ["/bin/sh", "-c", "calicoctl apply -f {{.CloudProvider}}-ippool.yaml"] - env: - - name: DATASTORE_TYPE - value: kubernetes - volumeMounts: - - name: ippool-config - mountPath: /root/ - volumes: - - name: ippool-config - configMap: - name: {{.CloudProvider}}-ippool - items: - - key: {{.CloudProvider}}-ippool - path: {{.CloudProvider}}-ippool.yaml - # Mount in the etcd TLS secrets. -{{end}} -` - -const CalicoTemplateV115 = ` -{{if eq .RBACConfig "rbac"}} ---- -# Source: calico/templates/rbac.yaml -# Include a clusterrole for the kube-controllers component, -# and bind it to the calico-kube-controllers serviceaccount. -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-kube-controllers -rules: - # Nodes are watched to monitor for deletions. - - apiGroups: [""] - resources: - - nodes - verbs: - - watch - - list - - get - # Pods are queried to check for existence. - - apiGroups: [""] - resources: - - pods - verbs: - - get - # IPAM resources are manipulated when nodes are deleted. - - apiGroups: ["crd.projectcalico.org"] - resources: - - ippools - verbs: - - list - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - - ipamblocks - - ipamhandles - verbs: - - get - - list - - create - - update - - delete - # Needs access to update clusterinformations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - clusterinformations - verbs: - - get - - create - - update ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-kube-controllers -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-kube-controllers -subjects: -- kind: ServiceAccount - name: calico-kube-controllers - namespace: kube-system -- apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:nodes ---- -# Include a clusterrole for the calico-node DaemonSet, -# and bind it to the calico-node serviceaccount. -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico-node -rules: - # The CNI plugin needs to get pods, nodes, and namespaces. - - apiGroups: [""] - resources: - - pods - - nodes - - namespaces - verbs: - - get - - apiGroups: [""] - resources: - - endpoints - - services - verbs: - # Used to discover service IPs for advertisement. 
- - watch - - list - # Used to discover Typhas. - - get - - apiGroups: [""] - resources: - - nodes/status - verbs: - # Needed for clearing NodeNetworkUnavailable flag. - - patch - # Calico stores some configuration information in node annotations. - - update - # Watch for changes to Kubernetes NetworkPolicies. - - apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - watch - - list - # Used by Calico for policy information. - - apiGroups: [""] - resources: - - pods - - namespaces - - serviceaccounts - verbs: - - list - - watch - # The CNI plugin patches pods/status. - - apiGroups: [""] - resources: - - pods/status - verbs: - - patch - # Calico monitors various CRDs for config. - - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - felixconfigurations - - bgppeers - - globalbgpconfigs - - bgpconfigurations - - ippools - - ipamblocks - - globalnetworkpolicies - - globalnetworksets - - networkpolicies - - networksets - - clusterinformations - - hostendpoints - verbs: - - get - - list - - watch - # Calico must create and update some CRDs on startup. - - apiGroups: ["crd.projectcalico.org"] - resources: - - ippools - - felixconfigurations - - clusterinformations - verbs: - - create - - update - # Calico stores some configuration information on the node. - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - watch - # These permissions are only requried for upgrade from v2.6, and can - # be removed after upgrade or on fresh installations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - bgpconfigurations - - bgppeers - verbs: - - create - - update - # These permissions are required for Calico CNI to perform IPAM allocations. - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - - ipamblocks - - ipamhandles - verbs: - - get - - list - - create - - update - - delete - - apiGroups: ["crd.projectcalico.org"] - resources: - - ipamconfigs - verbs: - - get - # Block affinities must also be watchable by confd for route aggregation. - - apiGroups: ["crd.projectcalico.org"] - resources: - - blockaffinities - verbs: - - watch - # The Calico IPAM migration needs to get daemonsets. These permissions can be - # removed if not upgrading from an installation using host-local IPAM. - - apiGroups: ["apps"] - resources: - - daemonsets - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: calico-node -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico-node -subjects: -- kind: ServiceAccount - name: calico-node - namespace: kube-system -- apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:nodes -{{end}} ---- -# Source: calico/templates/calico-config.yaml -# This ConfigMap is used to configure a self-hosted Calico installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: calico-config - namespace: kube-system -data: - # Typha is disabled. - typha_service_name: "none" - # Configure the backend to use. - calico_backend: "bird" - - # Configure the MTU to use - veth_mtu: "1440" - - # The CNI network configuration to install on each node. The special - # values in this config will be automatically populated. 
- cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "log_level": "info", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "mtu": __CNI_MTU__, - "ipam": { - "type": "calico-ipam" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "{{.KubeCfg}}" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } ---- -# Source: calico/templates/kdd-crds.yaml -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamblocks.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMBlock - plural: ipamblocks - singular: ipamblock ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: blockaffinities.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BlockAffinity - plural: blockaffinities - singular: blockaffinity ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamhandles.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMHandle - plural: ipamhandles - singular: ipamhandle ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ipamconfigs.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPAMConfig - plural: ipamconfigs - singular: ipamconfig ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgppeers.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPPeer - plural: bgppeers - singular: bgppeer ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: hostendpoints.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: HostEndpoint - plural: hostendpoints - singular: hostendpoint ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - 
kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworksets.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: networkpolicies.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy ---- -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: networksets.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkSet - plural: networksets - singular: networkset ---- -# Source: calico/templates/calico-node.yaml -# This manifest installs the calico-node container, as well -# as the CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: calico-node - namespace: kube-system - labels: - k8s-app: calico-node -spec: - selector: - matchLabels: - k8s-app: calico-node - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: calico-node - annotations: - # This, along with the CriticalAddonsOnly toleration below, - # marks the pod as a critical add-on, ensuring it gets - # priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - nodeSelector: - beta.kubernetes.io/os: linux - hostNetwork: true - tolerations: - # Make sure calico-node gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists -{{if eq .RBACConfig "rbac"}} - serviceAccountName: calico-node -{{end}} - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - initContainers: - # This container performs upgrade from host-local IPAM to calico-ipam. - # It can be deleted if this is a fresh installation, or if you have already - # upgraded to use calico-ipam. - #- name: upgrade-ipam - # image: calico/cni:v3.7.3 - # command: ["/opt/cni/bin/calico-ipam", "-upgrade"] - # env: - # - name: KUBERNETES_NODE_NAME - # valueFrom: - # fieldRef: - # fieldPath: spec.nodeName - # - name: CALICO_NETWORKING_BACKEND - # valueFrom: - # configMapKeyRef: - # name: calico-config - # key: calico_backend - # volumeMounts: - # - mountPath: /var/lib/cni/networks - # name: host-local-net-dir - # - mountPath: /host/opt/cni/bin - # name: cni-bin-dir - # This container installs the CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{.CNIImage}} - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: calico-config - key: cni_network_config - # Set the hostname based on the k8s node name. 
- - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # CNI MTU Config variable - - name: CNI_MTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # Prevents the container from sleeping forever. - - name: SLEEP - value: "false" - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - containers: - # Runs calico-node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: {{.NodeImage}} - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Choose the backend to use. - - name: CALICO_NETWORKING_BACKEND - valueFrom: - configMapKeyRef: - name: calico-config - key: calico_backend - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,bgp" - # Auto-detect the BGP IP address. - - name: IP - value: "autodetect" - # Enable IPIP - - name: CALICO_IPV4POOL_IPIP - value: "Always" - # Set MTU for tunnel device used if ipip is enabled - - name: FELIX_IPINIPMTU - valueFrom: - configMapKeyRef: - name: calico-config - key: veth_mtu - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within --cluster-cidr. - - name: CALICO_IPV4POOL_CIDR - value: "{{.ClusterCIDR}}" - # Disable file logging so kubectl logs works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Disable IPv6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Set Felix logging to "info" - - name: FELIX_LOGSEVERITYSCREEN - value: "info" - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - host: localhost - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - exec: - command: - - /bin/calico-node - - -bird-ready - - -felix-ready - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - volumes: - # Used by calico-node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Mount in the directory for host-local IPAM allocations. This is - # used when upgrading from host-local to calico-ipam, and can be removed - # if not using the upgrade-ipam init container. 
- - name: host-local-net-dir - hostPath: - path: /var/lib/cni/networks ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-node - namespace: kube-system ---- -# Source: calico/templates/calico-kube-controllers.yaml -# See https://github.com/projectcalico/kube-controllers -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' -spec: - # The controller can only have a single active instance. - replicas: 1 - strategy: - type: Recreate - template: - metadata: - name: calico-kube-controllers - namespace: kube-system - labels: - k8s-app: calico-kube-controllers - spec: - nodeSelector: - beta.kubernetes.io/os: linux - tolerations: - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - key: node-role.kubernetes.io/master - effect: NoSchedule -{{if eq .RBACConfig "rbac"}} - serviceAccountName: calico-kube-controllers -{{end}} - containers: - - name: calico-kube-controllers - image: calico/kube-controllers:v3.7.3 - env: - # Choose which controllers to run. - - name: ENABLED_CONTROLLERS - value: node - - name: DATASTORE_TYPE - value: kubernetes - readinessProbe: - exec: - command: - - /usr/bin/check-status - - -r ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: calico-kube-controllers - namespace: kube-system -` diff --git a/templates/canal.go b/templates/canal.go deleted file mode 100644 index 31b74380..00000000 --- a/templates/canal.go +++ /dev/null @@ -1,1757 +0,0 @@ -package templates - -const CanalTemplateV112 = ` -{{if eq .RBACConfig "rbac"}} -# Calico Roles -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: calico -rules: - - apiGroups: [""] - resources: - - namespaces - verbs: - - get - - list - - watch - - apiGroups: [""] - resources: - - pods/status - verbs: - - update - - apiGroups: [""] - resources: - - pods - verbs: - - get - - list - - watch - - patch - - apiGroups: [""] - resources: - - services - verbs: - - get - - apiGroups: [""] - resources: - - endpoints - verbs: - - get - - apiGroups: [""] - resources: - - nodes - verbs: - - get - - list - - update - - watch - - apiGroups: ["networking.k8s.io"] - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: ["crd.projectcalico.org"] - resources: - - globalfelixconfigs - - felixconfigurations - - bgppeers - - globalbgpconfigs - - bgpconfigurations - - ippools - - globalnetworkpolicies - - networkpolicies - - clusterinformations - - hostendpoints - - globalnetworksets - verbs: - - create - - get - - list - - update - - watch - ---- - -# Flannel roles -# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: flannel -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - apiGroups: - - "" - resources: - - nodes - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch ---- - -# Bind the flannel ClusterRole to the canal ServiceAccount. 
-kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: canal-flannel -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system - ---- - -# Bind the calico ClusterRole to the canal ServiceAccount. -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: canal-calico -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system -- apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:nodes -{{end}} - -# Canal Version v3.1.1 -# https://docs.projectcalico.org/v3.1/releases#v3.1.1 -# This manifest includes the following component versions: -# calico/node:v3.1.1 -# calico/cni:v3.1.1 -# coreos/flannel:v0.9.1 - ---- -# This ConfigMap can be used to configure a self-hosted Canal installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: canal-config - namespace: kube-system -data: - # The interface used by canal for host <-> host communication. - # If left blank, then the interface is chosen using the node's - # default route. - canal_iface: "{{.CanalInterface}}" - - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: "true" - - # The CNI network configuration to install on each node. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "log_level": "WARNING", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s", - "k8s_auth_token": "__SERVICEACCOUNT_TOKEN__" - }, - "kubernetes": { - "k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__", - "kubeconfig": "{{.KubeCfg}}" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } - - # Flannel network configuration. Mounted into the flannel container. - net-conf.json: | - { - "Network": "{{.ClusterCIDR}}", - "Backend": { - "Type": "{{.FlannelBackend.Type}}", - "VNI": {{.FlannelBackend.VNI}}, - "Port": {{.FlannelBackend.Port}} - } - } - ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: canal - namespace: kube-system - labels: - k8s-app: canal -spec: - selector: - matchLabels: - k8s-app: canal - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: canal - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: beta.kubernetes.io/os - operator: NotIn - values: - - windows - hostNetwork: true - serviceAccountName: canal - tolerations: - # Tolerate this effect so the pods will be schedulable at all times - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. 
- - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - - key: "node-role.kubernetes.io/controlplane" - operator: "Exists" - effect: "NoSchedule" - - key: "node-role.kubernetes.io/etcd" - operator: "Exists" - effect: "NoExecute" - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: {{.NodeImage}} - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Disable felix logging to file - - name: FELIX_LOGFILEPATH - value: "none" - # Disable felix logging for syslog - - name: FELIX_LOGSEVERITYSYS - value: "" - # Enable felix logging to stdout - - name: FELIX_LOGSEVERITYSCREEN - value: "Warning" - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,canal" - # Disable file logging so kubectl logs works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Period, in seconds, at which felix re-applies all iptables state - - name: FELIX_IPTABLESREFRESHINTERVAL - value: "60" - # Disable IPV6 support in Felix. - - name: FELIX_IPV6SUPPORT - value: "false" - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # No IP address needed. - - name: IP - value: "" - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - httpGet: - path: /readiness - port: 9099 - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{.CNIImage}} - command: ["/install-cni.sh"] - env: - - name: CNI_CONF_NAME - value: "10-calico.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: canal-config - key: cni_network_config - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - # This container runs flannel using the kube-subnet-mgr backend - # for allocating subnets. 
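The kube-subnet-mgr backend named in the comment above needs no separate datastore: it derives each node's flannel subnet from that node's spec.podCIDR, which is exactly the access the flannel ClusterRole earlier grants (get on pods, list/watch on nodes, patch on nodes/status). A hedged client-go sketch of that read path, not flannel's own code, written against a recent client-go; the kubeconfig path is a placeholder:

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Placeholder kubeconfig; a real pod would use in-cluster config.
	cfg, err := clientcmd.BuildConfigFromFlags("", "/etc/kubernetes/admin.conf")
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	nodes, err := cs.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, n := range nodes.Items {
		// flannel carves each node's subnet out of this CIDR.
		fmt.Printf("%s -> %s\n", n.Name, n.Spec.PodCIDR)
	}
}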
- - name: kube-flannel - image: {{.CanalFlannelImg}} - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: FLANNELD_IFACE - valueFrom: - configMapKeyRef: - name: canal-config - key: canal_iface - - name: FLANNELD_IP_MASQ - valueFrom: - configMapKeyRef: - name: canal-config - key: masquerade - volumeMounts: - - name: run - mountPath: /run - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - - name: xtables-lock - mountPath: /run/xtables.lock - readOnly: false - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - # Used by flannel. - - name: run - hostPath: - path: /run - - name: flannel-cfg - configMap: - name: canal-config - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - -# Create all the CustomResourceDefinitions needed for -# Calico policy-only mode. ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: networkpolicies.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworksets.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: hostendpoints.crd.projectcalico.org 
-spec:
-  scope: Cluster
-  group: crd.projectcalico.org
-  version: v1
-  names:
-    kind: HostEndpoint
-    plural: hostendpoints
-    singular: hostendpoint
-
----
-
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: canal
-  namespace: kube-system
-`
-
-const CanalTemplateV113 = `
-{{if eq .RBACConfig "rbac"}}
-# Include a clusterrole for the calico-node DaemonSet,
-# and bind it to the calico-node serviceaccount.
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: calico
-rules:
-  # The CNI plugin needs to get pods, nodes, and namespaces.
-  - apiGroups: [""]
-    resources:
-      - pods
-      - nodes
-      - namespaces
-    verbs:
-      - get
-  - apiGroups: [""]
-    resources:
-      - endpoints
-      - services
-    verbs:
-      # Used to discover service IPs for advertisement.
-      - watch
-      - list
-      # Used to discover Typhas.
-      - get
-  - apiGroups: [""]
-    resources:
-      - nodes/status
-    verbs:
-      # Needed for clearing NodeNetworkUnavailable flag.
-      - patch
-      # Calico stores some configuration information in node annotations.
-      - update
-  # Watch for changes to Kubernetes NetworkPolicies.
-  - apiGroups: ["networking.k8s.io"]
-    resources:
-      - networkpolicies
-    verbs:
-      - watch
-      - list
-  # Used by Calico for policy information.
-  - apiGroups: [""]
-    resources:
-      - pods
-      - namespaces
-      - serviceaccounts
-    verbs:
-      - list
-      - watch
-  # The CNI plugin patches pods/status.
-  - apiGroups: [""]
-    resources:
-      - pods/status
-    verbs:
-      - patch
-  # Calico monitors various CRDs for config.
-  - apiGroups: ["crd.projectcalico.org"]
-    resources:
-      - globalfelixconfigs
-      - felixconfigurations
-      - bgppeers
-      - globalbgpconfigs
-      - bgpconfigurations
-      - ippools
-      - globalnetworkpolicies
-      - globalnetworksets
-      - networkpolicies
-      - clusterinformations
-      - hostendpoints
-    verbs:
-      - get
-      - list
-      - watch
-  # Calico must create and update some CRDs on startup.
-  - apiGroups: ["crd.projectcalico.org"]
-    resources:
-      - ippools
-      - felixconfigurations
-      - clusterinformations
-    verbs:
-      - create
-      - update
-  # Calico stores some configuration information on the node.
-  - apiGroups: [""]
-    resources:
-      - nodes
-    verbs:
-      - get
-      - list
-      - watch
-  # These permissions are only required for upgrade from v2.6, and can
-  # be removed after upgrade or on fresh installations.
-  - apiGroups: ["crd.projectcalico.org"]
-    resources:
-      - bgpconfigurations
-      - bgppeers
-    verbs:
-      - create
-      - update
----
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
-metadata:
-  name: calico-node
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: calico-node
-subjects:
-- kind: ServiceAccount
-  name: calico-node
-  namespace: kube-system
-- apiGroup: rbac.authorization.k8s.io
-  kind: Group
-  name: system:nodes
----
-# Flannel ClusterRole
-# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: flannel
-rules:
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-    verbs:
-      - get
-  - apiGroups:
-      - ""
-    resources:
-      - nodes
-    verbs:
-      - list
-      - watch
-  - apiGroups:
-      - ""
-    resources:
-      - nodes/status
-    verbs:
-      - patch
----
-# Bind the flannel ClusterRole to the canal ServiceAccount.
-kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1beta1 -metadata: - name: canal-flannel -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system ---- -# Bind the Calico ClusterRole to the canal ServiceAccount. -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: canal-calico -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system -- apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:nodes -{{end}} - -# Canal Version v3.1.1 -# https://docs.projectcalico.org/v3.1/releases#v3.1.1 -# This manifest includes the following component versions: -# calico/node:v3.1.1 -# calico/cni:v3.1.1 -# coreos/flannel:v0.9.1 - ---- -# This ConfigMap is used to configure a self-hosted Canal installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: canal-config - namespace: kube-system -data: - # The interface used by canal for host <-> host communication. - # If left blank, then the interface is chosen using the node's - # default route. - canal_iface: "{{.CanalInterface}}" - - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: "true" - - # The CNI network configuration to install on each node. The special - # values in this config will be automatically populated. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "log_level": "WARNING", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "{{.KubeCfg}}" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } - - # Flannel network configuration. Mounted into the flannel container. - net-conf.json: | - { - "Network": "{{.ClusterCIDR}}", - "Backend": { - "Type": "{{.FlannelBackend.Type}}" - } - } ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: canal - namespace: kube-system - labels: - k8s-app: canal -spec: - selector: - matchLabels: - k8s-app: canal - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: canal - annotations: - # This, along with the CriticalAddonsOnly toleration below, - # marks the pod as a critical add-on, ensuring it gets - # priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: beta.kubernetes.io/os - operator: NotIn - values: - - windows - hostNetwork: true - tolerations: - # Make sure canal gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. 
- - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - serviceAccountName: canal - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - initContainers: - # This container installs the Calico CNI binaries - # and CNI network config file on each node. - - name: install-cni - image: {{.CNIImage}} - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-canal.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: canal-config - key: cni_network_config - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Prevents the container from sleeping forever. - - name: SLEEP - value: "false" - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: {{.NodeImage}} - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,canal" - # Period, in seconds, at which felix re-applies all iptables state - - name: FELIX_IPTABLESREFRESHINTERVAL - value: "60" - # No IP address needed. - - name: IP - value: "" - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within --cluster-cidr. - - name: CALICO_IPV4POOL_CIDR - value: "192.168.0.0/16" - # Disable file logging so kubectl logs works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Disable IPv6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Disable felix logging to file - - name: FELIX_LOGFILEPATH - value: "none" - # Disable felix logging for syslog - - name: FELIX_LOGSEVERITYSYS - value: "" - # Enable felix logging to stdout - - name: FELIX_LOGSEVERITYSCREEN - value: "Warning" - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - host: localhost - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - httpGet: - path: /readiness - port: 9099 - host: localhost - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - # This container runs flannel using the kube-subnet-mgr backend - # for allocating subnets. 
- - name: kube-flannel - image: {{.CanalFlannelImg}} - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: FLANNELD_IFACE - valueFrom: - configMapKeyRef: - name: canal-config - key: canal_iface - - name: FLANNELD_IP_MASQ - valueFrom: - configMapKeyRef: - name: canal-config - key: masquerade - volumeMounts: - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - # Used by flannel. - - name: flannel-cfg - configMap: - name: canal-config - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: canal - namespace: kube-system - ---- - -# Create all the CustomResourceDefinitions needed for -# Calico policy and networking mode. - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: hostendpoints.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: HostEndpoint - plural: hostendpoints - singular: hostendpoint - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworksets.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: 
networkpolicies.crd.projectcalico.org
-spec:
-  scope: Namespaced
-  group: crd.projectcalico.org
-  version: v1
-  names:
-    kind: NetworkPolicy
-    plural: networkpolicies
-    singular: networkpolicy
-`
-
-const CanalTemplateV115 = `
-{{if eq .RBACConfig "rbac"}}
-# Include a clusterrole for the calico-node DaemonSet,
-# and bind it to the calico-node serviceaccount.
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: calico
-rules:
-  # The CNI plugin needs to get pods, nodes, and namespaces.
-  - apiGroups: [""]
-    resources:
-      - pods
-      - nodes
-      - namespaces
-    verbs:
-      - get
-  - apiGroups: [""]
-    resources:
-      - endpoints
-      - services
-    verbs:
-      # Used to discover service IPs for advertisement.
-      - watch
-      - list
-      # Used to discover Typhas.
-      - get
-  - apiGroups: [""]
-    resources:
-      - nodes/status
-    verbs:
-      # Needed for clearing NodeNetworkUnavailable flag.
-      - patch
-      # Calico stores some configuration information in node annotations.
-      - update
-  # Watch for changes to Kubernetes NetworkPolicies.
-  - apiGroups: ["networking.k8s.io"]
-    resources:
-      - networkpolicies
-    verbs:
-      - watch
-      - list
-  # Used by Calico for policy information.
-  - apiGroups: [""]
-    resources:
-      - pods
-      - namespaces
-      - serviceaccounts
-    verbs:
-      - list
-      - watch
-  # The CNI plugin patches pods/status.
-  - apiGroups: [""]
-    resources:
-      - pods/status
-    verbs:
-      - patch
-  # Calico monitors various CRDs for config.
-  - apiGroups: ["crd.projectcalico.org"]
-    resources:
-      - globalfelixconfigs
-      - felixconfigurations
-      - bgppeers
-      - globalbgpconfigs
-      - bgpconfigurations
-      - ippools
-      - ipamblocks
-      - globalnetworkpolicies
-      - globalnetworksets
-      - networkpolicies
-      - networksets
-      - clusterinformations
-      - hostendpoints
-    verbs:
-      - get
-      - list
-      - watch
-  # Calico must create and update some CRDs on startup.
-  - apiGroups: ["crd.projectcalico.org"]
-    resources:
-      - ippools
-      - felixconfigurations
-      - clusterinformations
-    verbs:
-      - create
-      - update
-  # Calico stores some configuration information on the node.
-  - apiGroups: [""]
-    resources:
-      - nodes
-    verbs:
-      - get
-      - list
-      - watch
-  # These permissions are only required for upgrade from v2.6, and can
-  # be removed after upgrade or on fresh installations.
-  - apiGroups: ["crd.projectcalico.org"]
-    resources:
-      - bgpconfigurations
-      - bgppeers
-    verbs:
-      - create
-      - update
----
-apiVersion: rbac.authorization.k8s.io/v1beta1
-kind: ClusterRoleBinding
-metadata:
-  name: calico-node
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: calico-node
-subjects:
-- kind: ServiceAccount
-  name: calico-node
-  namespace: kube-system
-- apiGroup: rbac.authorization.k8s.io
-  kind: Group
-  name: system:nodes
----
-# Flannel ClusterRole
-# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: flannel
-rules:
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-    verbs:
-      - get
-  - apiGroups:
-      - ""
-    resources:
-      - nodes
-    verbs:
-      - list
-      - watch
-  - apiGroups:
-      - ""
-    resources:
-      - nodes/status
-    verbs:
-      - patch
----
-# Bind the flannel ClusterRole to the canal ServiceAccount.
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: canal-flannel
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: flannel
-subjects:
-- kind: ServiceAccount
-  name: canal
-  namespace: kube-system
----
-# Bind the Calico ClusterRole to the canal ServiceAccount.
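At this point three canal template constants coexist (CanalTemplateV112, CanalTemplateV113, and the CanalTemplateV115 whose RBAC section begins above), each intended for a range of Kubernetes minor versions. One plausible way to express that selection, sketched with coreos/go-semver; pickCanalTemplate and the version cutoffs are illustrative, not the selection logic RKE actually ships:

package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

// pickCanalTemplate maps a bare Kubernetes version (no leading "v",
// no build suffix) to one of the three template constants by range.
func pickCanalTemplate(k8sVersion string) string {
	v := semver.New(k8sVersion)
	switch {
	case v.LessThan(*semver.New("1.13.0")):
		return "CanalTemplateV112"
	case v.LessThan(*semver.New("1.15.0")):
		return "CanalTemplateV113"
	default:
		return "CanalTemplateV115"
	}
}

func main() {
	fmt.Println(pickCanalTemplate("1.14.1")) // CanalTemplateV113
}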
-apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: canal-calico -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: calico -subjects: -- kind: ServiceAccount - name: canal - namespace: kube-system -- apiGroup: rbac.authorization.k8s.io - kind: Group - name: system:nodes -{{end}} - -# Canal Version v3.1.1 -# https://docs.projectcalico.org/v3.1/releases#v3.1.1 -# This manifest includes the following component versions: -# calico/node:v3.1.1 -# calico/cni:v3.1.1 -# coreos/flannel:v0.9.1 - ---- -# This ConfigMap is used to configure a self-hosted Canal installation. -kind: ConfigMap -apiVersion: v1 -metadata: - name: canal-config - namespace: kube-system -data: - # The interface used by canal for host <-> host communication. - # If left blank, then the interface is chosen using the node's - # default route. - canal_iface: "{{.CanalInterface}}" - - # Whether or not to masquerade traffic to destinations not within - # the pod network. - masquerade: "true" - - # The CNI network configuration to install on each node. The special - # values in this config will be automatically populated. - cni_network_config: |- - { - "name": "k8s-pod-network", - "cniVersion": "0.3.0", - "plugins": [ - { - "type": "calico", - "log_level": "WARNING", - "datastore_type": "kubernetes", - "nodename": "__KUBERNETES_NODE_NAME__", - "ipam": { - "type": "host-local", - "subnet": "usePodCidr" - }, - "policy": { - "type": "k8s" - }, - "kubernetes": { - "kubeconfig": "{{.KubeCfg}}" - } - }, - { - "type": "portmap", - "snat": true, - "capabilities": {"portMappings": true} - } - ] - } - - # Flannel network configuration. Mounted into the flannel container. - net-conf.json: | - { - "Network": "{{.ClusterCIDR}}", - "Backend": { - "Type": "{{.FlannelBackend.Type}}" - } - } ---- - -# This manifest installs the calico/node container, as well -# as the Calico CNI plugins and network config on -# each master and worker node in a Kubernetes cluster. -kind: DaemonSet -apiVersion: extensions/v1beta1 -metadata: - name: canal - namespace: kube-system - labels: - k8s-app: canal -spec: - selector: - matchLabels: - k8s-app: canal - updateStrategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - template: - metadata: - labels: - k8s-app: canal - annotations: - # This, along with the CriticalAddonsOnly toleration below, - # marks the pod as a critical add-on, ensuring it gets - # priority scheduling and that its resources are reserved - # if it ever gets evicted. - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: beta.kubernetes.io/os - operator: NotIn - values: - - windows - hostNetwork: true - tolerations: - # Make sure canal gets scheduled on all nodes. - - effect: NoSchedule - operator: Exists - # Mark the pod as a critical add-on for rescheduling. - - key: CriticalAddonsOnly - operator: Exists - - effect: NoExecute - operator: Exists - {{if eq .RBACConfig "rbac"}} - serviceAccountName: canal - {{end}} - # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force - # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. - terminationGracePeriodSeconds: 0 - initContainers: - # This container installs the Calico CNI binaries - # and CNI network config file on each node. 
- - name: install-cni - image: {{.CNIImage}} - command: ["/install-cni.sh"] - env: - # Name of the CNI config file to create. - - name: CNI_CONF_NAME - value: "10-canal.conflist" - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: canal-config - key: cni_network_config - # Set the hostname based on the k8s node name. - - name: KUBERNETES_NODE_NAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Prevents the container from sleeping forever. - - name: SLEEP - value: "false" - volumeMounts: - - mountPath: /host/opt/cni/bin - name: cni-bin-dir - - mountPath: /host/etc/cni/net.d - name: cni-net-dir - containers: - # Runs calico/node container on each Kubernetes node. This - # container programs network policy and routes on each - # host. - - name: calico-node - image: {{.NodeImage}} - env: - # Use Kubernetes API as the backing datastore. - - name: DATASTORE_TYPE - value: "kubernetes" - # Wait for the datastore. - - name: WAIT_FOR_DATASTORE - value: "true" - # Set based on the k8s node name. - - name: NODENAME - valueFrom: - fieldRef: - fieldPath: spec.nodeName - # Don't enable BGP. - - name: CALICO_NETWORKING_BACKEND - value: "none" - # Cluster type to identify the deployment type - - name: CLUSTER_TYPE - value: "k8s,canal" - # Period, in seconds, at which felix re-applies all iptables state - - name: FELIX_IPTABLESREFRESHINTERVAL - value: "60" - # No IP address needed. - - name: IP - value: "" - # The default IPv4 pool to create on startup if none exists. Pod IPs will be - # chosen from this range. Changing this value after installation will have - # no effect. This should fall within --cluster-cidr. - - name: CALICO_IPV4POOL_CIDR - value: "192.168.0.0/16" - # Disable file logging so kubectl logs works. - - name: CALICO_DISABLE_FILE_LOGGING - value: "true" - # Set Felix endpoint to host default action to ACCEPT. - - name: FELIX_DEFAULTENDPOINTTOHOSTACTION - value: "ACCEPT" - # Disable IPv6 on Kubernetes. - - name: FELIX_IPV6SUPPORT - value: "false" - # Disable felix logging to file - - name: FELIX_LOGFILEPATH - value: "none" - # Disable felix logging for syslog - - name: FELIX_LOGSEVERITYSYS - value: "" - # Enable felix logging to stdout - - name: FELIX_LOGSEVERITYSCREEN - value: "Warning" - - name: FELIX_HEALTHENABLED - value: "true" - securityContext: - privileged: true - resources: - requests: - cpu: 250m - livenessProbe: - httpGet: - path: /liveness - port: 9099 - host: localhost - periodSeconds: 10 - initialDelaySeconds: 10 - failureThreshold: 6 - readinessProbe: - httpGet: - path: /readiness - port: 9099 - host: localhost - periodSeconds: 10 - volumeMounts: - - mountPath: /lib/modules - name: lib-modules - readOnly: true - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - mountPath: /var/run/calico - name: var-run-calico - readOnly: false - - mountPath: /var/lib/calico - name: var-lib-calico - readOnly: false - # This container runs flannel using the kube-subnet-mgr backend - # for allocating subnets. 
- - name: kube-flannel - image: {{.CanalFlannelImg}} - command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ] - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: FLANNELD_IFACE - valueFrom: - configMapKeyRef: - name: canal-config - key: canal_iface - - name: FLANNELD_IP_MASQ - valueFrom: - configMapKeyRef: - name: canal-config - key: masquerade - volumeMounts: - - mountPath: /run/xtables.lock - name: xtables-lock - readOnly: false - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - volumes: - # Used by calico/node. - - name: lib-modules - hostPath: - path: /lib/modules - - name: var-run-calico - hostPath: - path: /var/run/calico - - name: var-lib-calico - hostPath: - path: /var/lib/calico - - name: xtables-lock - hostPath: - path: /run/xtables.lock - type: FileOrCreate - # Used by flannel. - - name: flannel-cfg - configMap: - name: canal-config - # Used to install CNI. - - name: cni-bin-dir - hostPath: - path: /opt/cni/bin - - name: cni-net-dir - hostPath: - path: /etc/cni/net.d - ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: canal - namespace: kube-system - ---- - -# Create all the CustomResourceDefinitions needed for -# Calico policy and networking mode. - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: felixconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: FelixConfiguration - plural: felixconfigurations - singular: felixconfiguration ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: bgpconfigurations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: BGPConfiguration - plural: bgpconfigurations - singular: bgpconfiguration - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: ippools.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: IPPool - plural: ippools - singular: ippool - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: hostendpoints.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: HostEndpoint - plural: hostendpoints - singular: hostendpoint - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: clusterinformations.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: ClusterInformation - plural: clusterinformations - singular: clusterinformation - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworkpolicies.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkPolicy - plural: globalnetworkpolicies - singular: globalnetworkpolicy - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: globalnetworksets.crd.projectcalico.org -spec: - scope: Cluster - group: crd.projectcalico.org - version: v1 - names: - kind: GlobalNetworkSet - plural: globalnetworksets - singular: globalnetworkset - ---- - -apiVersion: apiextensions.k8s.io/v1beta1 -kind: CustomResourceDefinition -metadata: - name: 
networkpolicies.crd.projectcalico.org -spec: - scope: Namespaced - group: crd.projectcalico.org - version: v1 - names: - kind: NetworkPolicy - plural: networkpolicies - singular: networkpolicy -` diff --git a/templates/coredns.go b/templates/coredns.go deleted file mode 100644 index c0323d8b..00000000 --- a/templates/coredns.go +++ /dev/null @@ -1,290 +0,0 @@ -package templates - -const CoreDNSTemplate = ` ---- -{{- if eq .RBACConfig "rbac"}} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: coredns - namespace: kube-system - labels: - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - kubernetes.io/bootstrapping: rbac-defaults - addonmanager.kubernetes.io/mode: Reconcile - name: system:coredns -rules: -- apiGroups: - - "" - resources: - - endpoints - - services - - pods - - namespaces - verbs: - - list - - watch -- apiGroups: - - "" - resources: - - nodes - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - annotations: - rbac.authorization.kubernetes.io/autoupdate: "true" - labels: - kubernetes.io/bootstrapping: rbac-defaults - addonmanager.kubernetes.io/mode: EnsureExists - name: system:coredns -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:coredns -subjects: -- kind: ServiceAccount - name: coredns - namespace: kube-system -{{- end }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: coredns - namespace: kube-system - labels: - addonmanager.kubernetes.io/mode: EnsureExists -data: - Corefile: | - .:53 { - errors - health - kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ "in-addr.arpa ip6.arpa" }}{{ end }} { - pods insecure - upstream - fallthrough in-addr.arpa ip6.arpa - ttl 30 - } - prometheus :9153 - {{- if .UpstreamNameservers }} - forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}} - {{- else }} - forward . 
"/etc/resolv.conf" - {{- end }} - cache 30 - loop - reload - loadbalance - } ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: coredns - namespace: kube-system - labels: - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile - kubernetes.io/name: "CoreDNS" -spec: - replicas: 1 - strategy: - type: RollingUpdate - rollingUpdate: - maxUnavailable: 1 - selector: - matchLabels: - k8s-app: kube-dns - template: - metadata: - labels: - k8s-app: kube-dns - annotations: - seccomp.security.alpha.kubernetes.io/pod: 'docker/default' - spec: - priorityClassName: system-cluster-critical -{{- if eq .RBACConfig "rbac"}} - serviceAccountName: coredns -{{- end }} - tolerations: - - key: "CriticalAddonsOnly" - operator: "Exists" - nodeSelector: - beta.kubernetes.io/os: linux - {{ range $k, $v := .NodeSelector }} - {{ $k }}: "{{ $v }}" - {{ end }} - containers: - - name: coredns - image: {{.CoreDNSImage}} - imagePullPolicy: IfNotPresent - resources: - limits: - memory: 170Mi - requests: - cpu: 100m - memory: 70Mi - args: [ "-conf", "/etc/coredns/Corefile" ] - volumeMounts: - - name: config-volume - mountPath: /etc/coredns - readOnly: true - ports: - - containerPort: 53 - name: dns - protocol: UDP - - containerPort: 53 - name: dns-tcp - protocol: TCP - - containerPort: 9153 - name: metrics - protocol: TCP - livenessProbe: - httpGet: - path: /health - port: 8080 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - httpGet: - path: /health - port: 8080 - scheme: HTTP - securityContext: - allowPrivilegeEscalation: false - capabilities: - add: - - NET_BIND_SERVICE - drop: - - all - readOnlyRootFilesystem: true - dnsPolicy: Default - volumes: - - name: config-volume - configMap: - name: coredns - items: - - key: Corefile - path: Corefile ---- -apiVersion: v1 -kind: Service -metadata: - name: kube-dns - namespace: kube-system - annotations: - prometheus.io/port: "9153" - prometheus.io/scrape: "true" - labels: - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile - kubernetes.io/name: "CoreDNS" -spec: - selector: - k8s-app: kube-dns - clusterIP: {{.ClusterDNSServer}} - ports: - - name: dns - port: 53 - protocol: UDP - - name: dns-tcp - port: 53 - protocol: TCP - - name: metrics - port: 9153 - protocol: TCP ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: coredns-autoscaler - namespace: kube-system - labels: - k8s-app: coredns-autoscaler -spec: - selector: - matchLabels: - k8s-app: coredns-autoscaler - template: - metadata: - labels: - k8s-app: coredns-autoscaler - spec: -{{- if eq .RBACConfig "rbac"}} - serviceAccountName: coredns-autoscaler -{{- end }} - nodeSelector: - beta.kubernetes.io/os: linux - containers: - - name: autoscaler - image: {{.CoreDNSAutoScalerImage}} - resources: - requests: - cpu: "20m" - memory: "10Mi" - command: - - /cluster-proportional-autoscaler - - --namespace=kube-system - - --configmap=coredns-autoscaler - - --target=Deployment/coredns - # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. - # If using small nodes, "nodesPerReplica" should dominate. 
- - --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}} - - --logtostderr=true - - --v=2 -{{- if eq .RBACConfig "rbac"}} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: coredns-autoscaler - namespace: kube-system - labels: - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: system:coredns-autoscaler -rules: - - apiGroups: [""] - resources: ["nodes"] - verbs: ["list", "watch"] - - apiGroups: [""] - resources: ["replicationcontrollers/scale"] - verbs: ["get", "update"] - - apiGroups: ["extensions"] - resources: ["deployments/scale", "replicasets/scale"] - verbs: ["get", "update"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "create"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: system:coredns-autoscaler -subjects: - - kind: ServiceAccount - name: coredns-autoscaler - namespace: kube-system -roleRef: - kind: ClusterRole - name: system:coredns-autoscaler - apiGroup: rbac.authorization.k8s.io -{{- end }}` diff --git a/templates/flannel.go b/templates/flannel.go deleted file mode 100644 index 5e73176c..00000000 --- a/templates/flannel.go +++ /dev/null @@ -1,439 +0,0 @@ -package templates - -const FlannelTemplate = ` -{{- if eq .RBACConfig "rbac"}} ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: flannel -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: flannel -subjects: -- kind: ServiceAccount - name: flannel - namespace: kube-system ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: flannel -rules: - - apiGroups: - - "" - resources: - - pods - verbs: - - get - - apiGroups: - - "" - resources: - - nodes - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - nodes/status - verbs: - - patch -{{- end}} ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: kube-flannel-cfg - namespace: "kube-system" - labels: - tier: node - app: flannel -data: - cni-conf.json: | - { - "name":"cbr0", - "cniVersion":"0.3.1", - "plugins":[ - { - "type":"flannel", - "delegate":{ - "forceAddress":true, - "isDefaultGateway":true - } - }, - { - "type":"portmap", - "capabilities":{ - "portMappings":true - } - } - ] - } - net-conf.json: | - { - "Network": "{{.ClusterCIDR}}", - "Backend": { - "Type": "{{.FlannelBackend.Type}}", - "VNI": {{.FlannelBackend.VNI}}, - "Port": {{.FlannelBackend.Port}} - } - } ---- -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: kube-flannel - namespace: "kube-system" - labels: - tier: node - k8s-app: flannel -spec: - template: - metadata: - labels: - tier: node - k8s-app: flannel - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: beta.kubernetes.io/os - operator: NotIn - values: - - windows - serviceAccountName: flannel - containers: - - name: kube-flannel - image: {{.Image}} - imagePullPolicy: IfNotPresent - resources: - limits: - cpu: 300m - memory: 500M - requests: - cpu: 150m - memory: 64M - {{- if .FlannelInterface}} - command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr","--iface={{.FlannelInterface}}"] - {{- else}} - command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr"] - {{- end}} - securityContext: - privileged: true - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - 
valueFrom:
-            fieldRef:
-              fieldPath: metadata.namespace
-        volumeMounts:
-        - name: run
-          mountPath: /run
-        - name: cni
-          mountPath: /etc/cni/net.d
-        - name: flannel-cfg
-          mountPath: /etc/kube-flannel/
-      - name: install-cni
-        image: {{.CNIImage}}
-        command: ["/install-cni.sh"]
-        env:
-        # The CNI network config to install on each node.
-        - name: CNI_NETWORK_CONFIG
-          valueFrom:
-            configMapKeyRef:
-              name: kube-flannel-cfg
-              key: cni-conf.json
-        - name: CNI_CONF_NAME
-          value: "10-flannel.conflist"
-        volumeMounts:
-        - name: cni
-          mountPath: /host/etc/cni/net.d
-        - name: host-cni-bin
-          mountPath: /host/opt/cni/bin/
-      hostNetwork: true
-      tolerations:
-      {{- if ge .ClusterVersion "v1.12" }}
-      - operator: Exists
-        effect: NoSchedule
-      - operator: Exists
-        effect: NoExecute
-      {{- else }}
-      - key: node-role.kubernetes.io/controlplane
-        operator: Exists
-        effect: NoSchedule
-      - key: node-role.kubernetes.io/etcd
-        operator: Exists
-        effect: NoExecute
-      {{- end }}
-      - key: node.kubernetes.io/not-ready
-        effect: NoSchedule
-        operator: Exists
-      volumes:
-        - name: run
-          hostPath:
-            path: /run
-        - name: cni
-          hostPath:
-            path: /etc/cni/net.d
-        - name: flannel-cfg
-          configMap:
-            name: kube-flannel-cfg
-        - name: host-cni-bin
-          hostPath:
-            path: /opt/cni/bin
-  updateStrategy:
-    rollingUpdate:
-      maxUnavailable: 20%
-    type: RollingUpdate
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: flannel
-  namespace: kube-system
-`
-
-const FlannelTemplateV115 = `
-{{- if eq .RBACConfig "rbac"}}
----
-kind: ClusterRole
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: flannel
-rules:
-  - apiGroups: ['extensions']
-    resources: ['podsecuritypolicies']
-    verbs: ['use']
-    resourceNames: ['psp.flannel.unprivileged']
-  - apiGroups:
-      - ""
-    resources:
-      - pods
-    verbs:
-      - get
-  - apiGroups:
-      - ""
-    resources:
-      - nodes
-    verbs:
-      - list
-      - watch
-  - apiGroups:
-      - ""
-    resources:
-      - nodes/status
-    verbs:
-      - patch
----
-kind: ClusterRoleBinding
-apiVersion: rbac.authorization.k8s.io/v1beta1
-metadata:
-  name: flannel
-roleRef:
-  apiGroup: rbac.authorization.k8s.io
-  kind: ClusterRole
-  name: flannel
-subjects:
-- kind: ServiceAccount
-  name: flannel
-  namespace: kube-system
----
-apiVersion: v1
-kind: ServiceAccount
-metadata:
-  name: flannel
-  namespace: kube-system
-{{end}}
----
-apiVersion: extensions/v1beta1
-kind: PodSecurityPolicy
-metadata:
-  name: psp.flannel.unprivileged
-  annotations:
-    seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
-    seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
-    apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
-    apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
-spec:
-  privileged: false
-  volumes:
-    - configMap
-    - secret
-    - emptyDir
-    - hostPath
-  allowedHostPaths:
-    - pathPrefix: "/etc/cni/net.d"
-    - pathPrefix: "/etc/kube-flannel"
-    - pathPrefix: "/run/flannel"
-  readOnlyRootFilesystem: false
-  # Users and groups
-  runAsUser:
-    rule: RunAsAny
-  supplementalGroups:
-    rule: RunAsAny
-  fsGroup:
-    rule: RunAsAny
-  # Privilege Escalation
-  allowPrivilegeEscalation: false
-  defaultAllowPrivilegeEscalation: false
-  # Capabilities
-  allowedCapabilities: ['NET_ADMIN']
-  defaultAddCapabilities: []
-  requiredDropCapabilities: []
-  # Host namespaces
-  hostPID: false
-  hostIPC: false
-  hostNetwork: true
-  hostPorts:
-  - min: 0
-    max: 65535
-  # SELinux
-  seLinux:
-    # SELinux is not used in CaaSP
-    rule: 'RunAsAny'
----
-kind: ConfigMap
-apiVersion: v1
-metadata:
-  name: 
kube-flannel-cfg - namespace: kube-system - labels: - tier: node - app: flannel -data: - cni-conf.json: | - { - "name": "cbr0", - "plugins": [ - { - "type": "flannel", - "delegate": { - "hairpinMode": true, - "isDefaultGateway": true - } - }, - { - "type": "portmap", - "capabilities": { - "portMappings": true - } - } - ] - } - net-conf.json: | - { - "Network": "{{.ClusterCIDR}}", - "Backend": { - "Type": "{{.FlannelBackend.Type}}", - "VNI": {{.FlannelBackend.VNI}}, - "Port": {{.FlannelBackend.Port}} - } - } ---- -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: kube-flannel - namespace: kube-system - labels: - tier: node - app: flannel -spec: - template: - metadata: - labels: - tier: node - app: flannel - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: beta.kubernetes.io/os - operator: NotIn - values: - - windows - hostNetwork: true - tolerations: - - operator: Exists - effect: NoSchedule - {{- if eq .RBACConfig "rbac"}} - serviceAccountName: flannel - {{end}} - containers: - - name: kube-flannel - image: {{.Image}} - command: - - /opt/bin/flanneld - args: - - --ip-masq - - --kube-subnet-mgr - {{- if .FlannelInterface}} - - --iface={{.FlannelInterface}} - {{end}} - resources: - requests: - cpu: "100m" - memory: "50Mi" - limits: - cpu: "100m" - memory: "50Mi" - securityContext: - privileged: false - capabilities: - add: ["NET_ADMIN"] - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - volumeMounts: - - name: run - mountPath: /run - - name: cni - mountPath: /etc/cni/net.d - - name: flannel-cfg - mountPath: /etc/kube-flannel/ - - name: install-cni - image: {{.CNIImage}} - command: ["/install-cni.sh"] - env: - # The CNI network config to install on each node. - - name: CNI_NETWORK_CONFIG - valueFrom: - configMapKeyRef: - name: kube-flannel-cfg - key: cni-conf.json - - name: CNI_CONF_NAME - value: "10-flannel.conflist" - volumeMounts: - - name: cni - mountPath: /host/etc/cni/net.d - - name: host-cni-bin - mountPath: /host/opt/cni/bin/ - volumes: - - name: run - hostPath: - path: /run - - name: cni - hostPath: - path: /etc/cni/net.d - - name: flannel-cfg - configMap: - name: kube-flannel-cfg - - name: host-cni-bin - hostPath: - path: /opt/cni/bin -` diff --git a/templates/kubedns.go b/templates/kubedns.go deleted file mode 100644 index f03a1779..00000000 --- a/templates/kubedns.go +++ /dev/null @@ -1,314 +0,0 @@ -package templates - -const KubeDNSTemplate = ` ---- -apiVersion: apps/v1beta1 -kind: Deployment -metadata: - name: kube-dns-autoscaler - namespace: kube-system - labels: - k8s-app: kube-dns-autoscaler -spec: - template: - metadata: - labels: - k8s-app: kube-dns-autoscaler - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: beta.kubernetes.io/os - operator: NotIn - values: - - windows - serviceAccountName: kube-dns-autoscaler - containers: - - name: autoscaler - image: {{.KubeDNSAutoScalerImage}} - resources: - requests: - cpu: "20m" - memory: "10Mi" - command: - - /cluster-proportional-autoscaler - - --namespace=kube-system - - --configmap=kube-dns-autoscaler - - --target=Deployment/kube-dns - # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. - # If using small nodes, "nodesPerReplica" should dominate. 
- - --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}} - - --logtostderr=true - - --v=2 ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-dns-autoscaler - namespace: kube-system - labels: - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile -{{- if eq .RBACConfig "rbac"}} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: system:kube-dns-autoscaler -rules: - - apiGroups: [""] - resources: ["nodes"] - verbs: ["list", "watch"] - - apiGroups: [""] - resources: ["replicationcontrollers/scale"] - verbs: ["get", "update"] - - apiGroups: ["extensions"] - resources: ["deployments/scale", "replicasets/scale"] - verbs: ["get", "update"] - - apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "create"] ---- -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: system:kube-dns-autoscaler -subjects: - - kind: ServiceAccount - name: kube-dns-autoscaler - namespace: kube-system -roleRef: - kind: ClusterRole - name: system:kube-dns-autoscaler - apiGroup: rbac.authorization.k8s.io -{{- end }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: kube-dns - namespace: kube-system - labels: - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: kube-dns - namespace: kube-system - labels: - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile -spec: - # replicas: not specified here: - # 1. In order to make Addon Manager do not reconcile this replicas parameter. - # 2. Default is 1. - # 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on. - strategy: - rollingUpdate: - maxSurge: 10% - maxUnavailable: 0 - selector: - matchLabels: - k8s-app: kube-dns - template: - metadata: - labels: - k8s-app: kube-dns - annotations: - scheduler.alpha.kubernetes.io/critical-pod: '' - spec: - nodeSelector: - {{ range $k, $v := .NodeSelector }} - {{ $k }}: "{{ $v }}" - {{ end }} - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - weight: 100 - podAffinityTerm: - labelSelector: - matchExpressions: - - key: k8s-app - operator: In - values: ["kube-dns"] - topologyKey: kubernetes.io/hostname - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: beta.kubernetes.io/os - operator: NotIn - values: - - windows - tolerations: - - key: "CriticalAddonsOnly" - operator: "Exists" - volumes: - - name: kube-dns-config - configMap: - name: kube-dns - optional: true - containers: - - name: kubedns - image: {{.KubeDNSImage}} - resources: - # TODO: Set memory limits when we've profiled the container for large - # clusters, then set request = limit to keep this container in - # guaranteed class. Currently, this container falls into the - # "burstable" category so the kubelet doesn't backoff from restarting it. - limits: - memory: 170Mi - requests: - cpu: 100m - memory: 70Mi - livenessProbe: - httpGet: - path: /healthcheck/kubedns - port: 10054 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - readinessProbe: - httpGet: - path: /readiness - port: 8081 - scheme: HTTP - # we poll on pod startup for the Kubernetes master service and - # only setup the /readiness HTTP server once that's available. 
- initialDelaySeconds: 3 - timeoutSeconds: 5 - args: - - --domain={{.ClusterDomain}}. - - --dns-port=10053 - - --config-dir=/kube-dns-config - - --v=2 - env: - - name: PROMETHEUS_PORT - value: "10055" - ports: - - containerPort: 10053 - name: dns-local - protocol: UDP - - containerPort: 10053 - name: dns-tcp-local - protocol: TCP - - containerPort: 10055 - name: metrics - protocol: TCP - volumeMounts: - - name: kube-dns-config - mountPath: /kube-dns-config - - name: dnsmasq - image: {{.DNSMasqImage}} - livenessProbe: - httpGet: - path: /healthcheck/dnsmasq - port: 10054 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - args: - - -v=2 - - -logtostderr - - -configDir=/etc/k8s/dns/dnsmasq-nanny - - -restartDnsmasq=true - - -- - - -k - - --cache-size=1000 - - --log-facility=- - - --server=/{{.ClusterDomain}}/127.0.0.1#10053 - {{- if .ReverseCIDRs }} - {{- range .ReverseCIDRs }} - - --server=/{{.}}/127.0.0.1#10053 - {{- end }} - {{- else }} - - --server=/in-addr.arpa/127.0.0.1#10053 - - --server=/ip6.arpa/127.0.0.1#10053 - {{- end }} - ports: - - containerPort: 53 - name: dns - protocol: UDP - - containerPort: 53 - name: dns-tcp - protocol: TCP - # see: https://github.com/kubernetes/kubernetes/issues/29055 for details - resources: - requests: - cpu: 150m - memory: 20Mi - volumeMounts: - - name: kube-dns-config - mountPath: /etc/k8s/dns/dnsmasq-nanny - - name: sidecar - image: {{.KubeDNSSidecarImage}} - livenessProbe: - httpGet: - path: /metrics - port: 10054 - scheme: HTTP - initialDelaySeconds: 60 - timeoutSeconds: 5 - successThreshold: 1 - failureThreshold: 5 - args: - - --v=2 - - --logtostderr - - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{.ClusterDomain}},5,A - - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{.ClusterDomain}},5,A - ports: - - containerPort: 10054 - name: metrics - protocol: TCP - resources: - requests: - memory: 20Mi - cpu: 10m - dnsPolicy: Default # Don't use cluster DNS. 
- serviceAccountName: kube-dns ---- -apiVersion: v1 -kind: Service -metadata: - name: kube-dns - namespace: kube-system - labels: - k8s-app: kube-dns - kubernetes.io/cluster-service: "true" - addonmanager.kubernetes.io/mode: Reconcile - kubernetes.io/name: "KubeDNS" -spec: - selector: - k8s-app: kube-dns - clusterIP: {{.ClusterDNSServer}} - ports: - - name: dns - port: 53 - protocol: UDP - - name: dns-tcp - port: 53 - protocol: TCP ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: kube-dns - namespace: kube-system -data: -{{- if .UpstreamNameservers }} - upstreamNameservers: | - [{{range $i, $v := .UpstreamNameservers}}{{if $i}}, {{end}}{{printf "%q" .}}{{end}}] -{{- end }} -{{- if .StubDomains }} - stubDomains: | - {{ GetKubednsStubDomains .StubDomains }} -{{- end }}` diff --git a/templates/metrics.go b/templates/metrics.go deleted file mode 100644 index b9540907..00000000 --- a/templates/metrics.go +++ /dev/null @@ -1,150 +0,0 @@ -package templates - -const MetricsServerTemplate = ` -{{- if eq .RBACConfig "rbac"}} ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: metrics-server:system:auth-delegator -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator -subjects: -- kind: ServiceAccount - name: metrics-server - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: metrics-server-auth-reader - namespace: kube-system -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: -- kind: ServiceAccount - name: metrics-server - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: system:metrics-server -rules: -- apiGroups: - - "" - resources: - - pods - - nodes - - nodes/stats - - namespaces - verbs: - - get - - list - - watch -- apiGroups: - - "extensions" - resources: - - deployments - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: system:metrics-server -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:metrics-server -subjects: -- kind: ServiceAccount - name: metrics-server - namespace: kube-system -{{- end }} ---- -apiVersion: apiregistration.k8s.io/v1beta1 -kind: APIService -metadata: - name: v1beta1.metrics.k8s.io -spec: - service: - name: metrics-server - namespace: kube-system - group: metrics.k8s.io - version: v1beta1 - insecureSkipTLSVerify: true - groupPriorityMinimum: 100 - versionPriority: 100 ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: metrics-server - namespace: kube-system ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: metrics-server - namespace: kube-system - labels: - k8s-app: metrics-server -spec: - selector: - matchLabels: - k8s-app: metrics-server - template: - metadata: - name: metrics-server - labels: - k8s-app: metrics-server - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: beta.kubernetes.io/os - operator: NotIn - values: - - windows - serviceAccountName: metrics-server - containers: - - name: metrics-server - image: {{ .MetricsServerImage }} - imagePullPolicy: Always - command: - - /metrics-server - {{- if eq .Version "v0.3" }} - - --kubelet-insecure-tls - - --kubelet-preferred-address-types=InternalIP - - --logtostderr - {{- else }} - - 
--source=kubernetes.summary_api:https://kubernetes.default.svc?kubeletHttps=true&kubeletPort=10250&useServiceAccount=true&insecure=true - {{- end }} - {{ range $k,$v := .Options }} - - --{{ $k }}={{ $v }} - {{ end }} ---- -apiVersion: v1 -kind: Service -metadata: - name: metrics-server - namespace: kube-system - labels: - kubernetes.io/name: "Metrics-server" -spec: - selector: - k8s-app: metrics-server - ports: - - port: 443 - protocol: TCP - targetPort: 443 -` diff --git a/templates/nginx-ingress.go b/templates/nginx-ingress.go deleted file mode 100644 index 335238fc..00000000 --- a/templates/nginx-ingress.go +++ /dev/null @@ -1,324 +0,0 @@ -package templates - -const NginxIngressTemplate = ` -apiVersion: v1 -kind: Namespace -metadata: - name: ingress-nginx ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: nginx-configuration - namespace: ingress-nginx - labels: - app: ingress-nginx -data: -{{ range $k,$v := .Options }} - {{ $k }}: "{{ $v }}" -{{ end }} ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: tcp-services - namespace: ingress-nginx ---- -kind: ConfigMap -apiVersion: v1 -metadata: - name: udp-services - namespace: ingress-nginx -{{if eq .RBACConfig "rbac"}} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: nginx-ingress-serviceaccount - namespace: ingress-nginx ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: nginx-ingress-clusterrole -rules: - - apiGroups: - - "" - resources: - - configmaps - - endpoints - - nodes - - pods - - secrets - verbs: - - list - - watch - - apiGroups: - - "" - resources: - - nodes - verbs: - - get - - apiGroups: - - "" - resources: - - services - verbs: - - get - - list - - watch - - apiGroups: - - "extensions" - resources: - - ingresses - - daemonsets - verbs: - - get - - list - - watch - - apiGroups: - - "" - resources: - - events - verbs: - - create - - patch - - apiGroups: - - "extensions" - resources: - - ingresses/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: Role -metadata: - name: nginx-ingress-role - namespace: ingress-nginx -rules: - - apiGroups: - - "" - resources: - - configmaps - - pods - - secrets - - namespaces - verbs: - - get - - apiGroups: - - "" - resources: - - configmaps - resourceNames: - # Defaults to "-" - # Here: "-" - # This has to be adapted if you change either parameter - # when launching the nginx-ingress-controller. 
- - "ingress-controller-leader-nginx" - verbs: - - get - - update - - apiGroups: - - "" - resources: - - configmaps - verbs: - - create - - apiGroups: - - "" - resources: - - endpoints - verbs: - - get ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: nginx-ingress-role-nisa-binding - namespace: ingress-nginx -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: nginx-ingress-role -subjects: - - kind: ServiceAccount - name: nginx-ingress-serviceaccount - namespace: ingress-nginx ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: nginx-ingress-clusterrole-nisa-binding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: nginx-ingress-clusterrole -subjects: - - kind: ServiceAccount - name: nginx-ingress-serviceaccount - namespace: ingress-nginx -{{ end }} ---- -apiVersion: extensions/v1beta1 -kind: DaemonSet -metadata: - name: nginx-ingress-controller - namespace: ingress-nginx -spec: - selector: - matchLabels: - app: ingress-nginx - template: - metadata: - labels: - app: ingress-nginx - annotations: - prometheus.io/port: '10254' - prometheus.io/scrape: 'true' - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: beta.kubernetes.io/os - operator: NotIn - values: - - windows - hostNetwork: true - nodeSelector: - {{ range $k, $v := .NodeSelector }} - {{ $k }}: "{{ $v }}" - {{ end }} - {{if eq .RBACConfig "rbac"}} - serviceAccountName: nginx-ingress-serviceaccount - {{ end }} - {{- if ne .AlpineImage ""}} - initContainers: - - command: - - sh - - -c - - sysctl -w net.core.somaxconn=32768; sysctl -w net.ipv4.ip_local_port_range="1024 65535" - image: {{.AlpineImage}} - imagePullPolicy: IfNotPresent - name: sysctl - securityContext: - privileged: true - {{- end }} - containers: - - name: nginx-ingress-controller - image: {{.IngressImage}} - args: - - /nginx-ingress-controller - - --default-backend-service=$(POD_NAMESPACE)/default-http-backend - - --configmap=$(POD_NAMESPACE)/nginx-configuration - - --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services - - --udp-services-configmap=$(POD_NAMESPACE)/udp-services - - --annotations-prefix=nginx.ingress.kubernetes.io - {{ range $k, $v := .ExtraArgs }} - - --{{ $k }}{{if ne $v "" }}={{ $v }}{{end}} - {{ end }} - {{- if eq .AlpineImage ""}} - securityContext: - capabilities: - drop: - - ALL - add: - - NET_BIND_SERVICE - runAsUser: 33 - {{- end }} - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - ports: - - name: http - containerPort: 80 - - name: https - containerPort: 443 - livenessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 - readinessProbe: - failureThreshold: 3 - httpGet: - path: /healthz - port: 10254 - scheme: HTTP - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 ---- -apiVersion: extensions/v1beta1 -kind: Deployment -metadata: - name: default-http-backend - labels: - app: default-http-backend - namespace: ingress-nginx -spec: - replicas: 1 - template: - metadata: - labels: - app: default-http-backend - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: beta.kubernetes.io/os - operator: NotIn - values: - - 
windows - terminationGracePeriodSeconds: 60 - containers: - - name: default-http-backend - # Any image is permissable as long as: - # 1. It serves a 404 page at / - # 2. It serves 200 on a /healthz endpoint - image: {{.IngressBackend}} - livenessProbe: - httpGet: - path: /healthz - port: 8080 - scheme: HTTP - initialDelaySeconds: 30 - timeoutSeconds: 5 - ports: - - containerPort: 8080 - resources: - limits: - cpu: 10m - memory: 20Mi - requests: - cpu: 10m - memory: 20Mi ---- -apiVersion: v1 -kind: Service -metadata: - name: default-http-backend - namespace: ingress-nginx - labels: - app: default-http-backend -spec: - ports: - - port: 80 - targetPort: 8080 - selector: - app: default-http-backend -` diff --git a/templates/templates.go b/templates/templates.go index 626efb8b..26170e65 100644 --- a/templates/templates.go +++ b/templates/templates.go @@ -5,28 +5,11 @@ import ( "encoding/json" "text/template" + "github.com/rancher/rke/metadata" + "github.com/rancher/rke/util" ) -var VersionedTemplate = map[string]map[string]string{ - "calico": map[string]string{ - "v1.15": CalicoTemplateV115, - "v1.14": CalicoTemplateV113, - "v1.13": CalicoTemplateV113, - "default": CalicoTemplateV112, - }, - "canal": map[string]string{ - "v1.15": CanalTemplateV115, - "v1.14": CanalTemplateV113, - "v1.13": CanalTemplateV113, - "default": CanalTemplateV112, - }, - "flannel": map[string]string{ - "v1.15": FlannelTemplateV115, - "default": FlannelTemplate, - }, -} - func CompileTemplateFromMap(tmplt string, configMap interface{}) (string, error) { out := new(bytes.Buffer) t := template.Must(template.New("compiled_template").Funcs(template.FuncMap{"GetKubednsStubDomains": GetKubednsStubDomains}).Parse(tmplt)) @@ -38,7 +21,7 @@ func CompileTemplateFromMap(tmplt string, configMap interface{}) (string, error) func GetVersionedTemplates(templateName string, k8sVersion string) string { - versionedTemplate := VersionedTemplate[templateName] + versionedTemplate := metadata.K8sVersionToTemplates[templateName] if t, ok := versionedTemplate[util.GetTagMajorVersion(k8sVersion)]; ok { return t } @@ -49,3 +32,8 @@ func GetKubednsStubDomains(stubDomains map[string][]string) string { json, _ := json.Marshal(stubDomains) return string(json) } + +// GetDefaultVersionedTemplate returns the "default" entry for templateName from the kontainer-driver-metadata template map; version-specific lookups go through GetVersionedTemplates. +func GetDefaultVersionedTemplate(templateName string) string { + return metadata.K8sVersionToTemplates[templateName]["default"] +} diff --git a/templates/weave.go b/templates/weave.go deleted file mode 100644 index a2ce5ce6..00000000 --- a/templates/weave.go +++ /dev/null @@ -1,240 +0,0 @@ -package templates - -const WeaveTemplate = ` ---- -# This ConfigMap can be used to configure a self-hosted Weave Net installation.
-apiVersion: v1 -kind: List -items: - - apiVersion: v1 - kind: ServiceAccount - metadata: - name: weave-net - namespace: kube-system - - apiVersion: extensions/v1beta1 - kind: DaemonSet - metadata: - name: weave-net - labels: - name: weave-net - namespace: kube-system - spec: - template: - metadata: - annotations: - scheduler.alpha.kubernetes.io/tolerations: >- - [{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}] - labels: - name: weave-net - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: beta.kubernetes.io/os - operator: NotIn - values: - - windows - containers: - - name: weave - command: - - /home/weave/launch.sh - env: - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - - name: IPALLOC_RANGE - value: "{{.ClusterCIDR}}" - {{- if .WeavePassword}} - - name: WEAVE_PASSWORD - value: "{{.WeavePassword}}" - {{- end}} - image: {{.Image}} - readinessProbe: - httpGet: - host: 127.0.0.1 - path: /status - port: 6784 - initialDelaySeconds: 30 - resources: - requests: - cpu: 10m - securityContext: - privileged: true - volumeMounts: - - name: weavedb - mountPath: /weavedb - - name: cni-bin - mountPath: /host/opt - - name: cni-bin2 - mountPath: /host/home - - name: cni-conf - mountPath: /host/etc - - name: dbus - mountPath: /host/var/lib/dbus - - name: lib-modules - mountPath: /lib/modules - - name: xtables-lock - mountPath: /run/xtables.lock - - name: weave-npc - env: - - name: HOSTNAME - valueFrom: - fieldRef: - apiVersion: v1 - fieldPath: spec.nodeName - image: {{.CNIImage}} - resources: - requests: - cpu: 10m - securityContext: - privileged: true - volumeMounts: - - name: xtables-lock - mountPath: /run/xtables.lock - - name: weave-plugins - command: - - /opt/rke-tools/weave-plugins-cni.sh - image: {{.WeaveLoopbackImage}} - securityContext: - privileged: true - volumeMounts: - - name: cni-bin - mountPath: /opt - hostNetwork: true - hostPID: true - restartPolicy: Always - securityContext: - seLinuxOptions: {} - serviceAccountName: weave-net - tolerations: - - operator: Exists - effect: NoSchedule - - operator: Exists - effect: NoExecute - volumes: - - name: weavedb - hostPath: - path: /var/lib/weave - - name: cni-bin - hostPath: - path: /opt - - name: cni-bin2 - hostPath: - path: /home - - name: cni-conf - hostPath: - path: /etc - - name: dbus - hostPath: - path: /var/lib/dbus - - name: lib-modules - hostPath: - path: /lib/modules - - name: xtables-lock - hostPath: - path: /run/xtables.lock - updateStrategy: - type: RollingUpdate -{{- if eq .RBACConfig "rbac"}} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: weave-net - labels: - name: weave-net - namespace: kube-system ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRole -metadata: - name: weave-net - labels: - name: weave-net -rules: - - apiGroups: - - '' - resources: - - pods - - namespaces - - nodes - verbs: - - get - - list - - watch - - apiGroups: - - networking.k8s.io - resources: - - networkpolicies - verbs: - - get - - list - - watch - - apiGroups: - - '' - resources: - - nodes/status - verbs: - - patch - - update ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: weave-net - labels: - name: weave-net -roleRef: - kind: ClusterRole - name: weave-net - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: weave-net - namespace: kube-system ---- -apiVersion: 
rbac.authorization.k8s.io/v1beta1 -kind: Role -metadata: - name: weave-net - labels: - name: weave-net - namespace: kube-system -rules: - - apiGroups: - - '' - resourceNames: - - weave-net - resources: - - configmaps - verbs: - - get - - update - - apiGroups: - - '' - resources: - - configmaps - verbs: - - create ---- -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: weave-net - labels: - name: weave-net - namespace: kube-system -roleRef: - kind: Role - name: weave-net - apiGroup: rbac.authorization.k8s.io -subjects: - - kind: ServiceAccount - name: weave-net - namespace: kube-system -{{- end}} -` diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go index 76cf4852..4d237019 100644 --- a/vendor/github.com/coreos/go-semver/semver/semver.go +++ b/vendor/github.com/coreos/go-semver/semver/semver.go @@ -19,6 +19,7 @@ import ( "bytes" "errors" "fmt" + "github.com/sirupsen/logrus" "regexp" "strconv" "strings" @@ -74,6 +75,7 @@ func (v *Version) Set(version string) error { dotParts := strings.SplitN(version, ".", 3) if len(dotParts) != 3 { + logrus.Debugf("failed to parse version %q: not in dotted-tri format", version) return fmt.Errorf("%s is not in dotted-tri format", version) } diff --git a/vendor/github.com/rancher/kontainer-driver-metadata/rke/k8s_rke_system_images.go b/vendor/github.com/rancher/kontainer-driver-metadata/rke/k8s_rke_system_images.go index fdb6b608..b91a504e 100644 --- a/vendor/github.com/rancher/kontainer-driver-metadata/rke/k8s_rke_system_images.go +++ b/vendor/github.com/rancher/kontainer-driver-metadata/rke/k8s_rke_system_images.go @@ -1001,35 +1001,35 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages { CoreDNS: m("coredns/coredns:1.3.1"), CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"), }, - //// Experimental in Rancher v2.2.5 - //"v1.15.0-rancher1-1": { - // Etcd: m("quay.io/coreos/etcd:v3.3.10-rancher1"), - // Kubernetes: m("rancher/hyperkube:v1.15.0-rancher1"), - // Alpine: m("rancher/rke-tools:v0.1.32"), - // NginxProxy: m("rancher/rke-tools:v0.1.32"), - // CertDownloader: m("rancher/rke-tools:v0.1.32"), - // KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.32"), - // KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"), - // DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"), - // KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"), - // KubeDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"), - // Flannel: m("quay.io/coreos/flannel:v0.11.0-rancher1"), - // FlannelCNI: m("rancher/flannel-cni:v0.3.0-rancher1"), - // CalicoNode: m("quay.io/calico/node:v3.7.3"), - // CalicoCNI: m("quay.io/calico/cni:v3.7.3"), - // CalicoCtl: m("quay.io/calico/ctl:v2.0.0"), - // CanalNode: m("quay.io/calico/node:v3.7.3"), - // CanalCNI: m("quay.io/calico/cni:v3.7.3"), - // CanalFlannel: m("quay.io/coreos/flannel:v0.11.0"), - // WeaveNode: m("weaveworks/weave-kube:2.5.2"), - // WeaveCNI: m("weaveworks/weave-npc:2.5.2"), - // PodInfraContainer: m("gcr.io/google_containers/pause:3.1"), - // Ingress: m("rancher/nginx-ingress-controller:0.21.0-rancher3"), - // IngressBackend: m("k8s.gcr.io/defaultbackend:1.5-rancher1"), - // MetricsServer: m("gcr.io/google_containers/metrics-server:v0.3.3"), - // CoreDNS: m("coredns/coredns:1.3.1"), - // CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"), - //}, + // Experimental in Rancher v2.2.5 + "v1.15.0-rancher1-1": {
+ Etcd: m("quay.io/coreos/etcd:v3.3.10-rancher1"), + Kubernetes: m("rancher/hyperkube:v1.15.0-rancher1"), + Alpine: m("rancher/rke-tools:v0.1.34"), + NginxProxy: m("rancher/rke-tools:v0.1.34"), + CertDownloader: m("rancher/rke-tools:v0.1.34"), + KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.34"), + KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"), + DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"), + KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"), + KubeDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"), + Flannel: m("quay.io/coreos/flannel:v0.11.0-rancher1"), + FlannelCNI: m("rancher/flannel-cni:v0.3.0-rancher1"), + CalicoNode: m("quay.io/calico/node:v3.7.3"), + CalicoCNI: m("quay.io/calico/cni:v3.7.3"), + CalicoCtl: m("quay.io/calico/ctl:v2.0.0"), + CanalNode: m("quay.io/calico/node:v3.7.3"), + CanalCNI: m("quay.io/calico/cni:v3.7.3"), + CanalFlannel: m("quay.io/coreos/flannel:v0.11.0"), + WeaveNode: m("weaveworks/weave-kube:2.5.2"), + WeaveCNI: m("weaveworks/weave-npc:2.5.2"), + PodInfraContainer: m("gcr.io/google_containers/pause:3.1"), + Ingress: m("rancher/nginx-ingress-controller:0.21.0-rancher3"), + IngressBackend: m("k8s.gcr.io/defaultbackend:1.5-rancher1"), + MetricsServer: m("gcr.io/google_containers/metrics-server:v0.3.3"), + CoreDNS: m("coredns/coredns:1.3.1"), + CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"), + }, // k8s version from 2.1.x release with old rke-tools to allow upgrade from 2.1.x clusters // without all clusters being restarted "v1.12.5-rancher1-1": {