Mirror of https://github.com/rancher/rke.git (synced 2025-08-31 22:46:25 +00:00)

Commit c191ed6202: use k8s version info from kontainer-driver-metadata
Committed by: Alena Prokharchyk
Parent: 8c8e3d1c6b
@@ -1,8 +1,10 @@
package addons

import "github.com/rancher/rke/templates"
import (
	rkeData "github.com/rancher/kontainer-driver-metadata/rke/templates"
	"github.com/rancher/rke/templates"
)

func GetCoreDNSManifest(CoreDNSConfig interface{}) (string, error) {
	return templates.CompileTemplateFromMap(templates.CoreDNSTemplate, CoreDNSConfig)
	return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.CoreDNS), CoreDNSConfig)
}

@@ -1,8 +1,10 @@
package addons

import "github.com/rancher/rke/templates"
import (
	rkeData "github.com/rancher/kontainer-driver-metadata/rke/templates"
	"github.com/rancher/rke/templates"
)

func GetNginxIngressManifest(IngressConfig interface{}) (string, error) {
	return templates.CompileTemplateFromMap(templates.NginxIngressTemplate, IngressConfig)
	return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.NginxIngress), IngressConfig)
}

@@ -1,8 +1,11 @@
package addons

import "github.com/rancher/rke/templates"
import (
	rkeData "github.com/rancher/kontainer-driver-metadata/rke/templates"
	"github.com/rancher/rke/templates"
)

func GetKubeDNSManifest(KubeDNSConfig interface{}) (string, error) {
	return templates.CompileTemplateFromMap(templates.KubeDNSTemplate, KubeDNSConfig)
	return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.KubeDNS), KubeDNSConfig)
}

@@ -1,8 +1,11 @@
package addons

import "github.com/rancher/rke/templates"
import (
	rkeData "github.com/rancher/kontainer-driver-metadata/rke/templates"
	"github.com/rancher/rke/templates"
)

func GetMetricsServerManifest(MetricsServerConfig interface{}) (string, error) {
	return templates.CompileTemplateFromMap(templates.MetricsServerTemplate, MetricsServerConfig)
	return templates.CompileTemplateFromMap(templates.GetDefaultVersionedTemplate(rkeData.MetricsServer), MetricsServerConfig)
}
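All four addon getters above now resolve their manifests through templates.GetDefaultVersionedTemplate instead of a hardcoded template constant. That helper's implementation is not included in this diff; the following is only a minimal sketch of what such a lookup could look like, assuming the template map is keyed first by addon name and then by Kubernetes version, with a "default" fallback key (both assumptions are for illustration, not taken from the source):

// Hypothetical sketch; the real helpers live in the rke/templates package and may differ.
package templates

import "github.com/rancher/rke/metadata"

// GetDefaultVersionedTemplate resolves an addon template for the default
// Kubernetes version recorded by metadata.InitMetadata.
func GetDefaultVersionedTemplate(templateName string) string {
	return GetVersionedTemplates(templateName, metadata.DefaultK8sVersion)
}

// GetVersionedTemplates picks a template body for the given addon and version
// from the map populated by metadata.InitMetadata.
func GetVersionedTemplates(templateName, k8sVersion string) string {
	if byVersion, ok := metadata.K8sVersionToTemplates[templateName]; ok {
		if tmpl, ok := byVersion[k8sVersion]; ok {
			return tmpl
		}
		return byVersion["default"] // assumed fallback key, not confirmed by this diff
	}
	return ""
}

The same GetVersionedTemplates call is what the Canal and Weave manifests switch to further down, so the addons and the network plugins share one version-aware template path.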
@@ -3,6 +3,7 @@ package cluster
import (
"context"
"fmt"
"github.com/rancher/rke/metadata"
"strings"

"github.com/rancher/rke/cloudprovider"

@@ -25,8 +26,6 @@ const (
DefaultClusterName = "local"
DefaultClusterSSHKeyPath = "~/.ssh/id_rsa"

DefaultK8sVersion = v3.DefaultK8s

DefaultSSHPort = "22"
DefaultDockerSockPath = "/var/run/docker.sock"

@@ -137,7 +136,7 @@ func (c *Cluster) setClusterDefaults(ctx context.Context, flags ExternalFlags) e
c.ClusterName = DefaultClusterName
}
if len(c.Version) == 0 {
c.Version = DefaultK8sVersion
c.Version = metadata.DefaultK8sVersion
}
if c.AddonJobTimeout == 0 {
c.AddonJobTimeout = k8s.DefaultTimeout

@@ -229,7 +228,7 @@ func (c *Cluster) setClusterServicesDefaults() {
func (c *Cluster) setClusterImageDefaults() error {
var privRegURL string

imageDefaults, ok := v3.AllK8sVersions[c.Version]
imageDefaults, ok := metadata.K8sVersionToRKESystemImages[c.Version]
if !ok {
return nil
}

@@ -1,13 +1,14 @@
package cluster

import (
"github.com/rancher/rke/metadata"
"github.com/rancher/rke/services"
"github.com/rancher/types/apis/management.cattle.io/v3"
)

func GetLocalRKEConfig() *v3.RancherKubernetesEngineConfig {
rkeLocalNode := GetLocalRKENodeConfig()
imageDefaults := v3.K8sVersionToRKESystemImages[DefaultK8sVersion]
imageDefaults := metadata.K8sVersionToRKESystemImages[metadata.DefaultK8sVersion]

rkeServices := v3.RKEConfigServices{
Kubelet: v3.KubeletService{

@@ -251,7 +251,7 @@ func (c *Cluster) getNetworkPluginManifest(pluginConfig map[string]interface{})
case CanalNetworkPlugin:
return templates.CompileTemplateFromMap(templates.GetVersionedTemplates(CanalNetworkPlugin, c.Version), pluginConfig)
case WeaveNetworkPlugin:
return templates.CompileTemplateFromMap(templates.WeaveTemplate, pluginConfig)
return templates.CompileTemplateFromMap(templates.GetVersionedTemplates(WeaveNetworkPlugin, c.Version), pluginConfig)
default:
return "", fmt.Errorf("[network] Unsupported network plugin: %s", c.Network.Plugin)
}
@@ -15,6 +15,7 @@ import (
"github.com/rancher/rke/docker"
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/k8s"
"github.com/rancher/rke/metadata"
"github.com/rancher/rke/pki"
"github.com/rancher/rke/services"
"github.com/rancher/rke/util"

@@ -120,19 +121,8 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string) v3.Pr
c.getRKEToolsEntryPoint(),
"kube-apiserver",
}
baseEnabledAdmissionPlugins := []string{
"DefaultStorageClass",
"DefaultTolerationSeconds",
"LimitRanger",
"NamespaceLifecycle",
"NodeRestriction",
"ResourceQuota",
"ServiceAccount",
}

CommandArgs := map[string]string{
"allow-privileged": "true",
"anonymous-auth": "false",
"bind-address": "0.0.0.0",
"client-ca-file": pki.GetCertPath(pki.CACertName),
"cloud-provider": c.CloudProvider.Name,
"etcd-cafile": etcdCAClientCert,

@@ -140,24 +130,15 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string) v3.Pr
"etcd-keyfile": etcdClientKey,
"etcd-prefix": etcdPathPrefix,
"etcd-servers": etcdConnectionString,
"insecure-port": "0",
"kubelet-client-certificate": pki.GetCertPath(pki.KubeAPICertName),
"kubelet-client-key": pki.GetKeyPath(pki.KubeAPICertName),
"kubelet-preferred-address-types": "InternalIP,ExternalIP,Hostname",
"profiling": "false",
"proxy-client-cert-file": pki.GetCertPath(pki.APIProxyClientCertName),
"proxy-client-key-file": pki.GetKeyPath(pki.APIProxyClientCertName),
"requestheader-allowed-names": pki.APIProxyClientCertName,
"requestheader-client-ca-file": pki.GetCertPath(pki.RequestHeaderCACertName),
"requestheader-extra-headers-prefix": "X-Remote-Extra-",
"requestheader-group-headers": "X-Remote-Group",
"requestheader-username-headers": "X-Remote-User",
"secure-port": "6443",
"service-account-key-file": pki.GetKeyPath(pki.ServiceAccountTokenKeyName),
"service-account-lookup": "true",
"service-cluster-ip-range": c.Services.KubeAPI.ServiceClusterIPRange,
"service-node-port-range": c.Services.KubeAPI.ServiceNodePortRange,
"storage-backend": "etcd3",
"tls-cert-file": pki.GetCertPath(pki.KubeAPICertName),
"tls-private-key-file": pki.GetKeyPath(pki.KubeAPICertName),
}

@@ -198,37 +179,15 @@ func (c *Cluster) BuildKubeAPIProcess(host *hosts.Host, prefixPath string) v3.Pr
CommandArgs["advertise-address"] = host.InternalAddress
}

// PodSecurityPolicy
if c.Services.KubeAPI.PodSecurityPolicy {
CommandArgs["runtime-config"] = "extensions/v1beta1/podsecuritypolicy=true"
baseEnabledAdmissionPlugins = append(baseEnabledAdmissionPlugins, "PodSecurityPolicy")
}

// AlwaysPullImages
for _, optionName := range admissionControlOptionNames {
if _, ok := CommandArgs[optionName]; ok {
if c.Services.KubeAPI.AlwaysPullImages {
baseEnabledAdmissionPlugins = append(baseEnabledAdmissionPlugins, "AlwaysPullImages")
}

// Admission control plugins
// Resolution order:
// k8s_defaults.go K8sVersionServiceOptions
// enabledAdmissionPlugins
// cluster.yml extra_args overwrites it all
for _, optionName := range admissionControlOptionNames {
if _, ok := CommandArgs[optionName]; ok {
enabledAdmissionPlugins := strings.Split(CommandArgs[optionName], ",")
enabledAdmissionPlugins = append(enabledAdmissionPlugins, baseEnabledAdmissionPlugins...)

// Join unique slice as arg
CommandArgs[optionName] = strings.Join(util.UniqueStringSlice(enabledAdmissionPlugins), ",")
break
}
}
if c.Services.KubeAPI.PodSecurityPolicy {
CommandArgs["runtime-config"] = "extensions/v1beta1/podsecuritypolicy=true"
for _, optionName := range admissionControlOptionNames {
if _, ok := CommandArgs[optionName]; ok {
CommandArgs[optionName] = CommandArgs[optionName] + ",PodSecurityPolicy,AlwaysPullImages"
} else {
CommandArgs[optionName] = CommandArgs[optionName] + ",PodSecurityPolicy"
}
break
}
}
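The admission-plugin merge above splits the version-specific enable flag, appends baseEnabledAdmissionPlugins, and relies on util.UniqueStringSlice to drop duplicates before re-joining. That helper is not shown in this diff; a minimal sketch of the behavior the merge depends on, assuming order-preserving de-duplication:

// Sketch of the de-duplication the admission-plugin merge relies on; the real
// helper lives in github.com/rancher/rke/util and may differ in detail.
package util

// UniqueStringSlice returns the input with duplicates removed, keeping the
// order of first occurrence so explicitly configured plugins stay in front.
func UniqueStringSlice(elements []string) []string {
	seen := map[string]bool{}
	var unique []string
	for _, e := range elements {
		if !seen[e] {
			seen[e] = true
			unique = append(unique, e)
		}
	}
	return unique
}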
@@ -284,23 +243,12 @@ func (c *Cluster) BuildKubeControllerProcess(prefixPath string) v3.Process {
}

CommandArgs := map[string]string{
"address": "0.0.0.0",
"allow-untagged-cloud": "true",
"allocate-node-cidrs": "true",
"cloud-provider": c.CloudProvider.Name,
"cluster-cidr": c.ClusterCIDR,
"configure-cloud-routes": "false",
"enable-hostpath-provisioner": "false",
"kubeconfig": pki.GetConfigPath(pki.KubeControllerCertName),
"leader-elect": "true",
"node-monitor-grace-period": "40s",
"pod-eviction-timeout": "5m0s",
"profiling": "false",
"root-ca-file": pki.GetCertPath(pki.CACertName),
"service-account-private-key-file": pki.GetKeyPath(pki.ServiceAccountTokenKeyName),
"service-cluster-ip-range": c.Services.KubeController.ServiceClusterIPRange,
"terminated-pod-gc-threshold": "1000",
"v": "2",
}
// Best security practice is to listen on localhost, but DinD uses private container network instead of Host.
if c.DinD {

@@ -382,30 +330,15 @@ func (c *Cluster) BuildKubeletProcess(host *hosts.Host, prefixPath string) v3.Pr
}

CommandArgs := map[string]string{
"address": "0.0.0.0",
"anonymous-auth": "false",
"authentication-token-webhook": "true",
"cgroups-per-qos": "True",
"client-ca-file": pki.GetCertPath(pki.CACertName),
"cloud-provider": c.CloudProvider.Name,
"cluster-dns": c.ClusterDNSServer,
"cluster-domain": c.ClusterDomain,
"cni-bin-dir": "/opt/cni/bin",
"cni-conf-dir": "/etc/cni/net.d",
"enforce-node-allocatable": "",
"event-qps": "0",
"fail-swap-on": strconv.FormatBool(c.Services.Kubelet.FailSwapOn),
"hostname-override": host.HostnameOverride,
"kubeconfig": pki.GetConfigPath(pki.KubeNodeCertName),
"make-iptables-util-chains": "true",
"network-plugin": "cni",
"pod-infra-container-image": c.Services.Kubelet.InfraContainerImage,
"read-only-port": "0",
"resolv-conf": "/etc/resolv.conf",
"root-dir": path.Join(prefixPath, "/var/lib/kubelet"),
"streaming-connection-idle-timeout": "30m",
"volume-plugin-dir": "/var/lib/kubelet/volumeplugins",
"v": "2",
}
if host.IsControl && !host.IsWorker {
CommandArgs["register-with-taints"] = unschedulableControlTaint

@@ -523,15 +456,10 @@ func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, prefixPath string) v3.

CommandArgs := map[string]string{
"cluster-cidr": c.ClusterCIDR,
"v": "2",
"healthz-bind-address": "127.0.0.1",
"hostname-override": host.HostnameOverride,
"kubeconfig": pki.GetConfigPath(pki.KubeProxyCertName),
}
// Best security practice is to listen on localhost, but DinD uses private container network instead of Host.
if c.DinD {
CommandArgs["healthz-bind-address"] = "0.0.0.0"
}

// check if our version has specific options for this component
serviceOptions := c.GetKubernetesServicesOptions()
if serviceOptions.Kubeproxy != nil {

@@ -545,6 +473,11 @@ func (c *Cluster) BuildKubeProxyProcess(host *hosts.Host, prefixPath string) v3.
}
}

// Best security practice is to listen on localhost, but DinD uses private container network instead of Host.
if c.DinD {
CommandArgs["healthz-bind-address"] = "0.0.0.0"
}

VolumesFrom := []string{
services.SidekickContainerName,
}

@@ -624,10 +557,6 @@ func (c *Cluster) BuildSchedulerProcess(prefixPath string) v3.Process {
}

CommandArgs := map[string]string{
"leader-elect": "true",
"v": "2",
"address": "0.0.0.0",
"profiling": "false",
"kubeconfig": pki.GetConfigPath(pki.KubeSchedulerCertName),
}

@@ -860,7 +789,7 @@ func (c *Cluster) GetKubernetesServicesOptions() v3.KubernetesServicesOptions {
clusterMajorVersion = k8sImageMajorVersion
}

serviceOptions, ok := v3.K8sVersionServiceOptions[clusterMajorVersion]
serviceOptions, ok := metadata.K8sVersionToServiceOptions[clusterMajorVersion]
if ok {
return serviceOptions
}
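With the hardcoded flag maps trimmed out of the Build*Process functions, per-version defaults now come from metadata.K8sVersionToServiceOptions, keyed by the cluster's major Kubernetes version (for example "v1.14"). The merge into each component's CommandArgs is not fully shown here; a hedged sketch of the usual pattern, where mergeServiceOptions and the empty-value convention are illustrative assumptions rather than code from this commit:

// Illustrative only: fold version-specific options (e.g. serviceOptions.Kubeproxy)
// into a component's argument map before cluster.yml extra_args overwrite them.
func mergeServiceOptions(commandArgs map[string]string, options map[string]string) {
	for k, v := range options {
		if v == "" {
			// assumed convention: an empty value removes the flag entirely
			delete(commandArgs, k)
			continue
		}
		commandArgs[k] = v
	}
}

A caller such as BuildKubeProxyProcess would then do mergeServiceOptions(CommandArgs, serviceOptions.Kubeproxy) right after the serviceOptions.Kubeproxy != nil check shown above.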
@@ -3,13 +3,13 @@ package cluster
import (
"context"
"fmt"
"github.com/rancher/rke/metadata"
"strings"

"github.com/rancher/rke/log"
"github.com/rancher/rke/pki"
"github.com/rancher/rke/services"
"github.com/rancher/rke/util"
v3 "github.com/rancher/types/apis/management.cattle.io/v3"
"k8s.io/apimachinery/pkg/util/validation"
)

@@ -205,7 +205,7 @@ func validateVersion(ctx context.Context, c *Cluster) error {
if err != nil {
return fmt.Errorf("%s is not valid semver", c.Version)
}
_, ok := v3.AllK8sVersions[c.Version]
_, ok := metadata.K8sVersionToRKESystemImages[c.Version]
if !ok {
if err := validateSystemImages(c); err != nil {
return fmt.Errorf("%s is an unsupported Kubernetes version and system images are not populated: %v", c.Version, err)

@@ -213,9 +213,9 @@ func validateVersion(ctx context.Context, c *Cluster) error {
return nil
}

if _, ok := v3.K8sBadVersions[c.Version]; ok {
if _, ok := metadata.K8sBadVersions[c.Version]; ok {
log.Warnf(ctx, "%s version exists but its recommended to install this version - see 'rke config --system-images --all' for versions supported with this release", c.Version)
return nil
return fmt.Errorf("%s is an unsupported Kubernetes version and system images are not populated: %v", c.Version, err)
}

return nil
@@ -3,6 +3,7 @@ package cmd
import (
"context"
"fmt"
"github.com/rancher/rke/metadata"
"io/ioutil"
"os"
"path/filepath"

@@ -74,6 +75,9 @@ func ClusterInit(ctx context.Context, rkeConfig *v3.RancherKubernetesEngineConfi
if len(flags.CertificateDir) == 0 {
flags.CertificateDir = cluster.GetCertificateDirPath(flags.ClusterFilePath, flags.ConfigDir)
}
if err := metadata.InitMetadata(ctx); err != nil {
return err
}
rkeFullState, _ := cluster.ReadStateFile(ctx, stateFilePath)

kubeCluster, err := cluster.InitClusterObject(ctx, rkeConfig, flags)
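Both CLI entry points now make sure the metadata package is initialized before any of its maps are consulted: ClusterInit calls metadata.InitMetadata(ctx) up front, and clusterConfig (below) guards with a nil check on K8sVersionToRKESystemImages. A condensed sketch of that calling pattern, using only identifiers that appear in this diff (the wrapper function name is hypothetical):

// ensureMetadata is a hypothetical wrapper illustrating the pattern used by
// ClusterInit and clusterConfig: load the version metadata once before reading it.
func ensureMetadata(ctx context.Context) error {
	if metadata.K8sVersionToRKESystemImages == nil {
		if err := metadata.InitMetadata(ctx); err != nil {
			return err
		}
	}
	return nil
}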
@@ -2,7 +2,9 @@ package cmd

import (
"bufio"
"context"
"fmt"
"github.com/rancher/rke/metadata"
"io/ioutil"
"os"
"reflect"

@@ -95,6 +97,9 @@ func writeConfig(cluster *v3.RancherKubernetesEngineConfig, configFile string, p

func clusterConfig(ctx *cli.Context) error {
if ctx.Bool("system-images") {
if metadata.K8sVersionToRKESystemImages == nil {
metadata.InitMetadata(context.Background())
}
return generateSystemImagesList(ctx.String("version"), ctx.Bool("all"))
}
configFile := ctx.String("name")

@@ -270,14 +275,14 @@ func getHostConfig(reader *bufio.Reader, index int, clusterSSHKeyPath string) (*
}

func getSystemImagesConfig(reader *bufio.Reader) (*v3.RKESystemImages, error) {
imageDefaults := v3.K8sVersionToRKESystemImages[cluster.DefaultK8sVersion]
imageDefaults := metadata.K8sVersionToRKESystemImages[metadata.DefaultK8sVersion]

kubeImage, err := getConfig(reader, "Kubernetes Docker image", imageDefaults.Kubernetes)
if err != nil {
return nil, err
}

systemImages, ok := v3.K8sVersionToRKESystemImages[kubeImage]
systemImages, ok := metadata.K8sVersionToRKESystemImages[kubeImage]
if ok {
return &systemImages, nil
}

@@ -403,10 +408,10 @@ func getAddonManifests(reader *bufio.Reader) ([]string, error) {
func generateSystemImagesList(version string, all bool) error {
allVersions := []string{}
currentVersionImages := make(map[string]v3.RKESystemImages)
for _, version := range v3.K8sVersionsCurrent {
if _, ok := v3.K8sBadVersions[version]; !ok {
for _, version := range metadata.K8sVersionsCurrent {
if _, ok := metadata.K8sBadVersions[version]; !ok {
allVersions = append(allVersions, version)
currentVersionImages[version] = v3.AllK8sVersions[version]
currentVersionImages[version] = metadata.K8sVersionToRKESystemImages[version]
}
}
if all {

@@ -423,11 +428,11 @@ func generateSystemImagesList(version string, all bool) error {
return nil
}
if len(version) == 0 {
version = v3.DefaultK8s
version = metadata.DefaultK8sVersion
}
rkeSystemImages := v3.AllK8sVersions[version]
if _, ok := v3.K8sBadVersions[version]; ok {
return fmt.Errorf("k8s version is not recommended, supported versions are: %v", allVersions)
rkeSystemImages := metadata.K8sVersionToRKESystemImages[version]
if _, ok := metadata.K8sBadVersions[version]; ok {
return fmt.Errorf("k8s version is not supported, supported versions are: %v", allVersions)
}
if rkeSystemImages == (v3.RKESystemImages{}) {
return fmt.Errorf("k8s version is not supported, supported versions are: %v", allVersions)
@@ -11,7 +11,7 @@ import (
"github.com/rancher/rke/hosts"
"github.com/rancher/rke/log"
"github.com/rancher/rke/pki"
v3 "github.com/rancher/types/apis/management.cattle.io/v3"
"github.com/rancher/types/apis/management.cattle.io/v3"
"github.com/urfave/cli"
"k8s.io/client-go/util/cert"
)

@@ -82,7 +82,6 @@ func ClusterUp(ctx context.Context, dialersOptions hosts.DialersOptions, flags c
if err != nil {
return APIURL, caCrt, clientCert, clientKey, nil, err
}

kubeCluster, err := cluster.InitClusterObject(ctx, clusterState.DesiredState.RancherKubernetesEngineConfig.DeepCopy(), flags)
if err != nil {
return APIURL, caCrt, clientCert, clientKey, nil, err
main.go (3 lines changed)

@@ -1,6 +1,7 @@
package main

import (
"github.com/rancher/rke/metadata"
"os"
"regexp"

@@ -30,8 +31,8 @@ func mainErr() error {
if ctx.GlobalBool("debug") {
logrus.SetLevel(logrus.DebugLevel)
}
logrus.Debugf("RKE version %s", app.Version)
if released.MatchString(app.Version) {
metadata.RKEVersion = app.Version
return nil
}
logrus.Warnf("This is not an officially supported version (%s) of RKE. Please download the latest official release at https://github.com/rancher/rke/releases/latest", app.Version)
metadata/metadata.go (new file, 94 lines)

@@ -0,0 +1,94 @@
package metadata

import (
	"context"
	"github.com/sirupsen/logrus"
	"strings"

	mVersion "github.com/mcuadros/go-version"
	"github.com/rancher/kontainer-driver-metadata/rke"
	"github.com/rancher/types/apis/management.cattle.io/v3"
)

var (
	RKEVersion                  string
	DefaultK8sVersion           string
	K8sVersionToTemplates       map[string]map[string]string
	K8sVersionToRKESystemImages map[string]v3.RKESystemImages
	K8sVersionToServiceOptions  map[string]v3.KubernetesServicesOptions
	K8sVersionsCurrent          []string
	K8sBadVersions              = map[string]bool{}
)

func InitMetadata(ctx context.Context) error {
	logrus.Infof("calling init")
	initK8sRKESystemImages()
	initAddonTemplates()
	initServiceOptions()
	return nil
}

const RKEVersionDev = "0.2.3"

func initAddonTemplates() {
	K8sVersionToTemplates = rke.DriverData.K8sVersionedTemplates
}

func initServiceOptions() {
	K8sVersionToServiceOptions = interface{}(rke.DriverData.K8sVersionServiceOptions).(map[string]v3.KubernetesServicesOptions)
}

func initK8sRKESystemImages() {
	K8sVersionToRKESystemImages = map[string]v3.RKESystemImages{}
	rkeData := rke.DriverData
	// non released versions
	if RKEVersion == "" {
		RKEVersion = RKEVersionDev
	}
	DefaultK8sVersion = rkeData.RKEDefaultK8sVersions["default"]
	if defaultK8sVersion, ok := rkeData.RKEDefaultK8sVersions[RKEVersion]; ok {
		DefaultK8sVersion = defaultK8sVersion
	}
	maxVersionForMajorK8sVersion := map[string]string{}
	for k8sVersion, systemImages := range rkeData.K8sVersionRKESystemImages {
		rkeVersionInfo, ok := rkeData.K8sVersionInfo[k8sVersion]
		if ok {
			// RKEVersion = 0.2.4, DeprecateRKEVersion = 0.2.2
			if rkeVersionInfo.DeprecateRKEVersion != "" && mVersion.Compare(RKEVersion, rkeVersionInfo.DeprecateRKEVersion, ">=") {
				K8sBadVersions[k8sVersion] = true
				continue
			}
			// RKEVersion = 0.2.4, MinVersion = 0.2.5, don't store
			lowerThanMin := rkeVersionInfo.MinRKEVersion != "" && mVersion.Compare(RKEVersion, rkeVersionInfo.MinRKEVersion, "<")
			if lowerThanMin {
				continue
			}
		}
		// store all for upgrades
		K8sVersionToRKESystemImages[k8sVersion] = interface{}(systemImages).(v3.RKESystemImages)

		majorVersion := getTagMajorVersion(k8sVersion)
		maxVersionInfo, ok := rkeData.K8sVersionInfo[majorVersion]
		if ok {
			// RKEVersion = 0.2.4, MaxVersion = 0.2.3, don't use in current
			greaterThanMax := maxVersionInfo.MaxRKEVersion != "" && mVersion.Compare(RKEVersion, maxVersionInfo.MaxRKEVersion, ">")
			if greaterThanMax {
				continue
			}
		}
		if curr, ok := maxVersionForMajorK8sVersion[majorVersion]; !ok || k8sVersion > curr {
			maxVersionForMajorK8sVersion[majorVersion] = k8sVersion
		}
	}
	for _, k8sVersion := range maxVersionForMajorK8sVersion {
		K8sVersionsCurrent = append(K8sVersionsCurrent, k8sVersion)
	}
}

func getTagMajorVersion(tag string) string {
	splitTag := strings.Split(tag, ".")
	if len(splitTag) < 2 {
		return ""
	}
	return strings.Join(splitTag[:2], ".")
}
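To make the gating in initK8sRKESystemImages concrete, a short walk-through with hypothetical values (the version strings below are examples in the format RKE uses, not entries taken from the metadata):

// Assuming RKEVersion resolves to the dev default "0.2.3" above:
//   - a k8s version whose DeprecateRKEVersion is "0.2.2" is recorded in
//     K8sBadVersions and skipped, since 0.2.3 >= 0.2.2;
//   - a k8s version whose MinRKEVersion is "0.2.4" is skipped entirely,
//     since 0.2.3 < 0.2.4;
//   - every remaining version is stored in K8sVersionToRKESystemImages, and
//     the highest version per major release ends up in K8sVersionsCurrent.
//
// getTagMajorVersion derives the major-release key used for that grouping:
//   getTagMajorVersion("v1.14.1-rancher1") == "v1.14"
//   getTagMajorVersion("v1") == "" (fewer than two dot-separated parts)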
templates/calico.go (1836 lines changed): file diff suppressed because it is too large
templates/canal.go (1757 lines changed): file diff suppressed because it is too large
@@ -1,290 +0,0 @@
|
||||
package templates
|
||||
|
||||
const CoreDNSTemplate = `
|
||||
---
|
||||
{{- if eq .RBACConfig "rbac"}}
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
name: system:coredns
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- endpoints
|
||||
- services
|
||||
- pods
|
||||
- namespaces
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
annotations:
|
||||
rbac.authorization.kubernetes.io/autoupdate: "true"
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
name: system:coredns
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:coredns
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
{{- end }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
addonmanager.kubernetes.io/mode: EnsureExists
|
||||
data:
|
||||
Corefile: |
|
||||
.:53 {
|
||||
errors
|
||||
health
|
||||
kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ "in-addr.arpa ip6.arpa" }}{{ end }} {
|
||||
pods insecure
|
||||
upstream
|
||||
fallthrough in-addr.arpa ip6.arpa
|
||||
ttl 30
|
||||
}
|
||||
prometheus :9153
|
||||
{{- if .UpstreamNameservers }}
|
||||
forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}
|
||||
{{- else }}
|
||||
forward . "/etc/resolv.conf"
|
||||
{{- end }}
|
||||
cache 30
|
||||
loop
|
||||
reload
|
||||
loadbalance
|
||||
}
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: coredns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
kubernetes.io/name: "CoreDNS"
|
||||
spec:
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kube-dns
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
|
||||
spec:
|
||||
priorityClassName: system-cluster-critical
|
||||
{{- if eq .RBACConfig "rbac"}}
|
||||
serviceAccountName: coredns
|
||||
{{- end }}
|
||||
tolerations:
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
nodeSelector:
|
||||
beta.kubernetes.io/os: linux
|
||||
{{ range $k, $v := .NodeSelector }}
|
||||
{{ $k }}: "{{ $v }}"
|
||||
{{ end }}
|
||||
containers:
|
||||
- name: coredns
|
||||
image: {{.CoreDNSImage}}
|
||||
imagePullPolicy: IfNotPresent
|
||||
resources:
|
||||
limits:
|
||||
memory: 170Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 70Mi
|
||||
args: [ "-conf", "/etc/coredns/Corefile" ]
|
||||
volumeMounts:
|
||||
- name: config-volume
|
||||
mountPath: /etc/coredns
|
||||
readOnly: true
|
||||
ports:
|
||||
- containerPort: 53
|
||||
name: dns
|
||||
protocol: UDP
|
||||
- containerPort: 53
|
||||
name: dns-tcp
|
||||
protocol: TCP
|
||||
- containerPort: 9153
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /health
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
securityContext:
|
||||
allowPrivilegeEscalation: false
|
||||
capabilities:
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
drop:
|
||||
- all
|
||||
readOnlyRootFilesystem: true
|
||||
dnsPolicy: Default
|
||||
volumes:
|
||||
- name: config-volume
|
||||
configMap:
|
||||
name: coredns
|
||||
items:
|
||||
- key: Corefile
|
||||
path: Corefile
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
annotations:
|
||||
prometheus.io/port: "9153"
|
||||
prometheus.io/scrape: "true"
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
kubernetes.io/name: "CoreDNS"
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: kube-dns
|
||||
clusterIP: {{.ClusterDNSServer}}
|
||||
ports:
|
||||
- name: dns
|
||||
port: 53
|
||||
protocol: UDP
|
||||
- name: dns-tcp
|
||||
port: 53
|
||||
protocol: TCP
|
||||
- name: metrics
|
||||
port: 9153
|
||||
protocol: TCP
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: coredns-autoscaler
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: coredns-autoscaler
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: coredns-autoscaler
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: coredns-autoscaler
|
||||
spec:
|
||||
{{- if eq .RBACConfig "rbac"}}
|
||||
serviceAccountName: coredns-autoscaler
|
||||
{{- end }}
|
||||
nodeSelector:
|
||||
beta.kubernetes.io/os: linux
|
||||
containers:
|
||||
- name: autoscaler
|
||||
image: {{.CoreDNSAutoScalerImage}}
|
||||
resources:
|
||||
requests:
|
||||
cpu: "20m"
|
||||
memory: "10Mi"
|
||||
command:
|
||||
- /cluster-proportional-autoscaler
|
||||
- --namespace=kube-system
|
||||
- --configmap=coredns-autoscaler
|
||||
- --target=Deployment/coredns
|
||||
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
|
||||
# If using small nodes, "nodesPerReplica" should dominate.
|
||||
- --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}}
|
||||
- --logtostderr=true
|
||||
- --v=2
|
||||
{{- if eq .RBACConfig "rbac"}}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: coredns-autoscaler
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: system:coredns-autoscaler
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["replicationcontrollers/scale"]
|
||||
verbs: ["get", "update"]
|
||||
- apiGroups: ["extensions"]
|
||||
resources: ["deployments/scale", "replicasets/scale"]
|
||||
verbs: ["get", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["get", "create"]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: system:coredns-autoscaler
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: coredns-autoscaler
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: system:coredns-autoscaler
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- end }}`
|
@@ -1,439 +0,0 @@
|
||||
package templates
|
||||
|
||||
const FlannelTemplate = `
|
||||
{{- if eq .RBACConfig "rbac"}}
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: flannel
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: flannel
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: flannel
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: flannel
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes/status
|
||||
verbs:
|
||||
- patch
|
||||
{{- end}}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kube-flannel-cfg
|
||||
namespace: "kube-system"
|
||||
labels:
|
||||
tier: node
|
||||
app: flannel
|
||||
data:
|
||||
cni-conf.json: |
|
||||
{
|
||||
"name":"cbr0",
|
||||
"cniVersion":"0.3.1",
|
||||
"plugins":[
|
||||
{
|
||||
"type":"flannel",
|
||||
"delegate":{
|
||||
"forceAddress":true,
|
||||
"isDefaultGateway":true
|
||||
}
|
||||
},
|
||||
{
|
||||
"type":"portmap",
|
||||
"capabilities":{
|
||||
"portMappings":true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
net-conf.json: |
|
||||
{
|
||||
"Network": "{{.ClusterCIDR}}",
|
||||
"Backend": {
|
||||
"Type": "{{.FlannelBackend.Type}}",
|
||||
"VNI": {{.FlannelBackend.VNI}},
|
||||
"Port": {{.FlannelBackend.Port}}
|
||||
}
|
||||
}
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: kube-flannel
|
||||
namespace: "kube-system"
|
||||
labels:
|
||||
tier: node
|
||||
k8s-app: flannel
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
tier: node
|
||||
k8s-app: flannel
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: beta.kubernetes.io/os
|
||||
operator: NotIn
|
||||
values:
|
||||
- windows
|
||||
serviceAccountName: flannel
|
||||
containers:
|
||||
- name: kube-flannel
|
||||
image: {{.Image}}
|
||||
imagePullPolicy: IfNotPresent
|
||||
resources:
|
||||
limits:
|
||||
cpu: 300m
|
||||
memory: 500M
|
||||
requests:
|
||||
cpu: 150m
|
||||
memory: 64M
|
||||
{{- if .FlannelInterface}}
|
||||
command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr","--iface={{.FlannelInterface}}"]
|
||||
{{- else}}
|
||||
command: ["/opt/bin/flanneld","--ip-masq","--kube-subnet-mgr"]
|
||||
{{- end}}
|
||||
securityContext:
|
||||
privileged: true
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
volumeMounts:
|
||||
- name: run
|
||||
mountPath: /run
|
||||
- name: cni
|
||||
mountPath: /etc/cni/net.d
|
||||
- name: flannel-cfg
|
||||
mountPath: /etc/kube-flannel/
|
||||
- name: install-cni
|
||||
image: {{.CNIImage}}
|
||||
command: ["/install-cni.sh"]
|
||||
env:
|
||||
# The CNI network config to install on each node.
|
||||
- name: CNI_NETWORK_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: kube-flannel-cfg
|
||||
key: cni-conf.json
|
||||
- name: CNI_CONF_NAME
|
||||
value: "10-flannel.conflist"
|
||||
volumeMounts:
|
||||
- name: cni
|
||||
mountPath: /host/etc/cni/net.d
|
||||
- name: host-cni-bin
|
||||
mountPath: /host/opt/cni/bin/
|
||||
hostNetwork: true
|
||||
tolerations:
|
||||
{{- if ge .ClusterVersion "v1.12" }}
|
||||
- operator: Exists
|
||||
effect: NoSchedule
|
||||
- operator: Exists
|
||||
effect: NoExecute
|
||||
{{- else }}
|
||||
- key: node-role.kubernetes.io/controlplane
|
||||
operator: Exists
|
||||
effect: NoSchedule
|
||||
- key: node-role.kubernetes.io/etcd
|
||||
operator: Exists
|
||||
effect: NoExecute
|
||||
{{- end }}
|
||||
- key: node.kubernetes.io/not-ready
|
||||
effect: NoSchedule
|
||||
operator: Exists
|
||||
volumes:
|
||||
- name: run
|
||||
hostPath:
|
||||
path: /run
|
||||
- name: cni
|
||||
hostPath:
|
||||
path: /etc/cni/net.d
|
||||
- name: flannel-cfg
|
||||
configMap:
|
||||
name: kube-flannel-cfg
|
||||
- name: host-cni-bin
|
||||
hostPath:
|
||||
path: /opt/cni/bin
|
||||
updateStrategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 20%
|
||||
type: RollingUpdate
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: flannel
|
||||
namespace: kube-system
|
||||
`
|
||||
|
||||
const FlannelTemplateV115 = `
|
||||
{{- if eq .RBACConfig "rbac"}}
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: flannel
|
||||
rules:
|
||||
- apiGroups: ['extensions']
|
||||
resources: ['podsecuritypolicies']
|
||||
verbs: ['use']
|
||||
resourceNames: ['psp.flannel.unprivileged']
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes/status
|
||||
verbs:
|
||||
- patch
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: flannel
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: flannel
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: flannel
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: flannel
|
||||
namespace: kube-system
|
||||
{{end}}
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: PodSecurityPolicy
|
||||
metadata:
|
||||
name: psp.flannel.unprivileged
|
||||
annotations:
|
||||
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
|
||||
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
|
||||
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
|
||||
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
|
||||
spec:
|
||||
privileged: false
|
||||
volumes:
|
||||
- configMap
|
||||
- secret
|
||||
- emptyDir
|
||||
- hostPath
|
||||
allowedHostPaths:
|
||||
- pathPrefix: "/etc/cni/net.d"
|
||||
- pathPrefix: "/etc/kube-flannel"
|
||||
- pathPrefix: "/run/flannel"
|
||||
readOnlyRootFilesystem: false
|
||||
# Users and groups
|
||||
runAsUser:
|
||||
rule: RunAsAny
|
||||
supplementalGroups:
|
||||
rule: RunAsAny
|
||||
fsGroup:
|
||||
rule: RunAsAny
|
||||
# Privilege Escalation
|
||||
allowPrivilegeEscalation: false
|
||||
defaultAllowPrivilegeEscalation: false
|
||||
# Capabilities
|
||||
allowedCapabilities: ['NET_ADMIN']
|
||||
defaultAddCapabilities: []
|
||||
requiredDropCapabilities: []
|
||||
# Host namespaces
|
||||
hostPID: false
|
||||
hostIPC: false
|
||||
hostNetwork: true
|
||||
hostPorts:
|
||||
- min: 0
|
||||
max: 65535
|
||||
# SELinux
|
||||
seLinux:
|
||||
# SELinux is unsed in CaaSP
|
||||
rule: 'RunAsAny'
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kube-flannel-cfg
|
||||
namespace: kube-system
|
||||
labels:
|
||||
tier: node
|
||||
app: flannel
|
||||
data:
|
||||
cni-conf.json: |
|
||||
{
|
||||
"name": "cbr0",
|
||||
"plugins": [
|
||||
{
|
||||
"type": "flannel",
|
||||
"delegate": {
|
||||
"hairpinMode": true,
|
||||
"isDefaultGateway": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "portmap",
|
||||
"capabilities": {
|
||||
"portMappings": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
net-conf.json: |
|
||||
{
|
||||
"Network": "{{.ClusterCIDR}}",
|
||||
"Backend": {
|
||||
"Type": "{{.FlannelBackend.Type}}",
|
||||
"VNI": {{.FlannelBackend.VNI}},
|
||||
"Port": {{.FlannelBackend.Port}}
|
||||
}
|
||||
}
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: kube-flannel
|
||||
namespace: kube-system
|
||||
labels:
|
||||
tier: node
|
||||
app: flannel
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
tier: node
|
||||
app: flannel
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: beta.kubernetes.io/os
|
||||
operator: NotIn
|
||||
values:
|
||||
- windows
|
||||
hostNetwork: true
|
||||
tolerations:
|
||||
- operator: Exists
|
||||
effect: NoSchedule
|
||||
{{- if eq .RBACConfig "rbac"}}
|
||||
serviceAccountName: flannel
|
||||
{{end}}
|
||||
containers:
|
||||
- name: kube-flannel
|
||||
image: {{.Image}}
|
||||
command:
|
||||
- /opt/bin/flanneld
|
||||
args:
|
||||
- --ip-masq
|
||||
- --kube-subnet-mgr
|
||||
{{- if .FlannelInterface}}
|
||||
- --iface={{.FlannelInterface}}
|
||||
{{end}}
|
||||
resources:
|
||||
requests:
|
||||
cpu: "100m"
|
||||
memory: "50Mi"
|
||||
limits:
|
||||
cpu: "100m"
|
||||
memory: "50Mi"
|
||||
securityContext:
|
||||
privileged: false
|
||||
capabilities:
|
||||
add: ["NET_ADMIN"]
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
volumeMounts:
|
||||
- name: run
|
||||
mountPath: /run
|
||||
- name: cni
|
||||
mountPath: /etc/cni/net.d
|
||||
- name: flannel-cfg
|
||||
mountPath: /etc/kube-flannel/
|
||||
- name: install-cni
|
||||
image: {{.CNIImage}}
|
||||
command: ["/install-cni.sh"]
|
||||
env:
|
||||
# The CNI network config to install on each node.
|
||||
- name: CNI_NETWORK_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: kube-flannel-cfg
|
||||
key: cni-conf.json
|
||||
- name: CNI_CONF_NAME
|
||||
value: "10-flannel.conflist"
|
||||
volumeMounts:
|
||||
- name: cni
|
||||
mountPath: /host/etc/cni/net.d
|
||||
- name: host-cni-bin
|
||||
mountPath: /host/opt/cni/bin/
|
||||
volumes:
|
||||
- name: run
|
||||
hostPath:
|
||||
path: /run
|
||||
- name: cni
|
||||
hostPath:
|
||||
path: /etc/cni/net.d
|
||||
- name: flannel-cfg
|
||||
configMap:
|
||||
name: kube-flannel-cfg
|
||||
- name: host-cni-bin
|
||||
hostPath:
|
||||
path: /opt/cni/bin
|
||||
`
|
@@ -1,314 +0,0 @@
|
||||
package templates
|
||||
|
||||
const KubeDNSTemplate = `
|
||||
---
|
||||
apiVersion: apps/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: kube-dns-autoscaler
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-dns-autoscaler
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-dns-autoscaler
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: beta.kubernetes.io/os
|
||||
operator: NotIn
|
||||
values:
|
||||
- windows
|
||||
serviceAccountName: kube-dns-autoscaler
|
||||
containers:
|
||||
- name: autoscaler
|
||||
image: {{.KubeDNSAutoScalerImage}}
|
||||
resources:
|
||||
requests:
|
||||
cpu: "20m"
|
||||
memory: "10Mi"
|
||||
command:
|
||||
- /cluster-proportional-autoscaler
|
||||
- --namespace=kube-system
|
||||
- --configmap=kube-dns-autoscaler
|
||||
- --target=Deployment/kube-dns
|
||||
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
|
||||
# If using small nodes, "nodesPerReplica" should dominate.
|
||||
- --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}}
|
||||
- --logtostderr=true
|
||||
- --v=2
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: kube-dns-autoscaler
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
{{- if eq .RBACConfig "rbac"}}
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: system:kube-dns-autoscaler
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["nodes"]
|
||||
verbs: ["list", "watch"]
|
||||
- apiGroups: [""]
|
||||
resources: ["replicationcontrollers/scale"]
|
||||
verbs: ["get", "update"]
|
||||
- apiGroups: ["extensions"]
|
||||
resources: ["deployments/scale", "replicasets/scale"]
|
||||
verbs: ["get", "update"]
|
||||
- apiGroups: [""]
|
||||
resources: ["configmaps"]
|
||||
verbs: ["get", "create"]
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: system:kube-dns-autoscaler
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: kube-dns-autoscaler
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: system:kube-dns-autoscaler
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
{{- end }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
spec:
|
||||
# replicas: not specified here:
|
||||
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
|
||||
# 2. Default is 1.
|
||||
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 10%
|
||||
maxUnavailable: 0
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: kube-dns
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
nodeSelector:
|
||||
{{ range $k, $v := .NodeSelector }}
|
||||
{{ $k }}: "{{ $v }}"
|
||||
{{ end }}
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
preferredDuringSchedulingIgnoredDuringExecution:
|
||||
- weight: 100
|
||||
podAffinityTerm:
|
||||
labelSelector:
|
||||
matchExpressions:
|
||||
- key: k8s-app
|
||||
operator: In
|
||||
values: ["kube-dns"]
|
||||
topologyKey: kubernetes.io/hostname
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: beta.kubernetes.io/os
|
||||
operator: NotIn
|
||||
values:
|
||||
- windows
|
||||
tolerations:
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
volumes:
|
||||
- name: kube-dns-config
|
||||
configMap:
|
||||
name: kube-dns
|
||||
optional: true
|
||||
containers:
|
||||
- name: kubedns
|
||||
image: {{.KubeDNSImage}}
|
||||
resources:
|
||||
# TODO: Set memory limits when we've profiled the container for large
|
||||
# clusters, then set request = limit to keep this container in
|
||||
# guaranteed class. Currently, this container falls into the
|
||||
# "burstable" category so the kubelet doesn't backoff from restarting it.
|
||||
limits:
|
||||
memory: 170Mi
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 70Mi
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthcheck/kubedns
|
||||
port: 10054
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
path: /readiness
|
||||
port: 8081
|
||||
scheme: HTTP
|
||||
# we poll on pod startup for the Kubernetes master service and
|
||||
# only setup the /readiness HTTP server once that's available.
|
||||
initialDelaySeconds: 3
|
||||
timeoutSeconds: 5
|
||||
args:
|
||||
- --domain={{.ClusterDomain}}.
|
||||
- --dns-port=10053
|
||||
- --config-dir=/kube-dns-config
|
||||
- --v=2
|
||||
env:
|
||||
- name: PROMETHEUS_PORT
|
||||
value: "10055"
|
||||
ports:
|
||||
- containerPort: 10053
|
||||
name: dns-local
|
||||
protocol: UDP
|
||||
- containerPort: 10053
|
||||
name: dns-tcp-local
|
||||
protocol: TCP
|
||||
- containerPort: 10055
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
volumeMounts:
|
||||
- name: kube-dns-config
|
||||
mountPath: /kube-dns-config
|
||||
- name: dnsmasq
|
||||
image: {{.DNSMasqImage}}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthcheck/dnsmasq
|
||||
port: 10054
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
args:
|
||||
- -v=2
|
||||
- -logtostderr
|
||||
- -configDir=/etc/k8s/dns/dnsmasq-nanny
|
||||
- -restartDnsmasq=true
|
||||
- --
|
||||
- -k
|
||||
- --cache-size=1000
|
||||
- --log-facility=-
|
||||
- --server=/{{.ClusterDomain}}/127.0.0.1#10053
|
||||
{{- if .ReverseCIDRs }}
|
||||
{{- range .ReverseCIDRs }}
|
||||
- --server=/{{.}}/127.0.0.1#10053
|
||||
{{- end }}
|
||||
{{- else }}
|
||||
- --server=/in-addr.arpa/127.0.0.1#10053
|
||||
- --server=/ip6.arpa/127.0.0.1#10053
|
||||
{{- end }}
|
||||
ports:
|
||||
- containerPort: 53
|
||||
name: dns
|
||||
protocol: UDP
|
||||
- containerPort: 53
|
||||
name: dns-tcp
|
||||
protocol: TCP
|
||||
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
|
||||
resources:
|
||||
requests:
|
||||
cpu: 150m
|
||||
memory: 20Mi
|
||||
volumeMounts:
|
||||
- name: kube-dns-config
|
||||
mountPath: /etc/k8s/dns/dnsmasq-nanny
|
||||
- name: sidecar
|
||||
image: {{.KubeDNSSidecarImage}}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /metrics
|
||||
port: 10054
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
timeoutSeconds: 5
|
||||
successThreshold: 1
|
||||
failureThreshold: 5
|
||||
args:
|
||||
- --v=2
|
||||
- --logtostderr
|
||||
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{.ClusterDomain}},5,A
|
||||
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{.ClusterDomain}},5,A
|
||||
ports:
|
||||
- containerPort: 10054
|
||||
name: metrics
|
||||
protocol: TCP
|
||||
resources:
|
||||
requests:
|
||||
memory: 20Mi
|
||||
cpu: 10m
|
||||
dnsPolicy: Default # Don't use cluster DNS.
|
||||
serviceAccountName: kube-dns
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: kube-dns
|
||||
kubernetes.io/cluster-service: "true"
|
||||
addonmanager.kubernetes.io/mode: Reconcile
|
||||
kubernetes.io/name: "KubeDNS"
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: kube-dns
|
||||
clusterIP: {{.ClusterDNSServer}}
|
||||
ports:
|
||||
- name: dns
|
||||
port: 53
|
||||
protocol: UDP
|
||||
- name: dns-tcp
|
||||
port: 53
|
||||
protocol: TCP
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: kube-dns
|
||||
namespace: kube-system
|
||||
data:
|
||||
{{- if .UpstreamNameservers }}
|
||||
upstreamNameservers: |
|
||||
[{{range $i, $v := .UpstreamNameservers}}{{if $i}}, {{end}}{{printf "%q" .}}{{end}}]
|
||||
{{- end }}
|
||||
{{- if .StubDomains }}
|
||||
stubDomains: |
|
||||
{{ GetKubednsStubDomains .StubDomains }}
|
||||
{{- end }}`
|
@@ -1,150 +0,0 @@
|
||||
package templates
|
||||
|
||||
const MetricsServerTemplate = `
|
||||
{{- if eq .RBACConfig "rbac"}}
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: metrics-server:system:auth-delegator
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:auth-delegator
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: metrics-server-auth-reader
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: extension-apiserver-authentication-reader
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: system:metrics-server
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- nodes
|
||||
- nodes/stats
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: system:metrics-server
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:metrics-server
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
{{- end }}
|
||||
---
|
||||
apiVersion: apiregistration.k8s.io/v1beta1
|
||||
kind: APIService
|
||||
metadata:
|
||||
name: v1beta1.metrics.k8s.io
|
||||
spec:
|
||||
service:
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
group: metrics.k8s.io
|
||||
version: v1beta1
|
||||
insecureSkipTLSVerify: true
|
||||
groupPriorityMinimum: 100
|
||||
versionPriority: 100
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: metrics-server
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: metrics-server
|
||||
template:
|
||||
metadata:
|
||||
name: metrics-server
|
||||
labels:
|
||||
k8s-app: metrics-server
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: beta.kubernetes.io/os
|
||||
operator: NotIn
|
||||
values:
|
||||
- windows
|
||||
serviceAccountName: metrics-server
|
||||
containers:
|
||||
- name: metrics-server
|
||||
image: {{ .MetricsServerImage }}
|
||||
imagePullPolicy: Always
|
||||
command:
|
||||
- /metrics-server
|
||||
{{- if eq .Version "v0.3" }}
|
||||
- --kubelet-insecure-tls
|
||||
- --kubelet-preferred-address-types=InternalIP
|
||||
- --logtostderr
|
||||
{{- else }}
|
||||
- --source=kubernetes.summary_api:https://kubernetes.default.svc?kubeletHttps=true&kubeletPort=10250&useServiceAccount=true&insecure=true
|
||||
{{- end }}
|
||||
{{ range $k,$v := .Options }}
|
||||
- --{{ $k }}={{ $v }}
|
||||
{{ end }}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: metrics-server
|
||||
namespace: kube-system
|
||||
labels:
|
||||
kubernetes.io/name: "Metrics-server"
|
||||
spec:
|
||||
selector:
|
||||
k8s-app: metrics-server
|
||||
ports:
|
||||
- port: 443
|
||||
protocol: TCP
|
||||
targetPort: 443
|
||||
`
|
@@ -1,324 +0,0 @@
|
||||
package templates
|
||||
|
||||
const NginxIngressTemplate = `
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: ingress-nginx
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: nginx-configuration
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app: ingress-nginx
|
||||
data:
|
||||
{{ range $k,$v := .Options }}
|
||||
{{ $k }}: "{{ $v }}"
|
||||
{{ end }}
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: tcp-services
|
||||
namespace: ingress-nginx
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: udp-services
|
||||
namespace: ingress-nginx
|
||||
{{if eq .RBACConfig "rbac"}}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: nginx-ingress-serviceaccount
|
||||
namespace: ingress-nginx
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: nginx-ingress-clusterrole
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
- endpoints
|
||||
- nodes
|
||||
- pods
|
||||
- secrets
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
resources:
|
||||
- ingresses
|
||||
- daemonsets
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- apiGroups:
|
||||
- "extensions"
|
||||
resources:
|
||||
- ingresses/status
|
||||
verbs:
|
||||
- update
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: nginx-ingress-role
|
||||
namespace: ingress-nginx
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
- pods
|
||||
- secrets
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
resourceNames:
|
||||
# Defaults to "<election-id>-<ingress-class>"
|
||||
# Here: "<ingress-controller-leader>-<nginx>"
|
||||
# This has to be adapted if you change either parameter
|
||||
# when launching the nginx-ingress-controller.
|
||||
- "ingress-controller-leader-nginx"
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- endpoints
|
||||
verbs:
|
||||
- get
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: nginx-ingress-role-nisa-binding
|
||||
namespace: ingress-nginx
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: nginx-ingress-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nginx-ingress-serviceaccount
|
||||
namespace: ingress-nginx
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: nginx-ingress-clusterrole-nisa-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: nginx-ingress-clusterrole
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: nginx-ingress-serviceaccount
|
||||
namespace: ingress-nginx
|
||||
{{ end }}
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: nginx-ingress-controller
|
||||
namespace: ingress-nginx
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: ingress-nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: ingress-nginx
|
||||
annotations:
|
||||
prometheus.io/port: '10254'
|
||||
prometheus.io/scrape: 'true'
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: beta.kubernetes.io/os
|
||||
operator: NotIn
|
||||
values:
|
||||
- windows
|
||||
hostNetwork: true
|
||||
nodeSelector:
|
||||
{{ range $k, $v := .NodeSelector }}
|
||||
{{ $k }}: "{{ $v }}"
|
||||
{{ end }}
|
||||
{{if eq .RBACConfig "rbac"}}
|
||||
serviceAccountName: nginx-ingress-serviceaccount
|
||||
{{ end }}
|
||||
{{- if ne .AlpineImage ""}}
|
||||
initContainers:
|
||||
- command:
|
||||
- sh
|
||||
- -c
|
||||
- sysctl -w net.core.somaxconn=32768; sysctl -w net.ipv4.ip_local_port_range="1024 65535"
|
||||
image: {{.AlpineImage}}
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: sysctl
|
||||
securityContext:
|
||||
privileged: true
|
||||
{{- end }}
|
||||
containers:
|
||||
- name: nginx-ingress-controller
|
||||
image: {{.IngressImage}}
|
||||
args:
|
||||
- /nginx-ingress-controller
|
||||
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
|
||||
- --configmap=$(POD_NAMESPACE)/nginx-configuration
|
||||
- --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
|
||||
- --udp-services-configmap=$(POD_NAMESPACE)/udp-services
|
||||
- --annotations-prefix=nginx.ingress.kubernetes.io
|
||||
{{ range $k, $v := .ExtraArgs }}
|
||||
- --{{ $k }}{{if ne $v "" }}={{ $v }}{{end}}
|
||||
{{ end }}
|
||||
{{- if eq .AlpineImage ""}}
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- ALL
|
||||
add:
|
||||
- NET_BIND_SERVICE
|
||||
runAsUser: 33
|
||||
{{- end }}
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
ports:
|
||||
- name: http
|
||||
containerPort: 80
|
||||
- name: https
|
||||
containerPort: 443
|
||||
livenessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10254
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 10
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
readinessProbe:
|
||||
failureThreshold: 3
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 10254
|
||||
scheme: HTTP
|
||||
periodSeconds: 10
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 1
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: default-http-backend
|
||||
labels:
|
||||
app: default-http-backend
|
||||
namespace: ingress-nginx
|
||||
spec:
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: default-http-backend
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: beta.kubernetes.io/os
|
||||
operator: NotIn
|
||||
values:
|
||||
- windows
|
||||
terminationGracePeriodSeconds: 60
|
||||
containers:
|
||||
- name: default-http-backend
|
||||
# Any image is permissible as long as:
|
||||
# 1. It serves a 404 page at /
|
||||
# 2. It serves 200 on a /healthz endpoint
|
||||
image: {{.IngressBackend}}
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
path: /healthz
|
||||
port: 8080
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 30
|
||||
timeoutSeconds: 5
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
resources:
|
||||
limits:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: default-http-backend
|
||||
namespace: ingress-nginx
|
||||
labels:
|
||||
app: default-http-backend
|
||||
spec:
|
||||
ports:
|
||||
- port: 80
|
||||
targetPort: 8080
|
||||
selector:
|
||||
app: default-http-backend
|
||||
`
|
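Both the nginx-configuration ConfigMap and the controller args above are filled from plain string maps; in the ExtraArgs loop, the {{if ne $v ""}} guard lets a key with an empty value render as a bare flag. A small hedged sketch of just that loop; the flag names below are examples, not defaults shipped by RKE.

package main

import (
    "os"
    "text/template"
)

// Stand-in for the ExtraArgs range in the DaemonSet above: empty values
// become bare flags, non-empty values become --key=value.
const extraArgsSketch = `{{ range $k, $v := .ExtraArgs }}
- --{{ $k }}{{ if ne $v "" }}={{ $v }}{{ end }}
{{- end }}
`

func main() {
    // Illustrative values only.
    data := map[string]interface{}{
        "ExtraArgs": map[string]string{
            "enable-ssl-passthrough": "",     // renders as: - --enable-ssl-passthrough
            "http-port":              "8080", // renders as: - --http-port=8080
        },
    }
    t := template.Must(template.New("extra-args").Parse(extraArgsSketch))
    if err := t.Execute(os.Stdout, data); err != nil {
        panic(err)
    }
}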
@@ -5,28 +5,11 @@ import (
|
||||
"encoding/json"
|
||||
"text/template"
|
||||
|
||||
"github.com/rancher/rke/metadata"
|
||||
|
||||
"github.com/rancher/rke/util"
|
||||
)
|
||||
|
||||
var VersionedTemplate = map[string]map[string]string{
|
||||
"calico": map[string]string{
|
||||
"v1.15": CalicoTemplateV115,
|
||||
"v1.14": CalicoTemplateV113,
|
||||
"v1.13": CalicoTemplateV113,
|
||||
"default": CalicoTemplateV112,
|
||||
},
|
||||
"canal": map[string]string{
|
||||
"v1.15": CanalTemplateV115,
|
||||
"v1.14": CanalTemplateV113,
|
||||
"v1.13": CanalTemplateV113,
|
||||
"default": CanalTemplateV112,
|
||||
},
|
||||
"flannel": map[string]string{
|
||||
"v1.15": FlannelTemplateV115,
|
||||
"default": FlannelTemplate,
|
||||
},
|
||||
}
|
||||
|
||||
func CompileTemplateFromMap(tmplt string, configMap interface{}) (string, error) {
|
||||
out := new(bytes.Buffer)
|
||||
t := template.Must(template.New("compiled_template").Funcs(template.FuncMap{"GetKubednsStubDomains": GetKubednsStubDomains}).Parse(tmplt))
|
||||
@@ -38,7 +21,7 @@ func CompileTemplateFromMap(tmplt string, configMap interface{}) (string, error)
|
||||
|
||||
func GetVersionedTemplates(templateName string, k8sVersion string) string {
|
||||
|
||||
versionedTemplate := VersionedTemplate[templateName]
|
||||
versionedTemplate := metadata.K8sVersionToTemplates[templateName]
|
||||
if t, ok := versionedTemplate[util.GetTagMajorVersion(k8sVersion)]; ok {
|
||||
return t
|
||||
}
|
||||
@@ -49,3 +32,10 @@ func GetKubednsStubDomains(stubDomains map[string][]string) string {
|
||||
json, _ := json.Marshal(stubDomains)
|
||||
return string(json)
|
||||
}
|
||||
|
||||
func GetDefaultVersionedTemplate(templateName string, data map[string]interface{}) string {
|
||||
if template, ok := data[templateName]; ok {
|
||||
return convert.ToString(template)
|
||||
}
|
||||
return metadata.K8sVersionToTemplates[templateName]["default"]
|
||||
}
|
||||
|
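The two hunks above are the heart of the change: the package-local VersionedTemplate map goes away, and lookups now go through the tables exposed by the metadata package, keyed by template name and major Kubernetes version with a "default" entry as the fallback. A minimal sketch of that lookup over toy data; versionedTemplates, majorVersion and pickTemplate are illustrative names, not the real metadata tables or the util helpers, and the sketch mirrors only the fallback in GetVersionedTemplates.

package main

import (
    "fmt"
    "strings"
)

// Toy stand-in for metadata.K8sVersionToTemplates: template name ->
// (major k8s version -> template body). Real data ships with
// kontainer-driver-metadata; these strings are placeholders.
var versionedTemplates = map[string]map[string]string{
    "flannel": {
        "v1.15":   "flannel template for 1.15",
        "default": "legacy flannel template",
    },
}

// majorVersion trims "v1.15.0-rancher1-1" down to "v1.15", roughly what
// util.GetTagMajorVersion does in the hunk above.
func majorVersion(k8sVersion string) string {
    parts := strings.SplitN(k8sVersion, ".", 3)
    if len(parts) < 2 {
        return k8sVersion
    }
    return parts[0] + "." + parts[1]
}

// pickTemplate prefers an exact major-version entry and otherwise falls
// back to the "default" entry, like GetVersionedTemplates above.
func pickTemplate(name, k8sVersion string) string {
    byVersion := versionedTemplates[name]
    if t, ok := byVersion[majorVersion(k8sVersion)]; ok {
        return t
    }
    return byVersion["default"]
}

func main() {
    fmt.Println(pickTemplate("flannel", "v1.15.0-rancher1-1")) // 1.15-specific template
    fmt.Println(pickTemplate("flannel", "v1.13.5-rancher1-1")) // falls back to default
}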
@@ -1,240 +0,0 @@
|
||||
package templates
|
||||
|
||||
const WeaveTemplate = `
|
||||
---
|
||||
# This ConfigMap can be used to configure a self-hosted Weave Net installation.
|
||||
apiVersion: v1
|
||||
kind: List
|
||||
items:
|
||||
- apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: weave-net
|
||||
namespace: kube-system
|
||||
- apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: weave-net
|
||||
labels:
|
||||
name: weave-net
|
||||
namespace: kube-system
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/tolerations: >-
|
||||
[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]
|
||||
labels:
|
||||
name: weave-net
|
||||
spec:
|
||||
affinity:
|
||||
nodeAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
nodeSelectorTerms:
|
||||
- matchExpressions:
|
||||
- key: beta.kubernetes.io/os
|
||||
operator: NotIn
|
||||
values:
|
||||
- windows
|
||||
containers:
|
||||
- name: weave
|
||||
command:
|
||||
- /home/weave/launch.sh
|
||||
env:
|
||||
- name: HOSTNAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: spec.nodeName
|
||||
- name: IPALLOC_RANGE
|
||||
value: "{{.ClusterCIDR}}"
|
||||
{{- if .WeavePassword}}
|
||||
- name: WEAVE_PASSWORD
|
||||
value: "{{.WeavePassword}}"
|
||||
{{- end}}
|
||||
image: {{.Image}}
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
host: 127.0.0.1
|
||||
path: /status
|
||||
port: 6784
|
||||
initialDelaySeconds: 30
|
||||
resources:
|
||||
requests:
|
||||
cpu: 10m
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: weavedb
|
||||
mountPath: /weavedb
|
||||
- name: cni-bin
|
||||
mountPath: /host/opt
|
||||
- name: cni-bin2
|
||||
mountPath: /host/home
|
||||
- name: cni-conf
|
||||
mountPath: /host/etc
|
||||
- name: dbus
|
||||
mountPath: /host/var/lib/dbus
|
||||
- name: lib-modules
|
||||
mountPath: /lib/modules
|
||||
- name: xtables-lock
|
||||
mountPath: /run/xtables.lock
|
||||
- name: weave-npc
|
||||
env:
|
||||
- name: HOSTNAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: spec.nodeName
|
||||
image: {{.CNIImage}}
|
||||
resources:
|
||||
requests:
|
||||
cpu: 10m
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: xtables-lock
|
||||
mountPath: /run/xtables.lock
|
||||
- name: weave-plugins
|
||||
command:
|
||||
- /opt/rke-tools/weave-plugins-cni.sh
|
||||
image: {{.WeaveLoopbackImage}}
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: cni-bin
|
||||
mountPath: /opt
|
||||
hostNetwork: true
|
||||
hostPID: true
|
||||
restartPolicy: Always
|
||||
securityContext:
|
||||
seLinuxOptions: {}
|
||||
serviceAccountName: weave-net
|
||||
tolerations:
|
||||
- operator: Exists
|
||||
effect: NoSchedule
|
||||
- operator: Exists
|
||||
effect: NoExecute
|
||||
volumes:
|
||||
- name: weavedb
|
||||
hostPath:
|
||||
path: /var/lib/weave
|
||||
- name: cni-bin
|
||||
hostPath:
|
||||
path: /opt
|
||||
- name: cni-bin2
|
||||
hostPath:
|
||||
path: /home
|
||||
- name: cni-conf
|
||||
hostPath:
|
||||
path: /etc
|
||||
- name: dbus
|
||||
hostPath:
|
||||
path: /var/lib/dbus
|
||||
- name: lib-modules
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
- name: xtables-lock
|
||||
hostPath:
|
||||
path: /run/xtables.lock
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
{{- if eq .RBACConfig "rbac"}}
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: weave-net
|
||||
labels:
|
||||
name: weave-net
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: weave-net
|
||||
labels:
|
||||
name: weave-net
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- pods
|
||||
- namespaces
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- networkpolicies
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- nodes/status
|
||||
verbs:
|
||||
- patch
|
||||
- update
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: weave-net
|
||||
labels:
|
||||
name: weave-net
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: weave-net
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: weave-net
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: weave-net
|
||||
labels:
|
||||
name: weave-net
|
||||
namespace: kube-system
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ''
|
||||
resourceNames:
|
||||
- weave-net
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- update
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- create
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: weave-net
|
||||
labels:
|
||||
name: weave-net
|
||||
namespace: kube-system
|
||||
roleRef:
|
||||
kind: Role
|
||||
name: weave-net
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: weave-net
|
||||
namespace: kube-system
|
||||
{{- end}}
|
||||
`
|
2
vendor/github.com/coreos/go-semver/semver/semver.go
generated
vendored
@@ -19,6 +19,7 @@ import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"github.com/sirupsen/logrus"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
@@ -74,6 +75,7 @@ func (v *Version) Set(version string) error {
|
||||
dotParts := strings.SplitN(version, ".", 3)
|
||||
|
||||
if len(dotParts) != 3 {
|
||||
logrus.Infof("version version version %s", version)
|
||||
return fmt.Errorf("%s is not in dotted-tri format", version)
|
||||
}
|
||||
|
||||
|
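The vendored hunk above drops a temporary logrus trace into Version.Set right before the dotted-tri check. For context, a tiny sketch of how that check behaves, assuming only the standard coreos/go-semver API and nothing RKE-specific.

package main

import (
    "fmt"

    "github.com/coreos/go-semver/semver"
)

func main() {
    // "1.15.0-rancher1-1" is dotted-tri, so it parses; the "-rancher1-1"
    // suffix lands in the pre-release field.
    v, err := semver.NewVersion("1.15.0-rancher1-1")
    fmt.Println(v, err) // 1.15.0-rancher1-1 <nil>

    // A two-part string trips the len(dotParts) != 3 branch shown above.
    _, err = semver.NewVersion("1.15")
    fmt.Println(err) // 1.15 is not in dotted-tri format
}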
58
vendor/github.com/rancher/kontainer-driver-metadata/rke/k8s_rke_system_images.go
generated
vendored
@@ -1001,35 +1001,35 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages {
|
||||
CoreDNS: m("coredns/coredns:1.3.1"),
|
||||
CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"),
|
||||
},
|
||||
//// Experimental in Rancher v2.2.5
|
||||
//"v1.15.0-rancher1-1": {
|
||||
// Etcd: m("quay.io/coreos/etcd:v3.3.10-rancher1"),
|
||||
// Kubernetes: m("rancher/hyperkube:v1.15.0-rancher1"),
|
||||
// Alpine: m("rancher/rke-tools:v0.1.32"),
|
||||
// NginxProxy: m("rancher/rke-tools:v0.1.32"),
|
||||
// CertDownloader: m("rancher/rke-tools:v0.1.32"),
|
||||
// KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.32"),
|
||||
// KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"),
|
||||
// DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"),
|
||||
// KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"),
|
||||
// KubeDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"),
|
||||
// Flannel: m("quay.io/coreos/flannel:v0.11.0-rancher1"),
|
||||
// FlannelCNI: m("rancher/flannel-cni:v0.3.0-rancher1"),
|
||||
// CalicoNode: m("quay.io/calico/node:v3.7.3"),
|
||||
// CalicoCNI: m("quay.io/calico/cni:v3.7.3"),
|
||||
// CalicoCtl: m("quay.io/calico/ctl:v2.0.0"),
|
||||
// CanalNode: m("quay.io/calico/node:v3.7.3"),
|
||||
// CanalCNI: m("quay.io/calico/cni:v3.7.3"),
|
||||
// CanalFlannel: m("quay.io/coreos/flannel:v0.11.0"),
|
||||
// WeaveNode: m("weaveworks/weave-kube:2.5.2"),
|
||||
// WeaveCNI: m("weaveworks/weave-npc:2.5.2"),
|
||||
// PodInfraContainer: m("gcr.io/google_containers/pause:3.1"),
|
||||
// Ingress: m("rancher/nginx-ingress-controller:0.21.0-rancher3"),
|
||||
// IngressBackend: m("k8s.gcr.io/defaultbackend:1.5-rancher1"),
|
||||
// MetricsServer: m("gcr.io/google_containers/metrics-server:v0.3.3"),
|
||||
// CoreDNS: m("coredns/coredns:1.3.1"),
|
||||
// CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"),
|
||||
//},
|
||||
// Experimental in Rancher v2.2.5
|
||||
"v1.15.0-rancher1-1": {
|
||||
Etcd: m("quay.io/coreos/etcd:v3.3.10-rancher1"),
|
||||
Kubernetes: m("rancher/hyperkube:v1.15.0-rancher1"),
|
||||
Alpine: m("rancher/rke-tools:v0.1.34"),
|
||||
NginxProxy: m("rancher/rke-tools:v0.1.34"),
|
||||
CertDownloader: m("rancher/rke-tools:v0.1.34"),
|
||||
KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.34"),
|
||||
KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"),
|
||||
DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"),
|
||||
KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"),
|
||||
KubeDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"),
|
||||
Flannel: m("quay.io/coreos/flannel:v0.11.0-rancher1"),
|
||||
FlannelCNI: m("rancher/flannel-cni:v0.3.0-rancher1"),
|
||||
CalicoNode: m("quay.io/calico/node:v3.7.3"),
|
||||
CalicoCNI: m("quay.io/calico/cni:v3.7.3"),
|
||||
CalicoCtl: m("quay.io/calico/ctl:v2.0.0"),
|
||||
CanalNode: m("quay.io/calico/node:v3.7.3"),
|
||||
CanalCNI: m("quay.io/calico/cni:v3.7.3"),
|
||||
CanalFlannel: m("quay.io/coreos/flannel:v0.11.0"),
|
||||
WeaveNode: m("weaveworks/weave-kube:2.5.2"),
|
||||
WeaveCNI: m("weaveworks/weave-npc:2.5.2"),
|
||||
PodInfraContainer: m("gcr.io/google_containers/pause:3.1"),
|
||||
Ingress: m("rancher/nginx-ingress-controller:0.21.0-rancher3"),
|
||||
IngressBackend: m("k8s.gcr.io/defaultbackend:1.5-rancher1"),
|
||||
MetricsServer: m("gcr.io/google_containers/metrics-server:v0.3.3"),
|
||||
CoreDNS: m("coredns/coredns:1.3.1"),
|
||||
CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"),
|
||||
},
|
||||
// k8s version from 2.1.x release with old rke-tools to allow upgrade from 2.1.x clusters
|
||||
// without all clusters being restarted
|
||||
"v1.12.5-rancher1-1": {
|
||||
|
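The last hunk above promotes the v1.15.0-rancher1-1 entry from a commented-out experiment to a real image set, bumping rancher/rke-tools to v0.1.34 while leaving the other images as they were. A hedged sketch of how a version-keyed table like this can be consumed; systemImages and imagesFor are toy stand-ins, not the real v3.RKESystemImages type or RKE's lookup code.

package main

import "fmt"

// Toy two-field stand-in for the version -> system images table above;
// the tags are copied from the hunk purely for illustration.
type systemImages struct {
    Kubernetes string
    Alpine     string
}

var k8sVersionToImages = map[string]systemImages{
    "v1.15.0-rancher1-1": {
        Kubernetes: "rancher/hyperkube:v1.15.0-rancher1",
        Alpine:     "rancher/rke-tools:v0.1.34",
    },
}

// imagesFor resolves an exact version key; unknown versions are reported
// as an error rather than silently falling back.
func imagesFor(version string) (systemImages, error) {
    imgs, ok := k8sVersionToImages[version]
    if !ok {
        return systemImages{}, fmt.Errorf("no system images defined for %q", version)
    }
    return imgs, nil
}

func main() {
    imgs, err := imagesFor("v1.15.0-rancher1-1")
    fmt.Println(imgs.Alpine, err) // rancher/rke-tools:v0.1.34 <nil>
}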