Mirror of https://github.com/rancher/rke.git (synced 2025-08-19 07:17:30 +00:00)

Update vendor for kontainer-driver-metadata

orangedeng 2019-08-23 09:43:54 +08:00 committed by Alena Prokharchyk
parent 0ef3c0849a
commit e9ecef797d
14 changed files with 1803 additions and 27 deletions

go.mod

@@ -32,9 +32,9 @@ require (
	github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
	github.com/opencontainers/image-spec v0.0.0-20170929214853-7c889fafd04a // indirect
	github.com/pkg/errors v0.8.1
-	github.com/rancher/kontainer-driver-metadata v0.0.0-20190822033834-53da6c8441ed
+	github.com/rancher/kontainer-driver-metadata v0.0.0-20190823014104-22ae7cf76a62
	github.com/rancher/norman v0.0.0-20190821234528-20a936b685b0
-	github.com/rancher/types v0.0.0-20190822030441-376c6f64c718
+	github.com/rancher/types v0.0.0-20190822170951-b99efa820bc3
	github.com/sirupsen/logrus v1.4.2
	github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 // indirect
	github.com/soheilhy/cmux v0.1.4 // indirect

go.sum

@@ -172,10 +172,16 @@ github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURm
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/rancher/kontainer-driver-metadata v0.0.0-20190822033834-53da6c8441ed h1:7+QZ69GEo50fDd+/AKEFL/dPZSCLOVtLVOdZvi4DNEY=
github.com/rancher/kontainer-driver-metadata v0.0.0-20190822033834-53da6c8441ed/go.mod h1:KCr66cmkx3e0eesKeX8XfD7JwWVNSrkWVqSDqSaycnA=
+github.com/rancher/kontainer-driver-metadata v0.0.0-20190822225253-5d3809ddfedd h1:LDvGG2oA5fWtOmfvD+wwtFBjc3tGP0ZNP/+RObX3Oio=
+github.com/rancher/kontainer-driver-metadata v0.0.0-20190822225253-5d3809ddfedd/go.mod h1:dHvhyuoiwrjqQCFD586g0cZ9NJJXEKeAtQi8RX96U8E=
+github.com/rancher/kontainer-driver-metadata v0.0.0-20190823014104-22ae7cf76a62 h1:CLrRJx0hzPOLBPq40sEX1DF3zfwak72XvUwMsnCul6E=
+github.com/rancher/kontainer-driver-metadata v0.0.0-20190823014104-22ae7cf76a62/go.mod h1:dHvhyuoiwrjqQCFD586g0cZ9NJJXEKeAtQi8RX96U8E=
github.com/rancher/norman v0.0.0-20190821234528-20a936b685b0 h1:bNG4b0CTTBE8yEamIz8RYcfz+7kfK9N8YTvyiykRCS8=
github.com/rancher/norman v0.0.0-20190821234528-20a936b685b0/go.mod h1:KwP6RD4rVMdK8XK0wqZaptrhTn/TO4kXU3doh4iatQU=
github.com/rancher/types v0.0.0-20190822030441-376c6f64c718 h1:eEdtDFNI0HvoYbCNA2KzqzJSxGPAE+xB91syrEtHU90=
github.com/rancher/types v0.0.0-20190822030441-376c6f64c718/go.mod h1:9L7VLTwNVt7vJYwP/7xrQ4tWghDQ+zl9//RTqRjGxes=
+github.com/rancher/types v0.0.0-20190822170951-b99efa820bc3 h1:4mz/J0iEtW/VDtjN3zI9B4g49MKeoLHkHZKJGqRN7xg=
+github.com/rancher/types v0.0.0-20190822170951-b99efa820bc3/go.mod h1:9L7VLTwNVt7vJYwP/7xrQ4tWghDQ+zl9//RTqRjGxes=
github.com/rancher/wrangler v0.1.5 h1:HiXOeP6Kci2DK+e04D1g6INT77xAYpAr54zmTTe0Spk=
github.com/rancher/wrangler v0.1.5/go.mod h1:EYP7cqpg42YqElaCm+U9ieSrGQKAXxUH5xsr+XGpWyE=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=


@@ -1,6 +1,7 @@
package rke

import (
+	"bytes"
	"encoding/json"
	"fmt"
	"github.com/blang/semver"
@@ -43,6 +44,7 @@ type Data struct {
var (
	DriverData   Data
+	TemplateData map[string]map[string]string
	m            = image.Mirror
)
@@ -86,33 +88,38 @@ func validateDefaultPresent(versions map[string]string) {
}

func validateTemplateMatch() {
+	TemplateData = map[string]map[string]string{}
	for k8sVersion := range DriverData.K8sVersionRKESystemImages {
-		toMatch, err := semver.Make(k8sVersion[1:])
+		toMatch, err := semver.Make(strings.Split(k8sVersion[1:], "-rancher")[0])
		if err != nil {
			panic(fmt.Sprintf("k8sVersion not sem-ver %s %v", k8sVersion, err))
		}
+		TemplateData[k8sVersion] = map[string]string{}
		for plugin, pluginData := range DriverData.K8sVersionedTemplates {
			if plugin == templates.TemplateKeys {
				continue
			}
+			matchedKey := ""
			matchedRange := ""
-			for toTestRange := range pluginData {
+			for toTestRange, key := range pluginData {
				testRange, err := semver.ParseRange(toTestRange)
				if err != nil {
					panic(fmt.Sprintf("range for %s not sem-ver %v %v", plugin, testRange, err))
				}
				if testRange(toMatch) {
					// only one range should be matched
-					if matchedRange != "" {
+					if matchedKey != "" {
						panic(fmt.Sprintf("k8sVersion %s for plugin %s passing range %s, conflict range matching with %s",
							k8sVersion, plugin, toTestRange, matchedRange))
					}
+					matchedKey = key
					matchedRange = toTestRange
				}
			}
-			if matchedRange == "" {
+			if matchedKey == "" {
				panic(fmt.Sprintf("no template found for k8sVersion %s plugin %s", k8sVersion, plugin))
			}
+			TemplateData[k8sVersion][plugin] = fmt.Sprintf("range=%s key=%s", matchedRange, matchedKey)
		}
	}
}
@@ -122,6 +129,17 @@ func GenerateData() {
	splitStr := strings.SplitN(os.Args[1], "=", 2)
	if len(splitStr) == 2 {
		if splitStr[0] == "--write-data" && splitStr[1] == "true" {
+			buf := new(bytes.Buffer)
+			enc := json.NewEncoder(buf)
+			enc.SetEscapeHTML(false)
+			enc.SetIndent("", " ")
+			if err := enc.Encode(TemplateData); err != nil {
+				panic(fmt.Sprintf("error encoding template data %v", err))
+			}
+			fmt.Println(buf.String())
			fmt.Println("generating data.json")
			//todo: zip file
			strData, _ := json.MarshalIndent(DriverData, "", " ")
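
Taken together, these changes make the validator record which template each Kubernetes version resolves to: the version string is normalized by stripping Rancher's packaging suffix, and the loop now captures the template key alongside the matched range. A minimal, self-contained sketch of that normalization and range matching, using the same blang/semver calls (the two ranges are copied from templates.go below; everything else is illustrative):

package main

import (
	"fmt"
	"strings"

	"github.com/blang/semver"
)

func main() {
	// "v1.16.0-beta.1-rancher1-1"[1:] drops the "v"; splitting on "-rancher"
	// drops the Rancher packaging suffix, leaving plain sem-ver.
	k8sVersion := "v1.16.0-beta.1-rancher1-1"
	toMatch, err := semver.Make(strings.Split(k8sVersion[1:], "-rancher")[0])
	if err != nil {
		panic(err)
	}
	for expr, key := range map[string]string{
		">=1.16.0-alpha":         "calico-v1.16",
		">=1.15.0 <1.16.0-alpha": "calico-v1.15",
	} {
		testRange, err := semver.ParseRange(expr)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-24s -> %s: %v\n", expr, key, testRange(toMatch))
	}
	// Only ">=1.16.0-alpha" matches: 1.16.0-beta.1 sorts after 1.16.0-alpha
	// but before 1.16.0, so exactly one range claims the version.
}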


@@ -1607,5 +1607,36 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages {
			IngressBackend: m("k8s.gcr.io/defaultbackend:1.4"),
			MetricsServer:  m("gcr.io/google_containers/metrics-server-amd64:v0.3.1"),
		},
+		"v1.16.0-beta.1-rancher1-1": {
+			Etcd:                      m("quay.io/coreos/etcd:v3.3.10-rancher1"),
+			Kubernetes:                m("rancher/hyperkube:v1.16.0-beta.1-rancher1"),
+			Alpine:                    m("rancher/rke-tools:v0.1.43"),
+			NginxProxy:                m("rancher/rke-tools:v0.1.43"),
+			CertDownloader:            m("rancher/rke-tools:v0.1.43"),
+			KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.43"),
+			KubeDNS:                   m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"),
+			DNSmasq:                   m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"),
+			KubeDNSSidecar:            m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"),
+			KubeDNSAutoscaler:         m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"),
+			Flannel:                   m("quay.io/coreos/flannel:v0.11.0-rancher1"),
+			FlannelCNI:                m("rancher/flannel-cni:v0.3.0-rancher5"),
+			CalicoNode:                m("quay.io/calico/node:v3.8.1"),
+			CalicoCNI:                 m("quay.io/calico/cni:v3.8.1"),
+			CalicoControllers:         m("quay.io/calico/kube-controllers:v3.8.1"),
+			CalicoFlexVol:             m("quay.io/calico/pod2daemon-flexvol:v3.8.1"),
+			CanalNode:                 m("quay.io/calico/node:v3.8.1"),
+			CanalCNI:                  m("quay.io/calico/cni:v3.8.1"),
+			CanalFlannel:              m("quay.io/coreos/flannel:v0.11.0"),
+			CanalFlexVol:              m("quay.io/calico/pod2daemon-flexvol:v3.8.1"),
+			WeaveNode:                 m("weaveworks/weave-kube:2.5.2"),
+			WeaveCNI:                  m("weaveworks/weave-npc:2.5.2"),
+			PodInfraContainer:         m("gcr.io/google_containers/pause:3.1"),
+			Ingress:                   m("rancher/nginx-ingress-controller:0.21.0-rancher3"),
+			IngressBackend:            m("k8s.gcr.io/defaultbackend:1.5-rancher1"),
+			MetricsServer:             m("gcr.io/google_containers/metrics-server:v0.3.3"),
+			CoreDNS:                   m("coredns/coredns:1.3.1"),
+			CoreDNSAutoscaler:         m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"),
+			WindowsPodInfraContainer:  m("rancher/kubelet-pause:v0.1.3"),
+		},
	}
}
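
The m(...) wrapper is image.Mirror from rancher/types, which rewrites upstream image references to mirrored names so the whole list can be served from one registry organization. A rough sketch of what such a mirror function does; the prefix table below is an illustrative assumption, not the actual mapping in rancher/types:

package main

import (
	"fmt"
	"strings"
)

// mirror is a hypothetical stand-in for image.Mirror: known upstream
// registries are rewritten to mirrored names under one organization
// (e.g. quay.io/coreos/etcd:v3.3.10 -> rancher/coreos-etcd:v3.3.10).
func mirror(image string) string {
	prefixes := map[string]string{ // assumed table, for illustration only
		"quay.io/coreos/":           "rancher/coreos-",
		"quay.io/calico/":           "rancher/calico-",
		"gcr.io/google_containers/": "rancher/",
		"k8s.gcr.io/":               "rancher/",
	}
	for from, to := range prefixes {
		if strings.HasPrefix(image, from) {
			return to + strings.TrimPrefix(image, from)
		}
	}
	return image // images already on Docker Hub pass through unchanged
}

func main() {
	fmt.Println(mirror("quay.io/calico/node:v3.8.1")) // rancher/calico-node:v3.8.1
}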


@@ -13,7 +13,13 @@ const (
func loadK8sVersionServiceOptions() map[string]v3.KubernetesServicesOptions {
	return map[string]v3.KubernetesServicesOptions{
+		"v1.16": {
+			KubeAPI:        getKubeAPIOptions116(),
+			Kubelet:        getKubeletOptions116(),
+			KubeController: getKubeControllerOptions(),
+			Kubeproxy:      getKubeProxyOptions(),
+			Scheduler:      getSchedulerOptions(),
+		},
		"v1.15": {
			KubeAPI: getKubeAPIOptions115(),
			Kubelet: getKubeletOptions115(),
@@ -107,6 +113,13 @@ func getKubeAPIOptions115() map[string]string {
	return kubeAPIOptions
}

+func getKubeAPIOptions116() map[string]string {
+	kubeAPIOptions := getKubeAPIOptions114()
+	kubeAPIOptions["enable-admission-plugins"] = fmt.Sprintf("%s,%s", kubeAPIOptions["enable-admission-plugins"], "TaintNodesByCondition,PersistentVolumeClaimResize")
+	kubeAPIOptions["runtime-config"] = "authorization.k8s.io/v1beta1=true"
+	return kubeAPIOptions
+}
+
// getKubeletOptions provides the root options for windows
// note: please double-check on windows side if changing the following options
func getKubeletOptions() map[string]string {
@@ -139,6 +152,13 @@ func getKubeletOptions115() map[string]string {
	return kubeletOptions
}

+func getKubeletOptions116() map[string]string {
+	kubeletOptions := getKubeletOptions()
+	kubeletOptions["authorization-mode"] = "Webhook"
+	delete(kubeletOptions, "allow-privileged")
+	return kubeletOptions
+}
+
func getKubeControllerOptions() map[string]string {
	return map[string]string{
		"address": "0.0.0.0",


@@ -505,6 +505,11 @@ metadata:
spec:
  hostNetwork: true
  restartPolicy: OnFailure
+  tolerations:
+  - effect: NoExecute
+    operator: Exists
+  - effect: NoSchedule
+    operator: Exists
  containers:
  - name: calicoctl
    image: {{.Calicoctl}}
@@ -1082,6 +1087,11 @@ metadata:
spec:
  hostNetwork: true
  restartPolicy: OnFailure
+  tolerations:
+  - effect: NoExecute
+    operator: Exists
+  - effect: NoSchedule
+    operator: Exists
  containers:
  - name: calicoctl
    image: {{.Calicoctl}}
@@ -1805,11 +1815,14 @@ spec:
      nodeSelector:
        beta.kubernetes.io/os: linux
      tolerations:
+        # Make sure calico-node gets scheduled on all nodes.
+        - effect: NoSchedule
+          operator: Exists
        # Mark the pod as a critical add-on for rescheduling.
        - key: CriticalAddonsOnly
          operator: Exists
-        - key: node-role.kubernetes.io/master
-          effect: NoSchedule
+        - effect: NoExecute
+          operator: Exists
      {{if eq .RBACConfig "rbac"}}
      serviceAccountName: calico-kube-controllers
      {{end}}
@@ -1834,3 +1847,787 @@ metadata:
  name: calico-kube-controllers
  namespace: kube-system
`
const CalicoTemplateV116 = `
{{if eq .RBACConfig "rbac"}}
# Source: calico/templates/rbac.yaml
# Include a clusterrole for the kube-controllers component,
# and bind it to the calico-kube-controllers serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
rules:
# Nodes are watched to monitor for deletions.
- apiGroups: [""]
resources:
- nodes
verbs:
- watch
- list
- get
# Pods are queried to check for existence.
- apiGroups: [""]
resources:
- pods
verbs:
- get
# IPAM resources are manipulated when nodes are deleted.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
verbs:
- list
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
# Needs access to update clusterinformations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- clusterinformations
verbs:
- get
- create
- update
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-kube-controllers
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-kube-controllers
subjects:
- kind: ServiceAccount
name: calico-kube-controllers
namespace: kube-system
---
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
# These permissions are required for Calico CNI to perform IPAM allocations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
- ipamblocks
- ipamhandles
verbs:
- get
- list
- create
- update
- delete
- apiGroups: ["crd.projectcalico.org"]
resources:
- ipamconfigs
verbs:
- get
# Block affinities must also be watchable by confd for route aggregation.
- apiGroups: ["crd.projectcalico.org"]
resources:
- blockaffinities
verbs:
- watch
# The Calico IPAM migration needs to get daemonsets. These permissions can be
# removed if not upgrading from an installation using host-local IPAM.
- apiGroups: ["apps"]
resources:
- daemonsets
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: calico-node
namespace: kube-system
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
{{end}}
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "none"
# Configure the backend to use.
calico_backend: "bird"
# Configure the MTU to use
veth_mtu: "1440"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"mtu": __CNI_MTU__,
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s"
},
"kubernetes": {
"kubeconfig": "{{.KubeCfg}}"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
---
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamblocks.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMBlock
plural: ipamblocks
singular: ipamblock
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: blockaffinities.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BlockAffinity
plural: blockaffinities
singular: blockaffinity
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamhandles.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMHandle
plural: ipamhandles
singular: ipamhandle
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ipamconfigs.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPAMConfig
plural: ipamconfigs
singular: ipamconfig
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgppeers.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPPeer
plural: bgppeers
singular: bgppeer
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkSet
plural: networksets
singular: networkset
---
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the calico-node container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: calico-node
annotations:
# This, along with the CriticalAddonsOnly toleration below,
# marks the pod as a critical add-on, ensuring it gets
# priority scheduling and that its resources are reserved
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
beta.kubernetes.io/os: linux
hostNetwork: true
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
{{if eq .RBACConfig "rbac"}}
serviceAccountName: calico-node
{{end}}
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container performs upgrade from host-local IPAM to calico-ipam.
# It can be deleted if this is a fresh installation, or if you have already
# upgraded to use calico-ipam.
- name: upgrade-ipam
image: {{.CNIImage}}
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
volumeMounts:
- mountPath: /var/lib/cni/networks
name: host-local-net-dir
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: {{.CNIImage}}
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-calico.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# CNI MTU Config variable
- name: CNI_MTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: {{.FlexVolImg}}
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
containers:
# Runs calico-node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: {{.NodeImage}}
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Choose the backend to use.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,bgp"
# Auto-detect the BGP IP address.
- name: IP
value: "autodetect"
# Enable IPIP
- name: CALICO_IPV4POOL_IPIP
value: "Always"
# Set MTU for tunnel device used if ipip is enabled
- name: FELIX_IPINIPMTU
valueFrom:
configMapKeyRef:
name: calico-config
key: veth_mtu
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
# chosen from this range. Changing this value after installation will have
# no effect. This should fall within --cluster-cidr.
- name: CALICO_IPV4POOL_CIDR
value: "{{.ClusterCIDR}}"
# Disable file logging so kubectl logs works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
httpGet:
path: /liveness
port: 9099
host: localhost
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
exec:
command:
- /bin/calico-node
- -bird-ready
- -felix-ready
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- name: policysync
mountPath: /var/run/nodeagent
volumes:
# Used by calico-node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Mount in the directory for host-local IPAM allocations. This is
# used when upgrading from host-local to calico-ipam, and can be removed
# if not using the upgrade-ipam init container.
- name: host-local-net-dir
hostPath:
path: /var/lib/cni/networks
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-kube-controllers
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-node
namespace: kube-system
---
# Source: calico/templates/calico-kube-controllers.yaml
# See https://github.com/projectcalico/kube-controllers
apiVersion: apps/v1
kind: Deployment
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
spec:
# The controllers can only have a single active instance.
replicas: 1
selector:
matchLabels:
k8s-app: calico-kube-controllers
strategy:
type: Recreate
template:
metadata:
name: calico-kube-controllers
namespace: kube-system
labels:
k8s-app: calico-kube-controllers
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
nodeSelector:
beta.kubernetes.io/os: linux
tolerations:
# Make sure calico-node gets scheduled on all nodes.
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
{{if eq .RBACConfig "rbac"}}
serviceAccountName: calico-kube-controllers
{{end}}
priorityClassName: system-cluster-critical
containers:
- name: calico-kube-controllers
image: {{.ControllersImage}}
env:
# Choose which controllers to run.
- name: ENABLED_CONTROLLERS
value: node
- name: DATASTORE_TYPE
value: kubernetes
readinessProbe:
exec:
command:
- /usr/bin/check-status
- -r
`


@@ -1755,3 +1755,618 @@ spec:
    plural: networkpolicies
    singular: networkpolicy
`
const CanalTemplateV116 = `
---
# Source: calico/templates/calico-config.yaml
# This ConfigMap is used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
data:
# Typha is disabled.
typha_service_name: "none"
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: "{{.CanalInterface}}"
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# The CNI network configuration to install on each node. The special
# values in this config will be automatically populated.
cni_network_config: |-
{
"name": "k8s-pod-network",
"cniVersion": "0.3.1",
"plugins": [
{
"type": "calico",
"log_level": "WARNING",
"datastore_type": "kubernetes",
"nodename": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "{{.KubeCfg}}"
}
},
{
"type": "portmap",
"snat": true,
"capabilities": {"portMappings": true}
}
]
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "{{.ClusterCIDR}}",
"Backend": {
"Type": "{{.FlannelBackend.Type}}"
}
}
---
# Source: calico/templates/kdd-crds.yaml
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: felixconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: FelixConfiguration
plural: felixconfigurations
singular: felixconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: bgpconfigurations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: BGPConfiguration
plural: bgpconfigurations
singular: bgpconfiguration
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: ippools.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: IPPool
plural: ippools
singular: ippool
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: hostendpoints.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: HostEndpoint
plural: hostendpoints
singular: hostendpoint
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterinformations.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: ClusterInformation
plural: clusterinformations
singular: clusterinformation
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworkpolicies.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkPolicy
plural: globalnetworkpolicies
singular: globalnetworkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: globalnetworksets.crd.projectcalico.org
spec:
scope: Cluster
group: crd.projectcalico.org
version: v1
names:
kind: GlobalNetworkSet
plural: globalnetworksets
singular: globalnetworkset
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networkpolicies.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkPolicy
plural: networkpolicies
singular: networkpolicy
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: networksets.crd.projectcalico.org
spec:
scope: Namespaced
group: crd.projectcalico.org
version: v1
names:
kind: NetworkSet
plural: networksets
singular: networkset
{{if eq .RBACConfig "rbac"}}
---
# Source: calico/templates/rbac.yaml
# Include a clusterrole for the calico-node DaemonSet,
# and bind it to the calico-node serviceaccount.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: calico-node
rules:
# The CNI plugin needs to get pods, nodes, and namespaces.
- apiGroups: [""]
resources:
- pods
- nodes
- namespaces
verbs:
- get
- apiGroups: [""]
resources:
- endpoints
- services
verbs:
# Used to discover service IPs for advertisement.
- watch
- list
# Used to discover Typhas.
- get
- apiGroups: [""]
resources:
- nodes/status
verbs:
# Needed for clearing NodeNetworkUnavailable flag.
- patch
# Calico stores some configuration information in node annotations.
- update
# Watch for changes to Kubernetes NetworkPolicies.
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- watch
- list
# Used by Calico for policy information.
- apiGroups: [""]
resources:
- pods
- namespaces
- serviceaccounts
verbs:
- list
- watch
# The CNI plugin patches pods/status.
- apiGroups: [""]
resources:
- pods/status
verbs:
- patch
# Calico monitors various CRDs for config.
- apiGroups: ["crd.projectcalico.org"]
resources:
- globalfelixconfigs
- felixconfigurations
- bgppeers
- globalbgpconfigs
- bgpconfigurations
- ippools
- ipamblocks
- globalnetworkpolicies
- globalnetworksets
- networkpolicies
- networksets
- clusterinformations
- hostendpoints
verbs:
- get
- list
- watch
# Calico must create and update some CRDs on startup.
- apiGroups: ["crd.projectcalico.org"]
resources:
- ippools
- felixconfigurations
- clusterinformations
verbs:
- create
- update
# Calico stores some configuration information on the node.
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- watch
# These permissions are only required for upgrade from v2.6, and can
# be removed after upgrade or on fresh installations.
- apiGroups: ["crd.projectcalico.org"]
resources:
- bgpconfigurations
- bgppeers
verbs:
- create
- update
---
# Flannel ClusterRole
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- apiGroups: [""]
resources:
- nodes
verbs:
- list
- watch
- apiGroups: [""]
resources:
- nodes/status
verbs:
- patch
---
# Bind the flannel ClusterRole to the canal ServiceAccount.
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: calico-node
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-node
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
{{end}}
---
# Source: calico/templates/calico-node.yaml
# This manifest installs the canal container, as well
# as the CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
spec:
selector:
matchLabels:
k8s-app: canal
updateStrategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
template:
metadata:
labels:
k8s-app: canal
annotations:
# This, along with the CriticalAddonsOnly toleration below,
# marks the pod as a critical add-on, ensuring it gets
# priority scheduling and that its resources are reserved
# if it ever gets evicted.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
hostNetwork: true
tolerations:
# Tolerate this effect so the pods will be schedulable at all times
- effect: NoSchedule
operator: Exists
# Mark the pod as a critical add-on for rescheduling.
- key: CriticalAddonsOnly
operator: Exists
- effect: NoExecute
operator: Exists
- key: "node-role.kubernetes.io/controlplane"
operator: "Exists"
effect: "NoSchedule"
- key: "node-role.kubernetes.io/etcd"
operator: "Exists"
effect: "NoExecute"
{{if eq .RBACConfig "rbac"}}
serviceAccountName: canal
{{end}}
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
terminationGracePeriodSeconds: 0
priorityClassName: system-node-critical
initContainers:
# This container installs the CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: {{.CNIImage}}
command: ["/install-cni.sh"]
env:
# Name of the CNI config file to create.
- name: CNI_CONF_NAME
value: "10-canal.conflist"
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
# Set the hostname based on the k8s node name.
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Prevents the container from sleeping forever.
- name: SLEEP
value: "false"
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
# to communicate with Felix over the Policy Sync API.
- name: flexvol-driver
image: {{.FlexVolImg}}
volumeMounts:
- name: flexvol-driver-host
mountPath: /host/driver
containers:
# Runs canal container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: {{.NodeImage}}
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Configure route aggregation based on pod CIDR.
- name: USE_POD_CIDR
value: "true"
# Wait for the datastore.
- name: WAIT_FOR_DATASTORE
value: "true"
# Set based on the k8s node name.
- name: NODENAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Cluster type to identify the deployment type
- name: CLUSTER_TYPE
value: "k8s,canal"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# No IP address needed.
- name: IP
value: ""
- name: CALICO_IPV4POOL_CIDR
value: "192.168.0.0/16"
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
- name: FELIX_HEALTHENABLED
value: "true"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
livenessProbe:
httpGet:
path: /liveness
port: 9099
host: localhost
periodSeconds: 10
initialDelaySeconds: 10
failureThreshold: 6
readinessProbe:
httpGet:
path: /readiness
port: 9099
host: localhost
periodSeconds: 10
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
- mountPath: /var/lib/calico
name: var-lib-calico
readOnly: false
- name: policysync
mountPath: /var/run/nodeagent
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: {{.CanalFlannelImg}}
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
volumeMounts:
- mountPath: /run/xtables.lock
name: xtables-lock
readOnly: false
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
# Used by canal.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
- name: var-lib-calico
hostPath:
path: /var/lib/calico
- name: xtables-lock
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Used by flannel.
- name: flannel-cfg
configMap:
name: canal-config
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used to create per-pod Unix Domain Sockets
- name: policysync
hostPath:
type: DirectoryOrCreate
path: /var/run/nodeagent
# Used to install Flex Volume Driver
- name: flexvol-driver-host
hostPath:
type: DirectoryOrCreate
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system
`


@@ -119,6 +119,10 @@ spec:
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
+      - effect: NoExecute
+        operator: Exists
+      - effect: NoSchedule
+        operator: Exists
      nodeSelector:
        beta.kubernetes.io/os: linux
      {{ range $k, $v := .NodeSelector }}
@@ -229,6 +233,11 @@ spec:
      {{- end }}
      nodeSelector:
        beta.kubernetes.io/os: linux
+      tolerations:
+      - effect: NoExecute
+        operator: Exists
+      - effect: NoSchedule
+        operator: Exists
      containers:
      - name: autoscaler
        image: {{.CoreDNSAutoScalerImage}}
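
The tolerations added here and in the other add-on manifests in this commit use operator: Exists with no key, so each entry tolerates every taint with the given effect: NoSchedule keeps the add-ons schedulable on tainted nodes, and NoExecute keeps them from being evicted when such a taint is applied later.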


@@ -440,3 +440,247 @@ spec:
    maxUnavailable: 20%
  type: RollingUpdate
`
const FlannelTemplateV116 = `
{{- if eq .RBACConfig "rbac"}}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups: ['extensions']
resources: ['podsecuritypolicies']
verbs: ['use']
resourceNames: ['psp.flannel.unprivileged']
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: flannel
namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
{{end}}
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
name: psp.flannel.unprivileged
annotations:
seccomp.security.alpha.kubernetes.io/allowedProfileNames: docker/default
seccomp.security.alpha.kubernetes.io/defaultProfileName: docker/default
apparmor.security.beta.kubernetes.io/allowedProfileNames: runtime/default
apparmor.security.beta.kubernetes.io/defaultProfileName: runtime/default
spec:
privileged: false
volumes:
- configMap
- secret
- emptyDir
- hostPath
allowedHostPaths:
- pathPrefix: "/etc/cni/net.d"
- pathPrefix: "/etc/kube-flannel"
- pathPrefix: "/run/flannel"
readOnlyRootFilesystem: false
# Users and groups
runAsUser:
rule: RunAsAny
supplementalGroups:
rule: RunAsAny
fsGroup:
rule: RunAsAny
# Privilege Escalation
allowPrivilegeEscalation: false
defaultAllowPrivilegeEscalation: false
# Capabilities
allowedCapabilities: ['NET_ADMIN']
defaultAddCapabilities: []
requiredDropCapabilities: []
# Host namespaces
hostPID: false
hostIPC: false
hostNetwork: true
hostPorts:
- min: 0
max: 65535
# SELinux
seLinux:
    # SELinux is unused in CaaSP
rule: 'RunAsAny'
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"plugins": [
{
"type": "flannel",
"delegate": {
"hairpinMode": true,
"isDefaultGateway": true
}
},
{
"type": "portmap",
"capabilities": {
"portMappings": true
}
}
]
}
net-conf.json: |
{
"Network": "{{.ClusterCIDR}}",
"Backend": {
"Type": "{{.FlannelBackend.Type}}",
"VNI": {{.FlannelBackend.VNI}},
"Port": {{.FlannelBackend.Port}}
}
}
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: kube-flannel
namespace: kube-system
labels:
tier: node
k8s-app: flannel
spec:
selector:
matchLabels:
k8s-app: flannel
template:
metadata:
labels:
tier: node
k8s-app: flannel
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
hostNetwork: true
tolerations:
- operator: Exists
{{- if eq .RBACConfig "rbac"}}
serviceAccountName: flannel
{{end}}
containers:
- name: kube-flannel
image: {{.Image}}
command:
- /opt/bin/flanneld
args:
- --ip-masq
- --kube-subnet-mgr
{{- if .FlannelInterface}}
- --iface={{.FlannelInterface}}
{{end}}
resources:
requests:
cpu: "100m"
memory: "50Mi"
limits:
cpu: "100m"
memory: "50Mi"
securityContext:
privileged: false
capabilities:
add: ["NET_ADMIN"]
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: install-cni
image: {{.CNIImage}}
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: kube-flannel-cfg
key: cni-conf.json
- name: CNI_CONF_NAME
value: "10-flannel.conflist"
volumeMounts:
- name: cni
mountPath: /host/etc/cni/net.d
- name: host-cni-bin
mountPath: /host/opt/cni/bin/
volumes:
- name: run
hostPath:
path: /run
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg
- name: host-cni-bin
hostPath:
path: /opt/cni/bin
updateStrategy:
rollingUpdate:
maxUnavailable: 20%
type: RollingUpdate
`


@@ -25,6 +25,11 @@ spec:
            values:
            - windows
      serviceAccountName: kube-dns-autoscaler
+      tolerations:
+      - effect: NoExecute
+        operator: Exists
+      - effect: NoSchedule
+        operator: Exists
      containers:
      - name: autoscaler
        image: {{.KubeDNSAutoScalerImage}}
@@ -148,6 +153,10 @@ spec:
      tolerations:
      - key: "CriticalAddonsOnly"
        operator: "Exists"
+      - effect: NoExecute
+        operator: Exists
+      - effect: NoSchedule
+        operator: Exists
      volumes:
      - name: kube-dns-config
        configMap:


@@ -89,7 +89,7 @@ metadata:
  name: metrics-server
  namespace: kube-system
---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
kind: Deployment
metadata:
  name: metrics-server
@@ -116,6 +116,11 @@ spec:
            values:
            - windows
      serviceAccountName: metrics-server
+      tolerations:
+      - effect: NoExecute
+        operator: Exists
+      - effect: NoSchedule
+        operator: Exists
      containers:
      - name: metrics-server
        image: {{ .MetricsServerImage }}


@@ -159,7 +159,7 @@ subjects:
    namespace: ingress-nginx
{{ end }}
---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: nginx-ingress-controller
@@ -193,6 +193,11 @@ spec:
      {{if eq .RBACConfig "rbac"}}
      serviceAccountName: nginx-ingress-serviceaccount
      {{ end }}
+      tolerations:
+      - effect: NoExecute
+        operator: Exists
+      - effect: NoSchedule
+        operator: Exists
      {{- if ne .AlpineImage ""}}
      initContainers:
      - command:
@@ -261,7 +266,7 @@ spec:
            successThreshold: 1
            timeoutSeconds: 1
---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
kind: Deployment
metadata:
  name: default-http-backend
@@ -270,6 +275,9 @@ metadata:
  namespace: ingress-nginx
spec:
  replicas: 1
+  selector:
+    matchLabels:
+      app: default-http-backend
  template:
    metadata:
      labels:
@@ -285,6 +293,11 @@ spec:
            values:
            - windows
      terminationGracePeriodSeconds: 60
+      tolerations:
+      - effect: NoExecute
+        operator: Exists
+      - effect: NoSchedule
+        operator: Exists
      containers:
      - name: default-http-backend
        # Any image is permissible as long as:
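
Besides the tolerations, the apiVersion bumps in this file and in the metrics-server manifest track the Kubernetes v1.16 API removals: extensions/v1beta1 no longer serves Deployment and DaemonSet, and the replacement apps/v1 makes spec.selector mandatory, which is why default-http-backend gains an explicit matchLabels block.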


@@ -17,16 +17,19 @@ const (
	NginxIngress = "nginxIngress"
	TemplateKeys = "templateKeys"

	calicov18  = "calico-v1.8"
	calicov113 = "calico-v1.13"
	calicov115 = "calico-v1.15"
+	calicov116 = "calico-v1.16"

	canalv115 = "canal-v1.15"
+	canalv116 = "canal-v1.16"
	canalv113 = "canal-v1.13"
	canalv18  = "canal-v1.8"

+	flannelv116 = "flannel-v1.16"
	flannelv115 = "flannel-v1.15"
	flannelv18  = "flannel-v1.8"

	coreDnsv18 = "coredns-v1.8"
	kubeDnsv18 = "kubedns-v1.8"
@@ -40,17 +43,20 @@ const (
func LoadK8sVersionedTemplates() map[string]map[string]string {
	return map[string]map[string]string{
		Calico: {
-			">=1.15.0 <1.16.0": calicov115,
+			">=1.16.0-alpha":          calicov116,
+			">=1.15.0 <1.16.0-alpha": calicov115,
			">=1.13.0 <1.15.0": calicov113,
			">=1.8.0 <1.13.0":  calicov18,
		},
		Canal: {
-			">=1.15.0 <1.16.0": canalv115,
+			">=1.16.0-alpha":          canalv116,
+			">=1.15.0 <1.16.0-alpha": canalv115,
			">=1.13.0 <1.15.0": canalv113,
			">=1.8.0 <1.13.0":  canalv18,
		},
		Flannel: {
-			">=1.15.0": flannelv115,
+			">=1.16.0-alpha":          flannelv116,
+			">=1.15.0 <1.16.0-alpha": flannelv115,
			">=1.8.0 <1.15.0": flannelv18,
		},
		CoreDNS: {
@@ -76,14 +82,17 @@ func getTemplates() map[string]string {
	return map[string]string{
		calicov113: CalicoTemplateV113,
		calicov115: CalicoTemplateV115,
+		calicov116: CalicoTemplateV116,
		calicov18:  CalicoTemplateV112,

		flannelv115: FlannelTemplateV115,
+		flannelv116: FlannelTemplateV116,
		flannelv18:  FlannelTemplate,

		canalv113: CanalTemplateV113,
		canalv18:  CanalTemplateV112,
		canalv115: CanalTemplateV115,
+		canalv116: CanalTemplateV116,

		coreDnsv18: CoreDNSTemplate,
		kubeDnsv18: KubeDNSTemplate,
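
A consistency check in the spirit of the validateTemplateMatch change earlier in this commit: every key referenced by a version range should resolve to a template body. A sketch of such a test, placed inside the templates package so it can reach the unexported getTemplates; it is not part of the commit, and it assumes LoadK8sVersionedTemplates carries a templateKeys entry (which the data.go loop above skips):

package templates

import "testing"

// TestVersionedTemplateKeys asserts that every template key referenced by
// a semver range in LoadK8sVersionedTemplates has a body in getTemplates.
func TestVersionedTemplateKeys(t *testing.T) {
	bodies := getTemplates()
	for plugin, byRange := range LoadK8sVersionedTemplates() {
		if plugin == TemplateKeys {
			continue // the templateKeys entry maps keys to bodies, not ranges
		}
		for rng, key := range byRange {
			if _, ok := bodies[key]; !ok {
				t.Errorf("plugin %s: range %q references missing template key %q", plugin, rng, key)
			}
		}
	}
}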

vendor/modules.txt

@@ -105,7 +105,7 @@ github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
# github.com/prometheus/procfs v0.0.3
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
-# github.com/rancher/kontainer-driver-metadata v0.0.0-20190822033834-53da6c8441ed
+# github.com/rancher/kontainer-driver-metadata v0.0.0-20190823014104-22ae7cf76a62
github.com/rancher/kontainer-driver-metadata/rke/templates
github.com/rancher/kontainer-driver-metadata/rke
# github.com/rancher/norman v0.0.0-20190821234528-20a936b685b0
@@ -123,7 +123,7 @@ github.com/rancher/norman/types/slice
github.com/rancher/norman/httperror
github.com/rancher/norman/types/definition
github.com/rancher/norman/types/values
-# github.com/rancher/types v0.0.0-20190822030441-376c6f64c718
+# github.com/rancher/types v0.0.0-20190822170951-b99efa820bc3
github.com/rancher/types/apis/management.cattle.io/v3
github.com/rancher/types/apis/project.cattle.io/v3
github.com/rancher/types/image