package templates

// CalicoTemplate is a Go text/template that renders the self-hosted Calico
// deployment manifests (RBAC roles/bindings, ConfigMap, etcd TLS Secret,
// calico-node DaemonSet, kube-controllers Deployment, service accounts and an
// optional cloud-provider ipPool bootstrap Pod) as a multi-document YAML
// stream for kubectl.
//
// Template parameters (all substituted as plain strings):
//   - .RBACConfig        — "rbac" enables the ClusterRole/Binding documents
//   - .EtcdEndpoints     — comma-separated etcd client URLs
//   - .ClientKey/.ClientCert/.ClientCA           — base64 etcd TLS material
//   - .ClientKeyPath/.ClientCertPath/.ClientCAPath — on-host TLS file paths
//   - .APIRoot, .KubeCfg — kube-apiserver root URL and kubeconfig path
//   - .ClusterCIDR       — pod IP pool CIDR
//   - .NodeImage/.CNIImage/.ControllersImage/.Calicoctl — container images
//   - .CloudProvider     — "none" skips the ipPool ConfigMap/calicoctl Pod
//
// NOTE(review): the kube-controllers Deployment's toleration for the
// node-role.kubernetes.io/etcd taint used effect "NoSchedule", while the
// calico-node DaemonSet tolerates the same taint with "NoExecute" (the effect
// RKE applies to etcd nodes). A NoSchedule toleration does not tolerate a
// NoExecute taint, so the controllers could never run on etcd-tainted nodes;
// the Deployment toleration is fixed to "NoExecute" to match.
const CalicoTemplate = `
{{if eq .RBACConfig "rbac"}}
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-cni-plugin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-cni-plugin
subjects:
- kind: ServiceAccount
  name: calico-cni-plugin
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-cni-plugin
rules:
  - apiGroups: [""]
    resources:
      - pods
      - nodes
    verbs:
      - get
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-cni-plugin
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: calico-kube-controllers
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: calico-kube-controllers
subjects:
- kind: ServiceAccount
  name: calico-kube-controllers
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
  name: calico-kube-controllers
rules:
  - apiGroups:
    - ""
    - extensions
    resources:
      - pods
      - namespaces
      - networkpolicies
      - nodes
    verbs:
      - watch
      - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system
## end rbac here
{{end}}
---
# Calico Version master
# https://docs.projectcalico.org/master/releases#master
# This manifest includes the following component versions:
#   calico/node:master
#   calico/cni:master
#   calico/kube-controllers:master

# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
  name: calico-config
  namespace: kube-system
data:
  # Configure this with the location of your etcd cluster.
  etcd_endpoints: "{{.EtcdEndpoints}}"

  # Configure the Calico backend to use.
  calico_backend: "bird"

  # The CNI network configuration to install on each node.
  cni_network_config: |-
    {
      "name": "rke-pod-network",
      "cniVersion": "0.3.0",
      "plugins": [
        {
          "type": "calico",
          "etcd_endpoints": "{{.EtcdEndpoints}}",
          "etcd_key_file": "{{.ClientKeyPath}}",
          "etcd_cert_file": "{{.ClientCertPath}}",
          "etcd_ca_cert_file": "{{.ClientCAPath}}",
          "log_level": "info",
          "mtu": 1500,
          "ipam": {
              "type": "calico-ipam"
          },
          "policy": {
              "type": "k8s",
              "k8s_api_root": "{{.APIRoot}}",
              "k8s_client_certificate": "{{.ClientCertPath}}",
              "k8s_client_key": "{{.ClientKeyPath}}",
              "k8s_certificate_authority": "{{.ClientCAPath}}"
          },
          "kubernetes": {
              "kubeconfig": "{{.KubeCfg}}"
          }
        },
        {
          "type": "portmap",
          "snat": true,
          "capabilities": {"portMappings": true}
        }
      ]
    }
  etcd_ca: "/calico-secrets/etcd-ca"
  etcd_cert: "/calico-secrets/etcd-cert"
  etcd_key: "/calico-secrets/etcd-key"
---
apiVersion: v1
kind: Secret
type: Opaque
metadata:
  name: calico-etcd-secrets
  namespace: kube-system
data:
  etcd-key: {{.ClientKey}}
  etcd-cert: {{.ClientCert}}
  etcd-ca: {{.ClientCA}}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
  name: calico-node
  namespace: kube-system
  labels:
    k8s-app: calico-node
spec:
  selector:
    matchLabels:
      k8s-app: calico-node
  template:
    metadata:
      labels:
        k8s-app: calico-node
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      serviceAccountName: calico-node
      # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
      # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
      terminationGracePeriodSeconds: 0
      tolerations:
      - key: "dedicated"
        value: "master"
        effect: "NoSchedule"
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      - key: "node-role.kubernetes.io/master"
        operator: "Exists"
      - key: "node-role.kubernetes.io/etcd"
        operator: "Exists"
        effect: "NoExecute"
      containers:
        # Runs calico/node container on each Kubernetes node. This
        # container programs network policy and routes on each
        # host.
        - name: calico-node
          image: {{.NodeImage}}
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Choose the backend to use.
            - name: CALICO_NETWORKING_BACKEND
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: calico_backend
            # Cluster type to identify the deployment type
            - name: CLUSTER_TYPE
              value: "k8s,bgp"
            # Disable file logging so "kubectl logs" works.
            - name: CALICO_DISABLE_FILE_LOGGING
              value: "true"
            # Set Felix endpoint to host default action to ACCEPT.
            - name: FELIX_DEFAULTENDPOINTTOHOSTACTION
              value: "ACCEPT"
            # Configure the IP Pool from which Pod IPs will be chosen.
            - name: CALICO_IPV4POOL_CIDR
              value: "{{.ClusterCIDR}}"
            - name: CALICO_IPV4POOL_IPIP
              value: "Always"
            # Disable IPv6 on Kubernetes.
            - name: FELIX_IPV6SUPPORT
              value: "false"
            # Set Felix logging to "info"
            - name: FELIX_LOGSEVERITYSCREEN
              value: "info"
            # Set MTU for tunnel device used if ipip is enabled
            - name: FELIX_IPINIPMTU
              value: "1440"
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
            # Auto-detect the BGP IP address.
            - name: IP
              value: ""
            - name: FELIX_HEALTHENABLED
              value: "true"
          securityContext:
            privileged: true
          resources:
            requests:
              cpu: 250m
          livenessProbe:
            httpGet:
              path: /liveness
              port: 9099
            periodSeconds: 10
            initialDelaySeconds: 10
            failureThreshold: 6
          readinessProbe:
            httpGet:
              path: /readiness
              port: 9099
            periodSeconds: 10
          volumeMounts:
            - mountPath: /lib/modules
              name: lib-modules
              readOnly: true
            - mountPath: /var/run/calico
              name: var-run-calico
              readOnly: false
            - mountPath: /calico-secrets
              name: etcd-certs
            - mountPath: /etc/kubernetes
              name: etc-kubernetes
        # This container installs the Calico CNI binaries
        # and CNI network config file on each node.
        - name: install-cni
          image: {{.CNIImage}}
          command: ["/install-cni.sh"]
          env:
            # Name of the CNI config file to create.
            - name: CNI_CONF_NAME
              value: "10-calico.conflist"
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # The CNI network config to install on each node.
            - name: CNI_NETWORK_CONFIG
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: cni_network_config
          volumeMounts:
            - mountPath: /host/opt/cni/bin
              name: cni-bin-dir
            - mountPath: /host/etc/cni/net.d
              name: cni-net-dir
            - mountPath: /calico-secrets
              name: etcd-certs
            - mountPath: /etc/kubernetes
              name: etc-kubernetes
      volumes:
        # Used by calico/node.
        - name: lib-modules
          hostPath:
            path: /lib/modules
        - name: var-run-calico
          hostPath:
            path: /var/run/calico
        # Used to install CNI.
        - name: cni-bin-dir
          hostPath:
            path: /opt/cni/bin
        - name: cni-net-dir
          hostPath:
            path: /etc/cni/net.d
        # Mount in the etcd TLS secrets.
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
        - name: etc-kubernetes
          hostPath:
            path: /etc/kubernetes
---
# This manifest deploys the Calico Kubernetes controllers.
# See https://github.com/projectcalico/kube-controllers
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-kube-controllers
  namespace: kube-system
  labels:
    k8s-app: calico-kube-controllers
  annotations:
    scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
  # The controllers can only have a single active instance.
  replicas: 1
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-kube-controllers
      namespace: kube-system
      labels:
        k8s-app: calico-kube-controllers
    spec:
      # The controllers must run in the host network namespace so that
      # it isn't governed by policy that would prevent it from working.
      hostNetwork: true
      serviceAccountName: calico-kube-controllers
      tolerations:
      - key: "dedicated"
        value: "master"
        effect: "NoSchedule"
      - key: "CriticalAddonsOnly"
        operator: "Exists"
      - key: "node-role.kubernetes.io/master"
        operator: "Exists"
      # Must be NoExecute to match the taint RKE puts on etcd nodes; a
      # NoSchedule toleration does not tolerate a NoExecute taint (see the
      # calico-node DaemonSet above, which already uses NoExecute).
      - key: "node-role.kubernetes.io/etcd"
        operator: "Exists"
        effect: "NoExecute"
      containers:
        - name: calico-kube-controllers
          image: {{.ControllersImage}}
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
            # Location of the CA certificate for etcd.
            - name: ETCD_CA_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_ca
            # Location of the client key for etcd.
            - name: ETCD_KEY_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_key
            # Location of the client certificate for etcd.
            - name: ETCD_CERT_FILE
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_cert
          volumeMounts:
            # Mount in the etcd TLS secrets.
            - mountPath: /calico-secrets
              name: etcd-certs
            - mountPath: /etc/kubernetes
              name: etc-kubernetes
      volumes:
        # Mount in the etcd TLS secrets.
        - name: etcd-certs
          secret:
            secretName: calico-etcd-secrets
        - name: etc-kubernetes
          hostPath:
            path: /etc/kubernetes
---
# This deployment turns off the old "policy-controller". It should remain at 0 replicas, and then
# be removed entirely once the new kube-controllers deployment has been deployed above.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: calico-policy-controller
  namespace: kube-system
  labels:
    k8s-app: calico-policy
spec:
  # Turn this deployment off in favor of the kube-controllers deployment above.
  replicas: 0
  strategy:
    type: Recreate
  template:
    metadata:
      name: calico-policy-controller
      namespace: kube-system
      labels:
        k8s-app: calico-policy
    spec:
      hostNetwork: true
      serviceAccountName: calico-kube-controllers
      containers:
        - name: calico-policy-controller
          image: {{.ControllersImage}}
          env:
            # The location of the Calico etcd cluster.
            - name: ETCD_ENDPOINTS
              valueFrom:
                configMapKeyRef:
                  name: calico-config
                  key: etcd_endpoints
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-kube-controllers
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: calico-node
  namespace: kube-system
{{if ne .CloudProvider "none"}}
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: {{.CloudProvider}}-ippool
  namespace: kube-system
data:
  {{.CloudProvider}}-ippool: |-
    apiVersion: v1
    kind: ipPool
    metadata:
      cidr: {{.ClusterCIDR}}
    spec:
      ipip:
        enabled: true
      nat-outgoing: true
---
apiVersion: v1
kind: Pod
metadata:
  name: calicoctl
  namespace: kube-system
spec:
  hostNetwork: true
  restartPolicy: OnFailure
  containers:
  - name: calicoctl
    image: {{.Calicoctl}}
    command: ["/bin/sh", "-c", "calicoctl apply -f {{.CloudProvider}}-ippool.yaml"]
    env:
    - name: ETCD_ENDPOINTS
      valueFrom:
        configMapKeyRef:
          name: calico-config
          key: etcd_endpoints
    # Location of the CA certificate for etcd.
    - name: ETCD_CA_CERT_FILE
      valueFrom:
        configMapKeyRef:
          name: calico-config
          key: etcd_ca
    # Location of the client key for etcd.
    - name: ETCD_KEY_FILE
      valueFrom:
        configMapKeyRef:
          name: calico-config
          key: etcd_key
    # Location of the client certificate for etcd.
    - name: ETCD_CERT_FILE
      valueFrom:
        configMapKeyRef:
          name: calico-config
          key: etcd_cert
    volumeMounts:
    - name: ippool-config
      mountPath: /root/
    # Mount in the etcd TLS secrets.
    - mountPath: /calico-secrets
      name: etcd-certs
  volumes:
  - name: ippool-config
    configMap:
      name: {{.CloudProvider}}-ippool
      items:
        - key: {{.CloudProvider}}-ippool
          path: {{.CloudProvider}}-ippool.yaml
  # Mount in the etcd TLS secrets.
  - name: etcd-certs
    secret:
      secretName: calico-etcd-secrets
{{end}}
`