
kontainer-driver-metadata update

Alena Prokharchyk
2019-09-18 13:19:50 -07:00
parent ea94a39bc3
commit e185b6ac81
8 changed files with 357 additions and 56 deletions


@@ -1034,14 +1034,14 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages {
CoreDNS: m("coredns/coredns:1.2.6"),
CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.0.0"),
},
- // Enabled in Rancher v2.3
- "v1.13.10-rancher1-3": {
+ // Enabled in Rancher v2.3.0
+ "v1.13.11-rancher1-1": {
Etcd: m("quay.io/coreos/etcd:v3.2.24-rancher1"),
Kubernetes: m("rancher/hyperkube:v1.13.10-rancher1"),
Alpine: m("rancher/rke-tools:v0.1.44"),
NginxProxy: m("rancher/rke-tools:v0.1.44"),
CertDownloader: m("rancher/rke-tools:v0.1.44"),
KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.44"),
Kubernetes: m("rancher/hyperkube:v1.13.11-rancher1"),
Alpine: m("rancher/rke-tools:v0.1.50"),
NginxProxy: m("rancher/rke-tools:v0.1.50"),
CertDownloader: m("rancher/rke-tools:v0.1.50"),
KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.50"),
KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"),
DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"),
KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"),
@@ -1208,10 +1208,10 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages {
CoreDNS: m("coredns/coredns:1.3.1"),
CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"),
},
- // Enabled in Rancher v2.3
- "v1.14.6-rancher3-1": {
+ // Enabled in Rancher v2.3.0
+ "v1.14.7-rancher1-1": {
Etcd: m("quay.io/coreos/etcd:v3.3.10-rancher1"),
Kubernetes: m("rancher/hyperkube:v1.14.6-rancher3"),
Kubernetes: m("rancher/hyperkube:v1.14.7-rancher1"),
Alpine: m("rancher/rke-tools:v0.1.50"),
NginxProxy: m("rancher/rke-tools:v0.1.50"),
CertDownloader: m("rancher/rke-tools:v0.1.50"),
@@ -1327,10 +1327,10 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages {
CoreDNS: m("coredns/coredns:1.3.1"),
CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"),
},
- // Enabled in Rancher v2.3
- "v1.15.3-rancher3-1": {
+ // Enabled in Rancher v2.3.0
+ "v1.15.4-rancher1-1": {
Etcd: m("quay.io/coreos/etcd:v3.3.10-rancher1"),
Kubernetes: m("rancher/hyperkube:v1.15.3-rancher3"),
Kubernetes: m("rancher/hyperkube:v1.15.4-rancher1"),
Alpine: m("rancher/rke-tools:v0.1.50"),
NginxProxy: m("rancher/rke-tools:v0.1.50"),
CertDownloader: m("rancher/rke-tools:v0.1.50"),
@@ -1358,6 +1358,38 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages {
CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"),
WindowsPodInfraContainer: m("rancher/kubelet-pause:v0.1.3"),
},
+ // Experimental in Rancher v2.3.0
+ "v1.16.0-rancher1-1": {
+ Etcd: m("quay.io/coreos/etcd:v3.3.15-rancher1"),
+ Kubernetes: m("rancher/hyperkube:v1.16.0-rancher1"),
+ Alpine: m("rancher/rke-tools:v0.1.49"),
+ NginxProxy: m("rancher/rke-tools:v0.1.49"),
+ CertDownloader: m("rancher/rke-tools:v0.1.49"),
+ KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.49"),
+ KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"),
+ DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"),
+ KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"),
+ KubeDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.7.1"),
+ Flannel: m("quay.io/coreos/flannel:v0.11.0-rancher1"),
+ FlannelCNI: m("rancher/flannel-cni:v0.3.0-rancher5"),
+ CalicoNode: m("quay.io/calico/node:v3.8.1"),
+ CalicoCNI: m("quay.io/calico/cni:v3.8.1"),
+ CalicoControllers: m("quay.io/calico/kube-controllers:v3.8.1"),
+ CalicoFlexVol: m("quay.io/calico/pod2daemon-flexvol:v3.8.1"),
+ CanalNode: m("quay.io/calico/node:v3.8.1"),
+ CanalCNI: m("quay.io/calico/cni:v3.8.1"),
+ CanalFlannel: m("quay.io/coreos/flannel:v0.11.0"),
+ CanalFlexVol: m("quay.io/calico/pod2daemon-flexvol:v3.8.1"),
+ WeaveNode: m("weaveworks/weave-kube:2.5.2"),
+ WeaveCNI: m("weaveworks/weave-npc:2.5.2"),
+ PodInfraContainer: m("gcr.io/google_containers/pause:3.1"),
+ Ingress: m("rancher/nginx-ingress-controller:nginx-0.25.1-rancher1"),
+ IngressBackend: m("k8s.gcr.io/defaultbackend:1.5-rancher1"),
+ MetricsServer: m("gcr.io/google_containers/metrics-server:v0.3.4"),
+ CoreDNS: m("coredns/coredns:1.6.2"),
+ CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.7.1"),
+ WindowsPodInfraContainer: m("rancher/kubelet-pause:v0.1.3"),
+ },
// k8s version from 2.1.x release with old rke-tools to allow upgrade from 2.1.x clusters
// without all clusters being restarted
"v1.11.9-rancher1-3": {
@@ -1638,37 +1670,5 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages {
IngressBackend: m("k8s.gcr.io/defaultbackend:1.4"),
MetricsServer: m("gcr.io/google_containers/metrics-server-amd64:v0.3.1"),
},
- // Enabled in Rancher v2.3
- "v1.16.0-beta.1-rancher2-1": {
- Etcd: m("quay.io/coreos/etcd:v3.3.10-rancher1"),
- Kubernetes: m("rancher/hyperkube:v1.16.0-beta.1-rancher2"),
- Alpine: m("rancher/rke-tools:v0.1.50"),
- NginxProxy: m("rancher/rke-tools:v0.1.50"),
- CertDownloader: m("rancher/rke-tools:v0.1.50"),
- KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.50"),
- KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"),
- DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"),
- KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"),
- KubeDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"),
- Flannel: m("quay.io/coreos/flannel:v0.11.0-rancher1"),
- FlannelCNI: m("rancher/flannel-cni:v0.3.0-rancher5"),
- CalicoNode: m("quay.io/calico/node:v3.8.1"),
- CalicoCNI: m("quay.io/calico/cni:v3.8.1"),
- CalicoControllers: m("quay.io/calico/kube-controllers:v3.8.1"),
- CalicoFlexVol: m("quay.io/calico/pod2daemon-flexvol:v3.8.1"),
- CanalNode: m("quay.io/calico/node:v3.8.1"),
- CanalCNI: m("quay.io/calico/cni:v3.8.1"),
- CanalFlannel: m("quay.io/coreos/flannel:v0.11.0"),
- CanalFlexVol: m("quay.io/calico/pod2daemon-flexvol:v3.8.1"),
- WeaveNode: m("weaveworks/weave-kube:2.5.2"),
- WeaveCNI: m("weaveworks/weave-npc:2.5.2"),
- PodInfraContainer: m("gcr.io/google_containers/pause:3.1"),
- Ingress: m("rancher/nginx-ingress-controller:nginx-0.25.1-rancher1"),
- IngressBackend: m("k8s.gcr.io/defaultbackend:1.5-rancher1"),
- MetricsServer: m("gcr.io/google_containers/metrics-server:v0.3.3"),
- CoreDNS: m("coredns/coredns:1.3.1"),
- CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"),
- WindowsPodInfraContainer: m("rancher/kubelet-pause:v0.1.3"),
- },
}
}


@@ -16,9 +16,9 @@ func loadRancherDefaultK8sVersions() map[string]string {
func loadRKEDefaultK8sVersions() map[string]string {
return map[string]string{
"0.3": "v1.15.3-rancher3-1",
"0.3": "v1.15.4-rancher1-1",
// rke will use default if its version is absent
"default": "v1.15.3-rancher3-1",
"default": "v1.15.4-rancher1-1",
}
}
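The map above keys the default Kubernetes version on the RKE minor release ("0.3"), with a "default" entry used when the running version has no match. Below is a minimal, hypothetical sketch that only makes the fallback comment concrete; the version-trimming rule and the helper name are assumptions for illustration, not RKE's actual resolution code.

package main

import (
	"fmt"
	"strings"
)

// defaultK8sVersion is a hypothetical helper, not RKE's code: match the RKE
// release on its "major.minor" key (e.g. "0.3.1" -> "0.3"), otherwise fall
// back to the "default" entry, mirroring the comment in the map above.
func defaultK8sVersion(defaults map[string]string, rkeVersion string) string {
	parts := strings.SplitN(rkeVersion, ".", 3)
	if len(parts) >= 2 {
		if v, ok := defaults[parts[0]+"."+parts[1]]; ok {
			return v
		}
	}
	return defaults["default"]
}

func main() {
	defaults := map[string]string{
		"0.3":     "v1.15.4-rancher1-1",
		"default": "v1.15.4-rancher1-1",
	}
	fmt.Println(defaultK8sVersion(defaults, "0.3.1")) // v1.15.4-rancher1-1
	fmt.Println(defaultK8sVersion(defaults, "0.4.0")) // falls back to "default"
}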


@@ -311,3 +311,301 @@ roleRef:
name: system:coredns-autoscaler
apiGroup: rbac.authorization.k8s.io
{{- end }}`
const CoreDNSTemplateV116 = `
---
{{- if eq .RBACConfig "rbac"}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
rules:
- apiGroups:
- ""
resources:
- endpoints
- services
- pods
- namespaces
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
annotations:
rbac.authorization.kubernetes.io/autoupdate: "true"
labels:
kubernetes.io/bootstrapping: rbac-defaults
name: system:coredns
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:coredns
subjects:
- kind: ServiceAccount
name: coredns
namespace: kube-system
{{- end }}
---
apiVersion: v1
kind: ConfigMap
metadata:
name: coredns
namespace: kube-system
data:
Corefile: |
.:53 {
errors
health
ready
kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ "in-addr.arpa ip6.arpa" }}{{ end }} {
pods insecure
fallthrough in-addr.arpa ip6.arpa
}
prometheus :9153
{{- if .UpstreamNameservers }}
forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}
{{- else }}
forward . "/etc/resolv.conf"
{{- end }}
cache 30
loop
reload
loadbalance
}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/name: "CoreDNS"
spec:
replicas: 1
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
spec:
priorityClassName: system-cluster-critical
{{- if eq .RBACConfig "rbac"}}
serviceAccountName: coredns
{{- end }}
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
nodeSelector:
beta.kubernetes.io/os: linux
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/worker
operator: Exists
containers:
- name: coredns
image: {{.CoreDNSImage}}
imagePullPolicy: IfNotPresent
resources:
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
args: [ "-conf", "/etc/coredns/Corefile" ]
volumeMounts:
- name: config-volume
mountPath: /etc/coredns
readOnly: true
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- containerPort: 9153
name: metrics
protocol: TCP
livenessProbe:
httpGet:
path: /health
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /ready
port: 8181
scheme: HTTP
securityContext:
allowPrivilegeEscalation: false
capabilities:
add:
- NET_BIND_SERVICE
drop:
- all
readOnlyRootFilesystem: true
dnsPolicy: Default
volumes:
- name: config-volume
configMap:
name: coredns
items:
- key: Corefile
path: Corefile
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
annotations:
prometheus.io/port: "9153"
prometheus.io/scrape: "true"
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "CoreDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: {{.ClusterDNSServer}}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
- name: metrics
port: 9153
protocol: TCP
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: coredns-autoscaler
namespace: kube-system
labels:
k8s-app: coredns-autoscaler
spec:
selector:
matchLabels:
k8s-app: coredns-autoscaler
template:
metadata:
labels:
k8s-app: coredns-autoscaler
spec:
{{- if eq .RBACConfig "rbac"}}
serviceAccountName: coredns-autoscaler
{{- end }}
nodeSelector:
beta.kubernetes.io/os: linux
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: node-role.kubernetes.io/worker
operator: Exists
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
containers:
- name: autoscaler
image: {{.CoreDNSAutoScalerImage}}
resources:
requests:
cpu: "20m"
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=coredns-autoscaler
- --target=Deployment/coredns
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
- --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1,"preventSinglePointFailure":true}}
- --logtostderr=true
- --v=2
{{- if eq .RBACConfig "rbac"}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: coredns-autoscaler
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:coredns-autoscaler
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list", "watch"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- apiGroups: ["extensions","apps"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:coredns-autoscaler
subjects:
- kind: ServiceAccount
name: coredns-autoscaler
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:coredns-autoscaler
apiGroup: rbac.authorization.k8s.io
{{- end }}`


@@ -377,7 +377,7 @@ spec:
- --target=Deployment/kube-dns
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
- --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}}
- --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1,"preventSinglePointFailure":true}}
- --logtostderr=true
- --v=2
---
@@ -402,7 +402,7 @@ rules:
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- apiGroups: ["extensions"]
- apiGroups: ["extensions","apps"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
- apiGroups: [""]


@@ -31,7 +31,8 @@ const (
flannelv115 = "flannel-v1.15"
flannelv116 = "flannel-v1.16"
- coreDnsv18 = "coredns-v1.8"
+ coreDnsv18 = "coredns-v1.8"
+ coreDnsv116 = "coredns-v1.16"
kubeDnsv18 = "kubedns-v1.8"
kubeDnsv116 = "kubedns-v1.16"
@@ -65,7 +66,8 @@ func LoadK8sVersionedTemplates() map[string]map[string]string {
">=1.8.0-rancher0 <1.15.0-rancher0": flannelv18,
},
CoreDNS: {
">=1.8.0-rancher0 <1.16.0": coreDnsv18,
">=1.16.0-alpha": coreDnsv116,
">=1.8.0-rancher0 <1.16.0-alpha": coreDnsv18,
},
KubeDNS: {
">=1.16.0-alpha": kubeDnsv116,
@@ -106,7 +108,8 @@ func getTemplates() map[string]string {
canalv115: CanalTemplateV115,
canalv116: CanalTemplateV116,
- coreDnsv18: CoreDNSTemplate,
+ coreDnsv18: CoreDNSTemplate,
+ coreDnsv116: CoreDNSTemplateV116,
kubeDnsv18: KubeDNSTemplate,
kubeDnsv116: KubeDNSTemplateV116,
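The tightened CoreDNS range (<1.16.0-alpha) hands anything from the 1.16 pre-releases onward to the new CoreDNSTemplateV116. A minimal sketch of how such a range table resolves a version, assuming the blang/semver package for range matching (an assumption about the library); the lookup helper is a simplified stand-in for illustration, not the repository's own code.

package main

import (
	"fmt"
	"strings"

	"github.com/blang/semver"
)

// pickCoreDNSTemplate resolves a cluster version against the CoreDNS range
// table from this commit. Overlap is impossible here, so map order is fine.
func pickCoreDNSTemplate(k8sVersion string) (string, error) {
	ranges := map[string]string{
		">=1.8.0-rancher0 <1.16.0-alpha": "coredns-v1.8",
		">=1.16.0-alpha":                 "coredns-v1.16",
	}
	v, err := semver.Parse(strings.TrimPrefix(k8sVersion, "v"))
	if err != nil {
		return "", err
	}
	for constraint, tmpl := range ranges {
		if semver.MustParseRange(constraint)(v) {
			return tmpl, nil
		}
	}
	return "", fmt.Errorf("no CoreDNS template matches %s", k8sVersion)
}

func main() {
	// "1.16.0-rancher1-1" sorts above the 1.16.0-alpha pre-release bound, so the
	// new experimental version lands on coredns-v1.16; the 1.15 line stays on v1.8.
	fmt.Println(pickCoreDNSTemplate("v1.16.0-rancher1-1")) // expected: coredns-v1.16
	fmt.Println(pickCoreDNSTemplate("v1.15.4-rancher1-1")) // expected: coredns-v1.8
}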