Mirror of https://github.com/rancher/rke.git

Vendor changes

Brenda Rearden 2019-12-19 17:42:27 -07:00
parent 4f2c87fcd0
commit 0b8e86fd62
7 changed files with 367 additions and 14 deletions

go.mod

@@ -47,9 +47,9 @@ require (
     github.com/mattn/go-colorable v0.1.0
     github.com/mcuadros/go-version v0.0.0-20180611085657-6d5863ca60fa
     github.com/pkg/errors v0.8.1
-    github.com/rancher/kontainer-driver-metadata v0.0.0-20191210185103-dc45b432da60
-    github.com/rancher/norman v0.0.0-20191126010027-3afadb987c81
-    github.com/rancher/types v0.0.0-20191209180830-cd2fb5538623
+    github.com/rancher/kontainer-driver-metadata v0.0.0-20191219221041-278e61bd811a
+    github.com/rancher/norman v0.0.0-20191126011629-6269ccdbeace
+    github.com/rancher/types v0.0.0-20191212174249-7f4ca1e45ee0
     github.com/sirupsen/logrus v1.4.2
     github.com/smartystreets/goconvey v0.0.0-20190731233626-505e41936337 // indirect
     github.com/stretchr/testify v1.4.0

go.sum

@@ -517,16 +517,17 @@ github.com/prometheus/tsdb v0.8.0/go.mod h1:fSI0j+IUQrDd7+ZtR9WKIGtoYAYAJUKcKhYL
 github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI=
 github.com/rancher/client-go v1.16.0-rancher.3 h1:bOA0zwtJi8wUeldqfrCGyDRuN6oW+zJ092i/OhWP2nU=
 github.com/rancher/client-go v1.16.0-rancher.3/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk=
-github.com/rancher/kontainer-driver-metadata v0.0.0-20191210185103-dc45b432da60 h1:bOTLX8Zx6cXqB8lbdZz+IpYm7IQ9pf0T/twMDUfWKwE=
-github.com/rancher/kontainer-driver-metadata v0.0.0-20191210185103-dc45b432da60/go.mod h1:+FCrVV/rB6Bq6w988Qxw2kaaj+1FZq3ySsD23DFnQrw=
+github.com/rancher/kontainer-driver-metadata v0.0.0-20191219221041-278e61bd811a h1:rUNhXhQHCGgMddgksIQtM8K0YFqFt+mvSEVxoVaQBR0=
+github.com/rancher/kontainer-driver-metadata v0.0.0-20191219221041-278e61bd811a/go.mod h1:VBeI/67gB2g/zn1R3Tu7VvCCKTiYCX0HfsFVvnm7v2I=
 github.com/rancher/norman v0.0.0-20191003174345-0ac7dd6ccb36 h1:N0ZUBJRq/ydy2ULiuqKhmiKShmEtpDOWXxKzVZxTzHk=
 github.com/rancher/norman v0.0.0-20191003174345-0ac7dd6ccb36/go.mod h1:kVWc1OyHK9decIY90IYExSHedI5a5qze7IfLiEOTmXQ=
 github.com/rancher/norman v0.0.0-20191126010027-3afadb987c81 h1:40IyRSjbSj/jNHpJFOHGOlUvOCIQKUMF6p8Tcc/w1vs=
 github.com/rancher/norman v0.0.0-20191126010027-3afadb987c81/go.mod h1:kVWc1OyHK9decIY90IYExSHedI5a5qze7IfLiEOTmXQ=
 github.com/rancher/pkg v0.0.0-20190514055449-b30ab9de040e h1:j6+HqCET/NLPBtew2m5apL7jWw/PStQ7iGwXjgAqdvo=
 github.com/rancher/pkg v0.0.0-20190514055449-b30ab9de040e/go.mod h1:XbYHTPaXuw8ZY9bylhYKQh/nJxDaTKk3YhAxPl4Qy/k=
+github.com/rancher/types v0.0.0-20191115181915-fa1ec441252a/go.mod h1:K5zlxVpe7bY2QgOs1YUcU8dVXtzKncxpGEcvxGMgr0k=
-github.com/rancher/types v0.0.0-20191209180830-cd2fb5538623 h1:FHdfRy/4ITz2Q1Wu4AkXkrT8p/VfN+PGzDGbOHbkMUQ=
-github.com/rancher/types v0.0.0-20191209180830-cd2fb5538623/go.mod h1:bNhE/LSlF1urIf8XDdHyTe43J2YsHvuUeqWmKmrs8wg=
+github.com/rancher/types v0.0.0-20191212174249-7f4ca1e45ee0 h1:L/lkhui+jMBxuCV5d8LqRS2iDf42R/60eHyDD4mPKlo=
+github.com/rancher/types v0.0.0-20191212174249-7f4ca1e45ee0/go.mod h1:yYtjxRexsviS9aPO0qp1gqnMSLRRoe0JW6Mqu1EbJZM=
 github.com/rancher/wrangler v0.1.5 h1:HiXOeP6Kci2DK+e04D1g6INT77xAYpAr54zmTTe0Spk=
 github.com/rancher/wrangler v0.1.5/go.mod h1:EYP7cqpg42YqElaCm+U9ieSrGQKAXxUH5xsr+XGpWyE=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=

vendor/github.com/rancher/kontainer-driver-metadata/rke/k8s_rke_system_images.go

@@ -1783,9 +1783,10 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages {
             CoreDNSAutoscaler:         m("gcr.io/google_containers/cluster-proportional-autoscaler:1.7.1"),
             WindowsPodInfraContainer:  m("rancher/kubelet-pause:v0.1.3"),
         },
-        "v1.17.0-beta.2-rancher1-1": {
+        //Experimental out of band
+        "v1.17.0-rancher1-1": {
             Etcd:                      m("quay.io/coreos/etcd:v3.4.3-rancher1"),
-            Kubernetes:                m("rancher/hyperkube:v1.17.0-beta.2-rancher1"),
+            Kubernetes:                m("rancher/hyperkube:v1.17.0-rancher1"),
             Alpine:                    m("rancher/rke-tools:v0.1.51"),
             NginxProxy:                m("rancher/rke-tools:v0.1.51"),
             CertDownloader:            m("rancher/rke-tools:v0.1.51"),
@@ -1809,8 +1810,40 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages {
             PodInfraContainer:         m("gcr.io/google_containers/pause:3.1"),
             Ingress:                   m("rancher/nginx-ingress-controller:nginx-0.25.1-rancher1"),
             IngressBackend:            m("k8s.gcr.io/defaultbackend:1.5-rancher1"),
-            MetricsServer:             m("gcr.io/google_containers/metrics-server:v0.3.4"),
-            CoreDNS:                   m("coredns/coredns:1.6.2"),
+            MetricsServer:             m("gcr.io/google_containers/metrics-server:v0.3.6"),
+            CoreDNS:                   m("coredns/coredns:1.6.5"),
             CoreDNSAutoscaler:         m("gcr.io/google_containers/cluster-proportional-autoscaler:1.7.1"),
             WindowsPodInfraContainer:  m("rancher/kubelet-pause:v0.1.3"),
         },
+        //Experimental in Rancher v2.3.4
+        "v1.17.0-rancher1-2": {
+            Etcd:                      m("quay.io/coreos/etcd:v3.4.3-rancher1"),
+            Kubernetes:                m("rancher/hyperkube:v1.17.0-rancher1"),
+            Alpine:                    m("rancher/rke-tools:v0.1.51"),
+            NginxProxy:                m("rancher/rke-tools:v0.1.51"),
+            CertDownloader:            m("rancher/rke-tools:v0.1.51"),
+            KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.51"),
+            KubeDNS:                   m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"),
+            DNSmasq:                   m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"),
+            KubeDNSSidecar:            m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"),
+            KubeDNSAutoscaler:         m("gcr.io/google_containers/cluster-proportional-autoscaler:1.7.1"),
+            Flannel:                   m("quay.io/coreos/flannel:v0.11.0-rancher1"),
+            FlannelCNI:                m("rancher/flannel-cni:v0.3.0-rancher5"),
+            CalicoNode:                m("quay.io/calico/node:v3.10.2"),
+            CalicoCNI:                 m("quay.io/calico/cni:v3.10.2"),
+            CalicoControllers:         m("quay.io/calico/kube-controllers:v3.10.2"),
+            CalicoFlexVol:             m("quay.io/calico/pod2daemon-flexvol:v3.10.2"),
+            CanalNode:                 m("quay.io/calico/node:v3.10.2"),
+            CanalCNI:                  m("quay.io/calico/cni:v3.10.2"),
+            CanalFlannel:              m("quay.io/coreos/flannel:v0.11.0"),
+            CanalFlexVol:              m("quay.io/calico/pod2daemon-flexvol:v3.10.2"),
+            WeaveNode:                 m("weaveworks/weave-kube:2.5.2"),
+            WeaveCNI:                  m("weaveworks/weave-npc:2.5.2"),
+            PodInfraContainer:         m("gcr.io/google_containers/pause:3.1"),
+            Ingress:                   m("rancher/nginx-ingress-controller:nginx-0.25.1-rancher1"),
+            IngressBackend:            m("k8s.gcr.io/defaultbackend:1.5-rancher1"),
+            MetricsServer:             m("gcr.io/google_containers/metrics-server:v0.3.6"),
+            CoreDNS:                   m("coredns/coredns:1.6.5"),
+            CoreDNSAutoscaler:         m("gcr.io/google_containers/cluster-proportional-autoscaler:1.7.1"),
+            WindowsPodInfraContainer:  m("rancher/kubelet-pause:v0.1.3"),
+        },

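The new "v1.17.0-rancher1-2" entry is keyed, like every entry in this map, by an exact RKE version string, so resolving an image set is a plain map lookup with no range matching. A minimal sketch of such a lookup, assuming a hypothetical helper in the same package as loadK8sRKESystemImages (not part of this commit; fmt and the v3 types package are assumed imported):

// imagesFor resolves the system-image set for an exact version key such as
// "v1.17.0-rancher1-2"; unknown versions surface as an error rather than
// falling back to a neighboring release.
func imagesFor(k8sVersion string) (v3.RKESystemImages, error) {
    images, ok := loadK8sRKESystemImages()[k8sVersion]
    if !ok {
        return v3.RKESystemImages{}, fmt.Errorf("no system images defined for version %q", k8sVersion)
    }
    return images, nil
}
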
vendor/github.com/rancher/kontainer-driver-metadata/rke/k8s_service_options.go

@@ -13,7 +13,15 @@ const (
 func loadK8sVersionServiceOptions() map[string]v3.KubernetesServicesOptions {
     return map[string]v3.KubernetesServicesOptions{
-        "v1.17.0-beta.2-rancher1-1": {
+        "v1.17.0-rancher1-2": {
+            Etcd:           getETCDOptions(),
+            KubeAPI:        getKubeAPIOptions116(),
+            Kubelet:        getKubeletOptions116(),
+            KubeController: getKubeControllerOptions(),
+            Kubeproxy:      getKubeProxyOptions(),
+            Scheduler:      getSchedulerOptions(),
+        },
+        "v1.17.0-rancher1-1": {
             Etcd:           getETCDOptions(),
             KubeAPI:        getKubeAPIOptions116(),
             Kubelet:        getKubeletOptions116(),

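Both v1.17 entries reuse the v1.16 getters (getKubeAPIOptions116, getKubeletOptions116), so v1.17 clusters inherit the v1.16 component flag sets unchanged; each getter returns a map[string]string of extra arguments for one component. If a v1.17-specific flag set were ever needed, a getter following the same pattern would be added — sketched here purely as an illustration (hypothetical, not in this commit):

// getKubeAPIOptions117 would copy the v1.16 kube-apiserver flags and layer
// v1.17-only changes on top, mirroring how the existing getters are built.
func getKubeAPIOptions117() map[string]string {
    opts := map[string]string{}
    for k, v := range getKubeAPIOptions116() {
        opts[k] = v
    }
    // v1.17-specific additions or overrides would go here.
    return opts
}
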
vendor/github.com/rancher/kontainer-driver-metadata/rke/templates/coredns.go

@@ -609,3 +609,311 @@ roleRef:
   name: system:coredns-autoscaler
   apiGroup: rbac.authorization.k8s.io
 {{- end }}`
+
+const CoreDNSTemplateV117 = `
+---
+{{- if eq .RBACConfig "rbac"}}
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: coredns
+  namespace: kube-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+  name: system:coredns
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - endpoints
+  - services
+  - pods
+  - namespaces
+  verbs:
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  annotations:
+    rbac.authorization.kubernetes.io/autoupdate: "true"
+  labels:
+    kubernetes.io/bootstrapping: rbac-defaults
+  name: system:coredns
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: system:coredns
+subjects:
+- kind: ServiceAccount
+  name: coredns
+  namespace: kube-system
+{{- end }}
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: coredns
+  namespace: kube-system
+data:
+  Corefile: |
+    .:53 {
+        errors
+        health {
+            lameduck 5s
+        }
+        ready
+        kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ "in-addr.arpa ip6.arpa" }}{{ end }} {
+            pods insecure
+            fallthrough in-addr.arpa ip6.arpa
+        }
+        prometheus :9153
+        {{- if .UpstreamNameservers }}
+        forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}}
+        {{- else }}
+        forward . "/etc/resolv.conf"
+        {{- end }}
+        cache 30
+        loop
+        reload
+        loadbalance
+    }
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: coredns
+  namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    kubernetes.io/name: "CoreDNS"
+spec:
+  replicas: 1
+  strategy:
+    type: RollingUpdate
+    rollingUpdate:
+      maxUnavailable: 1
+  selector:
+    matchLabels:
+      k8s-app: kube-dns
+  template:
+    metadata:
+      labels:
+        k8s-app: kube-dns
+      annotations:
+        seccomp.security.alpha.kubernetes.io/pod: 'docker/default'
+    spec:
+      priorityClassName: system-cluster-critical
+      {{- if eq .RBACConfig "rbac"}}
+      serviceAccountName: coredns
+      {{- end }}
+      tolerations:
+      - key: "CriticalAddonsOnly"
+        operator: "Exists"
+      - effect: NoExecute
+        operator: Exists
+      - effect: NoSchedule
+        operator: Exists
+      nodeSelector:
+        beta.kubernetes.io/os: linux
+      {{ range $k, $v := .NodeSelector }}
+        {{ $k }}: "{{ $v }}"
+      {{ end }}
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: node-role.kubernetes.io/worker
+                operator: Exists
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: k8s-app
+                operator: In
+                values: ["kube-dns"]
+            topologyKey: kubernetes.io/hostname
+      containers:
+      - name: coredns
+        image: {{.CoreDNSImage}}
+        imagePullPolicy: IfNotPresent
+        resources:
+          limits:
+            memory: 170Mi
+          requests:
+            cpu: 100m
+            memory: 70Mi
+        args: [ "-conf", "/etc/coredns/Corefile" ]
+        volumeMounts:
+        - name: config-volume
+          mountPath: /etc/coredns
+          readOnly: true
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+        - containerPort: 9153
+          name: metrics
+          protocol: TCP
+        livenessProbe:
+          httpGet:
+            path: /health
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+        readinessProbe:
+          httpGet:
+            path: /ready
+            port: 8181
+            scheme: HTTP
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            add:
+            - NET_BIND_SERVICE
+            drop:
+            - all
+          readOnlyRootFilesystem: true
+      dnsPolicy: Default
+      volumes:
+      - name: config-volume
+        configMap:
+          name: coredns
+          items:
+          - key: Corefile
+            path: Corefile
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: kube-dns
+  namespace: kube-system
+  annotations:
+    prometheus.io/port: "9153"
+    prometheus.io/scrape: "true"
+  labels:
+    k8s-app: kube-dns
+    kubernetes.io/cluster-service: "true"
+    kubernetes.io/name: "CoreDNS"
+spec:
+  selector:
+    k8s-app: kube-dns
+  clusterIP: {{.ClusterDNSServer}}
+  ports:
+  - name: dns
+    port: 53
+    protocol: UDP
+  - name: dns-tcp
+    port: 53
+    protocol: TCP
+  - name: metrics
+    port: 9153
+    protocol: TCP
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: coredns-autoscaler
+  namespace: kube-system
+  labels:
+    k8s-app: coredns-autoscaler
+spec:
+  selector:
+    matchLabels:
+      k8s-app: coredns-autoscaler
+  template:
+    metadata:
+      labels:
+        k8s-app: coredns-autoscaler
+    spec:
+      {{- if eq .RBACConfig "rbac"}}
+      serviceAccountName: coredns-autoscaler
+      {{- end }}
+      nodeSelector:
+        beta.kubernetes.io/os: linux
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: node-role.kubernetes.io/worker
+                operator: Exists
+      tolerations:
+      - effect: NoExecute
+        operator: Exists
+      - effect: NoSchedule
+        operator: Exists
+      containers:
+      - name: autoscaler
+        image: {{.CoreDNSAutoScalerImage}}
+        resources:
+          requests:
+            cpu: "20m"
+            memory: "10Mi"
+        command:
+        - /cluster-proportional-autoscaler
+        - --namespace=kube-system
+        - --configmap=coredns-autoscaler
+        - --target=Deployment/coredns
+        # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
+        # If using small nodes, "nodesPerReplica" should dominate.
+        - --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1,"preventSinglePointFailure":true}}
+        - --logtostderr=true
+        - --v=2
+{{- if eq .RBACConfig "rbac"}}
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: coredns-autoscaler
+  namespace: kube-system
+---
+kind: ClusterRole
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: system:coredns-autoscaler
+rules:
+  - apiGroups: [""]
+    resources: ["nodes"]
+    verbs: ["list", "watch"]
+  - apiGroups: [""]
+    resources: ["replicationcontrollers/scale"]
+    verbs: ["get", "update"]
+  - apiGroups: ["extensions","apps"]
+    resources: ["deployments/scale", "replicasets/scale"]
+    verbs: ["get", "update"]
+  - apiGroups: [""]
+    resources: ["configmaps"]
+    verbs: ["get", "create"]
+---
+kind: ClusterRoleBinding
+apiVersion: rbac.authorization.k8s.io/v1
+metadata:
+  name: system:coredns-autoscaler
+subjects:
+  - kind: ServiceAccount
+    name: coredns-autoscaler
+    namespace: kube-system
+roleRef:
+  kind: ClusterRole
+  name: system:coredns-autoscaler
+  apiGroup: rbac.authorization.k8s.io
+{{- end }}`

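CoreDNSTemplateV117 is an ordinary Go text/template: it consumes the keys RBACConfig, ClusterDomain, ReverseCIDRs, UpstreamNameservers, NodeSelector, CoreDNSImage, CoreDNSAutoScalerImage, and ClusterDNSServer. A minimal rendering sketch follows; the options struct is hypothetical (rke feeds these keys through its own template helpers), and the const is assumed visible in the compiling package:

package main

import (
    "os"
    "text/template"
)

// coreDNSOptions mirrors the placeholders referenced by CoreDNSTemplateV117;
// the struct is illustrative only, not a type from this repository.
type coreDNSOptions struct {
    RBACConfig             string
    ClusterDomain          string
    ReverseCIDRs           string
    UpstreamNameservers    []string
    NodeSelector           map[string]string
    CoreDNSImage           string
    CoreDNSAutoScalerImage string
    ClusterDNSServer       string
}

func main() {
    t := template.Must(template.New("coredns").Parse(CoreDNSTemplateV117))
    err := t.Execute(os.Stdout, coreDNSOptions{
        RBACConfig:             "rbac",
        ClusterDomain:          "cluster.local",
        CoreDNSImage:           "coredns/coredns:1.6.5",
        CoreDNSAutoScalerImage: "gcr.io/google_containers/cluster-proportional-autoscaler:1.7.1",
        ClusterDNSServer:       "10.43.0.10",
    })
    if err != nil {
        panic(err)
    }
}
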
vendor/github.com/rancher/kontainer-driver-metadata/rke/templates/templates.go

@@ -33,6 +33,7 @@ const (
     coreDnsv18  = "coredns-v1.8"
     coreDnsv116 = "coredns-v1.16"
+    coreDnsv117 = "coredns-v1.17"
 
     kubeDnsv18  = "kubedns-v1.8"
     kubeDnsv116 = "kubedns-v1.16"
@@ -66,7 +67,8 @@ func LoadK8sVersionedTemplates() map[string]map[string]string {
             ">=1.8.0-rancher0 <1.15.0-rancher0": flannelv18,
         },
         CoreDNS: {
-            ">=1.16.0-alpha":                  coreDnsv116,
+            ">=1.17.0-alpha":                  coreDnsv117,
+            ">=1.16.0-alpha <1.17.0-alpha":    coreDnsv116,
             ">=1.8.0-rancher0 <1.16.0-alpha":  coreDnsv18,
         },
         KubeDNS: {
@@ -110,6 +112,7 @@ func getTemplates() map[string]string {
         coreDnsv18:  CoreDNSTemplate,
         coreDnsv116: CoreDNSTemplateV116,
+        coreDnsv117: CoreDNSTemplateV117,
 
         kubeDnsv18:  KubeDNSTemplate,
         kubeDnsv116: KubeDNSTemplateV116,

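The CoreDNS change above splits the old single ">=1.16.0-alpha" bucket so the range keys become mutually exclusive: a v1.17 cluster now resolves to coreDnsv117 instead of silently matching the v1.16 template. A sketch of the bucket semantics using github.com/blang/semver, whose range syntax matches these keys (the helper is hypothetical, and this library choice is an assumption rather than rke's confirmed resolver):

package main

import (
    "fmt"
    "strings"

    "github.com/blang/semver"
)

// coreDNSTemplateNameFor walks the three CoreDNS range buckets in order and
// returns the template name for the first range the version satisfies.
func coreDNSTemplateNameFor(k8sVersion string) (string, error) {
    v, err := semver.Parse(strings.TrimPrefix(k8sVersion, "v"))
    if err != nil {
        return "", err
    }
    buckets := []struct {
        rng, name string
    }{
        {">=1.17.0-alpha", "coredns-v1.17"},
        {">=1.16.0-alpha <1.17.0-alpha", "coredns-v1.16"},
        {">=1.8.0-rancher0 <1.16.0-alpha", "coredns-v1.8"},
    }
    for _, b := range buckets {
        if semver.MustParseRange(b.rng)(v) {
            return b.name, nil
        }
    }
    return "", fmt.Errorf("no CoreDNS template for %s", k8sVersion)
}

func main() {
    for _, ver := range []string{"v1.17.0-rancher1-2", "v1.16.3-rancher1-1", "v1.15.5-rancher1-2"} {
        name, _ := coreDNSTemplateNameFor(ver)
        fmt.Println(ver, "->", name) // v1.17.0-rancher1-2 -> coredns-v1.17, and so on
    }
}
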
vendor/modules.txt

@@ -129,7 +129,7 @@ github.com/prometheus/procfs
 github.com/prometheus/procfs/internal/util
 github.com/prometheus/procfs/nfs
 github.com/prometheus/procfs/xfs
-# github.com/rancher/kontainer-driver-metadata v0.0.0-20191210185103-dc45b432da60
+# github.com/rancher/kontainer-driver-metadata v0.0.0-20191219221041-278e61bd811a
 github.com/rancher/kontainer-driver-metadata/rke
 github.com/rancher/kontainer-driver-metadata/rke/templates
 # github.com/rancher/norman v0.0.0-20191126010027-3afadb987c81