From e185b6ac81e3169252b5a311efb6f5cc5379fea0 Mon Sep 17 00:00:00 2001 From: Alena Prokharchyk Date: Wed, 18 Sep 2019 13:19:50 -0700 Subject: [PATCH] kontainer-driver-metadata update --- go.mod | 2 +- go.sum | 4 +- .../rke/k8s_rke_system_images.go | 90 +++--- .../rke/k8s_version_info.go | 4 +- .../rke/templates/coredns.go | 298 ++++++++++++++++++ .../rke/templates/kubedns.go | 4 +- .../rke/templates/templates.go | 9 +- vendor/modules.txt | 2 +- 8 files changed, 357 insertions(+), 56 deletions(-) diff --git a/go.mod b/go.mod index 696e21a3..6d369d09 100644 --- a/go.mod +++ b/go.mod @@ -32,7 +32,7 @@ require ( github.com/opencontainers/go-digest v1.0.0-rc1 // indirect github.com/opencontainers/image-spec v0.0.0-20170929214853-7c889fafd04a // indirect github.com/pkg/errors v0.8.1 - github.com/rancher/kontainer-driver-metadata v0.0.0-20190918024130-319e04039e40 + github.com/rancher/kontainer-driver-metadata v0.0.0-20190918201757-1abc154a1634 github.com/rancher/norman v0.0.0-20190821234528-20a936b685b0 github.com/rancher/types v0.0.0-20190827214052-704648244586 github.com/sirupsen/logrus v1.4.2 diff --git a/go.sum b/go.sum index 661d5928..26e8695a 100644 --- a/go.sum +++ b/go.sum @@ -181,8 +181,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/rancher/kontainer-driver-metadata v0.0.0-20190918024130-319e04039e40 h1:oHYvDk490ZZKx0jdKH6zNJHfzrtcLoUduFbLcbZ3FBs= -github.com/rancher/kontainer-driver-metadata v0.0.0-20190918024130-319e04039e40/go.mod h1:dHvhyuoiwrjqQCFD586g0cZ9NJJXEKeAtQi8RX96U8E= +github.com/rancher/kontainer-driver-metadata v0.0.0-20190918201757-1abc154a1634 h1:gigLlhZiyydhrO8SwB8u4detgrXKGkZMDLas5C5D20Y= +github.com/rancher/kontainer-driver-metadata v0.0.0-20190918201757-1abc154a1634/go.mod h1:dHvhyuoiwrjqQCFD586g0cZ9NJJXEKeAtQi8RX96U8E= github.com/rancher/norman v0.0.0-20190821234528-20a936b685b0 h1:bNG4b0CTTBE8yEamIz8RYcfz+7kfK9N8YTvyiykRCS8= github.com/rancher/norman v0.0.0-20190821234528-20a936b685b0/go.mod h1:KwP6RD4rVMdK8XK0wqZaptrhTn/TO4kXU3doh4iatQU= github.com/rancher/types v0.0.0-20190822170951-b99efa820bc3 h1:4mz/J0iEtW/VDtjN3zI9B4g49MKeoLHkHZKJGqRN7xg= diff --git a/vendor/github.com/rancher/kontainer-driver-metadata/rke/k8s_rke_system_images.go b/vendor/github.com/rancher/kontainer-driver-metadata/rke/k8s_rke_system_images.go index 84223122..9cda7476 100644 --- a/vendor/github.com/rancher/kontainer-driver-metadata/rke/k8s_rke_system_images.go +++ b/vendor/github.com/rancher/kontainer-driver-metadata/rke/k8s_rke_system_images.go @@ -1034,14 +1034,14 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages { CoreDNS: m("coredns/coredns:1.2.6"), CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.0.0"), }, - // Enabled in Rancher v2.3 - "v1.13.10-rancher1-3": { + // Enabled in Rancher v2.3.0 + "v1.13.11-rancher1-1": { Etcd: m("quay.io/coreos/etcd:v3.2.24-rancher1"), - Kubernetes: m("rancher/hyperkube:v1.13.10-rancher1"), - Alpine: m("rancher/rke-tools:v0.1.44"), - NginxProxy: m("rancher/rke-tools:v0.1.44"), - CertDownloader: m("rancher/rke-tools:v0.1.44"), - KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.44"), + Kubernetes: m("rancher/hyperkube:v1.13.11-rancher1"), + Alpine: m("rancher/rke-tools:v0.1.50"), + NginxProxy: 
m("rancher/rke-tools:v0.1.50"), + CertDownloader: m("rancher/rke-tools:v0.1.50"), + KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.50"), KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"), DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"), KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"), @@ -1208,10 +1208,10 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages { CoreDNS: m("coredns/coredns:1.3.1"), CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"), }, - // Enabled in Rancher v2.3 - "v1.14.6-rancher3-1": { + // Enabled in Rancher v2.3.0 + "v1.14.7-rancher1-1": { Etcd: m("quay.io/coreos/etcd:v3.3.10-rancher1"), - Kubernetes: m("rancher/hyperkube:v1.14.6-rancher3"), + Kubernetes: m("rancher/hyperkube:v1.14.7-rancher1"), Alpine: m("rancher/rke-tools:v0.1.50"), NginxProxy: m("rancher/rke-tools:v0.1.50"), CertDownloader: m("rancher/rke-tools:v0.1.50"), @@ -1327,10 +1327,10 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages { CoreDNS: m("coredns/coredns:1.3.1"), CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"), }, - // Enabled in Rancher v2.3 - "v1.15.3-rancher3-1": { + // Enabled in Rancher v2.3.0 + "v1.15.4-rancher1-1": { Etcd: m("quay.io/coreos/etcd:v3.3.10-rancher1"), - Kubernetes: m("rancher/hyperkube:v1.15.3-rancher3"), + Kubernetes: m("rancher/hyperkube:v1.15.4-rancher1"), Alpine: m("rancher/rke-tools:v0.1.50"), NginxProxy: m("rancher/rke-tools:v0.1.50"), CertDownloader: m("rancher/rke-tools:v0.1.50"), @@ -1358,6 +1358,38 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages { CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"), WindowsPodInfraContainer: m("rancher/kubelet-pause:v0.1.3"), }, + // Experimental in Rancher v2.3.0 + "v1.16.0-rancher1-1": { + Etcd: m("quay.io/coreos/etcd:v3.3.15-rancher1"), + Kubernetes: m("rancher/hyperkube:v1.16.0-rancher1"), + Alpine: m("rancher/rke-tools:v0.1.49"), + NginxProxy: m("rancher/rke-tools:v0.1.49"), + CertDownloader: m("rancher/rke-tools:v0.1.49"), + KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.49"), + KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"), + DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"), + KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"), + KubeDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.7.1"), + Flannel: m("quay.io/coreos/flannel:v0.11.0-rancher1"), + FlannelCNI: m("rancher/flannel-cni:v0.3.0-rancher5"), + CalicoNode: m("quay.io/calico/node:v3.8.1"), + CalicoCNI: m("quay.io/calico/cni:v3.8.1"), + CalicoControllers: m("quay.io/calico/kube-controllers:v3.8.1"), + CalicoFlexVol: m("quay.io/calico/pod2daemon-flexvol:v3.8.1"), + CanalNode: m("quay.io/calico/node:v3.8.1"), + CanalCNI: m("quay.io/calico/cni:v3.8.1"), + CanalFlannel: m("quay.io/coreos/flannel:v0.11.0"), + CanalFlexVol: m("quay.io/calico/pod2daemon-flexvol:v3.8.1"), + WeaveNode: m("weaveworks/weave-kube:2.5.2"), + WeaveCNI: m("weaveworks/weave-npc:2.5.2"), + PodInfraContainer: m("gcr.io/google_containers/pause:3.1"), + Ingress: m("rancher/nginx-ingress-controller:nginx-0.25.1-rancher1"), + IngressBackend: m("k8s.gcr.io/defaultbackend:1.5-rancher1"), + MetricsServer: m("gcr.io/google_containers/metrics-server:v0.3.4"), + CoreDNS: m("coredns/coredns:1.6.2"), + CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.7.1"), + WindowsPodInfraContainer: 
m("rancher/kubelet-pause:v0.1.3"), + }, // k8s version from 2.1.x release with old rke-tools to allow upgrade from 2.1.x clusters // without all clusters being restarted "v1.11.9-rancher1-3": { @@ -1638,37 +1670,5 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages { IngressBackend: m("k8s.gcr.io/defaultbackend:1.4"), MetricsServer: m("gcr.io/google_containers/metrics-server-amd64:v0.3.1"), }, - // Enabled in Rancher v2.3 - "v1.16.0-beta.1-rancher2-1": { - Etcd: m("quay.io/coreos/etcd:v3.3.10-rancher1"), - Kubernetes: m("rancher/hyperkube:v1.16.0-beta.1-rancher2"), - Alpine: m("rancher/rke-tools:v0.1.50"), - NginxProxy: m("rancher/rke-tools:v0.1.50"), - CertDownloader: m("rancher/rke-tools:v0.1.50"), - KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.50"), - KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"), - DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"), - KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"), - KubeDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"), - Flannel: m("quay.io/coreos/flannel:v0.11.0-rancher1"), - FlannelCNI: m("rancher/flannel-cni:v0.3.0-rancher5"), - CalicoNode: m("quay.io/calico/node:v3.8.1"), - CalicoCNI: m("quay.io/calico/cni:v3.8.1"), - CalicoControllers: m("quay.io/calico/kube-controllers:v3.8.1"), - CalicoFlexVol: m("quay.io/calico/pod2daemon-flexvol:v3.8.1"), - CanalNode: m("quay.io/calico/node:v3.8.1"), - CanalCNI: m("quay.io/calico/cni:v3.8.1"), - CanalFlannel: m("quay.io/coreos/flannel:v0.11.0"), - CanalFlexVol: m("quay.io/calico/pod2daemon-flexvol:v3.8.1"), - WeaveNode: m("weaveworks/weave-kube:2.5.2"), - WeaveCNI: m("weaveworks/weave-npc:2.5.2"), - PodInfraContainer: m("gcr.io/google_containers/pause:3.1"), - Ingress: m("rancher/nginx-ingress-controller:nginx-0.25.1-rancher1"), - IngressBackend: m("k8s.gcr.io/defaultbackend:1.5-rancher1"), - MetricsServer: m("gcr.io/google_containers/metrics-server:v0.3.3"), - CoreDNS: m("coredns/coredns:1.3.1"), - CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"), - WindowsPodInfraContainer: m("rancher/kubelet-pause:v0.1.3"), - }, } } diff --git a/vendor/github.com/rancher/kontainer-driver-metadata/rke/k8s_version_info.go b/vendor/github.com/rancher/kontainer-driver-metadata/rke/k8s_version_info.go index db86087a..c9886ece 100644 --- a/vendor/github.com/rancher/kontainer-driver-metadata/rke/k8s_version_info.go +++ b/vendor/github.com/rancher/kontainer-driver-metadata/rke/k8s_version_info.go @@ -16,9 +16,9 @@ func loadRancherDefaultK8sVersions() map[string]string { func loadRKEDefaultK8sVersions() map[string]string { return map[string]string{ - "0.3": "v1.15.3-rancher3-1", + "0.3": "v1.15.4-rancher1-1", // rke will use default if its version is absent - "default": "v1.15.3-rancher3-1", + "default": "v1.15.4-rancher1-1", } } diff --git a/vendor/github.com/rancher/kontainer-driver-metadata/rke/templates/coredns.go b/vendor/github.com/rancher/kontainer-driver-metadata/rke/templates/coredns.go index 8a9a3d9e..130ee5e4 100644 --- a/vendor/github.com/rancher/kontainer-driver-metadata/rke/templates/coredns.go +++ b/vendor/github.com/rancher/kontainer-driver-metadata/rke/templates/coredns.go @@ -311,3 +311,301 @@ roleRef: name: system:coredns-autoscaler apiGroup: rbac.authorization.k8s.io {{- end }}` + +const CoreDNSTemplateV116 = ` +--- +{{- if eq .RBACConfig "rbac"}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns + namespace: kube-system +--- +apiVersion: 
rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +rules: +- apiGroups: + - "" + resources: + - endpoints + - services + - pods + - namespaces + verbs: + - list + - watch +- apiGroups: + - "" + resources: + - nodes + verbs: + - get +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + labels: + kubernetes.io/bootstrapping: rbac-defaults + name: system:coredns +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:coredns +subjects: +- kind: ServiceAccount + name: coredns + namespace: kube-system +{{- end }} +--- +apiVersion: v1 +kind: ConfigMap +metadata: + name: coredns + namespace: kube-system +data: + Corefile: | + .:53 { + errors + health + ready + kubernetes {{.ClusterDomain}} {{ if .ReverseCIDRs }}{{ .ReverseCIDRs }}{{ else }}{{ "in-addr.arpa ip6.arpa" }}{{ end }} { + pods insecure + fallthrough in-addr.arpa ip6.arpa + } + prometheus :9153 + {{- if .UpstreamNameservers }} + forward . {{range $i, $v := .UpstreamNameservers}}{{if $i}} {{end}}{{.}}{{end}} + {{- else }} + forward . "/etc/resolv.conf" + {{- end }} + cache 30 + loop + reload + loadbalance + } +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coredns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/name: "CoreDNS" +spec: + replicas: 1 + strategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + selector: + matchLabels: + k8s-app: kube-dns + template: + metadata: + labels: + k8s-app: kube-dns + annotations: + seccomp.security.alpha.kubernetes.io/pod: 'docker/default' + spec: + priorityClassName: system-cluster-critical +{{- if eq .RBACConfig "rbac"}} + serviceAccountName: coredns +{{- end }} + tolerations: + - key: "CriticalAddonsOnly" + operator: "Exists" + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + nodeSelector: + beta.kubernetes.io/os: linux + {{ range $k, $v := .NodeSelector }} + {{ $k }}: "{{ $v }}" + {{ end }} + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + containers: + - name: coredns + image: {{.CoreDNSImage}} + imagePullPolicy: IfNotPresent + resources: + limits: + memory: 170Mi + requests: + cpu: 100m + memory: 70Mi + args: [ "-conf", "/etc/coredns/Corefile" ] + volumeMounts: + - name: config-volume + mountPath: /etc/coredns + readOnly: true + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + - containerPort: 9153 + name: metrics + protocol: TCP + livenessProbe: + httpGet: + path: /health + port: 8080 + scheme: HTTP + initialDelaySeconds: 60 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + path: /ready + port: 8181 + scheme: HTTP + securityContext: + allowPrivilegeEscalation: false + capabilities: + add: + - NET_BIND_SERVICE + drop: + - all + readOnlyRootFilesystem: true + dnsPolicy: Default + volumes: + - name: config-volume + configMap: + name: coredns + items: + - key: Corefile + path: Corefile +--- +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + annotations: + prometheus.io/port: "9153" + prometheus.io/scrape: "true" + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "CoreDNS" +spec: + selector: + 
k8s-app: kube-dns + clusterIP: {{.ClusterDNSServer}} + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP + - name: metrics + port: 9153 + protocol: TCP +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: coredns-autoscaler + namespace: kube-system + labels: + k8s-app: coredns-autoscaler +spec: + selector: + matchLabels: + k8s-app: coredns-autoscaler + template: + metadata: + labels: + k8s-app: coredns-autoscaler + spec: +{{- if eq .RBACConfig "rbac"}} + serviceAccountName: coredns-autoscaler +{{- end }} + nodeSelector: + beta.kubernetes.io/os: linux + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/worker + operator: Exists + tolerations: + - effect: NoExecute + operator: Exists + - effect: NoSchedule + operator: Exists + containers: + - name: autoscaler + image: {{.CoreDNSAutoScalerImage}} + resources: + requests: + cpu: "20m" + memory: "10Mi" + command: + - /cluster-proportional-autoscaler + - --namespace=kube-system + - --configmap=coredns-autoscaler + - --target=Deployment/coredns + # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. + # If using small nodes, "nodesPerReplica" should dominate. + - --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1,"preventSinglePointFailure":true}} + - --logtostderr=true + - --v=2 +{{- if eq .RBACConfig "rbac"}} +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: coredns-autoscaler + namespace: kube-system +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:coredns-autoscaler +rules: + - apiGroups: [""] + resources: ["nodes"] + verbs: ["list", "watch"] + - apiGroups: [""] + resources: ["replicationcontrollers/scale"] + verbs: ["get", "update"] + - apiGroups: ["extensions","apps"] + resources: ["deployments/scale", "replicasets/scale"] + verbs: ["get", "update"] + - apiGroups: [""] + resources: ["configmaps"] + verbs: ["get", "create"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:coredns-autoscaler +subjects: + - kind: ServiceAccount + name: coredns-autoscaler + namespace: kube-system +roleRef: + kind: ClusterRole + name: system:coredns-autoscaler + apiGroup: rbac.authorization.k8s.io +{{- end }}` diff --git a/vendor/github.com/rancher/kontainer-driver-metadata/rke/templates/kubedns.go b/vendor/github.com/rancher/kontainer-driver-metadata/rke/templates/kubedns.go index 289dd2f1..d7f59684 100644 --- a/vendor/github.com/rancher/kontainer-driver-metadata/rke/templates/kubedns.go +++ b/vendor/github.com/rancher/kontainer-driver-metadata/rke/templates/kubedns.go @@ -377,7 +377,7 @@ spec: - --target=Deployment/kube-dns # When cluster is using large nodes(with more cores), "coresPerReplica" should dominate. # If using small nodes, "nodesPerReplica" should dominate. 
- - --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}} + - --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1,"preventSinglePointFailure":true}} - --logtostderr=true - --v=2 --- @@ -402,7 +402,7 @@ rules: - apiGroups: [""] resources: ["replicationcontrollers/scale"] verbs: ["get", "update"] - - apiGroups: ["extensions"] + - apiGroups: ["extensions","apps"] resources: ["deployments/scale", "replicasets/scale"] verbs: ["get", "update"] - apiGroups: [""] diff --git a/vendor/github.com/rancher/kontainer-driver-metadata/rke/templates/templates.go b/vendor/github.com/rancher/kontainer-driver-metadata/rke/templates/templates.go index 57402eee..d0c9758c 100644 --- a/vendor/github.com/rancher/kontainer-driver-metadata/rke/templates/templates.go +++ b/vendor/github.com/rancher/kontainer-driver-metadata/rke/templates/templates.go @@ -31,7 +31,8 @@ const ( flannelv115 = "flannel-v1.15" flannelv116 = "flannel-v1.16" - coreDnsv18 = "coredns-v1.8" + coreDnsv18 = "coredns-v1.8" + coreDnsv116 = "coredns-v1.16" kubeDnsv18 = "kubedns-v1.8" kubeDnsv116 = "kubedns-v1.16" @@ -65,7 +66,8 @@ func LoadK8sVersionedTemplates() map[string]map[string]string { ">=1.8.0-rancher0 <1.15.0-rancher0": flannelv18, }, CoreDNS: { - ">=1.8.0-rancher0 <1.16.0": coreDnsv18, + ">=1.16.0-alpha": coreDnsv116, + ">=1.8.0-rancher0 <1.16.0-alpha": coreDnsv18, }, KubeDNS: { ">=1.16.0-alpha": kubeDnsv116, @@ -106,7 +108,8 @@ func getTemplates() map[string]string { canalv115: CanalTemplateV115, canalv116: CanalTemplateV116, - coreDnsv18: CoreDNSTemplate, + coreDnsv18: CoreDNSTemplate, + coreDnsv116: CoreDNSTemplateV116, kubeDnsv18: KubeDNSTemplate, kubeDnsv116: KubeDNSTemplateV116, diff --git a/vendor/modules.txt b/vendor/modules.txt index 04b4f3a6..62c5edaf 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -105,7 +105,7 @@ github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg # github.com/prometheus/procfs v0.0.3 github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs -# github.com/rancher/kontainer-driver-metadata v0.0.0-20190918024130-319e04039e40 +# github.com/rancher/kontainer-driver-metadata v0.0.0-20190918201757-1abc154a1634 github.com/rancher/kontainer-driver-metadata/rke/templates github.com/rancher/kontainer-driver-metadata/rke # github.com/rancher/norman v0.0.0-20190821234528-20a936b685b0
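Note on the `--default-params` hunks above: both the coredns-autoscaler and kube-dns-autoscaler run cluster-proportional-autoscaler in `linear` mode, which (per that project's documentation) sizes the Deployment as replicas = max(ceil(cores / coresPerReplica), ceil(nodes / nodesPerReplica)), clamped by `min`; the newly added `preventSinglePointFailure` keeps at least two replicas once the cluster has more than one node. A minimal Go sketch of that rule, with hypothetical inputs, for illustration only:

package main

import (
	"fmt"
	"math"
)

// linearReplicas approximates cluster-proportional-autoscaler's "linear" mode:
// replicas = max(ceil(cores/coresPerReplica), ceil(nodes/nodesPerReplica)),
// raised to 2 when preventSinglePointFailure is set and the cluster has more
// than one node, and never allowed below min.
func linearReplicas(cores, nodes int, coresPerReplica, nodesPerReplica float64, min int, preventSinglePointFailure bool) int {
	replicas := int(math.Max(
		math.Ceil(float64(cores)/coresPerReplica),
		math.Ceil(float64(nodes)/nodesPerReplica),
	))
	if preventSinglePointFailure && nodes > 1 && replicas < 2 {
		replicas = 2
	}
	if replicas < min {
		replicas = min
	}
	return replicas
}

func main() {
	// Hypothetical 10-node, 40-core cluster with the params from the templates above.
	fmt.Println(linearReplicas(40, 10, 128, 4, 1, true)) // prints 3
}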
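Note on the templates.go hunk above: LoadK8sVersionedTemplates maps semver range expressions to template keys, so clusters at v1.16.0-alpha and newer render the new CoreDNSTemplateV116 added in this patch while older versions keep the existing coredns-v1.8 template. A rough sketch of how such a range map could be resolved, assuming blang/semver-style range expressions; the helper name and library choice are illustrative, not the code rke actually uses:

package main

import (
	"fmt"

	"github.com/blang/semver"
)

// pickTemplate returns the template key whose range expression matches the
// given Kubernetes version string. Illustrative only; real lookup code may
// differ, and error handling here is minimal.
func pickTemplate(k8sVersion string, ranges map[string]string) (string, error) {
	v, err := semver.ParseTolerant(k8sVersion) // tolerates the leading "v"
	if err != nil {
		return "", err
	}
	for expr, tmpl := range ranges {
		r, err := semver.ParseRange(expr)
		if err != nil {
			return "", err
		}
		if r(v) {
			return tmpl, nil
		}
	}
	return "", fmt.Errorf("no template matches %s", k8sVersion)
}

func main() {
	coreDNSRanges := map[string]string{
		">=1.16.0-alpha":                 "coredns-v1.16",
		">=1.8.0-rancher0 <1.16.0-alpha": "coredns-v1.8",
	}
	fmt.Println(pickTemplate("v1.16.0-rancher1-1", coreDNSRanges)) // coredns-v1.16
}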