diff --git a/vendor.conf b/vendor.conf
index b74847f1..11bbc728 100644
--- a/vendor.conf
+++ b/vendor.conf
@@ -28,4 +28,4 @@ github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0e
 github.com/mattn/go-colorable efa589957cd060542a26d2dd7832fd6a6c6c3ade
 github.com/mattn/go-isatty 6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c
 github.com/rancher/norman 0557aa4ff31a3a0f007dcb1b684894f23cda390c
-github.com/rancher/types e3a7b1174de6d7d1d202bbc4e919d9cc99d6b9a6
+github.com/rancher/types 1f8a7696aafde37fd40be4b30ca59c272a5970e9
diff --git a/vendor/github.com/rancher/types/apis/management.cattle.io/v3/k8s_defaults.go b/vendor/github.com/rancher/types/apis/management.cattle.io/v3/k8s_defaults.go
index 1d91c113..768f64b5 100644
--- a/vendor/github.com/rancher/types/apis/management.cattle.io/v3/k8s_defaults.go
+++ b/vendor/github.com/rancher/types/apis/management.cattle.io/v3/k8s_defaults.go
@@ -30,6 +30,7 @@ var (
         // different k8s version tag
         "v1.12.7-rancher1-2",
         "v1.13.5-rancher1-2",
+        "v1.14.0-rancher1-1",
     }
 
     // K8sVersionToRKESystemImages is dynamically populated on init() with the latest versions
@@ -37,10 +38,20 @@ var (
 
     // K8sVersionServiceOptions - service options per k8s version
     K8sVersionServiceOptions = map[string]KubernetesServicesOptions{
+        "v1.14": {
+            KubeAPI: map[string]string{
+                "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
+                "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
+            },
+            Kubelet: map[string]string{
+                "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
+            },
+        },
         "v1.13": {
             KubeAPI: map[string]string{
                 "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
                 "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
+                "repair-malformed-updates": "false",
             },
             Kubelet: map[string]string{
                 "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
@@ -50,6 +61,7 @@ var (
             KubeAPI: map[string]string{
                 "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
                 "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
+                "repair-malformed-updates": "false",
             },
             Kubelet: map[string]string{
                 "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
@@ -59,6 +71,7 @@ var (
             KubeAPI: map[string]string{
                 "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
                 "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
+                "repair-malformed-updates": "false",
             },
             Kubelet: map[string]string{
                 "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
@@ -70,6 +83,7 @@ var (
                 "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
                 "endpoint-reconciler-type": "lease",
                 "enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota",
+                "repair-malformed-updates": "false",
             },
             Kubelet: map[string]string{
                 "tls-cipher-suites": "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
@@ -80,6 +94,7 @@ var (
             KubeAPI: map[string]string{
                 "endpoint-reconciler-type": "lease",
                 "admission-control": "ServiceAccount,NamespaceLifecycle,LimitRanger,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds",
+                "repair-malformed-updates": "false",
             },
             Kubelet: map[string]string{
                 "cadvisor-port": "0",
@@ -706,6 +721,32 @@ var (
             IngressBackend: m("k8s.gcr.io/defaultbackend:1.4"),
             MetricsServer: m("gcr.io/google_containers/metrics-server-amd64:v0.2.1"),
         },
+        "v1.11.9-rancher1-2": {
+            Etcd: m("quay.io/coreos/etcd:v3.2.18"),
+            Kubernetes: m("rancher/hyperkube:v1.11.9-rancher1"),
+            Alpine: m("rancher/rke-tools:v0.1.28"),
+            NginxProxy: m("rancher/rke-tools:v0.1.28"),
+            CertDownloader: m("rancher/rke-tools:v0.1.28"),
+            KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.28"),
+            KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.10"),
+            DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.10"),
+            KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.10"),
+            KubeDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0"),
+            Flannel: m("quay.io/coreos/flannel:v0.10.0"),
+            FlannelCNI: m("quay.io/coreos/flannel-cni:v0.3.0"),
+            CalicoNode: m("quay.io/calico/node:v3.1.3"),
+            CalicoCNI: m("quay.io/calico/cni:v3.1.3"),
+            CalicoCtl: m("quay.io/calico/ctl:v2.0.0"),
+            CanalNode: m("quay.io/calico/node:v3.1.3"),
+            CanalCNI: m("quay.io/calico/cni:v3.1.3"),
+            CanalFlannel: m("quay.io/coreos/flannel:v0.10.0"),
+            WeaveNode: m("weaveworks/weave-kube:2.1.2"),
+            WeaveCNI: m("weaveworks/weave-npc:2.1.2"),
+            PodInfraContainer: m("gcr.io/google_containers/pause-amd64:3.1"),
+            Ingress: m("rancher/nginx-ingress-controller:0.16.2-rancher1"),
+            IngressBackend: m("k8s.gcr.io/defaultbackend:1.4"),
+            MetricsServer: m("gcr.io/google_containers/metrics-server-amd64:v0.2.1"),
+        },
         "v1.12.0-rancher1-1": {
             Etcd: m("quay.io/coreos/etcd:v3.2.24"),
             Kubernetes: m("rancher/hyperkube:v1.12.0-rancher1"),
@@ -902,6 +943,34 @@ var (
             CoreDNS: m("coredns/coredns:1.2.2"),
             CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.0.0"),
         },
+        "v1.12.7-rancher1-3": {
+            Etcd: m("quay.io/coreos/etcd:v3.2.24-rancher1"),
+            Kubernetes: m("rancher/hyperkube:v1.12.7-rancher1"),
+            Alpine: m("rancher/rke-tools:v0.1.28"),
+            NginxProxy: m("rancher/rke-tools:v0.1.28"),
+            CertDownloader: m("rancher/rke-tools:v0.1.28"),
+            KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.28"),
+            KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.14.13"),
+            DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.14.13"),
+            KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.14.13"),
+            KubeDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.0.0"),
+            Flannel: m("quay.io/coreos/flannel:v0.10.0-rancher1"),
+            FlannelCNI: m("rancher/flannel-cni:v0.3.0-rancher1"),
+            CalicoNode: m("quay.io/calico/node:v3.1.3"),
+            CalicoCNI: m("quay.io/calico/cni:v3.1.3"),
+            CalicoCtl: m("quay.io/calico/ctl:v2.0.0"),
+            CanalNode: m("quay.io/calico/node:v3.1.3"),
+            CanalCNI: m("quay.io/calico/cni:v3.1.3"),
+            CanalFlannel: m("quay.io/coreos/flannel:v0.10.0"),
+            WeaveNode: m("weaveworks/weave-kube:2.5.0"),
+            WeaveCNI: m("weaveworks/weave-npc:2.5.0"),
+            PodInfraContainer: m("gcr.io/google_containers/pause:3.1"),
+            Ingress: m("rancher/nginx-ingress-controller:0.21.0-rancher3"),
+            IngressBackend: m("k8s.gcr.io/defaultbackend:1.4-rancher1"),
+            MetricsServer: m("gcr.io/google_containers/metrics-server:v0.3.1"),
+            CoreDNS: m("coredns/coredns:1.2.2"),
+            CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.0.0"),
+        },
         "v1.13.1-rancher1-2": {
             Etcd: m("quay.io/coreos/etcd:v3.2.24"),
             Kubernetes: m("rancher/hyperkube:v1.13.1-rancher1"),
@@ -986,6 +1055,62 @@ var (
             CoreDNS: m("coredns/coredns:1.2.6"),
             CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.0.0"),
         },
+        "v1.13.5-rancher1-3": {
+            Etcd: m("quay.io/coreos/etcd:v3.2.24-rancher1"),
+            Kubernetes: m("rancher/hyperkube:v1.13.5-rancher1"),
+            Alpine: m("rancher/rke-tools:v0.1.28"),
+            NginxProxy: m("rancher/rke-tools:v0.1.28"),
+            CertDownloader: m("rancher/rke-tools:v0.1.28"),
+            KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.28"),
+            KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"),
+            DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"),
+            KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"),
+            KubeDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.0.0"),
+            Flannel: m("quay.io/coreos/flannel:v0.10.0-rancher1"),
+            FlannelCNI: m("rancher/flannel-cni:v0.3.0-rancher1"),
+            CalicoNode: m("quay.io/calico/node:v3.4.0"),
+            CalicoCNI: m("quay.io/calico/cni:v3.4.0"),
+            CalicoCtl: m("quay.io/calico/ctl:v2.0.0"),
+            CanalNode: m("quay.io/calico/node:v3.4.0"),
+            CanalCNI: m("quay.io/calico/cni:v3.4.0"),
+            CanalFlannel: m("quay.io/coreos/flannel:v0.10.0"),
+            WeaveNode: m("weaveworks/weave-kube:2.5.0"),
+            WeaveCNI: m("weaveworks/weave-npc:2.5.0"),
+            PodInfraContainer: m("gcr.io/google_containers/pause:3.1"),
+            Ingress: m("rancher/nginx-ingress-controller:0.21.0-rancher3"),
+            IngressBackend: m("k8s.gcr.io/defaultbackend:1.4-rancher1"),
+            MetricsServer: m("gcr.io/google_containers/metrics-server:v0.3.1"),
+            CoreDNS: m("coredns/coredns:1.2.6"),
+            CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.0.0"),
+        },
+        "v1.14.0-rancher1-1": {
+            Etcd: m("quay.io/coreos/etcd:v3.3.10-rancher1"),
+            Kubernetes: m("rancher/hyperkube:v1.14.0-rancher1"),
+            Alpine: m("rancher/rke-tools:v0.1.28"),
+            NginxProxy: m("rancher/rke-tools:v0.1.28"),
+            CertDownloader: m("rancher/rke-tools:v0.1.28"),
+            KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.28"),
+            KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"),
+            DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"),
+            KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"),
+            KubeDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"),
+            Flannel: m("quay.io/coreos/flannel:v0.10.0-rancher1"),
+            FlannelCNI: m("rancher/flannel-cni:v0.3.0-rancher1"),
+            CalicoNode: m("quay.io/calico/node:v3.4.0"),
+            CalicoCNI: m("quay.io/calico/cni:v3.4.0"),
+            CalicoCtl: m("quay.io/calico/ctl:v2.0.0"),
+            CanalNode: m("quay.io/calico/node:v3.4.0"),
+            CanalCNI: m("quay.io/calico/cni:v3.4.0"),
+            CanalFlannel: m("quay.io/coreos/flannel:v0.10.0"),
+            WeaveNode: m("weaveworks/weave-kube:2.5.0"),
+            WeaveCNI: m("weaveworks/weave-npc:2.5.0"),
+            PodInfraContainer: m("gcr.io/google_containers/pause:3.1"),
+            Ingress: m("rancher/nginx-ingress-controller:0.21.0-rancher3"),
+            IngressBackend: m("k8s.gcr.io/defaultbackend:1.5-rancher1"),
+            MetricsServer: m("gcr.io/google_containers/metrics-server:v0.3.1"),
+            CoreDNS: m("coredns/coredns:1.3.1"),
+            CoreDNSAutoscaler: m("gcr.io/google_containers/cluster-proportional-autoscaler:1.3.0"),
+        },
         // k8s version from 2.1.x release with old rke-tools to allow upgrade from 2.1.x clusters
         // without all clusters being restarted
         "v1.12.5-rancher1-1": {
diff --git a/vendor/github.com/rancher/types/apis/management.cattle.io/v3/k8s_windows_default.go b/vendor/github.com/rancher/types/apis/management.cattle.io/v3/k8s_windows_default.go
index 3c8463d7..11d3473f 100644
--- a/vendor/github.com/rancher/types/apis/management.cattle.io/v3/k8s_windows_default.go
+++ b/vendor/github.com/rancher/types/apis/management.cattle.io/v3/k8s_windows_default.go
@@ -293,6 +293,14 @@ var (
             CanalCNIBinaries: m("rancher/canal-cni:v0.0.1-nanoserver-1803"),
             KubeletPause: m("rancher/kubelet-pause:v0.0.1-nanoserver-1803"),
         },
+        "v1.14.0-rancher1-1": {
+            NginxProxy: m("rancher/nginx-proxy:v0.0.1-nanoserver-1803"),
+            KubernetesBinaries: m("rancher/hyperkube:v1.14.0-nanoserver-1803"),
+            FlannelCNIBinaries: m("rancher/flannel-cni:v0.0.1-nanoserver-1803"),
+            CalicoCNIBinaries: m("rancher/calico-cni:v0.0.1-nanoserver-1803"),
+            CanalCNIBinaries: m("rancher/canal-cni:v0.0.1-nanoserver-1803"),
+            KubeletPause: m("rancher/kubelet-pause:v0.0.1-nanoserver-1803"),
+        },
     }
 )
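
Reviewer note (not part of the diff): K8sVersionServiceOptions is keyed by a "vMAJOR.MINOR" string such as "v1.14", while the system-images maps are keyed by full build tags such as "v1.14.0-rancher1-1". The sketch below is a hypothetical, simplified illustration of how a consumer could resolve per-version options from a full tag by trimming it to its major.minor prefix. serviceOptionsForVersion is an invented helper and plain map[string]string stands in for the KubernetesServicesOptions fields; this is not RKE's actual lookup code.

package main

import (
	"fmt"
	"strings"
)

// serviceOptionsForVersion (hypothetical helper) trims a full version tag such as
// "v1.14.0-rancher1-1" to its "vMAJOR.MINOR" prefix ("v1.14") and looks that key
// up in an options map shaped like the inner maps of K8sVersionServiceOptions.
func serviceOptionsForVersion(options map[string]map[string]string, fullVersion string) (map[string]string, bool) {
	parts := strings.SplitN(fullVersion, ".", 3)
	if len(parts) < 2 {
		return nil, false
	}
	majorMinor := parts[0] + "." + parts[1] // e.g. "v1.14"
	opts, ok := options[majorMinor]
	return opts, ok
}

func main() {
	// Tiny stand-in for the KubeAPI portion of K8sVersionServiceOptions in the diff.
	kubeAPIOptions := map[string]map[string]string{
		"v1.14": {"enable-admission-plugins": "NamespaceLifecycle,LimitRanger,ServiceAccount"},
		"v1.13": {"repair-malformed-updates": "false"},
	}
	if opts, ok := serviceOptionsForVersion(kubeAPIOptions, "v1.14.0-rancher1-1"); ok {
		fmt.Println(opts["enable-admission-plugins"])
	}
}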