mirror of https://github.com/rancher/rke.git

update kontainer-driver-metadata

moelsayed 2019-09-10 23:33:20 +02:00 committed by Alena Prokharchyk
parent 2506dbedbf
commit f622c8b942
8 changed files with 618 additions and 28 deletions

go.mod

@@ -32,7 +32,7 @@ require (
github.com/opencontainers/go-digest v1.0.0-rc1 // indirect
github.com/opencontainers/image-spec v0.0.0-20170929214853-7c889fafd04a // indirect
github.com/pkg/errors v0.8.1
github.com/rancher/kontainer-driver-metadata v0.0.0-20190905180018-bd99c3a44558
github.com/rancher/kontainer-driver-metadata v0.0.0-20190910205335-4cf9a4fac46a
github.com/rancher/norman v0.0.0-20190821234528-20a936b685b0
github.com/rancher/types v0.0.0-20190827214052-704648244586
github.com/sirupsen/logrus v1.4.2

go.sum

@@ -181,8 +181,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/rancher/kontainer-driver-metadata v0.0.0-20190905180018-bd99c3a44558 h1:v7ccMI1ni+hlEkLM+0k7YSkEOYUPWO1d+VNoe0iP8/4=
github.com/rancher/kontainer-driver-metadata v0.0.0-20190905180018-bd99c3a44558/go.mod h1:dHvhyuoiwrjqQCFD586g0cZ9NJJXEKeAtQi8RX96U8E=
github.com/rancher/kontainer-driver-metadata v0.0.0-20190910205335-4cf9a4fac46a h1:+RwE0OhSR2bgySTq8wgso+r+QFc2veWfFnCriB0CO6M=
github.com/rancher/kontainer-driver-metadata v0.0.0-20190910205335-4cf9a4fac46a/go.mod h1:dHvhyuoiwrjqQCFD586g0cZ9NJJXEKeAtQi8RX96U8E=
github.com/rancher/norman v0.0.0-20190821234528-20a936b685b0 h1:bNG4b0CTTBE8yEamIz8RYcfz+7kfK9N8YTvyiykRCS8=
github.com/rancher/norman v0.0.0-20190821234528-20a936b685b0/go.mod h1:KwP6RD4rVMdK8XK0wqZaptrhTn/TO4kXU3doh4iatQU=
github.com/rancher/types v0.0.0-20190822170951-b99efa820bc3 h1:4mz/J0iEtW/VDtjN3zI9B4g49MKeoLHkHZKJGqRN7xg=
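Both module files move the github.com/rancher/kontainer-driver-metadata pin from pseudo-version v0.0.0-20190905180018-bd99c3a44558 to v0.0.0-20190910205335-4cf9a4fac46a. As a small reading aid, the sketch below assumes only the standard Go pseudo-version layout (v0.0.0-<UTC timestamp>-<12-character commit prefix>) and pulls the commit prefix back out; the helper is illustrative and not part of RKE.

package main

import (
	"fmt"
	"strings"
)

// commitFromPseudoVersion extracts the trailing commit prefix from a Go module
// pseudo-version such as v0.0.0-20190910205335-4cf9a4fac46a.
// Illustrative only; not part of this repository.
func commitFromPseudoVersion(v string) string {
	parts := strings.Split(v, "-")
	return parts[len(parts)-1]
}

func main() {
	fmt.Println(commitFromPseudoVersion("v0.0.0-20190910205335-4cf9a4fac46a")) // 4cf9a4fac46a
}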


@@ -1212,10 +1212,10 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages {
"v1.14.6-rancher3-1": {
Etcd: m("quay.io/coreos/etcd:v3.3.10-rancher1"),
Kubernetes: m("rancher/hyperkube:v1.14.6-rancher3"),
Alpine: m("rancher/rke-tools:v0.1.47"),
NginxProxy: m("rancher/rke-tools:v0.1.47"),
CertDownloader: m("rancher/rke-tools:v0.1.47"),
KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.47"),
Alpine: m("rancher/rke-tools:v0.1.48"),
NginxProxy: m("rancher/rke-tools:v0.1.48"),
CertDownloader: m("rancher/rke-tools:v0.1.48"),
KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.48"),
KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"),
DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"),
KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"),
@@ -1331,10 +1331,10 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages {
"v1.15.3-rancher3-1": {
Etcd: m("quay.io/coreos/etcd:v3.3.10-rancher1"),
Kubernetes: m("rancher/hyperkube:v1.15.3-rancher3"),
Alpine: m("rancher/rke-tools:v0.1.47"),
NginxProxy: m("rancher/rke-tools:v0.1.47"),
CertDownloader: m("rancher/rke-tools:v0.1.47"),
KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.47"),
Alpine: m("rancher/rke-tools:v0.1.48"),
NginxProxy: m("rancher/rke-tools:v0.1.48"),
CertDownloader: m("rancher/rke-tools:v0.1.48"),
KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.48"),
KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"),
DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"),
KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"),
@@ -1642,10 +1642,10 @@ func loadK8sRKESystemImages() map[string]v3.RKESystemImages {
"v1.16.0-beta.1-rancher2-1": {
Etcd: m("quay.io/coreos/etcd:v3.3.10-rancher1"),
Kubernetes: m("rancher/hyperkube:v1.16.0-beta.1-rancher2"),
Alpine: m("rancher/rke-tools:v0.1.47"),
NginxProxy: m("rancher/rke-tools:v0.1.47"),
CertDownloader: m("rancher/rke-tools:v0.1.47"),
KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.47"),
Alpine: m("rancher/rke-tools:v0.1.48"),
NginxProxy: m("rancher/rke-tools:v0.1.48"),
CertDownloader: m("rancher/rke-tools:v0.1.48"),
KubernetesServicesSidecar: m("rancher/rke-tools:v0.1.48"),
KubeDNS: m("gcr.io/google_containers/k8s-dns-kube-dns:1.15.0"),
DNSmasq: m("gcr.io/google_containers/k8s-dns-dnsmasq-nanny:1.15.0"),
KubeDNSSidecar: m("gcr.io/google_containers/k8s-dns-sidecar:1.15.0"),
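Every rancher/rke-tools reference in the hunks above moves from v0.1.47 to v0.1.48 for the affected Kubernetes versions. Below is a minimal sketch of the data shape being edited, assuming only what the diff itself shows; the real map is built by loadK8sRKESystemImages() and uses the much larger v3.RKESystemImages type.

package main

import "fmt"

// Illustrative stand-in for v3.RKESystemImages; only a few of the real fields.
type systemImages struct {
	Etcd, Kubernetes, Alpine, NginxProxy, CertDownloader string
}

// Keyed by RKE's Kubernetes version string, as in the hunks above.
var k8sSystemImages = map[string]systemImages{
	"v1.15.3-rancher3-1": {
		Etcd:           "quay.io/coreos/etcd:v3.3.10-rancher1",
		Kubernetes:     "rancher/hyperkube:v1.15.3-rancher3",
		Alpine:         "rancher/rke-tools:v0.1.48", // bumped from v0.1.47
		NginxProxy:     "rancher/rke-tools:v0.1.48",
		CertDownloader: "rancher/rke-tools:v0.1.48",
	},
}

func main() {
	if imgs, ok := k8sSystemImages["v1.15.3-rancher3-1"]; ok {
		fmt.Println(imgs.Alpine) // rancher/rke-tools:v0.1.48
	}
}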


@@ -564,6 +564,7 @@ data:
cni-conf.json: |
{
"name": "cbr0",
"cniVersion":"0.3.1",
"plugins": [
{
"type": "flannel",


@@ -327,3 +327,334 @@ data:
stubDomains: |
{{ GetKubednsStubDomains .StubDomains }}
{{- end }}`
const KubeDNSTemplateV116 = `
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-dns-autoscaler
namespace: kube-system
labels:
k8s-app: kube-dns-autoscaler
spec:
selector:
matchLabels:
k8s-app: kube-dns-autoscaler
template:
metadata:
labels:
k8s-app: kube-dns-autoscaler
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
- key: node-role.kubernetes.io/worker
operator: Exists
serviceAccountName: kube-dns-autoscaler
tolerations:
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
containers:
- name: autoscaler
image: {{.KubeDNSAutoScalerImage}}
resources:
requests:
cpu: "20m"
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=kube-dns-autoscaler
- --target=Deployment/kube-dns
# When cluster is using large nodes(with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
- --default-params={"linear":{"coresPerReplica":128,"nodesPerReplica":4,"min":1}}
- --logtostderr=true
- --v=2
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns-autoscaler
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
{{- if eq .RBACConfig "rbac"}}
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-dns-autoscaler
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list", "watch"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- apiGroups: ["extensions"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: system:kube-dns-autoscaler
subjects:
- kind: ServiceAccount
name: kube-dns-autoscaler
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:kube-dns-autoscaler
apiGroup: rbac.authorization.k8s.io
{{- end }}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
# replicas: not specified here:
# 1. In order to make Addon Manager do not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
{{if .NodeSelector}}
nodeSelector:
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
{{end}}
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: k8s-app
operator: In
values: ["kube-dns"]
topologyKey: kubernetes.io/hostname
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
- key: node-role.kubernetes.io/worker
operator: Exists
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
- effect: NoExecute
operator: Exists
- effect: NoSchedule
operator: Exists
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
containers:
- name: kubedns
image: {{.KubeDNSImage}}
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain={{.ClusterDomain}}.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: {{.DNSMasqImage}}
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
- --
- -k
- --cache-size=1000
- --log-facility=-
- --server=/{{.ClusterDomain}}/127.0.0.1#10053
{{- if .ReverseCIDRs }}
{{- range .ReverseCIDRs }}
- --server=/{{.}}/127.0.0.1#10053
{{- end }}
{{- else }}
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
{{- end }}
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: {{.KubeDNSSidecarImage}}
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{.ClusterDomain}},5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{.ClusterDomain}},5,A
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns
---
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
clusterIP: {{.ClusterDNSServer}}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
---
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-dns
namespace: kube-system
data:
{{- if .UpstreamNameservers }}
upstreamNameservers: |
[{{range $i, $v := .UpstreamNameservers}}{{if $i}}, {{end}}{{printf "%q" .}}{{end}}]
{{- end }}
{{- if .StubDomains }}
stubDomains: |
{{ GetKubednsStubDomains .StubDomains }}
{{- end }}`
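Placeholders such as {{.KubeDNSAutoScalerImage}} and {{.ClusterDomain}}, and guards like {{- if eq .RBACConfig "rbac"}}, are Go text/template actions. The sketch below shows how such a template is typically rendered against a data struct; the struct and the tiny stand-in template are illustrative, and RKE's actual addon data type and its GetKubednsStubDomains helper are not reproduced here.

package main

import (
	"os"
	"text/template"
)

// Illustrative data struct whose field names line up with a few of the
// placeholders used in KubeDNSTemplateV116; not RKE's real type.
type kubeDNSData struct {
	KubeDNSImage     string
	ClusterDomain    string
	ClusterDNSServer string
	RBACConfig       string
}

func main() {
	// A tiny stand-in; the real template is the long constant above.
	const manifest = `image: {{.KubeDNSImage}}
domain: {{.ClusterDomain}}
clusterIP: {{.ClusterDNSServer}}
{{- if eq .RBACConfig "rbac"}}
rbac: enabled
{{- end }}
`
	t := template.Must(template.New("kube-dns").Parse(manifest))
	_ = t.Execute(os.Stdout, kubeDNSData{
		KubeDNSImage:     "gcr.io/google_containers/k8s-dns-kube-dns:1.15.0",
		ClusterDomain:    "cluster.local",
		ClusterDNSServer: "10.43.0.10",
		RBACConfig:       "rbac",
	})
}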


@@ -22,21 +22,25 @@ const (
calicov115 = "calico-v1.15"
calicov116 = "calico-v1.16"
canalv18 = "canal-v1.8"
canalv113 = "canal-v1.13"
canalv115 = "canal-v1.15"
canalv116 = "canal-v1.16"
canalv113 = "canal-v1.13"
canalv18 = "canal-v1.8"
flannelv116 = "flannel-v1.16"
flannelv115 = "flannel-v1.15"
flannelv18 = "flannel-v1.8"
flannelv115 = "flannel-v1.15"
flannelv116 = "flannel-v1.16"
coreDnsv18 = "coredns-v1.8"
kubeDnsv18 = "kubedns-v1.8"
kubeDnsv18 = "kubedns-v1.8"
kubeDnsv116 = "kubedns-v1.16"
metricsServerv18 = "metricsserver-v1.8"
weavev18 = "weave-v1.8"
weavev18 = "weave-v1.8"
weavev116 = "weave-v1.16"
nginxIngressv18 = "nginxingress-v1.8"
nginxIngressV115 = "nginxingress-v1.15"
)
@@ -64,13 +68,15 @@ func LoadK8sVersionedTemplates() map[string]map[string]string {
">=1.8.0-rancher0 <1.16.0": coreDnsv18,
},
KubeDNS: {
">=1.8.0-rancher0 <1.16.0": kubeDnsv18,
">=1.16.0-alpha": kubeDnsv116,
">=1.8.0-rancher0 <1.16.0-alpha": kubeDnsv18,
},
MetricsServer: {
">=1.8.0-rancher0 <1.16.0": metricsServerv18,
},
Weave: {
">=1.8.0-rancher0 <1.16.0": weavev18,
">=1.16.0-alpha": weavev116,
">=1.8.0-rancher0 <1.16.0-alpha": weavev18,
},
NginxIngress: {
">=1.8.0-rancher0 <1.13.10-rancher1-3": nginxIngressv18,
@@ -101,11 +107,14 @@ func getTemplates() map[string]string {
canalv116: CanalTemplateV116,
coreDnsv18: CoreDNSTemplate,
kubeDnsv18: KubeDNSTemplate,
kubeDnsv18: KubeDNSTemplate,
kubeDnsv116: KubeDNSTemplateV116,
metricsServerv18: MetricsServerTemplate,
weavev18: WeaveTemplate,
weavev18: WeaveTemplate,
weavev116: WeaveTemplateV116,
nginxIngressv18: NginxIngressTemplate,
nginxIngressV115: NginxIngressTemplateV0251Rancher1,
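In LoadK8sVersionedTemplates, each addon maps semver range strings to a template name, so the new kubedns-v1.16 and weave-v1.16 templates are picked up for >=1.16.0-alpha clusters while older versions keep the v1.8 templates. A rough sketch of that range matching, assuming github.com/blang/semver purely for illustration (the metadata package may use a different semver library, and prerelease-range semantics vary between libraries):

package main

import (
	"fmt"

	"github.com/blang/semver"
)

// Range keys mirror the KubeDNS entry in the diff above; illustrative only.
var kubeDNSRanges = map[string]string{
	">=1.16.0-alpha":                 "kubedns-v1.16",
	">=1.8.0-rancher0 <1.16.0-alpha": "kubedns-v1.8",
}

func templateFor(version string) (string, error) {
	v, err := semver.ParseTolerant(version)
	if err != nil {
		return "", err
	}
	for rng, name := range kubeDNSRanges {
		r, err := semver.ParseRange(rng)
		if err != nil {
			return "", err
		}
		if r(v) {
			return name, nil
		}
	}
	return "", fmt.Errorf("no kube-dns template for %s", version)
}

func main() {
	name, _ := templateFor("v1.16.0-beta.1-rancher2-1")
	fmt.Println(name) // kubedns-v1.16
	name, _ = templateFor("v1.15.3-rancher3-1")
	fmt.Println(name) // kubedns-v1.8
}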


@@ -245,3 +245,252 @@ subjects:
namespace: kube-system
{{- end}}
`
const WeaveTemplateV116 = `
---
# This ConfigMap can be used to configure a self-hosted Weave Net installation.
apiVersion: v1
kind: List
items:
- apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
namespace: kube-system
- apiVersion: apps/v1
kind: DaemonSet
metadata:
name: weave-net
labels:
name: weave-net
namespace: kube-system
spec:
selector:
matchLabels:
name: weave-net
template:
metadata:
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
scheduler.alpha.kubernetes.io/tolerations: >-
[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]
labels:
name: weave-net
spec:
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: beta.kubernetes.io/os
operator: NotIn
values:
- windows
{{if .NodeSelector}}
nodeSelector:
{{ range $k, $v := .NodeSelector }}
{{ $k }}: "{{ $v }}"
{{ end }}
{{end}}
containers:
- name: weave
command:
- /home/weave/launch.sh
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: IPALLOC_RANGE
value: "{{.ClusterCIDR}}"
{{- if .WeavePassword}}
- name: WEAVE_PASSWORD
value: "{{.WeavePassword}}"
{{- end}}
image: {{.Image}}
readinessProbe:
httpGet:
host: 127.0.0.1
path: /status
port: 6784
initialDelaySeconds: 30
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- name: weavedb
mountPath: /weavedb
- name: cni-bin
mountPath: /host/opt
- name: cni-bin2
mountPath: /host/home
- name: cni-conf
mountPath: /host/etc
- name: dbus
mountPath: /host/var/lib/dbus
- name: lib-modules
mountPath: /lib/modules
- name: xtables-lock
mountPath: /run/xtables.lock
- name: weave-npc
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: {{.CNIImage}}
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- name: xtables-lock
mountPath: /run/xtables.lock
- name: weave-plugins
command:
- /opt/rke-tools/weave-plugins-cni.sh
image: {{.WeaveLoopbackImage}}
securityContext:
privileged: true
volumeMounts:
- name: cni-bin
mountPath: /opt
hostNetwork: true
hostPID: true
restartPolicy: Always
securityContext:
seLinuxOptions: {}
serviceAccountName: weave-net
tolerations:
- operator: Exists
effect: NoSchedule
- operator: Exists
effect: NoExecute
volumes:
- name: weavedb
hostPath:
path: /var/lib/weave
- name: cni-bin
hostPath:
path: /opt
- name: cni-bin2
hostPath:
path: /home
- name: cni-conf
hostPath:
path: /etc
- name: dbus
hostPath:
path: /var/lib/dbus
- name: lib-modules
hostPath:
path: /lib/modules
- name: xtables-lock
hostPath:
path: /run/xtables.lock
updateStrategy:
type: RollingUpdate
{{- if eq .RBACConfig "rbac"}}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
labels:
name: weave-net
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: weave-net
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- pods
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- networking.k8s.io
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- nodes/status
verbs:
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: weave-net
labels:
name: weave-net
roleRef:
kind: ClusterRole
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: weave-net
labels:
name: weave-net
namespace: kube-system
rules:
- apiGroups:
- ''
resourceNames:
- weave-net
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: weave-net
labels:
name: weave-net
namespace: kube-system
roleRef:
kind: Role
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
{{- end}}
`

vendor/modules.txt

@@ -105,7 +105,7 @@ github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg
# github.com/prometheus/procfs v0.0.3
github.com/prometheus/procfs
github.com/prometheus/procfs/internal/fs
# github.com/rancher/kontainer-driver-metadata v0.0.0-20190905180018-bd99c3a44558
# github.com/rancher/kontainer-driver-metadata v0.0.0-20190910205335-4cf9a4fac46a
github.com/rancher/kontainer-driver-metadata/rke/templates
github.com/rancher/kontainer-driver-metadata/rke
# github.com/rancher/norman v0.0.0-20190821234528-20a936b685b0
@@ -195,8 +195,9 @@ gopkg.in/yaml.v2
# k8s.io/api v0.0.0-20190805182251-6c9aa3caf3d6
k8s.io/api/core/v1
k8s.io/api/batch/v1
k8s.io/api/extensions/v1beta1
k8s.io/api/policy/v1beta1
k8s.io/api/rbac/v1
k8s.io/api/extensions/v1beta1
k8s.io/api/apps/v1beta1
k8s.io/api/admissionregistration/v1beta1
k8s.io/api/apps/v1
@@ -214,7 +215,6 @@ k8s.io/api/batch/v2alpha1
k8s.io/api/certificates/v1beta1
k8s.io/api/coordination/v1
k8s.io/api/coordination/v1beta1
k8s.io/api/policy/v1beta1
k8s.io/api/events/v1beta1
k8s.io/api/networking/v1
k8s.io/api/networking/v1beta1