From e67c24bdbb4b0f52f023fae3914b435f9a402d28 Mon Sep 17 00:00:00 2001
From: Damian Sawicki
Date: Thu, 12 Dec 2024 10:52:45 +0000
Subject: [PATCH] Bump kubedns and nodelocaldns to 1.24.0 (fixed)

In addition to the version bump, this removes the usage of the flag
logtostderr (see
https://github.com/kubernetes/enhancements/tree/master/keps/sig-instrumentation/2845-deprecate-klog-specific-flags-in-k8s-components),
which was causing errors
https://github.com/kubernetes/kubernetes/issues/129230.

Signed-off-by: Damian Sawicki
---
 cluster/addons/dns/kube-dns/kube-dns.yaml.base    | 8 +++-----
 cluster/addons/dns/kube-dns/kube-dns.yaml.in      | 8 +++-----
 cluster/addons/dns/kube-dns/kube-dns.yaml.sed     | 8 +++-----
 cluster/addons/dns/nodelocaldns/nodelocaldns.yaml | 2 +-
 4 files changed, 10 insertions(+), 16 deletions(-)

diff --git a/cluster/addons/dns/kube-dns/kube-dns.yaml.base b/cluster/addons/dns/kube-dns/kube-dns.yaml.base
index 9686ce5fe90..afba8aa51bb 100644
--- a/cluster/addons/dns/kube-dns/kube-dns.yaml.base
+++ b/cluster/addons/dns/kube-dns/kube-dns.yaml.base
@@ -114,7 +114,7 @@ spec:
         kubernetes.io/os: linux
       containers:
       - name: kubedns
-        image: registry.k8s.io/dns/k8s-dns-kube-dns:1.23.1
+        image: registry.k8s.io/dns/k8s-dns-kube-dns:1.24.0
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -170,7 +170,7 @@ spec:
           runAsUser: 1001
           runAsGroup: 1001
       - name: dnsmasq
-        image: registry.k8s.io/dns/k8s-dns-dnsmasq-nanny:1.23.1
+        image: registry.k8s.io/dns/k8s-dns-dnsmasq-nanny:1.24.0
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -182,7 +182,6 @@ spec:
           failureThreshold: 5
         args:
         - -v=2
-        - -logtostderr
         - -configDir=/etc/k8s/dns/dnsmasq-nanny
         - -restartDnsmasq=true
         - --
@@ -217,7 +216,7 @@ spec:
             - NET_BIND_SERVICE
             - SETGID
       - name: sidecar
-        image: registry.k8s.io/dns/k8s-dns-sidecar:1.23.1
+        image: registry.k8s.io/dns/k8s-dns-sidecar:1.24.0
         livenessProbe:
           httpGet:
             path: /metrics
@@ -229,7 +228,6 @@ spec:
           failureThreshold: 5
         args:
         - --v=2
-        - --logtostderr
         - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.__DNS__DOMAIN__,5,SRV
         - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.__DNS__DOMAIN__,5,SRV
         ports:
diff --git a/cluster/addons/dns/kube-dns/kube-dns.yaml.in b/cluster/addons/dns/kube-dns/kube-dns.yaml.in
index a38176a57d7..864e4f69d2e 100644
--- a/cluster/addons/dns/kube-dns/kube-dns.yaml.in
+++ b/cluster/addons/dns/kube-dns/kube-dns.yaml.in
@@ -114,7 +114,7 @@ spec:
         kubernetes.io/os: linux
       containers:
       - name: kubedns
-        image: registry.k8s.io/dns/k8s-dns-kube-dns:1.23.1
+        image: registry.k8s.io/dns/k8s-dns-kube-dns:1.24.0
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -170,7 +170,7 @@ spec:
           runAsUser: 1001
           runAsGroup: 1001
       - name: dnsmasq
-        image: registry.k8s.io/dns/k8s-dns-dnsmasq-nanny:1.23.1
+        image: registry.k8s.io/dns/k8s-dns-dnsmasq-nanny:1.24.0
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -182,7 +182,6 @@ spec:
           failureThreshold: 5
         args:
         - -v=2
-        - -logtostderr
         - -configDir=/etc/k8s/dns/dnsmasq-nanny
         - -restartDnsmasq=true
         - --
@@ -217,7 +216,7 @@ spec:
             - NET_BIND_SERVICE
             - SETGID
       - name: sidecar
-        image: registry.k8s.io/dns/k8s-dns-sidecar:1.23.1
+        image: registry.k8s.io/dns/k8s-dns-sidecar:1.24.0
         livenessProbe:
           httpGet:
             path: /metrics
@@ -229,7 +228,6 @@ spec:
           failureThreshold: 5
         args:
         - --v=2
-        - --logtostderr
         - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.{{ pillar['dns_domain'] }},5,SRV
         - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.{{ pillar['dns_domain'] }},5,SRV
         ports:
diff --git a/cluster/addons/dns/kube-dns/kube-dns.yaml.sed b/cluster/addons/dns/kube-dns/kube-dns.yaml.sed
index 39d122d6087..de94f0b5665 100644
--- a/cluster/addons/dns/kube-dns/kube-dns.yaml.sed
+++ b/cluster/addons/dns/kube-dns/kube-dns.yaml.sed
@@ -114,7 +114,7 @@ spec:
         kubernetes.io/os: linux
       containers:
       - name: kubedns
-        image: registry.k8s.io/dns/k8s-dns-kube-dns:1.23.1
+        image: registry.k8s.io/dns/k8s-dns-kube-dns:1.24.0
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -170,7 +170,7 @@ spec:
           runAsUser: 1001
           runAsGroup: 1001
       - name: dnsmasq
-        image: registry.k8s.io/dns/k8s-dns-dnsmasq-nanny:1.23.1
+        image: registry.k8s.io/dns/k8s-dns-dnsmasq-nanny:1.24.0
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -182,7 +182,6 @@ spec:
           failureThreshold: 5
         args:
         - -v=2
-        - -logtostderr
         - -configDir=/etc/k8s/dns/dnsmasq-nanny
         - -restartDnsmasq=true
         - --
@@ -217,7 +216,7 @@ spec:
             - NET_BIND_SERVICE
             - SETGID
       - name: sidecar
-        image: registry.k8s.io/dns/k8s-dns-sidecar:1.23.1
+        image: registry.k8s.io/dns/k8s-dns-sidecar:1.24.0
         livenessProbe:
           httpGet:
             path: /metrics
@@ -229,7 +228,6 @@ spec:
           failureThreshold: 5
         args:
         - --v=2
-        - --logtostderr
         - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.$DNS_DOMAIN,5,SRV
         - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.$DNS_DOMAIN,5,SRV
         ports:
diff --git a/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml b/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml
index 773409c8d79..088404c96b8 100644
--- a/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml
+++ b/cluster/addons/dns/nodelocaldns/nodelocaldns.yaml
@@ -138,7 +138,7 @@ spec:
         operator: "Exists"
       containers:
       - name: node-cache
-        image: registry.k8s.io/dns/k8s-dns-node-cache:1.23.1
+        image: registry.k8s.io/dns/k8s-dns-node-cache:1.24.0
         resources:
           requests:
             cpu: 25m