Bump kubedns and nodelocaldns to 1.25.0

Signed-off-by: Damian Sawicki <dsawicki@google.com>
Damian Sawicki 2025-01-24 11:13:42 +00:00
parent f6f06806cc
commit 156b9fbadc
4 changed files with 10 additions and 10 deletions


@@ -114,7 +114,7 @@ spec:
         kubernetes.io/os: linux
       containers:
       - name: kubedns
-        image: registry.k8s.io/dns/k8s-dns-kube-dns:1.24.0
+        image: registry.k8s.io/dns/k8s-dns-kube-dns:1.25.0
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -170,7 +170,7 @@ spec:
           runAsUser: 1001
           runAsGroup: 1001
       - name: dnsmasq
-        image: registry.k8s.io/dns/k8s-dns-dnsmasq-nanny:1.24.0
+        image: registry.k8s.io/dns/k8s-dns-dnsmasq-nanny:1.25.0
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -216,7 +216,7 @@ spec:
             - NET_BIND_SERVICE
             - SETGID
       - name: sidecar
-        image: registry.k8s.io/dns/k8s-dns-sidecar:1.24.0
+        image: registry.k8s.io/dns/k8s-dns-sidecar:1.25.0
         livenessProbe:
           httpGet:
             path: /metrics


@@ -114,7 +114,7 @@ spec:
         kubernetes.io/os: linux
       containers:
      - name: kubedns
-        image: registry.k8s.io/dns/k8s-dns-kube-dns:1.24.0
+        image: registry.k8s.io/dns/k8s-dns-kube-dns:1.25.0
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -170,7 +170,7 @@ spec:
           runAsUser: 1001
           runAsGroup: 1001
       - name: dnsmasq
-        image: registry.k8s.io/dns/k8s-dns-dnsmasq-nanny:1.24.0
+        image: registry.k8s.io/dns/k8s-dns-dnsmasq-nanny:1.25.0
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -216,7 +216,7 @@ spec:
             - NET_BIND_SERVICE
             - SETGID
       - name: sidecar
-        image: registry.k8s.io/dns/k8s-dns-sidecar:1.24.0
+        image: registry.k8s.io/dns/k8s-dns-sidecar:1.25.0
         livenessProbe:
           httpGet:
             path: /metrics


@@ -114,7 +114,7 @@ spec:
         kubernetes.io/os: linux
       containers:
       - name: kubedns
-        image: registry.k8s.io/dns/k8s-dns-kube-dns:1.24.0
+        image: registry.k8s.io/dns/k8s-dns-kube-dns:1.25.0
         resources:
           # TODO: Set memory limits when we've profiled the container for large
           # clusters, then set request = limit to keep this container in
@@ -170,7 +170,7 @@ spec:
           runAsUser: 1001
           runAsGroup: 1001
       - name: dnsmasq
-        image: registry.k8s.io/dns/k8s-dns-dnsmasq-nanny:1.24.0
+        image: registry.k8s.io/dns/k8s-dns-dnsmasq-nanny:1.25.0
         livenessProbe:
           httpGet:
             path: /healthcheck/dnsmasq
@@ -216,7 +216,7 @@ spec:
             - NET_BIND_SERVICE
             - SETGID
       - name: sidecar
-        image: registry.k8s.io/dns/k8s-dns-sidecar:1.24.0
+        image: registry.k8s.io/dns/k8s-dns-sidecar:1.25.0
         livenessProbe:
           httpGet:
             path: /metrics


@@ -138,7 +138,7 @@ spec:
         operator: "Exists"
       containers:
       - name: node-cache
-        image: registry.k8s.io/dns/k8s-dns-node-cache:1.24.0
+        image: registry.k8s.io/dns/k8s-dns-node-cache:1.25.0
         resources:
           requests:
             cpu: 25m