Merge pull request #67504 from loburm/adjust_resources
Automatic merge from submit-queue (batch tested with PRs 67062, 67169, 67539, 67504, 66876). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Decrease CPU requests of master components by half.

**What this PR does / why we need it**: Halves the CPU request of each master component. This makes it possible to schedule more components on the master node of one-core machines, while the relative CPU share each component receives stays the same (https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/#how-pods-with-resource-limits-are-run).

```release-note
NONE
```
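The claim about CPU shares follows from how kubelet maps a container's CPU request to cgroup `cpu.shares` (see the linked docs): halving every request halves each component's absolute share, but the ratio between the master components stays the same. Below is a minimal Go sketch of that arithmetic, assuming the standard millicores-to-shares conversion (`shares = milliCPU * 1024 / 1000`, floored at 2); `milliCPUToShares` is a local illustration written for this note, not the kubelet API.

```go
package main

import "fmt"

// milliCPUToShares converts a CPU request in millicores to cgroup cpu.shares.
// Sketch only: assumes shares = milliCPU * 1024 / 1000 with a floor of 2;
// it is not the kubelet helper itself.
func milliCPUToShares(milliCPU int64) int64 {
	const (
		sharesPerCPU  int64 = 1024
		milliCPUToCPU int64 = 1000
		minShares     int64 = 2
	)
	shares := milliCPU * sharesPerCPU / milliCPUToCPU
	if shares < minShares {
		return minShares
	}
	return shares
}

func main() {
	// Master-component requests before and after this PR, in millicores
	// (values taken from the diff below).
	components := []struct {
		name          string
		before, after int64
	}{
		{"kube-apiserver", 250, 125},
		{"kube-controller-manager", 200, 100},
		{"kube-scheduler", 75, 40},
	}

	for _, c := range components {
		fmt.Printf("%-24s %4dm -> %4d shares   %4dm -> %4d shares\n",
			c.name, c.before, milliCPUToShares(c.before),
			c.after, milliCPUToShares(c.after))
	}
	// Under CPU contention each container gets CPU time in proportion to its
	// shares, so scaling every request down by roughly the same factor keeps
	// the split between these components while freeing request capacity for
	// scheduling more pods on a one-core master.
}
```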
Commit f6817d2f6d
@@ -1269,7 +1269,7 @@ function prepare-kube-proxy-manifest-variables {
   sed -i -e "s@{{container_env}}@${container_env}@g" ${src_file}
   sed -i -e "s@{{kube_cache_mutation_detector_env_name}}@${kube_cache_mutation_detector_env_name}@g" ${src_file}
   sed -i -e "s@{{kube_cache_mutation_detector_env_value}}@${kube_cache_mutation_detector_env_value}@g" ${src_file}
-  sed -i -e "s@{{ cpurequest }}@100m@g" ${src_file}
+  sed -i -e "s@{{ cpurequest }}@50m@g" ${src_file}
   sed -i -e "s@{{api_servers_with_port}}@${api_servers}@g" ${src_file}
   sed -i -e "s@{{kubernetes_service_host_env_value}}@${KUBERNETES_MASTER_NAME}@g" ${src_file}
   if [[ -n "${CLUSTER_IP_RANGE:-}" ]]; then
@@ -1390,10 +1390,10 @@ function start-etcd-servers {
     rm -f /etc/init.d/etcd
   fi
   prepare-log-file /var/log/etcd.log
-  prepare-etcd-manifest "" "2379" "2380" "200m" "etcd.manifest"
+  prepare-etcd-manifest "" "2379" "2380" "100m" "etcd.manifest"

   prepare-log-file /var/log/etcd-events.log
-  prepare-etcd-manifest "-events" "4002" "2381" "100m" "etcd-events.manifest"
+  prepare-etcd-manifest "-events" "4002" "2381" "50m" "etcd-events.manifest"
 }

 # Calculates the following variables based on env variables, which will be used
@@ -44,7 +44,7 @@
     ],
     "resources": {
       "requests": {
-        "cpu": "10m",
+        "cpu": "5m",
         "memory": "300Mi"
       }
     },
@@ -11,9 +11,9 @@ spec:
   - name: image-puller
     resources:
       requests:
-        cpu: 100m
+        cpu: 50m
       limits:
-        cpu: 100m
+        cpu: 50m
     image: k8s.gcr.io/busybox:1.24
     # TODO: Replace this with a go script that pulls in parallel?
     # Currently it takes ~5m to pull all e2e images, so this is OK, and
@@ -90,9 +90,9 @@ spec:
   - name: nethealth-check
     resources:
       requests:
-        cpu: 100m
+        cpu: 50m
       limits:
-        cpu: 100m
+        cpu: 50m
     image: k8s.gcr.io/kube-nethealth-amd64:1.0
     command:
     - /bin/sh
@@ -39,7 +39,7 @@ spec:
       # master components on a single core master.
       # TODO: Make resource requirements depend on the size of the cluster
       requests:
-        cpu: 10m
+        cpu: 5m
         memory: 50Mi
     command:
     # TODO: split this out into args when we no longer need to pipe stdout to a file #6428
@@ -21,7 +21,7 @@ spec:
     - exec /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1
     resources:
       requests:
-        cpu: 5m
+        cpu: 3m
         memory: 50Mi
     volumeMounts:
     - mountPath: /etc/kubernetes/
@@ -22,7 +22,7 @@
     "image": "{{pillar['kube_docker_registry']}}/kube-apiserver:{{pillar['kube-apiserver_docker_tag']}}",
     "resources": {
       "requests": {
-        "cpu": "250m"
+        "cpu": "125m"
       }
     },
     "command": [
@@ -21,7 +21,7 @@
     "image": "{{pillar['kube_docker_registry']}}/kube-controller-manager:{{pillar['kube-controller-manager_docker_tag']}}",
     "resources": {
       "requests": {
-        "cpu": "200m"
+        "cpu": "100m"
       }
     },
     "command": [
@@ -21,7 +21,7 @@
    "image": "{{pillar['kube_docker_registry']}}/kube-scheduler:{{pillar['kube-scheduler_docker_tag']}}",
    "resources": {
      "requests": {
-       "cpu": "75m"
+       "cpu": "40m"
      }
    },
    "command": [
@@ -22,7 +22,7 @@ spec:
     # TODO: Make resource requirements depend on the size of the cluster
     resources:
       requests:
-        cpu: 10m
+        cpu: 5m
         memory: 100Mi
     command:
     # TODO: split this out into args when we no longer need to pipe stdout to a file #6428