Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #44898 from xingzhou/kube-44697
Automatic merge from submit-queue (batch tested with PRs 45908, 44898)

While calculating a pod's CPU limit, init containers need to be counted in as well; otherwise the pod may fail to start with an "invalid argument" error when writing the "cpu.cfs_quota_us" cgroup file.

Fixed #44697

Release note:
```
NONE
```
Commit 4d89212d26
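For context on the fix: the pod-level figures now come from resource.PodRequestsAndLimits (see the diff below), which folds init containers into the totals. Init containers run one at a time, before the app containers start, so for each resource the effective pod value is the larger of the sum across app containers and the maximum across init containers. A minimal sketch of that aggregation rule for the CPU limit (the helper `effectiveCPULimitMilli` is hypothetical, not the vendored implementation):

```go
package main

import "fmt"

// effectiveCPULimitMilli sketches the aggregation rule applied per resource:
// init containers run sequentially before the app containers start, so the
// pod-level limit must cover both the app containers' combined limit and the
// largest single init-container limit.
func effectiveCPULimitMilli(appLimitsMilli, initLimitsMilli []int64) int64 {
	appSum := int64(0)
	for _, l := range appLimitsMilli {
		appSum += l
	}
	initMax := int64(0)
	for _, l := range initLimitsMilli {
		if l > initMax {
			initMax = l
		}
	}
	if initMax > appSum {
		return initMax
	}
	return appSum
}

func main() {
	// Two app containers at 100m each, plus an init container that needs 500m.
	// The old code summed only the app containers (200m); the fixed code
	// accounts for the init container and arrives at 500m.
	fmt.Println(effectiveCPULimitMilli([]int64{100, 100}, []int64{500})) // 500
}
```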
@@ -26,6 +26,7 @@ import (
 	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
 	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/api/v1/resource"
 	"k8s.io/kubernetes/pkg/kubelet/qos"
 )
@@ -84,28 +85,41 @@ func MilliCPUToShares(milliCPU int64) int64 {
 
 // ResourceConfigForPod takes the input pod and outputs the cgroup resource config.
 func ResourceConfigForPod(pod *v1.Pod) *ResourceConfig {
-	// sum requests and limits, track if limits were applied for each resource.
+	// sum requests and limits.
+	reqs, limits, err := resource.PodRequestsAndLimits(pod)
+	if err != nil {
+		return &ResourceConfig{}
+	}
+
 	cpuRequests := int64(0)
 	cpuLimits := int64(0)
 	memoryLimits := int64(0)
-	memoryLimitsDeclared := true
-	cpuLimitsDeclared := true
-	for _, container := range pod.Spec.Containers {
-		cpuRequests += container.Resources.Requests.Cpu().MilliValue()
-		cpuLimits += container.Resources.Limits.Cpu().MilliValue()
-		if container.Resources.Limits.Cpu().IsZero() {
-			cpuLimitsDeclared = false
-		}
-		memoryLimits += container.Resources.Limits.Memory().Value()
-		if container.Resources.Limits.Memory().IsZero() {
-			memoryLimitsDeclared = false
-		}
+	if request, found := reqs[v1.ResourceCPU]; found {
+		cpuRequests = request.MilliValue()
+	}
+	if limit, found := limits[v1.ResourceCPU]; found {
+		cpuLimits = limit.MilliValue()
+	}
+	if limit, found := limits[v1.ResourceMemory]; found {
+		memoryLimits = limit.Value()
 	}
 
 	// convert to CFS values
 	cpuShares := MilliCPUToShares(cpuRequests)
 	cpuQuota, cpuPeriod := MilliCPUToQuota(cpuLimits)
 
+	// track if limits were applied for each resource.
+	memoryLimitsDeclared := true
+	cpuLimitsDeclared := true
+	for _, container := range pod.Spec.Containers {
+		if container.Resources.Limits.Cpu().IsZero() {
+			cpuLimitsDeclared = false
+		}
+		if container.Resources.Limits.Memory().IsZero() {
+			memoryLimitsDeclared = false
+		}
+	}
+
 	// determine the qos class
 	qosClass := qos.GetPodQOS(pod)
 
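The "convert to CFS values" step turns the milliCPU figure into the quota/period pair written to cpu.cfs_quota_us and cpu.cfs_period_us. A minimal sketch of what MilliCPUToQuota does, assuming the kubelet's usual constants (100ms period, 1ms minimum quota); the constant and function names here are illustrative:

```go
const (
	quotaPeriod    = 100000 // CFS period: 100ms, in microseconds
	minQuotaPeriod = 1000   // smallest quota the kernel accepts: 1ms
	milliCPUToCPU  = 1000   // milliCPU units per CPU
)

// milliCPUToQuota converts a milliCPU limit into CFS quota/period values:
// a 250m limit becomes 25000us of runtime per 100000us period, i.e. 25% of
// one CPU. A zero limit yields (0, 0), and 0 is not a value the kernel
// accepts in cpu.cfs_quota_us -- one way an undercounted pod limit (init
// containers skipped) could surface as the "invalid argument" error from
// #44697.
func milliCPUToQuota(milliCPU int64) (quota, period int64) {
	if milliCPU == 0 {
		return
	}
	period = quotaPeriod
	quota = (milliCPU * quotaPeriod) / milliCPUToCPU
	if quota < minQuotaPeriod {
		quota = minQuotaPeriod
	}
	return
}
```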