Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-07 11:13:48 +00:00)
Merge pull request #89222 from fengzixu/master
bugfix: initcontainer is also taken into account when calculating resource requests
This commit is contained in: commit b850b5ce2e
@@ -114,6 +114,7 @@ func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.Pod
 // calculatePodResourceRequest returns the total non-zero requests. If Overhead is defined for the pod and the
 // PodOverhead feature is enabled, the Overhead is added to the result.
+// podResourceRequest = max(sum(podSpec.Containers), podSpec.InitContainers) + overHead
 func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
 	var podRequest int64
 	for i := range pod.Spec.Containers {
@@ -122,11 +123,20 @@ func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
 		podRequest += value
 	}
 
+	for i := range pod.Spec.InitContainers {
+		initContainer := &pod.Spec.InitContainers[i]
+		value := schedutil.GetNonzeroRequestForResource(resource, &initContainer.Resources.Requests)
+		if podRequest < value {
+			podRequest = value
+		}
+	}
+
 	// If Overhead is being utilized, add to the total requests for the pod
 	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
 		if quantity, found := pod.Spec.Overhead[resource]; found {
 			podRequest += quantity.Value()
 		}
 	}
 
 	return podRequest
 }
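Taken together with the previous hunk, calculatePodResourceRequest now follows the comment added above: the pod request is the larger of the summed regular-container requests and the largest single init-container request, plus any pod overhead. A minimal, self-contained sketch of that formula (illustrative only — plain int64 values stand in for the real v1.Pod and resource types):

package main

import "fmt"

// podRequest mirrors the formula: max(sum(containers), max(initContainers)) + overhead.
func podRequest(containers, initContainers []int64, overhead int64) int64 {
	var request int64
	for _, c := range containers {
		request += c
	}
	// An init container runs on its own, so only the largest single init request matters.
	for _, ic := range initContainers {
		if request < ic {
			request = ic
		}
	}
	return request + overhead
}

func main() {
	// Matches the test pod added below: one 200m container, one 500m init
	// container, 500m overhead -> max(200, 500) + 500 = 1000 milliCPU.
	fmt.Println(podRequest([]int64{200}, []int64{500}, 500))
}

Init containers run sequentially and to completion before the regular containers start, which is why a max rather than a sum is the right bound.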
@@ -208,7 +208,10 @@ func (r *Resource) Add(rl v1.ResourceList) {
 		case v1.ResourcePods:
 			r.AllowedPodNumber += int(rQuant.Value())
 		case v1.ResourceEphemeralStorage:
+			if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
+				// if the local storage capacity isolation feature gate is disabled, pods request 0 disk.
 				r.EphemeralStorage += rQuant.Value()
+			}
 		default:
 			if v1helper.IsScalarResourceName(rName) {
 				r.AddScalar(rName, rQuant.Value())
@@ -458,21 +461,32 @@ func (n *NodeInfo) resetSlicesIfEmpty() {
 	}
 }
 
+// resourceRequest = max(sum(podSpec.Containers), podSpec.InitContainers) + overHead
 func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64) {
 	resPtr := &res
 	for _, c := range pod.Spec.Containers {
 		resPtr.Add(c.Resources.Requests)
 
 		non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&c.Resources.Requests)
 		non0CPU += non0CPUReq
 		non0Mem += non0MemReq
 		// No non-zero resources for GPUs or opaque resources.
 	}
 
+	for _, ic := range pod.Spec.InitContainers {
+		resPtr.SetMaxResource(ic.Resources.Requests)
+		non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&ic.Resources.Requests)
+		if non0CPU < non0CPUReq {
+			non0CPU = non0CPUReq
+		}
+
+		if non0Mem < non0MemReq {
+			non0Mem = non0MemReq
+		}
+	}
+
 	// If Overhead is being utilized, add to the total requests for the pod
 	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
 		resPtr.Add(pod.Spec.Overhead)
 
 		if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
 			non0CPU += pod.Spec.Overhead.Cpu().MilliValue()
 		}
@@ -614,6 +614,46 @@ func TestNodeInfoAddPod(t *testing.T) {
 				},
 			},
 		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: "node_info_cache_test",
+				Name:      "test-3",
+				UID:       types.UID("test-3"),
+			},
+			Spec: v1.PodSpec{
+				Containers: []v1.Container{
+					{
+						Resources: v1.ResourceRequirements{
+							Requests: v1.ResourceList{
+								v1.ResourceCPU: resource.MustParse("200m"),
+							},
+						},
+						Ports: []v1.ContainerPort{
+							{
+								HostIP:   "127.0.0.1",
+								HostPort: 8080,
+								Protocol: "TCP",
+							},
+						},
+					},
+				},
+				InitContainers: []v1.Container{
+					{
+						Resources: v1.ResourceRequirements{
+							Requests: v1.ResourceList{
+								v1.ResourceCPU:    resource.MustParse("500m"),
+								v1.ResourceMemory: resource.MustParse("200Mi"),
+							},
+						},
+					},
+				},
+				NodeName: nodeName,
+				Overhead: v1.ResourceList{
+					v1.ResourceCPU:    resource.MustParse("500m"),
+					v1.ResourceMemory: resource.MustParse("500"),
+				},
+			},
+		},
 	}
 	expected := &NodeInfo{
 		node: &v1.Node{
@@ -622,15 +662,15 @@ func TestNodeInfoAddPod(t *testing.T) {
 			},
 		},
 		Requested: &Resource{
-			MilliCPU:         1300,
-			Memory:           1000,
+			MilliCPU:         2300,
+			Memory:           209716700, //1500 + 200MB in initContainers
 			EphemeralStorage: 0,
 			AllowedPodNumber: 0,
 			ScalarResources:  map[v1.ResourceName]int64(nil),
 		},
 		NonZeroRequested: &Resource{
-			MilliCPU:         1300,
-			Memory:           209716200, //200MB + 1000 specified in requests/overhead
+			MilliCPU:         2300,
+			Memory:           419431900, //200MB(initContainers) + 200MB(default memory value) + 1500 specified in requests/overhead
 			EphemeralStorage: 0,
 			AllowedPodNumber: 0,
 			ScalarResources:  map[v1.ResourceName]int64(nil),
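A quick sanity check of the new expectations (a standalone sketch; it assumes the two pre-existing test pods contribute 1300m CPU and 1000 bytes of memory, as in the old expected values, and that 200Mi equals 209715200 bytes):

package main

import "fmt"

func max64(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}

func main() {
	existingCPU, existingMem := int64(1300), int64(1000) // from pods test-1 and test-2

	// Pod test-3: containers request 200m CPU and no memory; the init container
	// requests 500m CPU and 200Mi memory; overhead adds 500m CPU and 500 bytes.
	cpu := max64(200, 500) + 500     // 1000m
	mem := max64(0, 209715200) + 500 // 209715700 bytes

	fmt.Println(existingCPU+cpu, existingMem+mem) // 2300 209716700
}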
@@ -710,6 +750,48 @@ func TestNodeInfoAddPod(t *testing.T) {
 				},
 			},
 		},
+		{
+			Pod: &v1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Namespace: "node_info_cache_test",
+					Name:      "test-3",
+					UID:       types.UID("test-3"),
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Resources: v1.ResourceRequirements{
+								Requests: v1.ResourceList{
+									v1.ResourceCPU: resource.MustParse("200m"),
+								},
+							},
+							Ports: []v1.ContainerPort{
+								{
+									HostIP:   "127.0.0.1",
+									HostPort: 8080,
+									Protocol: "TCP",
+								},
+							},
+						},
+					},
+					InitContainers: []v1.Container{
+						{
+							Resources: v1.ResourceRequirements{
+								Requests: v1.ResourceList{
+									v1.ResourceCPU:    resource.MustParse("500m"),
+									v1.ResourceMemory: resource.MustParse("200Mi"),
+								},
+							},
+						},
+					},
+					NodeName: nodeName,
+					Overhead: v1.ResourceList{
+						v1.ResourceCPU:    resource.MustParse("500m"),
+						v1.ResourceMemory: resource.MustParse("500"),
+					},
+				},
+			},
+		},
 	}
 
@@ -38,10 +38,12 @@ go_library(
     deps = [
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/apis/core/v1/helper:go_default_library",
+        "//pkg/features:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/kube-scheduler/extender/v1:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
     ],
@@ -18,7 +18,9 @@ package util
 
 import (
 	v1 "k8s.io/api/core/v1"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
+	"k8s.io/kubernetes/pkg/features"
 )
 
 // For each of these resources, a pod that doesn't request the resource explicitly
@@ -60,6 +62,11 @@ func GetNonzeroRequestForResource(resource v1.ResourceName, requests *v1.ResourceList) int64 {
 		}
 		return requests.Memory().Value()
 	case v1.ResourceEphemeralStorage:
+		// if the local storage capacity isolation feature gate is disabled, pods request 0 disk.
+		if !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
+			return 0
+		}
+
 		quantity, found := (*requests)[v1.ResourceEphemeralStorage]
 		if !found {
 			return 0
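The guard added above can be summarized in isolation as follows (a sketch only — the real code consults DefaultFeatureGate rather than taking a boolean parameter):

// nonzeroEphemeralStorage mirrors the added branch: with the
// LocalStorageCapacityIsolation gate disabled, an ephemeral-storage request
// contributes nothing to the pod's accounted request.
func nonzeroEphemeralStorage(gateEnabled bool, requested int64) int64 {
	if !gateEnabled {
		return 0
	}
	return requested
}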