using NonMissingContainerRequests

AxeZhan 2024-10-10 14:42:31 +08:00
parent 5ecd83f1e6
commit 6fbc3a618f
2 changed files with 116 additions and 55 deletions

View File

@@ -32,7 +32,7 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog/v2"
-	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+	"k8s.io/apimachinery/pkg/api/resource"
 	resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource"
 	"k8s.io/kubernetes/pkg/features"
 	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
@@ -930,40 +930,24 @@ func (n *NodeInfo) update(pod *v1.Pod, sign int64) {
 }
 
 func calculateResource(pod *v1.Pod) (Resource, int64, int64) {
-	var non0InitCPU, non0InitMem int64
-	var non0CPU, non0Mem int64
 	requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
 		InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
-		ContainerFn: func(requests v1.ResourceList, containerType podutil.ContainerType) {
-			non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&requests)
-			switch containerType {
-			case podutil.Containers:
-				non0CPU += non0CPUReq
-				non0Mem += non0MemReq
-			case podutil.InitContainers:
-				non0InitCPU = max(non0InitCPU, non0CPUReq)
-				non0InitMem = max(non0InitMem, non0MemReq)
-			}
-		},
 	})
 
-	non0CPU = max(non0CPU, non0InitCPU)
-	non0Mem = max(non0Mem, non0InitMem)
-
-	// If Overhead is being utilized, add to the non-zero cpu/memory tracking for the pod. It has already been added
-	// into ScalarResources since it is part of requests
-	if pod.Spec.Overhead != nil {
-		if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
-			non0CPU += pod.Spec.Overhead.Cpu().MilliValue()
-		}
-
-		if _, found := pod.Spec.Overhead[v1.ResourceMemory]; found {
-			non0Mem += pod.Spec.Overhead.Memory().Value()
-		}
-	}
+	non0Requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{
+		InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling),
+		NonMissingContainerRequests: map[v1.ResourceName]resource.Quantity{
+			v1.ResourceCPU:    *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, resource.DecimalSI),
+			v1.ResourceMemory: *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI),
+		},
+	})
+	non0CPU := non0Requests[v1.ResourceCPU]
+	non0Mem := non0Requests[v1.ResourceMemory]
 
 	var res Resource
 	res.Add(requests)
-	return res, non0CPU, non0Mem
+	return res, non0CPU.MilliValue(), non0Mem.Value()
 }
 
 // updateUsedPorts updates the UsedPorts of NodeInfo.
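
The net effect of the hunk above: instead of accumulating non-zero requests per container through ContainerFn, calculateResource now calls resourcehelper.PodRequests a second time with NonMissingContainerRequests, which substitutes the scheduler's default CPU and memory requests for containers that do not specify any, and then reads the aggregated totals straight from the returned ResourceList. Below is a minimal standalone Go sketch of that "fill missing requests with a default, then aggregate" idea. It does not use the Kubernetes helpers; the default values are assumed stand-ins for schedutil.DefaultMilliCPURequest / DefaultMemoryRequest, and the aggregation is the simplified sum-of-regular / max-of-init rule without sidecar or Overhead handling.

package main

import "fmt"

// requests holds the two quantities the scheduler tracks on this path,
// in its own units: milliCPU and bytes of memory.
type requests struct {
	milliCPU int64
	memory   int64
}

// Assumed stand-ins for schedutil.DefaultMilliCPURequest and
// schedutil.DefaultMemoryRequest (100m CPU, 200Mi memory); the real
// constants live in k8s.io/kubernetes/pkg/scheduler/util.
const (
	defaultMilliCPU int64 = 100
	defaultMemory   int64 = 200 * 1024 * 1024
)

// fillMissing substitutes the default for any request left unset, which is
// the per-container effect of passing NonMissingContainerRequests to PodRequests.
func fillMissing(r requests) requests {
	if r.milliCPU == 0 {
		r.milliCPU = defaultMilliCPU
	}
	if r.memory == 0 {
		r.memory = defaultMemory
	}
	return r
}

// podNonZeroRequests sums the (defaulted) requests of regular containers and
// compares the result against each one-shot init container, mirroring the
// max-based aggregation the old ContainerFn implemented by hand. Sidecar and
// Overhead handling are omitted to keep the sketch short.
func podNonZeroRequests(regular, initContainers []requests) requests {
	var out requests
	for _, r := range regular {
		r = fillMissing(r)
		out.milliCPU += r.milliCPU
		out.memory += r.memory
	}
	for _, r := range initContainers {
		r = fillMissing(r)
		out.milliCPU = max(out.milliCPU, r.milliCPU)
		out.memory = max(out.memory, r.memory)
	}
	return out
}

func main() {
	// One container with explicit requests and one with none: the second
	// contributes the defaults instead of zero, so the pod's non-zero
	// requests become 600m CPU and 500M + 200Mi of memory.
	got := podNonZeroRequests(
		[]requests{{milliCPU: 500, memory: 500_000_000}, {}},
		nil,
	)
	fmt.Printf("non-zero pod requests: %dm CPU, %d bytes memory\n", got.milliCPU, got.memory)
}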

View File

@@ -1525,34 +1525,83 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 			Name: "testpod",
 			UID:  types.UID("testpod"),
 		},
-		Spec: v1.PodSpec{
-			Containers: []v1.Container{
-				{
-					Name:      "c1",
-					Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}},
-				},
-			},
-		},
 		Status: v1.PodStatus{
 			Phase: v1.PodRunning,
-			Resize: "",
-			ContainerStatuses: []v1.ContainerStatus{
-				{
-					Name:               "c1",
-					AllocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
-				},
-			},
 		},
 	}
 
+	restartAlways := v1.ContainerRestartPolicyAlways
+	preparePod := func(pod v1.Pod,
+		requests, allocatedResources,
+		initRequests, initAllocatedResources,
+		sidecarRequests, sidecarAllocatedResources *v1.ResourceList,
+		resizeStatus v1.PodResizeStatus) v1.Pod {
+
+		if requests != nil {
+			pod.Spec.Containers = append(pod.Spec.Containers,
+				v1.Container{
+					Name:      "c1",
+					Resources: v1.ResourceRequirements{Requests: *requests},
+				})
+		}
+		if allocatedResources != nil {
+			pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses,
+				v1.ContainerStatus{
+					Name:               "c1",
+					AllocatedResources: *allocatedResources,
+				})
+		}
+
+		if initRequests != nil {
+			pod.Spec.InitContainers = append(pod.Spec.InitContainers,
+				v1.Container{
+					Name:      "i1",
+					Resources: v1.ResourceRequirements{Requests: *initRequests},
+				},
+			)
+		}
+		if initAllocatedResources != nil {
+			pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses,
+				v1.ContainerStatus{
+					Name:               "i1",
+					AllocatedResources: *initAllocatedResources,
+				})
+		}
+
+		if sidecarRequests != nil {
+			pod.Spec.InitContainers = append(pod.Spec.InitContainers,
+				v1.Container{
+					Name:          "s1",
+					Resources:     v1.ResourceRequirements{Requests: *sidecarRequests},
+					RestartPolicy: &restartAlways,
+				},
+			)
+		}
+		if sidecarAllocatedResources != nil {
+			pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses,
+				v1.ContainerStatus{
+					Name:               "s1",
+					AllocatedResources: *sidecarAllocatedResources,
+				})
+		}
+
+		pod.Status.Resize = resizeStatus
+		return pod
+	}
+
 	tests := []struct {
-		name               string
-		requests           v1.ResourceList
-		allocatedResources v1.ResourceList
-		resizeStatus       v1.PodResizeStatus
-		expectedResource   Resource
-		expectedNon0CPU    int64
-		expectedNon0Mem    int64
+		name                      string
+		requests                  v1.ResourceList
+		allocatedResources        v1.ResourceList
+		initRequests              *v1.ResourceList
+		initAllocatedResources    *v1.ResourceList
+		sidecarRequests           *v1.ResourceList
+		sidecarAllocatedResources *v1.ResourceList
+		resizeStatus              v1.PodResizeStatus
+		expectedResource          Resource
+		expectedNon0CPU           int64
+		expectedNon0Mem           int64
 	}{
 		{
 			name: "Pod with no pending resize",
@@ -1590,16 +1639,44 @@ func TestCalculatePodResourcesWithResize(t *testing.T) {
 			expectedNon0CPU:    cpu500m.MilliValue(),
 			expectedNon0Mem:    mem500M.Value(),
 		},
+		{
+			name:                   "Pod with init container and no pending resize",
+			requests:               v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			allocatedResources:     v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			initRequests:           &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			initAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			resizeStatus:           "",
+			expectedResource:       Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()},
+			expectedNon0CPU:        cpu700m.MilliValue(),
+			expectedNon0Mem:        mem800M.Value(),
+		},
+		{
+			name:                      "Pod with sidecar container and no pending resize",
+			requests:                  v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			allocatedResources:        v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M},
+			initRequests:              &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			initAllocatedResources:    &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			sidecarRequests:           &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			sidecarAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M},
+			resizeStatus:              "",
+			expectedResource: Resource{
+				MilliCPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
+				Memory:   mem500M.Value() + mem800M.Value(),
+			},
+			expectedNon0CPU: cpu500m.MilliValue() + cpu700m.MilliValue(),
+			expectedNon0Mem: mem500M.Value() + mem800M.Value(),
+		},
 	}
 
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			pod := testpod.DeepCopy()
-			pod.Spec.Containers[0].Resources.Requests = tt.requests
-			pod.Status.ContainerStatuses[0].AllocatedResources = tt.allocatedResources
-			pod.Status.Resize = tt.resizeStatus
-
-			res, non0CPU, non0Mem := calculateResource(pod)
+			pod := preparePod(*testpod.DeepCopy(),
+				&tt.requests, &tt.allocatedResources,
+				tt.initRequests, tt.initAllocatedResources,
+				tt.sidecarRequests, tt.sidecarAllocatedResources,
+				tt.resizeStatus)
+
+			res, non0CPU, non0Mem := calculateResource(&pod)
 			if !reflect.DeepEqual(tt.expectedResource, res) {
 				t.Errorf("Test: %s expected resource: %+v, got: %+v", tt.name, tt.expectedResource, res)
 			}