From 6fbc3a618fa5b222e67901f65d40025fb5004928 Mon Sep 17 00:00:00 2001 From: AxeZhan Date: Thu, 10 Oct 2024 14:42:31 +0800 Subject: [PATCH] using NonMissingContainerRequests --- pkg/scheduler/framework/types.go | 38 +++----- pkg/scheduler/framework/types_test.go | 133 ++++++++++++++++++++------ 2 files changed, 116 insertions(+), 55 deletions(-) diff --git a/pkg/scheduler/framework/types.go b/pkg/scheduler/framework/types.go index bc687ef0d1f..bc39253627f 100644 --- a/pkg/scheduler/framework/types.go +++ b/pkg/scheduler/framework/types.go @@ -32,7 +32,7 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog/v2" - podutil "k8s.io/kubernetes/pkg/api/v1/pod" + "k8s.io/apimachinery/pkg/api/resource" resourcehelper "k8s.io/kubernetes/pkg/api/v1/resource" "k8s.io/kubernetes/pkg/features" schedutil "k8s.io/kubernetes/pkg/scheduler/util" @@ -930,40 +930,24 @@ func (n *NodeInfo) update(pod *v1.Pod, sign int64) { } func calculateResource(pod *v1.Pod) (Resource, int64, int64) { - var non0InitCPU, non0InitMem int64 - var non0CPU, non0Mem int64 requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{ InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling), - ContainerFn: func(requests v1.ResourceList, containerType podutil.ContainerType) { - non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&requests) - switch containerType { - case podutil.Containers: - non0CPU += non0CPUReq - non0Mem += non0MemReq - case podutil.InitContainers: - non0InitCPU = max(non0InitCPU, non0CPUReq) - non0InitMem = max(non0InitMem, non0MemReq) - } + }) + + non0Requests := resourcehelper.PodRequests(pod, resourcehelper.PodResourcesOptions{ + InPlacePodVerticalScalingEnabled: utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling), + NonMissingContainerRequests: map[v1.ResourceName]resource.Quantity{ + v1.ResourceCPU: *resource.NewMilliQuantity(schedutil.DefaultMilliCPURequest, 
resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(schedutil.DefaultMemoryRequest, resource.DecimalSI), }, }) - non0CPU = max(non0CPU, non0InitCPU) - non0Mem = max(non0Mem, non0InitMem) + non0CPU := non0Requests[v1.ResourceCPU] + non0Mem := non0Requests[v1.ResourceMemory] - // If Overhead is being utilized, add to the non-zero cpu/memory tracking for the pod. It has already been added - // into ScalarResources since it is part of requests - if pod.Spec.Overhead != nil { - if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found { - non0CPU += pod.Spec.Overhead.Cpu().MilliValue() - } - - if _, found := pod.Spec.Overhead[v1.ResourceMemory]; found { - non0Mem += pod.Spec.Overhead.Memory().Value() - } - } var res Resource res.Add(requests) - return res, non0CPU, non0Mem + return res, non0CPU.MilliValue(), non0Mem.Value() } // updateUsedPorts updates the UsedPorts of NodeInfo. diff --git a/pkg/scheduler/framework/types_test.go b/pkg/scheduler/framework/types_test.go index e1aa53147ff..1b1d81bd561 100644 --- a/pkg/scheduler/framework/types_test.go +++ b/pkg/scheduler/framework/types_test.go @@ -1525,34 +1525,83 @@ func TestCalculatePodResourcesWithResize(t *testing.T) { Name: "testpod", UID: types.UID("testpod"), }, - Spec: v1.PodSpec{ - Containers: []v1.Container{ - { - Name: "c1", - Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}}, - }, - }, - }, Status: v1.PodStatus{ - Phase: v1.PodRunning, - Resize: "", - ContainerStatuses: []v1.ContainerStatus{ - { - Name: "c1", - AllocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, - }, - }, + Phase: v1.PodRunning, }, } + restartAlways := v1.ContainerRestartPolicyAlways + + preparePod := func(pod v1.Pod, + requests, allocatedResources, + initRequests, initAllocatedResources, + sidecarRequests, sidecarAllocatedResources *v1.ResourceList, + resizeStatus v1.PodResizeStatus) v1.Pod { + + if requests != nil { + 
pod.Spec.Containers = append(pod.Spec.Containers, + v1.Container{ + Name: "c1", + Resources: v1.ResourceRequirements{Requests: *requests}, + }) + } + if allocatedResources != nil { + pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, + v1.ContainerStatus{ + Name: "c1", + AllocatedResources: *allocatedResources, + }) + } + + if initRequests != nil { + pod.Spec.InitContainers = append(pod.Spec.InitContainers, + v1.Container{ + Name: "i1", + Resources: v1.ResourceRequirements{Requests: *initRequests}, + }, + ) + } + if initAllocatedResources != nil { + pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses, + v1.ContainerStatus{ + Name: "i1", + AllocatedResources: *initAllocatedResources, + }) + } + + if sidecarRequests != nil { + pod.Spec.InitContainers = append(pod.Spec.InitContainers, + v1.Container{ + Name: "s1", + Resources: v1.ResourceRequirements{Requests: *sidecarRequests}, + RestartPolicy: &restartAlways, + }, + ) + } + if sidecarAllocatedResources != nil { + pod.Status.InitContainerStatuses = append(pod.Status.InitContainerStatuses, + v1.ContainerStatus{ + Name: "s1", + AllocatedResources: *sidecarAllocatedResources, + }) + } + + pod.Status.Resize = resizeStatus + return pod + } + tests := []struct { - name string - requests v1.ResourceList - allocatedResources v1.ResourceList - resizeStatus v1.PodResizeStatus - expectedResource Resource - expectedNon0CPU int64 - expectedNon0Mem int64 + name string + requests v1.ResourceList + allocatedResources v1.ResourceList + initRequests *v1.ResourceList + initAllocatedResources *v1.ResourceList + sidecarRequests *v1.ResourceList + sidecarAllocatedResources *v1.ResourceList + resizeStatus v1.PodResizeStatus + expectedResource Resource + expectedNon0CPU int64 + expectedNon0Mem int64 }{ { name: "Pod with no pending resize", @@ -1590,16 +1639,44 @@ func TestCalculatePodResourcesWithResize(t *testing.T) { expectedNon0CPU: cpu500m.MilliValue(), expectedNon0Mem: mem500M.Value(), }, + { + 
name: "Pod with init container and no pending resize", + requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, + allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, + initRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, + initAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, + resizeStatus: "", + expectedResource: Resource{MilliCPU: cpu700m.MilliValue(), Memory: mem800M.Value()}, + expectedNon0CPU: cpu700m.MilliValue(), + expectedNon0Mem: mem800M.Value(), + }, + { + name: "Pod with sidecar container and no pending resize", + requests: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, + allocatedResources: v1.ResourceList{v1.ResourceCPU: cpu500m, v1.ResourceMemory: mem500M}, + initRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, + initAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, + sidecarRequests: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, + sidecarAllocatedResources: &v1.ResourceList{v1.ResourceCPU: cpu700m, v1.ResourceMemory: mem800M}, + resizeStatus: "", + expectedResource: Resource{ + MilliCPU: cpu500m.MilliValue() + cpu700m.MilliValue(), + Memory: mem500M.Value() + mem800M.Value(), + }, + expectedNon0CPU: cpu500m.MilliValue() + cpu700m.MilliValue(), + expectedNon0Mem: mem500M.Value() + mem800M.Value(), + }, + } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - pod := testpod.DeepCopy() - pod.Spec.Containers[0].Resources.Requests = tt.requests - pod.Status.ContainerStatuses[0].AllocatedResources = tt.allocatedResources - pod.Status.Resize = tt.resizeStatus + pod := preparePod(*testpod.DeepCopy(), + &tt.requests, &tt.allocatedResources, + tt.initRequests, tt.initAllocatedResources, + tt.sidecarRequests, tt.sidecarAllocatedResources, + tt.resizeStatus) - res, non0CPU, non0Mem := 
calculateResource(pod) + res, non0CPU, non0Mem := calculateResource(&pod) if !reflect.DeepEqual(tt.expectedResource, res) { t.Errorf("Test: %s expected resource: %+v, got: %+v", tt.name, tt.expectedResource, res) }