From bf4f907bfac38a59a4b3e18cc7ec36d2186a86ef Mon Sep 17 00:00:00 2001
From: Sathyanarayanan Saravanamuthu
Date: Mon, 5 Dec 2022 12:10:10 +0530
Subject: [PATCH] Improving schedule extender test coverage

Signed-off-by: Sathyanarayanan Saravanamuthu
---
 pkg/scheduler/extender_test.go                | 10 +++-
 pkg/scheduler/testing/wrappers.go             | 50 ++++++++++++++++++-
 .../scheduler/scoring/priorities_test.go      |  6 +--
 3 files changed, 60 insertions(+), 6 deletions(-)

diff --git a/pkg/scheduler/extender_test.go b/pkg/scheduler/extender_test.go
index 6d80a93148a..64305665179 100644
--- a/pkg/scheduler/extender_test.go
+++ b/pkg/scheduler/extender_test.go
@@ -354,13 +354,21 @@ func TestIsInterested(t *testing.T) {
 			want:     false,
 		},
 		{
-			label:    "Managed memory, container memory",
+			label:    "Managed memory, container memory with Requests",
 			extender: mem,
 			pod: st.MakePod().Req(map[v1.ResourceName]string{
 				"memory": "0",
 			}).Obj(),
 			want: true,
 		},
+		{
+			label:    "Managed memory, container memory with Limits",
+			extender: mem,
+			pod: st.MakePod().Lim(map[v1.ResourceName]string{
+				"memory": "0",
+			}).Obj(),
+			want: true,
+		},
 		{
 			label:    "Managed memory, init container memory",
 			extender: mem,
diff --git a/pkg/scheduler/testing/wrappers.go b/pkg/scheduler/testing/wrappers.go
index f632c42ebd7..70579dd800e 100644
--- a/pkg/scheduler/testing/wrappers.go
+++ b/pkg/scheduler/testing/wrappers.go
@@ -187,6 +187,30 @@ func (c *ContainerWrapper) Resources(resMap map[v1.ResourceName]string) *Contain
 	return c
 }
 
+// ResourceRequests sets the container resources requests to the given resource map of requests.
+func (c *ContainerWrapper) ResourceRequests(reqMap map[v1.ResourceName]string) *ContainerWrapper {
+	res := v1.ResourceList{}
+	for k, v := range reqMap {
+		res[k] = resource.MustParse(v)
+	}
+	c.Container.Resources = v1.ResourceRequirements{
+		Requests: res,
+	}
+	return c
+}
+
+// ResourceLimits sets the container resource limits to the given resource map.
+func (c *ContainerWrapper) ResourceLimits(limMap map[v1.ResourceName]string) *ContainerWrapper {
+	res := v1.ResourceList{}
+	for k, v := range limMap {
+		res[k] = resource.MustParse(v)
+	}
+	c.Container.Resources = v1.ResourceRequirements{
+		Limits: res,
+	}
+	return c
+}
+
 // PodWrapper wraps a Pod inside.
 type PodWrapper struct{ v1.Pod }
 
@@ -608,8 +632,8 @@ func (p *PodWrapper) Annotations(annotations map[string]string) *PodWrapper {
 	return p
 }
 
-// Req adds a new container to the inner pod with given resource map.
-func (p *PodWrapper) Req(resMap map[v1.ResourceName]string) *PodWrapper {
+// Res adds a new container to the inner pod with given resource map.
+func (p *PodWrapper) Res(resMap map[v1.ResourceName]string) *PodWrapper {
 	if len(resMap) == 0 {
 		return p
 	}
@@ -619,6 +643,28 @@
 	return p
 }
 
+// Req adds a new container to the inner pod with given resource map of requests.
+func (p *PodWrapper) Req(reqMap map[v1.ResourceName]string) *PodWrapper {
+	if len(reqMap) == 0 {
+		return p
+	}
+
+	name := fmt.Sprintf("con%d", len(p.Spec.Containers))
+	p.Spec.Containers = append(p.Spec.Containers, MakeContainer().Name(name).Image(imageutils.GetPauseImageName()).ResourceRequests(reqMap).Obj())
+	return p
+}
+
+// Lim adds a new container to the inner pod with given resource map of limits.
+func (p *PodWrapper) Lim(limMap map[v1.ResourceName]string) *PodWrapper {
+	if len(limMap) == 0 {
+		return p
+	}
+
+	name := fmt.Sprintf("con%d", len(p.Spec.Containers))
+	p.Spec.Containers = append(p.Spec.Containers, MakeContainer().Name(name).Image(imageutils.GetPauseImageName()).ResourceLimits(limMap).Obj())
+	return p
+}
+
 // InitReq adds a new init container to the inner pod with given resource map.
 func (p *PodWrapper) InitReq(resMap map[v1.ResourceName]string) *PodWrapper {
 	if len(resMap) == 0 {
 		return p
 	}
diff --git a/test/integration/scheduler/scoring/priorities_test.go b/test/integration/scheduler/scoring/priorities_test.go
index d805e66442d..8237ccf9e5b 100644
--- a/test/integration/scheduler/scoring/priorities_test.go
+++ b/test/integration/scheduler/scoring/priorities_test.go
@@ -151,7 +151,7 @@ func TestNodeResourcesScoring(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	cpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound1").Req(
+	cpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound1").Res(
 		map[v1.ResourceName]string{
 			v1.ResourceCPU:    "2",
 			v1.ResourceMemory: "4G",
@@ -161,7 +161,7 @@ func TestNodeResourcesScoring(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	gpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("gpubound1").Req(
+	gpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("gpubound1").Res(
 		map[v1.ResourceName]string{
 			v1.ResourceCPU:    "1",
 			v1.ResourceMemory: "2G",
@@ -185,7 +185,7 @@ func TestNodeResourcesScoring(t *testing.T) {
 
 	// The following pod is using the gpu-binpacking-scheduler profile, which gives a higher weight to
 	// GPU-based binpacking, and so it should land on the node with higher GPU utilization.
-	cpuBoundPod2, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound2").SchedulerName("gpu-binpacking-scheduler").Req(
+	cpuBoundPod2, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound2").SchedulerName("gpu-binpacking-scheduler").Res(
 		map[v1.ResourceName]string{
 			v1.ResourceCPU:    "2",
 			v1.ResourceMemory: "4G",
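
Below is a brief, illustrative sketch of how the renamed Res wrapper and the new Req and Lim wrappers from this patch compose when building test pods. It is not part of the patch; the package layout, the pod names, and the 128Mi quantity are assumptions chosen for the example.

// Illustrative sketch only, not part of the patch above. Pod names and the
// memory quantity are invented; the wrapper methods come from
// pkg/scheduler/testing/wrappers.go as changed by this patch.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

func main() {
	mem := map[v1.ResourceName]string{v1.ResourceMemory: "128Mi"}

	// Res keeps the behaviour of the wrapper previously named Req:
	// it adds a pause container built from the plain resource map.
	resPod := st.MakePod().Name("res-pod").Res(mem).Obj()

	// Req adds a pause container with only Requests set (via ResourceRequests).
	reqPod := st.MakePod().Name("req-pod").Req(mem).Obj()

	// Lim adds a pause container with only Limits set (via ResourceLimits);
	// this is what the new "Managed memory, container memory with Limits"
	// case in TestIsInterested exercises.
	limPod := st.MakePod().Name("lim-pod").Lim(mem).Obj()

	fmt.Println(len(resPod.Spec.Containers), len(reqPod.Spec.Containers), len(limPod.Spec.Containers))
}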