Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-19 18:02:01 +00:00
Improving scheduler extender test coverage
Signed-off-by: Sathyanarayanan Saravanamuthu <sathyanarays@vmware.com>
This commit is contained in:
parent 79cba170b5
commit bf4f907bfa
@@ -354,13 +354,21 @@ func TestIsInterested(t *testing.T) {
 			want: false,
 		},
 		{
-			label: "Managed memory, container memory",
+			label: "Managed memory, container memory with Requests",
 			extender: mem,
 			pod: st.MakePod().Req(map[v1.ResourceName]string{
 				"memory": "0",
 			}).Obj(),
 			want: true,
 		},
+		{
+			label: "Managed memory, container memory with Limits",
+			extender: mem,
+			pod: st.MakePod().Lim(map[v1.ResourceName]string{
+				"memory": "0",
+			}).Obj(),
+			want: true,
+		},
 		{
 			label: "Managed memory, init container memory",
 			extender: mem,
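For reference, the new "with Limits" case builds a pod whose only container declares a memory limit and no request. A minimal, self-contained sketch of that shape using plain API types (illustrative only, not part of this commit; the real wrapper also sets a pause image, omitted here):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Shape produced by st.MakePod().Lim(map[v1.ResourceName]string{"memory": "0"}).Obj():
	// one container ("con0") with a memory limit and no requests.
	pod := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name: "con0",
				Resources: v1.ResourceRequirements{
					Limits: v1.ResourceList{
						v1.ResourceMemory: resource.MustParse("0"),
					},
				},
			}},
		},
	}
	limit := pod.Spec.Containers[0].Resources.Limits[v1.ResourceMemory]
	fmt.Println(limit.String(), len(pod.Spec.Containers[0].Resources.Requests)) // 0 0
}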
@@ -187,6 +187,30 @@ func (c *ContainerWrapper) Resources(resMap map[v1.ResourceName]string) *ContainerWrapper {
 	return c
 }
 
+// ResourceRequests sets the container resource requests to the given resource map.
+func (c *ContainerWrapper) ResourceRequests(reqMap map[v1.ResourceName]string) *ContainerWrapper {
+	res := v1.ResourceList{}
+	for k, v := range reqMap {
+		res[k] = resource.MustParse(v)
+	}
+	c.Container.Resources = v1.ResourceRequirements{
+		Requests: res,
+	}
+	return c
+}
+
+// ResourceLimits sets the container resource limits to the given resource map.
+func (c *ContainerWrapper) ResourceLimits(limMap map[v1.ResourceName]string) *ContainerWrapper {
+	res := v1.ResourceList{}
+	for k, v := range limMap {
+		res[k] = resource.MustParse(v)
+	}
+	c.Container.Resources = v1.ResourceRequirements{
+		Limits: res,
+	}
+	return c
+}
+
 // PodWrapper wraps a Pod inside.
 type PodWrapper struct{ v1.Pod }
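The two new ContainerWrapper helpers each assign a fresh v1.ResourceRequirements, so a single call sets either Requests or Limits, never both. A hypothetical sketch of the difference, assuming it sits alongside the wrappers in pkg/scheduler/testing (the example function itself is not part of this commit):

package testing

import v1 "k8s.io/api/core/v1"

// exampleResourceWrappers is illustrative only: each helper builds a fresh
// v1.ResourceRequirements, so reqOnly carries only Requests and limOnly
// carries only Limits.
func exampleResourceWrappers() (reqOnly, limOnly v1.Container) {
	reqOnly = MakeContainer().Name("c0").
		ResourceRequests(map[v1.ResourceName]string{v1.ResourceMemory: "64Mi"}).Obj()
	limOnly = MakeContainer().Name("c1").
		ResourceLimits(map[v1.ResourceName]string{v1.ResourceMemory: "64Mi"}).Obj()
	// reqOnly.Resources.Limits and limOnly.Resources.Requests are both left empty here.
	return reqOnly, limOnly
}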
@@ -608,8 +632,8 @@ func (p *PodWrapper) Annotations(annotations map[string]string) *PodWrapper {
 	return p
 }
 
-// Req adds a new container to the inner pod with given resource map.
-func (p *PodWrapper) Req(resMap map[v1.ResourceName]string) *PodWrapper {
+// Res adds a new container to the inner pod with given resource map.
+func (p *PodWrapper) Res(resMap map[v1.ResourceName]string) *PodWrapper {
 	if len(resMap) == 0 {
 		return p
 	}
@@ -619,6 +643,28 @@ func (p *PodWrapper) Req(resMap map[v1.ResourceName]string) *PodWrapper {
 	return p
 }
 
+// Req adds a new container to the inner pod with given resource map of requests.
+func (p *PodWrapper) Req(reqMap map[v1.ResourceName]string) *PodWrapper {
+	if len(reqMap) == 0 {
+		return p
+	}
+
+	name := fmt.Sprintf("con%d", len(p.Spec.Containers))
+	p.Spec.Containers = append(p.Spec.Containers, MakeContainer().Name(name).Image(imageutils.GetPauseImageName()).ResourceRequests(reqMap).Obj())
+	return p
+}
+
+// Lim adds a new container to the inner pod with given resource map of limits.
+func (p *PodWrapper) Lim(limMap map[v1.ResourceName]string) *PodWrapper {
+	if len(limMap) == 0 {
+		return p
+	}
+
+	name := fmt.Sprintf("con%d", len(p.Spec.Containers))
+	p.Spec.Containers = append(p.Spec.Containers, MakeContainer().Name(name).Image(imageutils.GetPauseImageName()).ResourceLimits(limMap).Obj())
+	return p
+}
+
 // InitReq adds a new init container to the inner pod with given resource map.
 func (p *PodWrapper) InitReq(resMap map[v1.ResourceName]string) *PodWrapper {
 	if len(resMap) == 0 {
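Taken together with the Req-to-Res rename above, the PodWrapper vocabulary is now: Res takes a full resource map (what Req used to take), while the new Req and Lim set only requests or only limits, each call appending one more pause container. A hypothetical usage sketch (package name, function name, and values are illustrative, not from this commit):

package example

import (
	v1 "k8s.io/api/core/v1"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// resourcePodSketch shows the three wrappers side by side; every call appends
// one pause container, so the resulting pod has three containers.
func resourcePodSketch() *v1.Pod {
	return st.MakePod().Name("p").Namespace("default").
		Res(map[v1.ResourceName]string{v1.ResourceCPU: "1"}).      // con0: full resource map (the old Req behavior)
		Req(map[v1.ResourceName]string{v1.ResourceMemory: "1Gi"}). // con1: requests only
		Lim(map[v1.ResourceName]string{v1.ResourceMemory: "2Gi"}). // con2: limits only
		Obj()
}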
@@ -151,7 +151,7 @@ func TestNodeResourcesScoring(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	cpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound1").Req(
+	cpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound1").Res(
 		map[v1.ResourceName]string{
 			v1.ResourceCPU: "2",
 			v1.ResourceMemory: "4G",
@@ -161,7 +161,7 @@ func TestNodeResourcesScoring(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	gpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("gpubound1").Req(
+	gpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("gpubound1").Res(
 		map[v1.ResourceName]string{
 			v1.ResourceCPU: "1",
 			v1.ResourceMemory: "2G",
@@ -185,7 +185,7 @@ func TestNodeResourcesScoring(t *testing.T) {
 
 	// The following pod is using the gpu-binpacking-scheduler profile, which gives a higher weight to
 	// GPU-based binpacking, and so it should land on the node with higher GPU utilization.
-	cpuBoundPod2, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound2").SchedulerName("gpu-binpacking-scheduler").Req(
+	cpuBoundPod2, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound2").SchedulerName("gpu-binpacking-scheduler").Res(
 		map[v1.ResourceName]string{
 			v1.ResourceCPU: "2",
 			v1.ResourceMemory: "4G",
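These call sites only switch from the old .Req( to the renamed .Res(; the quantity strings are unchanged and are parsed with resource.MustParse (as the new helpers above do), so they follow the usual Kubernetes quantity syntax. A small runnable sketch of what those strings mean (illustrative only, not part of this commit):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// "2" parses as 2 CPU cores; "4G" is the decimal SI quantity 4*10^9 bytes
	// (distinct from "4Gi" = 4*2^30 bytes).
	cpu := resource.MustParse("2")
	mem := resource.MustParse("4G")
	fmt.Println(cpu.String()) // 2
	fmt.Println(mem.Value())  // 4000000000
}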