Improving scheduler extender test coverage
Signed-off-by: Sathyanarayanan Saravanamuthu <sathyanarays@vmware.com>
commit bf4f907bfa
parent 79cba170b5
@@ -354,13 +354,21 @@ func TestIsInterested(t *testing.T) {
 			want: false,
 		},
 		{
-			label: "Managed memory, container memory",
+			label: "Managed memory, container memory with Requests",
 			extender: mem,
 			pod: st.MakePod().Req(map[v1.ResourceName]string{
 				"memory": "0",
 			}).Obj(),
 			want: true,
 		},
+		{
+			label: "Managed memory, container memory with Limits",
+			extender: mem,
+			pod: st.MakePod().Lim(map[v1.ResourceName]string{
+				"memory": "0",
+			}).Obj(),
+			want: true,
+		},
 		{
 			label: "Managed memory, init container memory",
 			extender: mem,
@@ -187,6 +187,30 @@ func (c *ContainerWrapper) Resources(resMap map[v1.ResourceName]string) *ContainerWrapper {
 	return c
 }
 
+// ResourceRequests sets the container resources requests to the given resource map of requests.
+func (c *ContainerWrapper) ResourceRequests(reqMap map[v1.ResourceName]string) *ContainerWrapper {
+	res := v1.ResourceList{}
+	for k, v := range reqMap {
+		res[k] = resource.MustParse(v)
+	}
+	c.Container.Resources = v1.ResourceRequirements{
+		Requests: res,
+	}
+	return c
+}
+
+// ResourceLimits sets the container resource limits to the given resource map.
+func (c *ContainerWrapper) ResourceLimits(limMap map[v1.ResourceName]string) *ContainerWrapper {
+	res := v1.ResourceList{}
+	for k, v := range limMap {
+		res[k] = resource.MustParse(v)
+	}
+	c.Container.Resources = v1.ResourceRequirements{
+		Limits: res,
+	}
+	return c
+}
+
 // PodWrapper wraps a Pod inside.
 type PodWrapper struct{ v1.Pod }
 
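For orientation (not part of the commit itself): the two new ContainerWrapper helpers differ only in which field of v1.ResourceRequirements they populate, and each assigns a fresh ResourceRequirements value, so chaining ResourceRequests and ResourceLimits on the same container keeps only the last call. A minimal usage sketch; the container names and the "64Mi" quantity are illustrative, and the surrounding imports (v1, resource, imageutils) are assumed to match those the wrappers package already uses:

	// Requests-only container: Resources.Limits stays empty.
	reqOnly := MakeContainer().Name("c1").Image(imageutils.GetPauseImageName()).
		ResourceRequests(map[v1.ResourceName]string{v1.ResourceMemory: "64Mi"}).Obj()

	// Limits-only container: Resources.Requests stays empty.
	limOnly := MakeContainer().Name("c2").Image(imageutils.GetPauseImageName()).
		ResourceLimits(map[v1.ResourceName]string{v1.ResourceMemory: "64Mi"}).Obj()

	_, _ = reqOnly, limOnly // discard: sketch only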
@@ -608,8 +632,8 @@ func (p *PodWrapper) Annotations(annotations map[string]string) *PodWrapper {
 	return p
 }
 
-// Req adds a new container to the inner pod with given resource map.
-func (p *PodWrapper) Req(resMap map[v1.ResourceName]string) *PodWrapper {
+// Res adds a new container to the inner pod with given resource map.
+func (p *PodWrapper) Res(resMap map[v1.ResourceName]string) *PodWrapper {
 	if len(resMap) == 0 {
 		return p
 	}
@@ -619,6 +643,28 @@ func (p *PodWrapper) Req(resMap map[v1.ResourceName]string) *PodWrapper {
 	return p
 }
 
+// Req adds a new container to the inner pod with given resource map of requests.
+func (p *PodWrapper) Req(reqMap map[v1.ResourceName]string) *PodWrapper {
+	if len(reqMap) == 0 {
+		return p
+	}
+
+	name := fmt.Sprintf("con%d", len(p.Spec.Containers))
+	p.Spec.Containers = append(p.Spec.Containers, MakeContainer().Name(name).Image(imageutils.GetPauseImageName()).ResourceRequests(reqMap).Obj())
+	return p
+}
+
+// Lim adds a new container to the inner pod with given resource map of limits.
+func (p *PodWrapper) Lim(limMap map[v1.ResourceName]string) *PodWrapper {
+	if len(limMap) == 0 {
+		return p
+	}
+
+	name := fmt.Sprintf("con%d", len(p.Spec.Containers))
+	p.Spec.Containers = append(p.Spec.Containers, MakeContainer().Name(name).Image(imageutils.GetPauseImageName()).ResourceLimits(limMap).Obj())
+	return p
+}
+
 // InitReq adds a new init container to the inner pod with given resource map.
 func (p *PodWrapper) InitReq(resMap map[v1.ResourceName]string) *PodWrapper {
 	if len(resMap) == 0 {
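Taken together with the rename in the previous hunk, the pod-level helpers now split three ways: Res is the former Req under a new name, while the new Req and Lim append a pause container whose resources carry only requests or only limits, respectively. A short sketch of how a test might build the two new pod shapes; the "memory": "0" quantity mirrors the extender test above, the variable names are illustrative, and st stands for this testing package as in the tests below:

	// Pod whose single container declares only resource requests.
	withRequests := st.MakePod().Req(map[v1.ResourceName]string{"memory": "0"}).Obj()

	// Pod whose single container declares only resource limits.
	withLimits := st.MakePod().Lim(map[v1.ResourceName]string{"memory": "0"}).Obj()

	_, _ = withRequests, withLimits // discard: sketch only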
@@ -151,7 +151,7 @@ func TestNodeResourcesScoring(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	cpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound1").Req(
+	cpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound1").Res(
 		map[v1.ResourceName]string{
 			v1.ResourceCPU: "2",
 			v1.ResourceMemory: "4G",
@@ -161,7 +161,7 @@ func TestNodeResourcesScoring(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	gpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("gpubound1").Req(
+	gpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("gpubound1").Res(
 		map[v1.ResourceName]string{
 			v1.ResourceCPU: "1",
 			v1.ResourceMemory: "2G",
@@ -185,7 +185,7 @@ func TestNodeResourcesScoring(t *testing.T) {
 
 	// The following pod is using the gpu-binpacking-scheduler profile, which gives a higher weight to
 	// GPU-based binpacking, and so it should land on the node with higher GPU utilization.
-	cpuBoundPod2, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound2").SchedulerName("gpu-binpacking-scheduler").Req(
+	cpuBoundPod2, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound2").SchedulerName("gpu-binpacking-scheduler").Res(
 		map[v1.ResourceName]string{
 			v1.ResourceCPU: "2",
 			v1.ResourceMemory: "4G",