Mirror of https://github.com/k3s-io/kubernetes.git (last synced 2025-08-04 01:40:07 +00:00)

Commit b1f07bb36c (parent 6fbc3a618f): add tests for scheduler
@@ -675,19 +675,29 @@ func TestRestartableInitContainers(t *testing.T) {
 			},
 		}
 	}
-	newPodWithRestartableInitContainers := func() *v1.Pod {
+	newPodWithRestartableInitContainers := func(request, sidecarRequest *v1.ResourceList) *v1.Pod {
 		restartPolicyAlways := v1.ContainerRestartPolicyAlways
+
+		container := v1.Container{Name: "regular"}
+		if request != nil {
+			container.Resources = v1.ResourceRequirements{
+				Requests: *request,
+			}
+		}
+
+		sidecarContainer := v1.Container{
+			Name:          "restartable-init",
+			RestartPolicy: &restartPolicyAlways,
+		}
+		if sidecarRequest != nil {
+			sidecarContainer.Resources = v1.ResourceRequirements{
+				Requests: *sidecarRequest,
+			}
+		}
 		return &v1.Pod{
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{
-					{Name: "regular"},
-				},
-				InitContainers: []v1.Container{
-					{
-						Name:          "restartable-init",
-						RestartPolicy: &restartPolicyAlways,
-					},
-				},
+				Containers:     []v1.Container{container},
+				InitContainers: []v1.Container{sidecarContainer},
 			},
 		}
 	}
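With the SidecarContainers feature enabled, a restartable init container keeps running for the pod's entire lifetime, so the scheduler charges its requests on top of the regular containers' requests instead of only during initialization. A minimal sketch of that accounting, under a deliberately simplified model (the real resource helpers also take the maximum against non-restartable init containers and add pod overhead):

    package sketch

    import v1 "k8s.io/api/core/v1"

    // effectiveCPURequest is an illustrative simplification of how a pod with
    // restartable init containers is charged: regular containers plus every
    // init container whose restartPolicy is Always.
    func effectiveCPURequest(pod *v1.Pod) int64 {
        var milli int64
        for _, c := range pod.Spec.Containers {
            milli += c.Resources.Requests.Cpu().MilliValue()
        }
        for _, c := range pod.Spec.InitContainers {
            if c.RestartPolicy != nil && *c.RestartPolicy == v1.ContainerRestartPolicyAlways {
                milli += c.Resources.Requests.Cpu().MilliValue()
            }
        }
        return milli
    }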
@@ -697,6 +707,7 @@ func TestRestartableInitContainers(t *testing.T) {
 		pod                     *v1.Pod
 		enableSidecarContainers bool
 		wantPreFilterStatus     *framework.Status
+		wantFilterStatus        *framework.Status
 	}{
 		{
 			name: "allow pod without restartable init containers if sidecar containers is disabled",
@@ -704,7 +715,7 @@ func TestRestartableInitContainers(t *testing.T) {
 		},
 		{
 			name:                "not allow pod with restartable init containers if sidecar containers is disabled",
-			pod:                 newPodWithRestartableInitContainers(),
+			pod:                 newPodWithRestartableInitContainers(nil, nil),
 			wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, "Pod has a restartable init container and the SidecarContainers feature is disabled"),
 		},
 		{
@@ -715,7 +726,24 @@ func TestRestartableInitContainers(t *testing.T) {
 		{
 			name:                    "allow pod with restartable init containers if sidecar containers is enabled",
 			enableSidecarContainers: true,
-			pod:                     newPodWithRestartableInitContainers(),
+			pod:                     newPodWithRestartableInitContainers(nil, nil),
+		},
+		{
+			name:                    "allow pod if the total requested resources do not exceed the node's allocatable resources",
+			enableSidecarContainers: true,
+			pod: newPodWithRestartableInitContainers(
+				&v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(1, resource.DecimalSI)},
+				&v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(1, resource.DecimalSI)},
+			),
+		},
+		{
+			name:                    "not allow pod if the total requested resources do exceed the node's allocatable resources",
+			enableSidecarContainers: true,
+			pod: newPodWithRestartableInitContainers(
+				&v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(1, resource.DecimalSI)},
+				&v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(2, resource.DecimalSI)},
+			),
+			wantFilterStatus: framework.NewStatus(framework.Unschedulable, "Insufficient cpu"),
 		},
 	}
 
@@ -724,7 +752,7 @@ func TestRestartableInitContainers(t *testing.T) {
 			_, ctx := ktesting.NewTestContext(t)
 			ctx, cancel := context.WithCancel(ctx)
 			defer cancel()
-			node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(0, 0, 1, 0, 0, 0)}}
+			node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(2, 0, 1, 0, 0, 0)}}
 			nodeInfo := framework.NewNodeInfo()
 			nodeInfo.SetNode(&node)
 
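The new allocatable value is what drives the two filter cases added above: with 2 milliCPU allocatable (assuming, as elsewhere in this file, that the first argument of makeAllocatableResources is milliCPU), a 1m regular container plus a 1m sidecar totals 2m and fits, while a 1m regular container plus a 2m sidecar totals 3m, exceeds the node, and yields the Unschedulable "Insufficient cpu" status.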
@@ -735,15 +763,15 @@ func TestRestartableInitContainers(t *testing.T) {
 			cycleState := framework.NewCycleState()
 			_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod)
 			if diff := cmp.Diff(test.wantPreFilterStatus, preFilterStatus); diff != "" {
-				t.Error("status does not match (-expected +actual):\n", diff)
+				t.Error("prefilter status does not match (-expected +actual):\n", diff)
 			}
 			if !preFilterStatus.IsSuccess() {
 				return
 			}
 
 			filterStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, test.pod, nodeInfo)
-			if !filterStatus.IsSuccess() {
-				t.Error("status does not match (-expected +actual):\n- Success\n +\n", filterStatus.Code())
+			if diff := cmp.Diff(test.wantFilterStatus, filterStatus); diff != "" {
+				t.Error("filter status does not match (-expected +actual):\n", diff)
 			}
 		})
 	}
@@ -932,6 +960,52 @@ func TestFitScore(t *testing.T) {
 			},
 			runPreScore: false,
 		},
+		{
+			name: "test case for ScoringStrategy MostAllocated with sidecar container",
+			requestedPod: st.MakePod().
+				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
+				Obj(),
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
+				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
+			},
+			existingPods: []*v1.Pod{
+				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
+					SidecarReq(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
+				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
+			},
+			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 67}, {Name: "node2", Score: 45}},
+			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
+				ScoringStrategy: &config.ScoringStrategy{
+					Type:      config.MostAllocated,
+					Resources: defaultResources,
+				},
+			},
+			runPreScore: true,
+		},
+		{
+			name: "test case for ScoringStrategy LeastAllocated with sidecar container",
+			requestedPod: st.MakePod().
+				Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
+				Obj(),
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
+				st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(),
+			},
+			existingPods: []*v1.Pod{
+				st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
+					SidecarReq(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
+				st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(),
+			},
+			expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 55}},
+			nodeResourcesFitArgs: config.NodeResourcesFitArgs{
+				ScoringStrategy: &config.ScoringStrategy{
+					Type:      config.LeastAllocated,
+					Resources: defaultResources,
+				},
+			},
+			runPreScore: true,
+		},
 	}
 
 	for _, test := range tests {
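The expected scores can be checked by hand. Assuming the documented strategy formulas with equal weights (MostAllocated: the mean over resources of 100 * requested / allocatable; LeastAllocated: the complement, scoring unused capacity), node1 carries the existing pod, its sidecar, and the incoming pod, while node2 has no sidecar:

    package main

    import "fmt"

    // mostAllocated averages per-resource utilization, using integer math as
    // node scores do.
    func mostAllocated(reqCPU, reqMem, allocCPU, allocMem int64) int64 {
        return (reqCPU*100/allocCPU + reqMem*100/allocMem) / 2
    }

    // leastAllocated scores the unused fraction instead.
    func leastAllocated(reqCPU, reqMem, allocCPU, allocMem int64) int64 {
        return ((allocCPU-reqCPU)*100/allocCPU + (allocMem-reqMem)*100/allocMem) / 2
    }

    func main() {
        // node1: existing pod (1000 CPU / 2000 memory) + sidecar (1000/2000) + incoming pod (1000/2000)
        fmt.Println(mostAllocated(3000, 6000, 4000, 10000))  // 67
        fmt.Println(leastAllocated(3000, 6000, 4000, 10000)) // 32
        // node2: existing pod (1000/2000) + incoming pod (1000/2000), no sidecar
        fmt.Println(mostAllocated(2000, 4000, 4000, 10000))  // 45
        fmt.Println(leastAllocated(2000, 4000, 4000, 10000)) // 55
    }

The sidecar's requests are the only difference between the two nodes, which is exactly what these cases assert.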
@@ -224,6 +224,12 @@ func (c *ContainerWrapper) ResourceLimits(limMap map[v1.ResourceName]string) *ContainerWrapper {
 	return c
 }
 
+// RestartPolicy sets the container's restartPolicy to the given restartPolicy.
+func (c *ContainerWrapper) RestartPolicy(restartPolicy v1.ContainerRestartPolicy) *ContainerWrapper {
+	c.Container.RestartPolicy = &restartPolicy
+	return c
+}
+
 // PodWrapper wraps a Pod inside.
 type PodWrapper struct{ v1.Pod }
 
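The new method chains like the existing ContainerWrapper helpers; for example (an illustrative usage, not part of the commit, with a hypothetical container name and image):

    sidecar := MakeContainer().
        Name("log-shipper").
        Image("registry.k8s.io/pause:3.9").
        RestartPolicy(v1.ContainerRestartPolicyAlways).
        Obj()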
@@ -701,6 +707,17 @@ func (p *PodWrapper) InitReq(resMap map[v1.ResourceName]string) *PodWrapper {
 	return p
 }
 
+// SidecarReq adds a new sidecar container to the inner pod with given resource map.
+func (p *PodWrapper) SidecarReq(resMap map[v1.ResourceName]string) *PodWrapper {
+	if len(resMap) == 0 {
+		return p
+	}
+
+	name := fmt.Sprintf("sidecar-con%d", len(p.Spec.InitContainers))
+	p.Spec.InitContainers = append(p.Spec.InitContainers, MakeContainer().Name(name).Image(imageutils.GetPauseImageName()).RestartPolicy(v1.ContainerRestartPolicyAlways).Resources(resMap).Obj())
+	return p
+}
+
 // PreemptionPolicy sets the give preemption policy to the inner pod.
 func (p *PodWrapper) PreemptionPolicy(policy v1.PreemptionPolicy) *PodWrapper {
 	p.Spec.PreemptionPolicy = &policy
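SidecarReq composes with the other PodWrapper helpers, so a test can declare a pod whose sidecar requests count against the node in a single chain (a usage sketch; the namespace and names are illustrative):

    pod := st.MakePod().Namespace("default").Name("with-sidecar").Node("node-1").
        Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).
        SidecarReq(map[v1.ResourceName]string{"cpu": "500", "memory": "1000"}).
        Obj()

Each call appends one more init container named sidecar-con<N>, so calling SidecarReq repeatedly adds multiple sidecars.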
@@ -108,20 +108,17 @@ func initTestSchedulerForPriorityTest(t *testing.T, preScorePluginName, scorePluginName string) *testutils.TestContext {
 	return testCtx
 }
 
-func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext {
+func initTestSchedulerForNodeResourcesTest(t *testing.T, strategy configv1.ScoringStrategyType) *testutils.TestContext {
 	cfg := configtesting.V1ToInternalWithDefaults(t, configv1.KubeSchedulerConfiguration{
 		Profiles: []configv1.KubeSchedulerProfile{
 			{
 				SchedulerName: pointer.String(v1.DefaultSchedulerName),
-			},
-			{
-				SchedulerName: pointer.String("gpu-binpacking-scheduler"),
 				PluginConfig: []configv1.PluginConfig{
 					{
 						Name: noderesources.Name,
 						Args: runtime.RawExtension{Object: &configv1.NodeResourcesFitArgs{
 							ScoringStrategy: &configv1.ScoringStrategy{
-								Type: configv1.MostAllocated,
+								Type: strategy,
 								Resources: []configv1.ResourceSpec{
 									{Name: string(v1.ResourceCPU), Weight: 1},
 									{Name: string(v1.ResourceMemory), Weight: 1},
@@ -147,63 +144,220 @@ func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext {
 // TestNodeResourcesScoring verifies that scheduler's node resources priority function
 // works correctly.
 func TestNodeResourcesScoring(t *testing.T) {
-	testCtx := initTestSchedulerForNodeResourcesTest(t)
-	// Add a few nodes.
-	_, err := createAndWaitForNodesInCache(testCtx, "testnode", st.MakeNode().Capacity(
-		map[v1.ResourceName]string{
-			v1.ResourceCPU:    "8",
-			v1.ResourceMemory: "16G",
-			resourceGPU:       "4",
-		}), 2)
-	if err != nil {
-		t.Fatal(err)
-	}
-	cpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound1").Res(
-		map[v1.ResourceName]string{
-			v1.ResourceCPU:    "2",
-			v1.ResourceMemory: "4G",
-			resourceGPU:       "1",
-		},
-	).Obj())
-	if err != nil {
-		t.Fatal(err)
-	}
-	gpuBoundPod1, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("gpubound1").Res(
-		map[v1.ResourceName]string{
-			v1.ResourceCPU:    "1",
-			v1.ResourceMemory: "2G",
-			resourceGPU:       "2",
-		},
-	).Obj())
-	if err != nil {
-		t.Fatal(err)
-	}
-	if cpuBoundPod1.Spec.NodeName == "" || gpuBoundPod1.Spec.NodeName == "" {
-		t.Fatalf("pods should have nodeName assigned, got %q and %q",
-			cpuBoundPod1.Spec.NodeName, gpuBoundPod1.Spec.NodeName)
-	}
-
-	// Since both pods used the default scheduler, then they should land on two different
-	// nodes because the default configuration uses LeastAllocated.
-	if cpuBoundPod1.Spec.NodeName == gpuBoundPod1.Spec.NodeName {
-		t.Fatalf("pods should have landed on different nodes, both scheduled on %q",
-			cpuBoundPod1.Spec.NodeName)
-	}
-
-	// The following pod is using the gpu-binpacking-scheduler profile, which gives a higher weight to
-	// GPU-based binpacking, and so it should land on the node with higher GPU utilization.
-	cpuBoundPod2, err := runPausePod(testCtx.ClientSet, st.MakePod().Namespace(testCtx.NS.Name).Name("cpubound2").SchedulerName("gpu-binpacking-scheduler").Res(
-		map[v1.ResourceName]string{
-			v1.ResourceCPU:    "2",
-			v1.ResourceMemory: "4G",
-			resourceGPU:       "1",
-		},
-	).Obj())
-	if err != nil {
-		t.Fatal(err)
-	}
-	if cpuBoundPod2.Spec.NodeName != gpuBoundPod1.Spec.NodeName {
-		t.Errorf("pods should have landed on the same node")
+	tests := []struct {
+		name         string
+		pod          func(testCtx *testutils.TestContext) *v1.Pod
+		existingPods func(testCtx *testutils.TestContext) []*v1.Pod
+		nodes        []*v1.Node
+		strategy     configv1.ScoringStrategyType
+		// expectedNodeName is the list of node names. The pod should be scheduled on either of them.
+		expectedNodeName []string
+	}{
+		{
+			name: "with least allocated strategy, pod scheduled to node with more allocatable resources",
+			pod: func(testCtx *testutils.TestContext) *v1.Pod {
+				return st.MakePod().Namespace(testCtx.NS.Name).Name("pod").
+					Res(map[v1.ResourceName]string{
+						v1.ResourceCPU:    "2",
+						v1.ResourceMemory: "4G",
+						resourceGPU:       "1",
+					}).Obj()
+			},
+			existingPods: func(testCtx *testutils.TestContext) []*v1.Pod {
+				return []*v1.Pod{
+					st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod").Node("node-1").
+						Res(map[v1.ResourceName]string{
+							v1.ResourceCPU:    "2",
+							v1.ResourceMemory: "4G",
+							resourceGPU:       "1",
+						}).Obj(),
+				}
+			},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-1").Capacity(
+					map[v1.ResourceName]string{
+						v1.ResourceCPU:    "8",
+						v1.ResourceMemory: "16G",
+						resourceGPU:       "4",
+					}).Obj(),
+				st.MakeNode().Name("node-2").Capacity(
+					map[v1.ResourceName]string{
+						v1.ResourceCPU:    "8",
+						v1.ResourceMemory: "16G",
+						resourceGPU:       "4",
+					}).Obj(),
+			},
+			strategy:         configv1.LeastAllocated,
+			expectedNodeName: []string{"node-2"},
+		},
+		{
+			name: "with most allocated strategy, pod scheduled to node with less allocatable resources",
+			pod: func(testCtx *testutils.TestContext) *v1.Pod {
+				return st.MakePod().Namespace(testCtx.NS.Name).Name("pod").
+					Res(map[v1.ResourceName]string{
+						v1.ResourceCPU:    "2",
+						v1.ResourceMemory: "4G",
+						resourceGPU:       "1",
+					}).Obj()
+			},
+			existingPods: func(testCtx *testutils.TestContext) []*v1.Pod {
+				return []*v1.Pod{
+					st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod").Node("node-1").
+						Res(map[v1.ResourceName]string{
+							v1.ResourceCPU:    "2",
+							v1.ResourceMemory: "4G",
+							resourceGPU:       "1",
+						}).Obj(),
+				}
+			},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-1").Capacity(
+					map[v1.ResourceName]string{
+						v1.ResourceCPU:    "8",
+						v1.ResourceMemory: "16G",
+						resourceGPU:       "4",
+					}).Obj(),
+				st.MakeNode().Name("node-2").Capacity(
+					map[v1.ResourceName]string{
+						v1.ResourceCPU:    "8",
+						v1.ResourceMemory: "16G",
+						resourceGPU:       "4",
+					}).Obj(),
+			},
+			strategy:         configv1.MostAllocated,
+			expectedNodeName: []string{"node-1"},
+		},
+		{
+			name: "with least allocated strategy, take existing sidecars into consideration",
+			pod: func(testCtx *testutils.TestContext) *v1.Pod {
+				return st.MakePod().Namespace(testCtx.NS.Name).Name("pod").
+					Res(map[v1.ResourceName]string{
+						v1.ResourceCPU:    "2",
+						v1.ResourceMemory: "4G",
+						resourceGPU:       "1",
+					}).Obj()
+			},
+			existingPods: func(testCtx *testutils.TestContext) []*v1.Pod {
+				return []*v1.Pod{
+					st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod-1").Node("node-1").
+						Res(map[v1.ResourceName]string{
+							v1.ResourceCPU:    "2",
+							v1.ResourceMemory: "4G",
+							resourceGPU:       "1",
+						}).
+						SidecarReq(map[v1.ResourceName]string{
+							v1.ResourceCPU:    "2",
+							v1.ResourceMemory: "2G",
+						}).
+						Obj(),
+					st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod-2").Node("node-2").
+						Res(map[v1.ResourceName]string{
+							v1.ResourceCPU:    "2",
+							v1.ResourceMemory: "4G",
+							resourceGPU:       "1",
+						}).Obj(),
+				}
+			},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-1").Capacity(
+					map[v1.ResourceName]string{
+						v1.ResourceCPU:    "8",
+						v1.ResourceMemory: "16G",
+						resourceGPU:       "4",
+					}).Obj(),
+				st.MakeNode().Name("node-2").Capacity(
+					map[v1.ResourceName]string{
+						v1.ResourceCPU:    "8",
+						v1.ResourceMemory: "16G",
+						resourceGPU:       "4",
+					}).Obj(),
+			},
+			strategy:         configv1.LeastAllocated,
+			expectedNodeName: []string{"node-2"},
+		},
+		{
+			name: "with most allocated strategy, take existing sidecars into consideration",
+			pod: func(testCtx *testutils.TestContext) *v1.Pod {
+				return st.MakePod().Namespace(testCtx.NS.Name).Name("pod").
+					Res(map[v1.ResourceName]string{
+						v1.ResourceCPU:    "2",
+						v1.ResourceMemory: "4G",
+						resourceGPU:       "1",
+					}).Obj()
+			},
+			existingPods: func(testCtx *testutils.TestContext) []*v1.Pod {
+				return []*v1.Pod{
+					st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod-1").Node("node-1").
+						Res(map[v1.ResourceName]string{
+							v1.ResourceCPU:    "2",
+							v1.ResourceMemory: "4G",
+							resourceGPU:       "1",
+						}).
+						SidecarReq(map[v1.ResourceName]string{
+							v1.ResourceCPU:    "2",
+							v1.ResourceMemory: "2G",
+						}).
+						Obj(),
+					st.MakePod().Namespace(testCtx.NS.Name).Name("existing-pod-2").Node("node-2").
+						Res(map[v1.ResourceName]string{
+							v1.ResourceCPU:    "2",
+							v1.ResourceMemory: "4G",
+							resourceGPU:       "1",
+						}).Obj(),
+				}
+			},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-1").Capacity(
+					map[v1.ResourceName]string{
+						v1.ResourceCPU:    "8",
+						v1.ResourceMemory: "16G",
+						resourceGPU:       "4",
+					}).Obj(),
+				st.MakeNode().Name("node-2").Capacity(
+					map[v1.ResourceName]string{
+						v1.ResourceCPU:    "8",
+						v1.ResourceMemory: "16G",
+						resourceGPU:       "4",
+					}).Obj(),
+			},
+			strategy:         configv1.MostAllocated,
+			expectedNodeName: []string{"node-1"},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SidecarContainers, true)
+			testCtx := initTestSchedulerForNodeResourcesTest(t, tt.strategy)
+
+			for _, n := range tt.nodes {
+				if _, err := createNode(testCtx.ClientSet, n); err != nil {
+					t.Fatalf("failed to create node: %v", err)
+				}
+			}
+
+			if err := testutils.WaitForNodesInCache(testCtx.Ctx, testCtx.Scheduler, len(tt.nodes)); err != nil {
+				t.Fatalf("failed to wait for nodes in cache: %v", err)
+			}
+
+			if tt.existingPods != nil {
+				for _, p := range tt.existingPods(testCtx) {
+					if _, err := runPausePod(testCtx.ClientSet, p); err != nil {
+						t.Fatalf("failed to create existing pod: %v", err)
+					}
+				}
+			}
+
+			pod, err := runPausePod(testCtx.ClientSet, tt.pod(testCtx))
+			if err != nil {
+				t.Fatalf("Error running pause pod: %v", err)
+			}
+
+			err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false, podScheduledIn(testCtx.ClientSet, pod.Namespace, pod.Name, tt.expectedNodeName))
+			if err != nil {
+				t.Errorf("Error while trying to wait for a pod to be scheduled: %v", err)
+			}
+		})
 	}
 }
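The loop polls podScheduledIn, an existing integration-test helper; the condition it waits for amounts to the pod's .spec.nodeName landing in the expected set (a sketch of the check's shape, assuming the helper's semantics, not its actual code):

    package sketch

    import v1 "k8s.io/api/core/v1"

    // scheduledIn reports whether the pod was bound to any of the given nodes.
    func scheduledIn(pod *v1.Pod, nodeNames []string) bool {
        for _, name := range nodeNames {
            if pod.Spec.NodeName == name {
                return true
            }
        }
        return false
    }

Accepting a list of node names keeps the cases deterministic even when two nodes would score identically.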