Add test to verify preempt ignore
parent 7f04129736
commit 083684d771
@@ -146,7 +146,10 @@ func (f *FakeExtender) ProcessPreemption(
 
 	for node, victims := range nodeToVictimsCopy {
 		// Try to do preemption on extender side.
-		extenderVictimPods, extendernPDBViolations, fits := f.selectVictimsOnNodeByExtender(pod, node, nodeNameToInfo)
+		extenderVictimPods, extendernPDBViolations, fits, err := f.selectVictimsOnNodeByExtender(pod, node, nodeNameToInfo)
+		if err != nil {
+			return nil, err
+		}
 		// If it's unfit after extender's preemption, this node is unresolvable by preemption overall,
 		// let's remove it from potential preemption nodes.
 		if !fits {
@@ -169,15 +172,18 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(
 	pod *v1.Pod,
 	node *v1.Node,
 	nodeNameToInfo map[string]*schedulercache.NodeInfo,
-) ([]*v1.Pod, int, bool) {
-	// TODO(harry): add more test in generic_scheduler_test.go to verify this logic.
+) ([]*v1.Pod, int, bool, error) {
 	// If a extender support preemption but have no cached node info, let's run filter to make sure
 	// default scheduler's decision still stand with given pod and node.
 	if !f.nodeCacheCapable {
-		if fits, _ := f.runPredicate(pod, node); !fits {
-			return nil, 0, false
+		fits, err := f.runPredicate(pod, node)
+		if err != nil {
+			return nil, 0, false, err
 		}
-		return []*v1.Pod{}, 0, true
+		if !fits {
+			return nil, 0, false, nil
+		}
+		return []*v1.Pod{}, 0, true, nil
 	}
 
 	// Otherwise, as a extender support preemption and have cached node info, we will assume cachedNodeNameToInfo is available
@@ -205,8 +211,12 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(
 
 	// If the new pod does not fit after removing all the lower priority pods,
 	// we are almost done and this node is not suitable for preemption.
-	if fits, _ := f.runPredicate(pod, nodeInfoCopy.Node()); !fits {
-		return nil, 0, false
+	fits, err := f.runPredicate(pod, nodeInfoCopy.Node())
+	if err != nil {
+		return nil, 0, false, err
 	}
+	if !fits {
+		return nil, 0, false, nil
+	}
 
 	var victims []*v1.Pod
@@ -230,7 +240,7 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(
 		reprievePod(p.(*v1.Pod))
 	}
 
-	return victims, numViolatingVictim, true
+	return victims, numViolatingVictim, true, nil
 }
 
 // runPredicate run predicates of extender one by one for given pod and node.
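The FakeExtender hunks above all serve one change: selectVictimsOnNodeByExtender now returns an explicit error alongside the victim list, PDB-violation count, and fit result, so a predicate failure is no longer collapsed into `fits == false`. A minimal, self-contained sketch of that return convention (illustrative names only, not the scheduler's real types):

```go
package main

import (
	"errors"
	"fmt"
)

// selectVictims is an illustrative stand-in for selectVictimsOnNodeByExtender:
// a non-nil error means "abort preemption"; fits == false with a nil error
// only means "this node cannot host the pod even after preemption".
func selectVictims(predicateErr error, podFits bool) ([]string, int, bool, error) {
	if predicateErr != nil {
		return nil, 0, false, predicateErr
	}
	if !podFits {
		return nil, 0, false, nil
	}
	return []string{}, 0, true, nil
}

func main() {
	cases := []struct {
		predicateErr error
		podFits      bool
	}{
		{nil, true},  // pod fits, nothing to preempt
		{nil, false}, // node unsuitable, but not an error
		{errors.New("predicate failed"), false}, // hard error: propagate it
	}
	for _, c := range cases {
		victims, violations, fits, err := selectVictims(c.predicateErr, c.podFits)
		if err != nil {
			fmt.Println("abort preemption:", err)
			continue
		}
		fmt.Println("fits:", fits, "victims:", victims, "violations:", violations)
	}
}
```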
@@ -444,7 +454,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
 			// Filter/Prioritize phases if the extender is not interested in
 			// the pod.
 			//
-			// If scheduler sends the pod by mistake, the test will fail
+			// If scheduler sends the pod by mistake, the test would fail
 			// because of the errors from errorPredicateExtender and/or
 			// errorPrioritizerExtender.
 			predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
@@ -465,7 +475,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
 			// Scheduling is expected to not fail in
 			// Filter/Prioritize phases if the extender is not available and ignorable.
 			//
-			// If scheduler did not ignore the extender, the test will fail
+			// If scheduler did not ignore the extender, the test would fail
 			// because of the errors from errorPredicateExtender.
 			predicates:   map[string]algorithm.FitPredicate{"true": truePredicate},
 			prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
@@ -282,6 +282,7 @@ func (g *genericScheduler) processPreemptionWithExtenders(
 				}
 				return nil, err
 			}
+
 			// Replace nodeToVictims with new result after preemption. So the
 			// rest of extenders can continue use it as parameter.
 			nodeToVictims = newNodeToVictims
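The hunk above only shows the `return nil, err` context; the branch that the new test below depends on is the one that skips an extender whose error is ignorable instead of aborting preemption. A self-contained sketch of that control flow, using simplified stand-in types rather than the scheduler's real API:

```go
package main

import (
	"errors"
	"fmt"
)

// victims is a simplified stand-in for the scheduler's per-node victim list.
type victims struct{ pods []string }

// extender captures just the two methods this control flow relies on.
type extender interface {
	ProcessPreemption(nodeToVictims map[string]*victims) (map[string]*victims, error)
	IsIgnorable() bool
}

type fakeExtender struct {
	err       error
	ignorable bool
}

func (f *fakeExtender) ProcessPreemption(nodeToVictims map[string]*victims) (map[string]*victims, error) {
	if f.err != nil {
		return nil, f.err
	}
	return nodeToVictims, nil
}

func (f *fakeExtender) IsIgnorable() bool { return f.ignorable }

// processPreemptionWithExtenders mirrors the behavior the new test case relies on:
// an ignorable extender that errors out is skipped, a non-ignorable one aborts.
func processPreemptionWithExtenders(extenders []extender, nodeToVictims map[string]*victims) (map[string]*victims, error) {
	for _, ext := range extenders {
		newNodeToVictims, err := ext.ProcessPreemption(nodeToVictims)
		if err != nil {
			if ext.IsIgnorable() {
				continue // ignorable extender failures must not abort preemption
			}
			return nil, err
		}
		nodeToVictims = newNodeToVictims
	}
	return nodeToVictims, nil
}

func main() {
	nodeToVictims := map[string]*victims{"machine1": {pods: []string{"m1.1", "m1.2"}}}
	extenders := []extender{
		&fakeExtender{err: errors.New("some error"), ignorable: true}, // ignored
		&fakeExtender{}, // succeeds and keeps machine1 as a candidate
	}
	result, err := processPreemptionWithExtenders(extenders, nodeToVictims)
	fmt.Println(result["machine1"].pods, err) // prints: [m1.1 m1.2] <nil>
}
```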
@@ -1274,6 +1274,30 @@ func TestPreempt(t *testing.T) {
 			expectedNode: "",
 			expectedPods: []string{},
 		},
+		{
+			name: "One scheduler extender allows only machine1, the other returns error but ignorable. Only machine1 would be chosen",
+			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", UID: types.UID("pod1")}, Spec: v1.PodSpec{
+				Containers: veryLargeContainers,
+				Priority:   &highPriority},
+			},
+			pods: []*v1.Pod{
+				{ObjectMeta: metav1.ObjectMeta{Name: "m1.1", UID: types.UID("m1.1")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &midPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
+				{ObjectMeta: metav1.ObjectMeta{Name: "m1.2", UID: types.UID("m1.2")}, Spec: v1.PodSpec{Containers: smallContainers, Priority: &lowPriority, NodeName: "machine1"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
+
+				{ObjectMeta: metav1.ObjectMeta{Name: "m2.1", UID: types.UID("m2.1")}, Spec: v1.PodSpec{Containers: largeContainers, Priority: &midPriority, NodeName: "machine2"}, Status: v1.PodStatus{Phase: v1.PodRunning}},
+			},
+			extenders: []*FakeExtender{
+				{
+					predicates: []fitPredicate{errorPredicateExtender},
+					ignorable:  true,
+				},
+				{
+					predicates: []fitPredicate{machine1PredicateExtender},
+				},
+			},
+			expectedNode: "machine1",
+			expectedPods: []string{"m1.1", "m1.2"},
+		},
 	}
 
 	for _, test := range tests {
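The two FakeExtender entries in this test case reference predicate helpers defined elsewhere in the test file and not shown in this diff. A rough sketch of how such helpers plausibly behave, with simplified stand-in types and behavior assumed from their names and the expected result (not the repository's actual code):

```go
package main

import (
	"errors"
	"fmt"
)

// node stands in for *v1.Node; the real helpers referenced in the test
// (errorPredicateExtender, machine1PredicateExtender) are assumed to be
// fit predicates of roughly this shape.
type node struct{ name string }

type fitPredicate func(n *node) (bool, error)

// errorPredicate always fails with an error; in the new test case the
// extender wrapping it is marked ignorable, so the error is swallowed.
func errorPredicate(_ *node) (bool, error) {
	return false, errors.New("some error")
}

// machine1Predicate admits only machine1, which is why machine1 ends up
// as the expected preemption target with victims m1.1 and m1.2.
func machine1Predicate(n *node) (bool, error) {
	return n.name == "machine1", nil
}

func main() {
	predicates := []fitPredicate{errorPredicate, machine1Predicate}
	for _, n := range []*node{{name: "machine1"}, {name: "machine2"}} {
		for i, p := range predicates {
			fits, err := p(n)
			fmt.Printf("predicate %d on %s: fits=%v err=%v\n", i, n.name, fits, err)
		}
	}
}
```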