Mirror of https://github.com/k3s-io/kubernetes.git
Fix weakness of current receivedMoveRequest
- Add an incremental scheduling cycle.
- Instead of setting a flag on a move request, cache the current scheduling cycle in moveRequestCycle.
- When unschedulable pods are added back, compare their scheduling cycle with moveRequestCycle to decide whether they should be added to the active queue or not.
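For orientation, here is a minimal, self-contained sketch of the cycle bookkeeping this commit describes. It is an illustration only: the names (cycleQueue, a plain slice for activeQ, a map for unschedulableQ) are simplified stand-ins, and the real PriorityQueue additionally keeps activeQ as a priority heap, blocks in Pop(), and handles backoff and nominated pods.

package queue

import (
    "sync"

    v1 "k8s.io/api/core/v1"
)

// cycleQueue illustrates replacing the old receivedMoveRequest flag with a
// pair of counters: which scheduling cycle we are in, and which cycle the
// last move request arrived in.
type cycleQueue struct {
    lock             sync.Mutex
    schedulingCycle  int64 // bumped every time a pod is popped for scheduling
    moveRequestCycle int64 // value of schedulingCycle at the last move request
    activeQ          []*v1.Pod
    unschedulableQ   map[string]*v1.Pod
}

func newCycleQueue() *cycleQueue {
    return &cycleQueue{unschedulableQ: map[string]*v1.Pod{}}
}

// SchedulingCycle returns the current cycle. The scheduler captures this
// value when a scheduling attempt begins and passes it back on failure.
func (q *cycleQueue) SchedulingCycle() int64 {
    q.lock.Lock()
    defer q.lock.Unlock()
    return q.schedulingCycle
}

// Pop hands out the next pod and starts a new scheduling cycle.
// (Upstream blocks on a condition variable instead of returning nil.)
func (q *cycleQueue) Pop() *v1.Pod {
    q.lock.Lock()
    defer q.lock.Unlock()
    if len(q.activeQ) == 0 {
        return nil
    }
    pod := q.activeQ[0]
    q.activeQ = q.activeQ[1:]
    q.schedulingCycle++
    return pod
}

// MoveAllToActiveQueue flushes the unschedulable queue and records the cycle
// in which this move request happened, instead of setting a boolean flag.
func (q *cycleQueue) MoveAllToActiveQueue() {
    q.lock.Lock()
    defer q.lock.Unlock()
    for _, pod := range q.unschedulableQ {
        q.activeQ = append(q.activeQ, pod)
    }
    q.unschedulableQ = map[string]*v1.Pod{}
    q.moveRequestCycle = q.schedulingCycle
}

// AddUnschedulableIfNotPresent is called (possibly from a goroutine) after a
// failed scheduling attempt; podSchedulingCycle is the cycle captured when
// that attempt started. If a move request arrived during or after that cycle
// (moveRequestCycle >= podSchedulingCycle), the cluster may have changed
// since the attempt, so the pod goes straight back to the active queue.
func (q *cycleQueue) AddUnschedulableIfNotPresent(pod *v1.Pod, podSchedulingCycle int64) {
    q.lock.Lock()
    defer q.lock.Unlock()
    key := pod.Namespace + "/" + pod.Name
    if _, present := q.unschedulableQ[key]; present {
        return
    }
    if q.moveRequestCycle >= podSchedulingCycle {
        q.activeQ = append(q.activeQ, pod)
        return
    }
    q.unschedulableQ[key] = pod
}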
@@ -179,9 +179,9 @@ func TestPriorityQueue_AddIfNotPresent(t *testing.T) {
 func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) {
 	q := NewPriorityQueue(nil)
 	q.Add(&highPriNominatedPod)
-	q.AddUnschedulableIfNotPresent(&highPriNominatedPod) // Must not add anything.
-	q.AddUnschedulableIfNotPresent(&medPriorityPod)      // This should go to activeQ.
-	q.AddUnschedulableIfNotPresent(&unschedulablePod)
+	q.AddUnschedulableIfNotPresent(&highPriNominatedPod, q.SchedulingCycle()) // Must not add anything.
+	q.AddUnschedulableIfNotPresent(&medPriorityPod, q.SchedulingCycle())      // This should go to activeQ.
+	q.AddUnschedulableIfNotPresent(&unschedulablePod, q.SchedulingCycle())
 	expectedNominatedPods := &nominatedPodMap{
 		nominatedPodToNode: map[types.UID]string{
 			medPriorityPod.UID: "node1",
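The call-site pattern in these tests generalizes: capture the cycle when a scheduling attempt begins and hand it back on failure. Below is a rough caller-side sketch against the simplified cycleQueue above; scheduleOne and schedule are hypothetical placeholders, not scheduler APIs, and where exactly upstream captures the cycle is simplified here.

// scheduleOne sketches the caller side of the new two-argument contract.
// schedule stands in for the real scheduling attempt.
func scheduleOne(q *cycleQueue, schedule func(*v1.Pod) error) {
    pod := q.Pop() // starts a new scheduling cycle
    if pod == nil {
        return
    }
    cycle := q.SchedulingCycle() // the cycle this attempt belongs to
    if err := schedule(pod); err != nil {
        // If MoveAllToActiveQueue ran while we were scheduling, then
        // moveRequestCycle >= cycle and the queue sends the pod straight
        // back to activeQ instead of parking it in unschedulableQ.
        q.AddUnschedulableIfNotPresent(pod, cycle)
    }
}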
@@ -209,6 +209,78 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) {
 	}
 }
+
+// TestPriorityQueue_AddUnschedulableIfNotPresent_Async tests the scenario
+// where AddUnschedulableIfNotPresent is called asynchronously: pods in or
+// before the current scheduling cycle are put back into the activeQueue if
+// a move request was received while we were trying to schedule them.
+func TestPriorityQueue_AddUnschedulableIfNotPresent_Async(t *testing.T) {
+	q := NewPriorityQueue(nil)
+	totalNum := 10
+	expectedPods := make([]v1.Pod, 0, totalNum)
+	for i := 0; i < totalNum; i++ {
+		priority := int32(i)
+		p := v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      fmt.Sprintf("pod%d", i),
+				Namespace: fmt.Sprintf("ns%d", i),
+				UID:       types.UID(fmt.Sprintf("upns%d", i)),
+			},
+			Spec: v1.PodSpec{
+				Priority: &priority,
+			},
+		}
+		expectedPods = append(expectedPods, p)
+		// the ascending priorities keep the pods ordered in the PriorityQueue
+		q.Add(&p)
+	}
+
+	// Pop all pods except for the first one
+	for i := totalNum - 1; i > 0; i-- {
+		p, _ := q.Pop()
+		if !reflect.DeepEqual(&expectedPods[i], p) {
+			t.Errorf("Unexpected pod. Expected: %v, got: %v", &expectedPods[i], p)
+		}
+	}
+
+	// move all pods to the active queue while we are trying to schedule them
+	q.MoveAllToActiveQueue()
+	moveReqChan := make(chan struct{})
+	var wg sync.WaitGroup
+	wg.Add(totalNum - 1)
+	// mark pods[1] ~ pods[totalNum-1] as unschedulable and fire goroutines to add them back later
+	for i := 1; i < totalNum; i++ {
+		unschedulablePod := expectedPods[i].DeepCopy()
+		unschedulablePod.Status = v1.PodStatus{
+			Conditions: []v1.PodCondition{
+				{
+					Type:   v1.PodScheduled,
+					Status: v1.ConditionFalse,
+					Reason: v1.PodReasonUnschedulable,
+				},
+			},
+		}
+		cycle := q.SchedulingCycle()
+		go func() {
+			<-moveReqChan
+			q.AddUnschedulableIfNotPresent(unschedulablePod, cycle)
+			wg.Done()
+		}()
+	}
+	firstPod, _ := q.Pop()
+	if !reflect.DeepEqual(&expectedPods[0], firstPod) {
+		t.Errorf("Unexpected pod. Expected: %v, got: %v", &expectedPods[0], firstPod)
+	}
+	// close moveReqChan here to make sure q.AddUnschedulableIfNotPresent is called after another pod is popped
+	close(moveReqChan)
+	wg.Wait()
+	// all other pods should be in the active queue again
+	for i := 1; i < totalNum; i++ {
+		if _, exists, _ := q.activeQ.Get(&expectedPods[i]); !exists {
+			t.Errorf("Expected %v to be added to activeQ.", expectedPods[i].Name)
+		}
+	}
+}
 
 func TestPriorityQueue_Pop(t *testing.T) {
 	q := NewPriorityQueue(nil)
 	wg := sync.WaitGroup{}
@@ -680,7 +752,7 @@ func TestRecentlyTriedPodsGoBack(t *testing.T) {
 		LastProbeTime: metav1.Now(),
 	})
 	// Put in the unschedulable queue.
-	q.AddUnschedulableIfNotPresent(p1)
+	q.AddUnschedulableIfNotPresent(p1, q.SchedulingCycle())
 	// Move all unschedulable pods to the active queue.
 	q.MoveAllToActiveQueue()
 	// Simulation is over. Now let's pop all pods. The pod popped first should be
@@ -728,7 +800,7 @@ func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) {
 		LastProbeTime: metav1.Now(),
 	})
 	// Put in the unschedulable queue
-	q.AddUnschedulableIfNotPresent(&unschedulablePod)
+	q.AddUnschedulableIfNotPresent(&unschedulablePod, q.SchedulingCycle())
 	// Clear its backoff to simulate its backoff expiration
 	q.clearPodBackoff(&unschedulablePod)
 	// Move all unschedulable pods to the active queue.
@@ -771,7 +843,7 @@ func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) {
 		LastProbeTime: metav1.Now(),
 	})
 	// And then, put the unschedulable pod back into the unschedulable queue
-	q.AddUnschedulableIfNotPresent(&unschedulablePod)
+	q.AddUnschedulableIfNotPresent(&unschedulablePod, q.SchedulingCycle())
 	// Clear its backoff to simulate its backoff expiration
 	q.clearPodBackoff(&unschedulablePod)
 	// Move all unschedulable pods to the active queue.
@@ -838,7 +910,7 @@ func TestHighProirotyBackoff(t *testing.T) {
 		Message: "fake scheduling failure",
 	})
 	// Put in the unschedulable queue.
-	q.AddUnschedulableIfNotPresent(p)
+	q.AddUnschedulableIfNotPresent(p, q.SchedulingCycle())
 	// Move all unschedulable pods to the active queue.
 	q.MoveAllToActiveQueue()