make the scheduling queue start before the scheduler starts and stop after the scheduler stops
commit f388534082
parent aa67744438
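
This change moves ownership of the scheduling queue's lifecycle into the scheduler: NewPriorityQueue no longer takes an external stop channel or starts its flushing goroutines, and Scheduler.Run now calls SchedulingQueue.Run() before the scheduling loop and SchedulingQueue.Close() after the loop returns. A minimal sketch of the resulting flow (the wrapper function is illustrative, and the internal/queue import only resolves from inside the k8s.io/kubernetes module):

package main

import (
	"context"

	"k8s.io/apimachinery/pkg/util/wait"
	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
)

// runScheduler is an illustrative stand-in for Scheduler.Run after this commit:
// the queue's background goroutines start with the scheduler and stop with it.
func runScheduler(ctx context.Context, q internalqueue.SchedulingQueue, scheduleOne func(context.Context)) {
	q.Run()                                    // start the backoff/unschedulable flushing goroutines
	wait.UntilWithContext(ctx, scheduleOne, 0) // returns once ctx is cancelled
	q.Close()                                  // closes the queue's own stop channel, stopping those goroutines
}
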
@@ -539,7 +539,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
 for _, name := range test.nodes {
 cache.AddNode(createNode(name))
 }
-queue := internalqueue.NewSchedulingQueue(nil, nil)
+queue := internalqueue.NewSchedulingQueue(nil)
 scheduler := NewGenericScheduler(
 cache,
 queue,
@@ -662,7 +662,7 @@ func TestGenericScheduler(t *testing.T) {
 }
 scheduler := NewGenericScheduler(
 cache,
-internalqueue.NewSchedulingQueue(nil, nil),
+internalqueue.NewSchedulingQueue(nil),
 test.predicates,
 predMetaProducer,
 test.prioritizers,
@@ -702,7 +702,7 @@ func makeScheduler(predicates map[string]algorithmpredicates.FitPredicate, nodes

 s := NewGenericScheduler(
 cache,
-internalqueue.NewSchedulingQueue(nil, nil),
+internalqueue.NewSchedulingQueue(nil),
 predicates,
 algorithmpredicates.EmptyMetadataProducer,
 nil,
@@ -819,7 +819,7 @@ func TestFindFitPredicateCallCounts(t *testing.T) {
 cache.AddNode(n)
 }

-queue := internalqueue.NewSchedulingQueue(nil, nil)
+queue := internalqueue.NewSchedulingQueue(nil)
 scheduler := NewGenericScheduler(
 cache,
 queue,
@@ -1412,7 +1412,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 filterPlugin.failedNodeReturnCodeMap = filterFailedNodeReturnCodeMap
 scheduler := NewGenericScheduler(
 nil,
-internalqueue.NewSchedulingQueue(nil, nil),
+internalqueue.NewSchedulingQueue(nil),
 test.predicates,
 factory.GetPredicateMetadata,
 nil,
@@ -2160,7 +2160,7 @@ func TestPreempt(t *testing.T) {
 }
 scheduler := NewGenericScheduler(
 cache,
-internalqueue.NewSchedulingQueue(nil, nil),
+internalqueue.NewSchedulingQueue(nil),
 map[string]algorithmpredicates.FitPredicate{"matches": predicate},
 predMetaProducer,
 []priorities.PriorityConfig{{Map: numericMapPriority, Weight: 1}},
@@ -266,7 +266,6 @@ func (c *Configurator) CreateFromKeys(predicateKeys, priorityKeys sets.String, e
 }

 podQueue := internalqueue.NewSchedulingQueue(
-c.StopEverything,
 framework,
 internalqueue.WithPodInitialBackoffDuration(time.Duration(c.podInitialBackoffSeconds)*time.Second),
 internalqueue.WithPodMaxBackoffDuration(time.Duration(c.podMaxBackoffSeconds)*time.Second),
@@ -281,11 +280,6 @@ func (c *Configurator) CreateFromKeys(predicateKeys, priorityKeys sets.String, e
 )
 debugger.ListenForSignal(c.StopEverything)

-go func() {
-<-c.StopEverything
-podQueue.Close()
-}()
-
 algo := core.NewGenericScheduler(
 c.schedulerCache,
 podQueue,
@@ -319,7 +319,7 @@ func TestDefaultErrorFunc(t *testing.T) {
 defer close(stopCh)

 timestamp := time.Now()
-queue := internalqueue.NewPriorityQueue(nil, nil, internalqueue.WithClock(clock.NewFakeClock(timestamp)))
+queue := internalqueue.NewPriorityQueue(nil, internalqueue.WithClock(clock.NewFakeClock(timestamp)))
 schedulerCache := internalcache.New(30*time.Second, stopCh)
 errFunc := MakeDefaultErrorFunc(client, queue, schedulerCache)

@@ -97,11 +97,13 @@ type SchedulingQueue interface {
 DeleteNominatedPodIfExists(pod *v1.Pod)
 // NumUnschedulablePods returns the number of unschedulable pods exist in the SchedulingQueue.
 NumUnschedulablePods() int
+// Run starts the goroutines managing the queue.
+Run()
 }

 // NewSchedulingQueue initializes a priority queue as a new scheduling queue.
-func NewSchedulingQueue(stop <-chan struct{}, fwk framework.Framework, opts ...Option) SchedulingQueue {
-return NewPriorityQueue(stop, fwk, opts...)
+func NewSchedulingQueue(fwk framework.Framework, opts ...Option) SchedulingQueue {
+return NewPriorityQueue(fwk, opts...)
 }

 // NominatedNodeName returns nominated node name of a Pod.
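
The practical effect of the interface change above: callers drop the stop-channel argument and must start the queue explicitly. A small sketch of the new call pattern (only resolvable inside the kubernetes repo, because the queue package is internal; the nil framework is a placeholder):

package scheduler_example

import (
	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
)

func newQueue() internalqueue.SchedulingQueue {
	// Before: internalqueue.NewSchedulingQueue(stopCh, fwk, opts...)
	// After:  the stop channel is gone; the queue allocates its own in NewPriorityQueue.
	q := internalqueue.NewSchedulingQueue(nil)
	// Nothing flushes the backoff or unschedulable queues until Run is called.
	q.Run()
	return q
}
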
@@ -117,7 +119,7 @@ func NominatedNodeName(pod *v1.Pod) string {
 // is called unschedulableQ. The third queue holds pods that are moved from
 // unschedulable queues and will be moved to active queue when backoff are completed.
 type PriorityQueue struct {
-stop <-chan struct{}
+stop chan struct{}
 clock util.Clock
 // podBackoff tracks backoff for pods attempting to be rescheduled
 podBackoff *PodBackoffMap
@@ -209,7 +211,6 @@ func activeQComp(podInfo1, podInfo2 interface{}) bool {

 // NewPriorityQueue creates a PriorityQueue object.
 func NewPriorityQueue(
-stop <-chan struct{},
 fwk framework.Framework,
 opts ...Option,
 ) *PriorityQueue {
@@ -232,7 +233,7 @@ func NewPriorityQueue(

 pq := &PriorityQueue{
 clock: options.clock,
-stop: stop,
+stop: make(chan struct{}),
 podBackoff: NewPodBackoffMap(options.podInitialBackoffDuration, options.podMaxBackoffDuration),
 activeQ: heap.NewWithRecorder(podInfoKeyFunc, comp, metrics.NewActivePodsRecorder()),
 unschedulableQ: newUnschedulablePodsMap(metrics.NewUnschedulablePodsRecorder()),
@@ -242,13 +243,11 @@ func NewPriorityQueue(
 pq.cond.L = &pq.lock
 pq.podBackoffQ = heap.NewWithRecorder(podInfoKeyFunc, pq.podsCompareBackoffCompleted, metrics.NewBackoffPodsRecorder())

-pq.run()
-
 return pq
 }

-// run starts the goroutine to pump from podBackoffQ to activeQ
-func (p *PriorityQueue) run() {
+// Run starts the goroutine to pump from podBackoffQ to activeQ
+func (p *PriorityQueue) Run() {
 go wait.Until(p.flushBackoffQCompleted, 1.0*time.Second, p.stop)
 go wait.Until(p.flushUnschedulableQLeftover, 30*time.Second, p.stop)
 }
@@ -636,6 +635,7 @@ func (p *PriorityQueue) PendingPods() []*v1.Pod {
 func (p *PriorityQueue) Close() {
 p.lock.Lock()
 defer p.lock.Unlock()
+close(p.stop)
 p.closed = true
 p.cond.Broadcast()
 }
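
Because the queue now owns its stop channel, Close does double duty: it marks the queue closed (which unblocks and fails pending Pop calls, as TestSchedulingQueue_Close expects) and closes p.stop, which terminates the goroutines started by Run. A rough fragment showing that pairing (same internal-package caveat as above):

q := internalqueue.NewPriorityQueue(nil) // no stop channel parameter anymore
q.Run()                                  // periodic flushing starts here, not in the constructor

go func() {
	// Pop blocks until a pod arrives or the queue is closed; after Close it
	// returns the queueClosed error instead of blocking forever.
	if _, err := q.Pop(); err != nil {
		// queue was closed
	}
}()

q.Close() // close(p.stop) stops Run's goroutines and wakes up blocked Pop calls
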
@@ -129,7 +129,7 @@ func getUnschedulablePod(p *PriorityQueue, pod *v1.Pod) *v1.Pod {
 }

 func TestPriorityQueue_Add(t *testing.T) {
-q := NewPriorityQueue(nil, nil)
+q := createAndRunPriorityQueue(nil)
 if err := q.Add(&medPriorityPod); err != nil {
 t.Errorf("add failed: %v", err)
 }
@@ -259,7 +259,7 @@ func (*fakeFramework) SnapshotSharedLister() schedulerlisters.SharedLister {
 }

 func TestPriorityQueue_AddWithReversePriorityLessFunc(t *testing.T) {
-q := NewPriorityQueue(nil, &fakeFramework{})
+q := createAndRunPriorityQueue(&fakeFramework{})
 if err := q.Add(&medPriorityPod); err != nil {
 t.Errorf("add failed: %v", err)
 }
@@ -275,7 +275,7 @@ func TestPriorityQueue_AddWithReversePriorityLessFunc(t *testing.T) {
 }

 func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) {
-q := NewPriorityQueue(nil, nil)
+q := createAndRunPriorityQueue(nil)
 q.Add(&highPriNominatedPod)
 q.AddUnschedulableIfNotPresent(newPodInfoNoTimestamp(&highPriNominatedPod), q.SchedulingCycle()) // Must not add anything.
 q.AddUnschedulableIfNotPresent(newPodInfoNoTimestamp(&unschedulablePod), q.SchedulingCycle())
@@ -307,7 +307,7 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent(t *testing.T) {
 // Pods in and before current scheduling cycle will be put back to activeQueue
 // if we were trying to schedule them when we received move request.
 func TestPriorityQueue_AddUnschedulableIfNotPresent_Backoff(t *testing.T) {
-q := NewPriorityQueue(nil, nil, WithClock(clock.NewFakeClock(time.Now())))
+q := createAndRunPriorityQueue(nil, WithClock(clock.NewFakeClock(time.Now())))
 totalNum := 10
 expectedPods := make([]v1.Pod, 0, totalNum)
 for i := 0; i < totalNum; i++ {
@@ -374,7 +374,7 @@ func TestPriorityQueue_AddUnschedulableIfNotPresent_Backoff(t *testing.T) {
 }

 func TestPriorityQueue_Pop(t *testing.T) {
-q := NewPriorityQueue(nil, nil)
+q := createAndRunPriorityQueue(nil)
 wg := sync.WaitGroup{}
 wg.Add(1)
 go func() {
@@ -391,7 +391,7 @@ func TestPriorityQueue_Pop(t *testing.T) {
 }

 func TestPriorityQueue_Update(t *testing.T) {
-q := NewPriorityQueue(nil, nil)
+q := createAndRunPriorityQueue(nil)
 q.Update(nil, &highPriorityPod)
 if _, exists, _ := q.activeQ.Get(newPodInfoNoTimestamp(&highPriorityPod)); !exists {
 t.Errorf("Expected %v to be added to activeQ.", highPriorityPod.Name)
@@ -427,7 +427,7 @@ func TestPriorityQueue_Update(t *testing.T) {
 }

 func TestPriorityQueue_Delete(t *testing.T) {
-q := NewPriorityQueue(nil, nil)
+q := createAndRunPriorityQueue(nil)
 q.Update(&highPriorityPod, &highPriNominatedPod)
 q.Add(&unschedulablePod)
 if err := q.Delete(&highPriNominatedPod); err != nil {
@@ -451,7 +451,7 @@ func TestPriorityQueue_Delete(t *testing.T) {
 }

 func TestPriorityQueue_MoveAllToActiveQueue(t *testing.T) {
-q := NewPriorityQueue(nil, nil)
+q := createAndRunPriorityQueue(nil)
 q.Add(&medPriorityPod)
 addOrUpdateUnschedulablePod(q, q.newPodInfo(&unschedulablePod))
 addOrUpdateUnschedulablePod(q, q.newPodInfo(&highPriorityPod))
@@ -497,7 +497,7 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) {
 Spec: v1.PodSpec{NodeName: "machine1"},
 }

-q := NewPriorityQueue(nil, nil)
+q := createAndRunPriorityQueue(nil)
 q.Add(&medPriorityPod)
 // Add a couple of pods to the unschedulableQ.
 addOrUpdateUnschedulablePod(q, q.newPodInfo(&unschedulablePod))
@@ -518,7 +518,7 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) {
 }

 func TestPriorityQueue_NominatedPodsForNode(t *testing.T) {
-q := NewPriorityQueue(nil, nil)
+q := createAndRunPriorityQueue(nil)
 q.Add(&medPriorityPod)
 q.Add(&unschedulablePod)
 q.Add(&highPriorityPod)
@@ -543,7 +543,7 @@ func TestPriorityQueue_PendingPods(t *testing.T) {
 return pendingSet
 }

-q := NewPriorityQueue(nil, nil)
+q := createAndRunPriorityQueue(nil)
 q.Add(&medPriorityPod)
 addOrUpdateUnschedulablePod(q, q.newPodInfo(&unschedulablePod))
 addOrUpdateUnschedulablePod(q, q.newPodInfo(&highPriorityPod))
@@ -559,7 +559,7 @@ func TestPriorityQueue_PendingPods(t *testing.T) {
 }

 func TestPriorityQueue_UpdateNominatedPodForNode(t *testing.T) {
-q := NewPriorityQueue(nil, nil)
+q := createAndRunPriorityQueue(nil)
 if err := q.Add(&medPriorityPod); err != nil {
 t.Errorf("add failed: %v", err)
 }
@@ -628,8 +628,7 @@ func TestPriorityQueue_UpdateNominatedPodForNode(t *testing.T) {
 }

 func TestPriorityQueue_NewWithOptions(t *testing.T) {
-q := NewPriorityQueue(
-nil,
+q := createAndRunPriorityQueue(
 nil,
 WithPodInitialBackoffDuration(2*time.Second),
 WithPodMaxBackoffDuration(20*time.Second),
@@ -802,7 +801,7 @@ func TestSchedulingQueue_Close(t *testing.T) {
 }{
 {
 name: "PriorityQueue close",
-q: NewPriorityQueue(nil, nil),
+q: createAndRunPriorityQueue(nil),
 expectedErr: fmt.Errorf(queueClosed),
 },
 }
@@ -831,7 +830,7 @@ func TestSchedulingQueue_Close(t *testing.T) {
 // ensures that an unschedulable pod does not block head of the queue when there
 // are frequent events that move pods to the active queue.
 func TestRecentlyTriedPodsGoBack(t *testing.T) {
-q := NewPriorityQueue(nil, nil)
+q := createAndRunPriorityQueue(nil)
 // Add a few pods to priority queue.
 for i := 0; i < 5; i++ {
 p := v1.Pod{
@@ -885,7 +884,7 @@ func TestRecentlyTriedPodsGoBack(t *testing.T) {
 // This behavior ensures that an unschedulable pod does not block head of the queue when there
 // are frequent events that move pods to the active queue.
 func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) {
-q := NewPriorityQueue(nil, nil)
+q := createAndRunPriorityQueue(nil)

 // Add an unschedulable pod to a priority queue.
 // This makes a situation that the pod was tried to schedule
@@ -976,7 +975,7 @@ func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) {
 // TestHighPriorityBackoff tests that a high priority pod does not block
 // other pods if it is unschedulable
 func TestHighPriorityBackoff(t *testing.T) {
-q := NewPriorityQueue(nil, nil)
+q := createAndRunPriorityQueue(nil)

 midPod := v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
@@ -1039,7 +1038,7 @@ func TestHighPriorityBackoff(t *testing.T) {
 // TestHighPriorityFlushUnschedulableQLeftover tests that pods will be moved to
 // activeQ after one minutes if it is in unschedulableQ
 func TestHighPriorityFlushUnschedulableQLeftover(t *testing.T) {
-q := NewPriorityQueue(nil, nil)
+q := createAndRunPriorityQueue(nil)
 midPod := v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: "test-midpod",
@@ -1236,7 +1235,7 @@ func TestPodTimestamp(t *testing.T) {

 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-queue := NewPriorityQueue(nil, nil, WithClock(clock.NewFakeClock(timestamp)))
+queue := createAndRunPriorityQueue(nil, WithClock(clock.NewFakeClock(timestamp)))
 var podInfoList []*framework.PodInfo

 for i, op := range test.operations {
@@ -1403,7 +1402,7 @@ scheduler_pending_pods{queue="unschedulable"} 0
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 resetMetrics()
-queue := NewPriorityQueue(nil, nil, WithClock(clock.NewFakeClock(timestamp)))
+queue := createAndRunPriorityQueue(nil, WithClock(clock.NewFakeClock(timestamp)))
 for i, op := range test.operations {
 for _, pInfo := range test.operands[i] {
 op(queue, pInfo)
@@ -1432,7 +1431,7 @@ func TestPerPodSchedulingMetrics(t *testing.T) {
 // Case 1: A pod is created and scheduled after 1 attempt. The queue operations are
 // Add -> Pop.
 c := clock.NewFakeClock(timestamp)
-queue := NewPriorityQueue(nil, nil, WithClock(c))
+queue := createAndRunPriorityQueue(nil, WithClock(c))
 queue.Add(pod)
 pInfo, err := queue.Pop()
 if err != nil {
@@ -1443,7 +1442,7 @@ func TestPerPodSchedulingMetrics(t *testing.T) {
 // Case 2: A pod is created and scheduled after 2 attempts. The queue operations are
 // Add -> Pop -> AddUnschedulableIfNotPresent -> flushUnschedulableQLeftover -> Pop.
 c = clock.NewFakeClock(timestamp)
-queue = NewPriorityQueue(nil, nil, WithClock(c))
+queue = createAndRunPriorityQueue(nil, WithClock(c))
 queue.Add(pod)
 pInfo, err = queue.Pop()
 if err != nil {
@@ -1463,7 +1462,7 @@ func TestPerPodSchedulingMetrics(t *testing.T) {
 // Case 3: Similar to case 2, but before the second pop, call update, the queue operations are
 // Add -> Pop -> AddUnschedulableIfNotPresent -> flushUnschedulableQLeftover -> Update -> Pop.
 c = clock.NewFakeClock(timestamp)
-queue = NewPriorityQueue(nil, nil, WithClock(c))
+queue = createAndRunPriorityQueue(nil, WithClock(c))
 queue.Add(pod)
 pInfo, err = queue.Pop()
 if err != nil {
@@ -1561,9 +1560,9 @@ func TestIncomingPodsMetrics(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 metrics.SchedulerQueueIncomingPods.Reset()
-stop := make(chan struct{})
-close(stop) // Stop the periodic flush
-queue := NewPriorityQueue(stop, nil, WithClock(clock.NewFakeClock(timestamp)))
+queue := NewPriorityQueue(nil, WithClock(clock.NewFakeClock(timestamp)))
+queue.Close()
+queue.Run()
 for _, op := range test.operations {
 for _, pInfo := range pInfos {
 op(queue, pInfo)
@@ -1586,3 +1585,9 @@ func checkPerPodSchedulingMetrics(name string, t *testing.T, pInfo *framework.Po
 t.Errorf("[%s] Pod initial schedule attempt timestamp unexpected, got %v, want %v", name, pInfo.InitialAttemptTimestamp, wantInitialAttemptTs)
 }
 }
+
+func createAndRunPriorityQueue(fwk framework.Framework, opts ...Option) *PriorityQueue {
+q := NewPriorityQueue(fwk, opts...)
+q.Run()
+return q
+}
@@ -420,8 +420,9 @@ func (sched *Scheduler) Run(ctx context.Context) {
 if !cache.WaitForCacheSync(ctx.Done(), sched.scheduledPodsHasSynced) {
 return
 }
+sched.SchedulingQueue.Run()
 wait.UntilWithContext(ctx, sched.scheduleOne, 0)
+sched.SchedulingQueue.Close()
 }

 // recordFailedSchedulingEvent records an event for the pod that indicates the
@@ -646,7 +646,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]predicates.FitPredicate, recorder events.EventRecorder) (*Scheduler, chan *v1.Binding, chan error) {
 algo := core.NewGenericScheduler(
 scache,
-internalqueue.NewSchedulingQueue(nil, nil),
+internalqueue.NewSchedulingQueue(nil),
 predicateMap,
 predicates.EmptyMetadataProducer,
 []priorities.PriorityConfig{},
@@ -695,9 +695,10 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.C
 }

 func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, scache internalcache.Cache, informerFactory informers.SharedInformerFactory, predicateMap map[string]predicates.FitPredicate, stop chan struct{}, bindingTime time.Duration) (*Scheduler, chan *v1.Binding) {
+queue := internalqueue.NewSchedulingQueue(nil)
 algo := core.NewGenericScheduler(
 scache,
-internalqueue.NewSchedulingQueue(nil, nil),
+queue,
 predicateMap,
 predicates.EmptyMetadataProducer,
 []priorities.PriorityConfig{},
@@ -740,6 +741,7 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc
 StopEverything: stop,
 Framework: emptyFramework,
 VolumeBinder: volumebinder.NewFakeVolumeBinder(&volumescheduling.FakeVolumeBinderConfig{AllBound: true}),
+SchedulingQueue: queue,
 }

 return sched, bindingChan