Store a cluster event to plugin map in SchedulerQueue
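
The change plumbs a map from registered cluster event to the set of plugin names interested in that event into the scheduling queue. Each unschedulable pod now records which plugins failed it (the new UnschedulablePlugins field), MoveAllToActiveOrBackoffQueue takes a named event (NodeAdd, UnschedulableTimeout, ...) instead of a free-form string, and podMatchesEvent decides whether an incoming event can unblock a given pod. The following is a minimal, self-contained Go sketch of that matching rule as the new TestPodMatchesEvent cases pin it down; it is an illustration, not the upstream implementation, and the type definitions are simplified stand-ins for framework.ClusterEvent, framework.ActionType, and sets.String:

    package main

    import "fmt"

    // Simplified stand-ins for the framework's ActionType / GVK / ClusterEvent.
    type ActionType int

    const (
    	Add ActionType = 1 << iota
    	UpdateNodeTaint
    	All = Add | UpdateNodeTaint // stand-in for the framework's catch-all mask
    )

    type GVK string

    const (
    	Node     GVK = "Node"
    	WildCard GVK = "*"
    )

    type ClusterEvent struct {
    	Resource   GVK
    	ActionType ActionType
    }

    // podMatchesEvent sketches the gating rule the new tests assert on: an
    // incoming cluster event re-queues an unschedulable pod only if some plugin
    // that failed the pod registered interest in a matching event.
    func podMatchesEvent(failedPlugins map[string]bool, evt ClusterEvent,
    	clusterEventMap map[ClusterEvent]map[string]bool) bool {
    	// Pods whose failure was not attributed to any plugin always move.
    	if len(failedPlugins) == 0 {
    		return true
    	}
    	// A wildcard incoming event (e.g. UnschedulableTimeout) moves every pod.
    	if evt.Resource == WildCard {
    		return true
    	}
    	for registered, plugins := range clusterEventMap {
    		// A plugin-registered wildcard matches any incoming event; otherwise
    		// the resource must match and the action masks must overlap.
    		match := registered.Resource == WildCard ||
    			(registered.Resource == evt.Resource &&
    				registered.ActionType&evt.ActionType != 0)
    		if !match {
    			continue
    		}
    		for p := range plugins {
    			if failedPlugins[p] { // failed plugins ∩ registered plugins
    				return true
    			}
    		}
    	}
    	return false
    }

    func main() {
    	m := map[ClusterEvent]map[string]bool{
    		{Resource: Node, ActionType: Add}: {"fooPlugin": true},
    	}
    	nodeAdd := ClusterEvent{Resource: Node, ActionType: Add}
    	fmt.Println(podMatchesEvent(map[string]bool{"fooPlugin": true}, nodeAdd, m)) // true: fooPlugin registered for Node/Add
    	fmt.Println(podMatchesEvent(map[string]bool{"barPlugin": true}, nodeAdd, m)) // false: barPlugin never registered
    	fmt.Println(podMatchesEvent(nil, nodeAdd, m))                                // true: failure not attributed to a plugin
    }

In short, per the test table: a wildcard incoming event moves everything, pods with no recorded failing plugin always remain movable, and otherwise a pod moves only if at least one plugin that failed it is registered for a matching event.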
@@ -29,6 +29,7 @@ import (
-	"k8s.io/apimachinery/pkg/types"
+	ktypes "k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/clock"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/component-base/metrics/testutil"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
@@ -361,40 +362,56 @@ func TestPriorityQueue_Delete(t *testing.T) {

 func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) {
 	c := clock.NewFakeClock(time.Now())
-	q := NewPriorityQueue(newDefaultQueueSort(), WithClock(c))
+	m := map[framework.ClusterEvent]sets.String{
+		{Resource: framework.Node, ActionType: framework.Add}: sets.NewString("fooPlugin"),
+	}
+	q := NewPriorityQueue(newDefaultQueueSort(), WithClock(c), WithClusterEventMap(m))
 	q.Add(medPriorityPodInfo.Pod)
-	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(unschedulablePodInfo.Pod), q.SchedulingCycle())
-	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(highPriorityPodInfo.Pod), q.SchedulingCycle())
+	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(unschedulablePodInfo.Pod, "fooPlugin"), q.SchedulingCycle())
+	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(highPriorityPodInfo.Pod, "fooPlugin"), q.SchedulingCycle())
+	// Construct a Pod, but don't associate its scheduler failure to any plugin
+	hpp1 := highPriorityPodInfo.Pod.DeepCopy()
+	hpp1.Name = "hpp1"
+	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(hpp1), q.SchedulingCycle())
+	// Construct another Pod, and associate its scheduler failure to plugin "barPlugin".
+	hpp2 := highPriorityPodInfo.Pod.DeepCopy()
+	hpp2.Name = "hpp2"
+	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(hpp2, "barPlugin"), q.SchedulingCycle())
 	// Pods is still backing off, move the pod into backoffQ.
-	q.MoveAllToActiveOrBackoffQueue("test")
+	q.MoveAllToActiveOrBackoffQueue(NodeAdd)
 	if q.activeQ.Len() != 1 {
 		t.Errorf("Expected 1 item to be in activeQ, but got: %v", q.activeQ.Len())
 	}
-	if q.podBackoffQ.Len() != 2 {
-		t.Errorf("Expected 2 items to be in podBackoffQ, but got: %v", q.podBackoffQ.Len())
+	// hpp2 won't be moved.
+	if q.podBackoffQ.Len() != 3 {
+		t.Fatalf("Expected 3 items to be in podBackoffQ, but got: %v", q.podBackoffQ.Len())
 	}

 	// pop out the pods in the backoffQ.
-	q.podBackoffQ.Pop()
-	q.podBackoffQ.Pop()
+	for q.podBackoffQ.Len() != 0 {
+		q.podBackoffQ.Pop()
+	}

 	q.schedulingCycle++
-	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(unschedulablePodInfo.Pod), q.SchedulingCycle())
-	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(highPriorityPodInfo.Pod), q.SchedulingCycle())
-	if q.unschedulableQ.get(unschedulablePodInfo.Pod) == nil || q.unschedulableQ.get(highPriorityPodInfo.Pod) == nil {
-		t.Errorf("Expected %v and %v in the unschedulableQ", unschedulablePodInfo.Pod.Name, highPriorityPodInfo.Pod.Name)
+	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(unschedulablePodInfo.Pod, "fooPlugin"), q.SchedulingCycle())
+	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(highPriorityPodInfo.Pod, "fooPlugin"), q.SchedulingCycle())
+	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(hpp1), q.SchedulingCycle())
+	for _, pod := range []*v1.Pod{unschedulablePodInfo.Pod, highPriorityPodInfo.Pod, hpp1, hpp2} {
+		if q.unschedulableQ.get(pod) == nil {
+			t.Errorf("Expected %v in the unschedulableQ", pod.Name)
+		}
 	}
 	// Move clock by podInitialBackoffDuration, so that pods in the unschedulableQ would pass the backing off,
 	// and the pods will be moved into activeQ.
 	c.Step(q.podInitialBackoffDuration)
-	q.MoveAllToActiveOrBackoffQueue("test")
-	if q.activeQ.Len() != 3 {
-		t.Errorf("Expected 3 items to be in activeQ, but got: %v", q.activeQ.Len())
+	q.MoveAllToActiveOrBackoffQueue(NodeAdd)
+	// hpp2 won't be moved regardless of its backoff timer.
+	if q.activeQ.Len() != 4 {
+		t.Errorf("Expected 4 items to be in activeQ, but got: %v", q.activeQ.Len())
 	}
 	if q.podBackoffQ.Len() != 0 {
 		t.Errorf("Expected 0 item to be in podBackoffQ, but got: %v", q.podBackoffQ.Len())
 	}
 }

 // TestPriorityQueue_AssignedPodAdded tests AssignedPodAdded. It checks that
@@ -434,11 +451,14 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) {
 	}

 	c := clock.NewFakeClock(time.Now())
-	q := NewPriorityQueue(newDefaultQueueSort(), WithClock(c))
+	m := map[framework.ClusterEvent]sets.String{
+		{Resource: framework.Pod, ActionType: framework.Add}: sets.NewString("fakePlugin"),
+	}
+	q := NewPriorityQueue(newDefaultQueueSort(), WithClock(c), WithClusterEventMap(m))
 	q.Add(medPriorityPodInfo.Pod)
 	// Add a couple of pods to the unschedulableQ.
-	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(unschedulablePodInfo.Pod), q.SchedulingCycle())
-	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(affinityPod), q.SchedulingCycle())
+	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(unschedulablePodInfo.Pod, "fakePlugin"), q.SchedulingCycle())
+	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(affinityPod, "fakePlugin"), q.SchedulingCycle())

 	// Move clock to make the unschedulable pods complete backoff.
 	c.Step(DefaultPodInitialBackoffDuration + time.Second)
@@ -647,35 +667,35 @@ func TestUnschedulablePodsMap(t *testing.T) {
 			name:      "create, update, delete subset of pods",
 			podsToAdd: []*v1.Pod{pods[0], pods[1], pods[2], pods[3]},
 			expectedMapAfterAdd: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(pods[0])},
-				util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(pods[1])},
-				util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2])},
-				util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3])},
+				util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(pods[0]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(pods[1]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3]), UnschedulablePlugins: sets.NewString()},
 			},
 			podsToUpdate: []*v1.Pod{updatedPods[0]},
 			expectedMapAfterUpdate: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(updatedPods[0])},
-				util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(pods[1])},
-				util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2])},
-				util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3])},
+				util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(updatedPods[0]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(pods[1]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3]), UnschedulablePlugins: sets.NewString()},
 			},
 			podsToDelete: []*v1.Pod{pods[0], pods[1]},
 			expectedMapAfterDelete: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2])},
-				util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3])},
+				util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3]), UnschedulablePlugins: sets.NewString()},
 			},
 		},
 		{
 			name:      "create, update, delete all",
 			podsToAdd: []*v1.Pod{pods[0], pods[3]},
 			expectedMapAfterAdd: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(pods[0])},
-				util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3])},
+				util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(pods[0]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3]), UnschedulablePlugins: sets.NewString()},
 			},
 			podsToUpdate: []*v1.Pod{updatedPods[3]},
 			expectedMapAfterUpdate: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(pods[0])},
-				util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(updatedPods[3])},
+				util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(pods[0]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(updatedPods[3]), UnschedulablePlugins: sets.NewString()},
 			},
 			podsToDelete:           []*v1.Pod{pods[0], pods[3]},
 			expectedMapAfterDelete: map[string]*framework.QueuedPodInfo{},
@@ -684,17 +704,17 @@ func TestUnschedulablePodsMap(t *testing.T) {
 			name:      "delete non-existing and existing pods",
 			podsToAdd: []*v1.Pod{pods[1], pods[2]},
 			expectedMapAfterAdd: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(pods[1])},
-				util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2])},
+				util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(pods[1]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2]), UnschedulablePlugins: sets.NewString()},
 			},
 			podsToUpdate: []*v1.Pod{updatedPods[1]},
 			expectedMapAfterUpdate: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(updatedPods[1])},
-				util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2])},
+				util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(updatedPods[1]), UnschedulablePlugins: sets.NewString()},
+				util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2]), UnschedulablePlugins: sets.NewString()},
 			},
 			podsToDelete: []*v1.Pod{pods[2], pods[3]},
 			expectedMapAfterDelete: map[string]*framework.QueuedPodInfo{
-				util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(updatedPods[1])},
+				util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(updatedPods[1]), UnschedulablePlugins: sets.NewString()},
 			},
 		},
 	}
@@ -796,7 +816,7 @@ func TestRecentlyTriedPodsGoBack(t *testing.T) {
 	q.AddUnschedulableIfNotPresent(p1, q.SchedulingCycle())
 	c.Step(DefaultPodInitialBackoffDuration)
 	// Move all unschedulable pods to the active queue.
-	q.MoveAllToActiveOrBackoffQueue("test")
+	q.MoveAllToActiveOrBackoffQueue(UnschedulableTimeout)
 	// Simulation is over. Now let's pop all pods. The pod popped first should be
 	// the last one we pop here.
 	for i := 0; i < 5; i++ {
@@ -848,7 +868,7 @@ func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) {
 	// Move clock to make the unschedulable pods complete backoff.
 	c.Step(DefaultPodInitialBackoffDuration + time.Second)
 	// Move all unschedulable pods to the active queue.
-	q.MoveAllToActiveOrBackoffQueue("test")
+	q.MoveAllToActiveOrBackoffQueue(UnschedulableTimeout)

 	// Simulate a pod being popped by the scheduler,
 	// At this time, unschedulable pod should be popped.
@@ -891,7 +911,7 @@ func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) {
 	// Move clock to make the unschedulable pods complete backoff.
 	c.Step(DefaultPodInitialBackoffDuration + time.Second)
 	// Move all unschedulable pods to the active queue.
-	q.MoveAllToActiveOrBackoffQueue("test")
+	q.MoveAllToActiveOrBackoffQueue(UnschedulableTimeout)

 	// At this time, newerPod should be popped
 	// because it is the oldest tried pod.
@@ -971,7 +991,10 @@ func TestHighPriorityBackoff(t *testing.T) {
 // activeQ after one minutes if it is in unschedulableQ
 func TestHighPriorityFlushUnschedulableQLeftover(t *testing.T) {
 	c := clock.NewFakeClock(time.Now())
-	q := NewPriorityQueue(newDefaultQueueSort(), WithClock(c))
+	m := map[framework.ClusterEvent]sets.String{
+		{Resource: framework.Node, ActionType: framework.Add}: sets.NewString("fakePlugin"),
+	}
+	q := NewPriorityQueue(newDefaultQueueSort(), WithClock(c), WithClusterEventMap(m))
 	midPod := v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "test-midpod",
@@ -1015,8 +1038,8 @@ func TestHighPriorityFlushUnschedulableQLeftover(t *testing.T) {
 		Message: "fake scheduling failure",
 	})

-	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(&highPod), q.SchedulingCycle())
-	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(&midPod), q.SchedulingCycle())
+	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(&highPod, "fakePlugin"), q.SchedulingCycle())
+	q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(&midPod, "fakePlugin"), q.SchedulingCycle())
 	c.Step(unschedulableQTimeInterval + time.Second)
 	q.flushUnschedulableQLeftover()

@@ -1060,7 +1083,7 @@ var (
 		queue.podBackoffQ.Add(pInfo)
 	}
 	moveAllToActiveOrBackoffQ = func(queue *PriorityQueue, _ *framework.QueuedPodInfo) {
-		queue.MoveAllToActiveOrBackoffQueue("test")
+		queue.MoveAllToActiveOrBackoffQueue(UnschedulableTimeout)
 	}
 	flushBackoffQ = func(queue *PriorityQueue, _ *framework.QueuedPodInfo) {
 		queue.clock.(*clock.FakeClock).Step(2 * time.Second)
@@ -1440,7 +1463,7 @@ func TestIncomingPodsMetrics(t *testing.T) {
 				moveAllToActiveOrBackoffQ,
 			},
 			want: ` scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="unschedulable"} 3
-            scheduler_queue_incoming_pods_total{event="test",queue="backoff"} 3
+            scheduler_queue_incoming_pods_total{event="UnschedulableTimeout",queue="backoff"} 3
 `,
 		},
 		{
@@ -1451,7 +1474,7 @@ func TestIncomingPodsMetrics(t *testing.T) {
 				moveAllToActiveOrBackoffQ,
 			},
 			want: ` scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="unschedulable"} 3
-            scheduler_queue_incoming_pods_total{event="test",queue="active"} 3
+            scheduler_queue_incoming_pods_total{event="UnschedulableTimeout",queue="active"} 3
 `,
 		},
 		{
@@ -1540,7 +1563,7 @@ func TestBackOffFlow(t *testing.T) {
 		}

 		// An event happens.
-		q.MoveAllToActiveOrBackoffQueue("deleted pod")
+		q.MoveAllToActiveOrBackoffQueue(UnschedulableTimeout)

 		if _, ok, _ := q.podBackoffQ.Get(podInfo); !ok {
 			t.Errorf("pod %v is not in the backoff queue", podID)
@@ -1570,6 +1593,100 @@ func TestBackOffFlow(t *testing.T) {
 	}
 }

+func TestPodMatchesEvent(t *testing.T) {
+	tests := []struct {
+		name            string
+		podInfo         *framework.QueuedPodInfo
+		event           string
+		clusterEventMap map[framework.ClusterEvent]sets.String
+		want            bool
+	}{
+		{
+			name:    "event not registered",
+			podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}),
+			event:   "ClusterTearDown",
+			clusterEventMap: map[framework.ClusterEvent]sets.String{
+				{Resource: framework.Node, ActionType: framework.All}: sets.NewString("foo"),
+			},
+			want: false,
+		},
+		{
+			name:    "pod's failed plugin matches but event does not match",
+			podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}, "bar"),
+			event:   PodAdd,
+			clusterEventMap: map[framework.ClusterEvent]sets.String{
+				{Resource: framework.Node, ActionType: framework.All}: sets.NewString("foo", "bar"),
+			},
+			want: false,
+		},
+		{
+			name:    "wildcard event wins regardless of event matching",
+			podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}, "bar"),
+			event:   UnschedulableTimeout,
+			clusterEventMap: map[framework.ClusterEvent]sets.String{
+				{Resource: framework.Node, ActionType: framework.All}: sets.NewString("foo"),
+			},
+			want: true,
+		},
+		{
+			name:    "pod's failed plugin and event both match",
+			podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}, "bar"),
+			event:   NodeTaintChange,
+			clusterEventMap: map[framework.ClusterEvent]sets.String{
+				{Resource: framework.Node, ActionType: framework.All}: sets.NewString("foo", "bar"),
+			},
+			want: true,
+		},
+		{
+			name:    "pod's failed plugin registers fine-grained event",
+			podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}, "bar"),
+			event:   NodeTaintChange,
+			clusterEventMap: map[framework.ClusterEvent]sets.String{
+				{Resource: framework.Node, ActionType: framework.All}:             sets.NewString("foo"),
+				{Resource: framework.Node, ActionType: framework.UpdateNodeTaint}: sets.NewString("bar"),
+			},
+			want: true,
+		},
+		{
+			name:    "if pod failed by multiple plugins, a single match gets a final match",
+			podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}, "foo", "bar"),
+			event:   NodeAdd,
+			clusterEventMap: map[framework.ClusterEvent]sets.String{
+				{Resource: framework.Node, ActionType: framework.All}: sets.NewString("bar"),
+			},
+			want: true,
+		},
+		{
+			name:    "plugin returns WildCardEvent and plugin name matches",
+			podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}, "foo"),
+			event:   PvAdd,
+			clusterEventMap: map[framework.ClusterEvent]sets.String{
+				framework.WildCardEvent: sets.NewString("foo"),
+			},
+			want: true,
+		},
+		{
+			name:    "plugin returns WildCardEvent but plugin name not match",
+			podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}, "foo"),
+			event:   PvAdd,
+			clusterEventMap: map[framework.ClusterEvent]sets.String{
+				framework.WildCardEvent: sets.NewString("bar"),
+			},
+			want: false,
+		},
+	}

+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			q := NewPriorityQueue(newDefaultQueueSort())
+			q.clusterEventMap = tt.clusterEventMap
+			if got := q.podMatchesEvent(tt.podInfo, tt.event); got != tt.want {
+				t.Errorf("Want %v, but got %v", tt.want, got)
+			}
+		})
+	}
+}

 func makeQueuedPodInfos(num int, timestamp time.Time) []*framework.QueuedPodInfo {
 	var pInfos = make([]*framework.QueuedPodInfo, 0, num)
 	for i := 1; i <= num; i++ {
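
For reference, the updated tests wire the new option end to end roughly like this; a usage sketch assembled only from calls visible in the hunks above (NewPriorityQueue, WithClusterEventMap, MoveAllToActiveOrBackoffQueue), not a verbatim excerpt:

    m := map[framework.ClusterEvent]sets.String{
    	{Resource: framework.Node, ActionType: framework.Add}: sets.NewString("fooPlugin"),
    }
    q := NewPriorityQueue(newDefaultQueueSort(), WithClusterEventMap(m))
    // A NodeAdd event now re-queues only pods that "fooPlugin" failed,
    // plus pods whose scheduling failure was not attributed to any plugin.
    q.MoveAllToActiveOrBackoffQueue(NodeAdd)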