Store a cluster event to plugin map in SchedulerQueue

Commit f322019d7a by Wei Huang, 2021-01-28 22:29:10 -08:00 (parent 6404eda8de)
9 changed files with 546 additions and 53 deletions

View File

@ -241,6 +241,7 @@ func (g *genericScheduler) findNodesThatFitPod(ctx context.Context, fwk framewor
for _, n := range allNodes {
diagnosis.NodeToStatusMap[n.Node().Name] = s
}
// A status satisfying IsUnschedulable() gets its failed plugin injected into diagnosis.UnschedulablePlugins.
diagnosis.UnschedulablePlugins.Insert(s.FailedPlugin())
return nil, diagnosis, nil
}
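
For context, here is a minimal sketch of how a plugin name ends up in diagnosis.UnschedulablePlugins; it assumes the Status.SetFailedPlugin helper introduced alongside FailedPlugin() in this PR series:

s := framework.NewStatus(framework.Unschedulable, "node(s) didn't satisfy the plugin")
s.SetFailedPlugin("NodeAffinity") // normally stamped by the framework after running the plugin
diagnosis := framework.Diagnosis{
	NodeToStatusMap:      make(framework.NodeToStatusMap),
	UnschedulablePlugins: sets.NewString(),
}
diagnosis.UnschedulablePlugins.Insert(s.FailedPlugin()) // diagnosis now records "NodeAffinity"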

View File

@ -132,6 +132,8 @@ func (c *Configurator) create() (*Scheduler, error) {
// The nominator will be passed all the way to framework instantiation.
nominator := internalqueue.NewPodNominator()
// It's a "cluster event" -> "plugin names" map.
clusterEventMap := make(map[framework.ClusterEvent]sets.String)
profiles, err := profile.NewMap(c.profiles, c.registry, c.recorderFactory,
frameworkruntime.WithClientSet(c.client),
frameworkruntime.WithInformerFactory(c.informerFactory),
@ -139,6 +141,7 @@ func (c *Configurator) create() (*Scheduler, error) {
frameworkruntime.WithRunAllFilters(c.alwaysCheckAllPredicates),
frameworkruntime.WithPodNominator(nominator),
frameworkruntime.WithCaptureProfile(frameworkruntime.CaptureProfile(c.frameworkCapturer)),
frameworkruntime.WithClusterEventMap(clusterEventMap),
)
if err != nil {
return nil, fmt.Errorf("initializing profiles: %v", err)
@ -153,6 +156,7 @@ func (c *Configurator) create() (*Scheduler, error) {
internalqueue.WithPodInitialBackoffDuration(time.Duration(c.podInitialBackoffSeconds)*time.Second),
internalqueue.WithPodMaxBackoffDuration(time.Duration(c.podMaxBackoffSeconds)*time.Second),
internalqueue.WithPodNominator(nominator),
internalqueue.WithClusterEventMap(clusterEventMap),
)
// Setup cache debugger.
@ -317,7 +321,9 @@ func MakeDefaultErrorFunc(client clientset.Interface, podLister corelisters.PodL
pod := podInfo.Pod
if err == core.ErrNoNodesAvailable {
klog.V(2).InfoS("Unable to schedule pod; no nodes are registered to the cluster; waiting", "pod", klog.KObj(pod))
} else if _, ok := err.(*framework.FitError); ok {
} else if fitError, ok := err.(*framework.FitError); ok {
// Inject UnschedulablePlugins into PodInfo; it will be used later to move Pods between queues efficiently.
podInfo.UnschedulablePlugins = fitError.Diagnosis.UnschedulablePlugins
klog.V(2).InfoS("Unable to schedule pod; no fit; waiting", "pod", klog.KObj(pod), "err", err)
} else if apierrors.IsNotFound(err) {
klog.V(2).InfoS("Unable to schedule pod, possibly due to node not found; waiting", "pod", klog.KObj(pod), "err", err)
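
As a sketch of the error value this branch expects (field names as used elsewhere in this PR series; the plugin names are illustrative):

fitError := &framework.FitError{
	Pod:         pod,
	NumAllNodes: 3,
	Diagnosis: framework.Diagnosis{
		UnschedulablePlugins: sets.NewString("NodeAffinity", "TaintToleration"),
	},
}
// After the type assertion above, podInfo.UnschedulablePlugins points to this set,
// and the queue later uses it to decide which cluster events can move the Pod.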

View File

@ -279,6 +279,16 @@ type QueueSortPlugin interface {
Less(*QueuedPodInfo, *QueuedPodInfo) bool
}
// EnqueueExtensions is an optional interface that plugins can implement to efficiently
// move unschedulable Pods in internal scheduling queues.
type EnqueueExtensions interface {
// EventsToRegister returns a series of events that this plugin is interested in;
// they will be registered when the internal scheduling queue is instantiated.
// Note: the returned list needs to be static (i.e., not depend on configuration
// parameters); otherwise it would lead to undefined behavior.
EventsToRegister() []ClusterEvent
}
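
For illustration, a hypothetical Filter plugin (not part of this commit) that can only be unblocked by Node additions or label updates would implement the interface like so:

type hypotheticalNodeLabelPlugin struct{}

func (*hypotheticalNodeLabelPlugin) Name() string { return "HypotheticalNodeLabel" }

// EventsToRegister declares the only events that may make a Pod rejected by this
// plugin schedulable again; all other events skip this plugin's Pods.
func (*hypotheticalNodeLabelPlugin) EventsToRegister() []ClusterEvent {
	return []ClusterEvent{
		{Resource: Node, ActionType: Add | UpdateNodeLabel},
	}
}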
// PreFilterExtensions is an interface that is included in plugins that allows
// specifying callbacks to make incremental updates to the plugin's supposedly
// pre-calculated state.

View File

@ -60,6 +60,16 @@ const (
permit = "Permit"
)
var allClusterEvents = []framework.ClusterEvent{
{Resource: framework.Pod, ActionType: framework.All},
{Resource: framework.Node, ActionType: framework.All},
{Resource: framework.CSINode, ActionType: framework.All},
{Resource: framework.PersistentVolume, ActionType: framework.All},
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All},
{Resource: framework.Service, ActionType: framework.All},
{Resource: framework.StorageClass, ActionType: framework.All},
}
var configDecoder = scheme.Codecs.UniversalDecoder()
// frameworkImpl is the component responsible for initializing and running scheduler
@ -139,6 +149,7 @@ type frameworkOptions struct {
extenders []framework.Extender
runAllFilters bool
captureProfile CaptureProfile
clusterEventMap map[framework.ClusterEvent]sets.String
}
// Option for the frameworkImpl.
@ -221,6 +232,14 @@ func WithCaptureProfile(c CaptureProfile) Option {
func defaultFrameworkOptions() frameworkOptions {
return frameworkOptions{
metricsRecorder: newMetricsRecorder(1000, time.Second),
clusterEventMap: make(map[framework.ClusterEvent]sets.String),
}
}
// WithClusterEventMap sets clusterEventMap for the scheduling frameworkImpl.
func WithClusterEventMap(m map[framework.ClusterEvent]sets.String) Option {
return func(o *frameworkOptions) {
o.clusterEventMap = m
}
}
@ -292,6 +311,9 @@ func NewFramework(r Registry, plugins *config.Plugins, args []config.PluginConfi
}
pluginsMap[name] = p
// Update ClusterEventMap in place.
fillEventToPluginMap(p, options.clusterEventMap)
// A weight of zero is not permitted; plugins can be disabled explicitly
// when configured.
f.pluginNameToWeightMap[name] = int(pg[name].Weight)
@ -343,6 +365,37 @@ func NewFramework(r Registry, plugins *config.Plugins, args []config.PluginConfi
return f, nil
}
func fillEventToPluginMap(p framework.Plugin, eventToPlugins map[framework.ClusterEvent]sets.String) {
ext, ok := p.(framework.EnqueueExtensions)
if !ok {
// If the EnqueueExtensions interface is not implemented, register the default events
// for the plugin, to ensure backward compatibility.
registerClusterEvents(p.Name(), eventToPlugins, allClusterEvents)
return
}
events := ext.EventsToRegister()
// It's rare that a plugin implements EnqueueExtensions but returns nil.
// We treat it as: the plugin is not interested in any event, and hence a Pod failed
// by that plugin cannot be moved by any regular cluster event.
if len(events) == 0 {
klog.InfoS("Plugin's EventsToRegister() returned nil", "plugin", p.Name())
return
}
// The most common case: a plugin implements EnqueueExtensions and returns a non-nil result.
registerClusterEvents(p.Name(), eventToPlugins, events)
}
func registerClusterEvents(name string, eventToPlugins map[framework.ClusterEvent]sets.String, evts []framework.ClusterEvent) {
for _, evt := range evts {
if eventToPlugins[evt] == nil {
eventToPlugins[evt] = sets.NewString(name)
} else {
eventToPlugins[evt].Insert(name)
}
}
}
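
A quick usage sketch of the helper above: registering two plugins for the same event merges their names into a single set.

m := make(map[framework.ClusterEvent]sets.String)
nodeAdd := framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add}
registerClusterEvents("PluginA", m, []framework.ClusterEvent{nodeAdd})
registerClusterEvents("PluginB", m, []framework.ClusterEvent{nodeAdd})
// m[nodeAdd] now equals sets.NewString("PluginA", "PluginB")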
// getPluginArgsOrDefault returns a configuration provided by the user or builds
// a default from the scheme. Returns `nil, nil` if the plugin does not have
// defined arg types, such as in-tree plugins that don't require configuration

View File

@ -30,6 +30,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-base/metrics/testutil"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
@ -639,6 +640,166 @@ func TestNewFrameworkPluginDefaults(t *testing.T) {
}
}
// fakeNoopPlugin doesn't implement interface framework.EnqueueExtensions.
type fakeNoopPlugin struct{}
func (*fakeNoopPlugin) Name() string { return "fakeNoop" }
func (*fakeNoopPlugin) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
return nil
}
type fakeNodePlugin struct{}
func (*fakeNodePlugin) Name() string { return "fakeNode" }
func (*fakeNodePlugin) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
return nil
}
func (*fakeNodePlugin) EventsToRegister() []framework.ClusterEvent {
return []framework.ClusterEvent{
{Resource: framework.Pod, ActionType: framework.All},
{Resource: framework.Node, ActionType: framework.Delete},
{Resource: framework.CSINode, ActionType: framework.Update | framework.Delete},
}
}
type fakePodPlugin struct{}
func (*fakePodPlugin) Name() string { return "fakePod" }
func (*fakePodPlugin) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
return nil
}
func (*fakePodPlugin) EventsToRegister() []framework.ClusterEvent {
return []framework.ClusterEvent{
{Resource: framework.Pod, ActionType: framework.All},
{Resource: framework.Node, ActionType: framework.Add | framework.Delete},
{Resource: framework.Service, ActionType: framework.Delete},
}
}
// fakeNoopRuntimePlugin implements interface framework.EnqueueExtensions, but returns nil
// at runtime. This simulates a plugin that is registered at scheduler setup but does
// nothing due to a disabled feature gate.
type fakeNoopRuntimePlugin struct{}
func (*fakeNoopRuntimePlugin) Name() string { return "fakeNoopRuntime" }
func (*fakeNoopRuntimePlugin) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, _ *framework.NodeInfo) *framework.Status {
return nil
}
func (*fakeNoopRuntimePlugin) EventsToRegister() []framework.ClusterEvent { return nil }
func TestNewFrameworkFillEventToPluginMap(t *testing.T) {
tests := []struct {
name string
plugins []framework.Plugin
want map[framework.ClusterEvent]sets.String
}{
{
name: "no-op plugin",
plugins: []framework.Plugin{&fakeNoopPlugin{}},
want: map[framework.ClusterEvent]sets.String{
{Resource: framework.Pod, ActionType: framework.All}: sets.NewString("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.Node, ActionType: framework.All}: sets.NewString("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.All}: sets.NewString("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolume, ActionType: framework.All}: sets.NewString("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: sets.NewString("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.Service, ActionType: framework.All}: sets.NewString("fakeNoop", bindPlugin, queueSortPlugin),
{Resource: framework.StorageClass, ActionType: framework.All}: sets.NewString("fakeNoop", bindPlugin, queueSortPlugin),
},
},
{
name: "node plugin",
plugins: []framework.Plugin{&fakeNodePlugin{}},
want: map[framework.ClusterEvent]sets.String{
{Resource: framework.Pod, ActionType: framework.All}: sets.NewString("fakeNode", bindPlugin, queueSortPlugin),
{Resource: framework.Node, ActionType: framework.Delete}: sets.NewString("fakeNode"),
{Resource: framework.Node, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.Update | framework.Delete}: sets.NewString("fakeNode"),
{Resource: framework.CSINode, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolume, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.Service, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.StorageClass, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
},
},
{
name: "pod plugin",
plugins: []framework.Plugin{&fakePodPlugin{}},
want: map[framework.ClusterEvent]sets.String{
{Resource: framework.Pod, ActionType: framework.All}: sets.NewString("fakePod", bindPlugin, queueSortPlugin),
{Resource: framework.Node, ActionType: framework.Add | framework.Delete}: sets.NewString("fakePod"),
{Resource: framework.Node, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.Service, ActionType: framework.Delete}: sets.NewString("fakePod"),
{Resource: framework.Service, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolume, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.StorageClass, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
},
},
{
name: "node and pod plugin",
plugins: []framework.Plugin{&fakeNodePlugin{}, &fakePodPlugin{}},
want: map[framework.ClusterEvent]sets.String{
{Resource: framework.Node, ActionType: framework.Delete}: sets.NewString("fakeNode"),
{Resource: framework.Node, ActionType: framework.Add | framework.Delete}: sets.NewString("fakePod"),
{Resource: framework.Pod, ActionType: framework.All}: sets.NewString("fakeNode", "fakePod", bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.Update | framework.Delete}: sets.NewString("fakeNode"),
{Resource: framework.Service, ActionType: framework.Delete}: sets.NewString("fakePod"),
{Resource: framework.Node, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.Service, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolume, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.StorageClass, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
},
},
{
name: "no-op runtime plugin",
plugins: []framework.Plugin{&fakeNoopRuntimePlugin{}},
want: map[framework.ClusterEvent]sets.String{
{Resource: framework.Pod, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.Node, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.Service, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.CSINode, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolume, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.PersistentVolumeClaim, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
{Resource: framework.StorageClass, ActionType: framework.All}: sets.NewString(bindPlugin, queueSortPlugin),
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
registry := Registry{}
cfgPls := &config.Plugins{}
for _, pl := range tt.plugins {
tmpPl := pl
if err := registry.Register(pl.Name(), func(_ runtime.Object, _ framework.Handle) (framework.Plugin, error) {
return tmpPl, nil
}); err != nil {
t.Fatalf("fail to register filter plugin (%s)", pl.Name())
}
cfgPls.Filter.Enabled = append(cfgPls.Filter.Enabled, config.Plugin{Name: pl.Name()})
}
got := make(map[framework.ClusterEvent]sets.String)
_, err := newFrameworkWithQueueSortAndBind(registry, cfgPls, emptyArgs, WithClusterEventMap(got))
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(tt.want, got); diff != "" {
t.Errorf("Unexpected eventToPlugin map (-want,+got):%s", diff)
}
})
}
}
func TestRunScorePlugins(t *testing.T) {
tests := []struct {
name string

View File

@ -40,6 +40,52 @@ import (
var generation int64
// ActionType is an integer to represent one type of resource change.
// Different ActionTypes can be bit-wise OR'ed to compose new semantics.
type ActionType int64
// Constants for ActionTypes.
const (
Add ActionType = 1 << iota // 1
Delete // 10
// UpdateNodeXYZ is only applicable for Node events.
UpdateNodeAllocatable // 100
UpdateNodeLabel // 1000
UpdateNodeTaint // 10000
UpdateNodeCondition // 100000
All ActionType = 1<<iota - 1 // 111111
// Use the general Update type if you don't know or care about the specific sub-Update type to use.
Update = UpdateNodeAllocatable | UpdateNodeLabel | UpdateNodeTaint | UpdateNodeCondition
)
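
To sanity-check the bit layout above, the compatibility test used later in the queue (a bitwise AND) evaluates as follows:

var (
	_ = Update & Delete          // == 0: an Update event can't satisfy a Delete interest
	_ = Update & All             // == 60, non-zero: All covers every sub-Update bit
	_ = UpdateNodeTaint & Update // == 16, non-zero: a taint change is one kind of Update
)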
// GVK is short for group/version/kind, which can uniquely represent a particular API resource.
type GVK string
// Constants for GVKs.
const (
Pod GVK = "Pod"
Node GVK = "Node"
PersistentVolume GVK = "PersistentVolume"
PersistentVolumeClaim GVK = "PersistentVolumeClaim"
Service GVK = "Service"
StorageClass GVK = "storage.k8s.io/StorageClass"
CSINode GVK = "storage.k8s.io/CSINode"
WildCard GVK = "*"
)
// WildCardEvent semantically matches all resources on all actions.
var WildCardEvent = ClusterEvent{Resource: WildCard, ActionType: All}
// ClusterEvent abstracts how a system resource's state gets changed.
// Resource represents the standard API resources such as Pod, Node, etc.
// ActionType denotes the specific change such as Add, Update or Delete.
type ClusterEvent struct {
Resource GVK
ActionType ActionType
}
// QueuedPodInfo is a Pod wrapper with additional information related to
// the pod's status in the scheduling queue, such as the timestamp when
// it's added to the queue.
@ -55,6 +101,8 @@ type QueuedPodInfo struct {
// It shouldn't be updated once initialized. It's used to record the e2e scheduling
// latency for a pod.
InitialAttemptTimestamp time.Time
// UnschedulablePlugins records the names of the plugins that the Pod failed by in a scheduling cycle.
UnschedulablePlugins sets.String
}
// DeepCopy returns a deep copy of the QueuedPodInfo object.

View File

@ -16,6 +16,10 @@ limitations under the License.
package queue
import (
"k8s.io/kubernetes/pkg/scheduler/framework"
)
// Events that trigger scheduler queue to change.
const (
// PodAdd is the event when a new pod is added to API server.
@ -68,3 +72,28 @@ const (
// NodeConditionChange is the event when node condition is changed.
NodeConditionChange = "NodeConditionChange"
)
// TODO: benchmark the perf gain of making the keys enums (int) and turning
// clusterEventReg into a []framework.ClusterEvent.
var clusterEventReg = map[string]framework.ClusterEvent{
AssignedPodAdd: {Resource: framework.Pod, ActionType: framework.Add},
AssignedPodUpdate: {Resource: framework.Pod, ActionType: framework.Update},
AssignedPodDelete: {Resource: framework.Pod, ActionType: framework.Delete},
NodeAdd: {Resource: framework.Node, ActionType: framework.Add},
NodeSpecUnschedulableChange: {Resource: framework.Node, ActionType: framework.UpdateNodeTaint},
NodeAllocatableChange: {Resource: framework.Node, ActionType: framework.UpdateNodeAllocatable},
NodeLabelChange: {Resource: framework.Node, ActionType: framework.UpdateNodeLabel},
NodeTaintChange: {Resource: framework.Node, ActionType: framework.UpdateNodeTaint},
NodeConditionChange: {Resource: framework.Node, ActionType: framework.UpdateNodeCondition},
PvAdd: {Resource: framework.PersistentVolume, ActionType: framework.Add},
PvUpdate: {Resource: framework.PersistentVolume, ActionType: framework.Update},
PvcAdd: {Resource: framework.PersistentVolumeClaim, ActionType: framework.Add},
PvcUpdate: {Resource: framework.PersistentVolumeClaim, ActionType: framework.Update},
StorageClassAdd: {Resource: framework.StorageClass, ActionType: framework.Add},
CSINodeAdd: {Resource: framework.CSINode, ActionType: framework.Add},
CSINodeUpdate: {Resource: framework.CSINode, ActionType: framework.Update},
ServiceAdd: {Resource: framework.Service, ActionType: framework.Add},
ServiceUpdate: {Resource: framework.Service, ActionType: framework.Update},
ServiceDelete: {Resource: framework.Service, ActionType: framework.Delete},
UnschedulableTimeout: framework.WildCardEvent,
}
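
Resolving an event is then a plain map lookup from the string name to its typed form; a sketch:

ce, ok := clusterEventReg[NodeAdd]
// ok == true; ce == framework.ClusterEvent{Resource: framework.Node, ActionType: framework.Add}
_, ok = clusterEventReg["SomeUnknownEvent"]
// ok == false; podMatchesEvent treats unregistered events as non-matching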

View File

@ -34,6 +34,7 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
ktypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/pkg/scheduler/framework"
@ -142,6 +143,8 @@ type PriorityQueue struct {
// when we received move request.
moveRequestCycle int64
clusterEventMap map[framework.ClusterEvent]sets.String
// closed indicates that the queue is closed.
// It is mainly used to let Pop() exit its control loop while waiting for an item.
closed bool
@ -152,6 +155,7 @@ type priorityQueueOptions struct {
podInitialBackoffDuration time.Duration
podMaxBackoffDuration time.Duration
podNominator framework.PodNominator
clusterEventMap map[framework.ClusterEvent]sets.String
}
// Option configures a PriorityQueue
@ -185,6 +189,13 @@ func WithPodNominator(pn framework.PodNominator) Option {
}
}
// WithClusterEventMap sets clusterEventMap for PriorityQueue.
func WithClusterEventMap(m map[framework.ClusterEvent]sets.String) Option {
return func(o *priorityQueueOptions) {
o.clusterEventMap = m
}
}
var defaultPriorityQueueOptions = priorityQueueOptions{
clock: util.RealClock{},
podInitialBackoffDuration: DefaultPodInitialBackoffDuration,
@ -195,11 +206,12 @@ var defaultPriorityQueueOptions = priorityQueueOptions{
var _ SchedulingQueue = &PriorityQueue{}
// newQueuedPodInfoForLookup builds a QueuedPodInfo object for a lookup in the queue.
func newQueuedPodInfoForLookup(pod *v1.Pod) *framework.QueuedPodInfo {
func newQueuedPodInfoForLookup(pod *v1.Pod, plugins ...string) *framework.QueuedPodInfo {
// Since this is only used for a lookup in the queue, we only need to set the Pod,
// and so we avoid creating a full PodInfo, which is expensive to instantiate frequently.
return &framework.QueuedPodInfo{
PodInfo: &framework.PodInfo{Pod: pod},
PodInfo: &framework.PodInfo{Pod: pod},
UnschedulablePlugins: sets.NewString(plugins...),
}
}
@ -232,6 +244,7 @@ func NewPriorityQueue(
activeQ: heap.NewWithRecorder(podInfoKeyFunc, comp, metrics.NewActivePodsRecorder()),
unschedulableQ: newUnschedulablePodsMap(metrics.NewUnschedulablePodsRecorder()),
moveRequestCycle: -1,
clusterEventMap: options.clusterEventMap,
}
pq.cond.L = &pq.lock
pq.podBackoffQ = heap.NewWithRecorder(podInfoKeyFunc, pq.podsCompareBackoffCompleted, metrics.NewBackoffPodsRecorder())
@ -509,7 +522,16 @@ func (p *PriorityQueue) MoveAllToActiveOrBackoffQueue(event string) {
// NOTE: this function assumes the lock has been acquired by the caller
func (p *PriorityQueue) movePodsToActiveOrBackoffQueue(podInfoList []*framework.QueuedPodInfo, event string) {
moved := false
for _, pInfo := range podInfoList {
// If the event doesn't help make the Pod schedulable, skip it.
// Note: we skip the check if pInfo.UnschedulablePlugins is empty, which denotes
// either some abnormal error, or that scheduling the pod failed by plugins other
// than PreFilter, Filter and Permit. In that case, it's desirable to move it anyway.
if len(pInfo.UnschedulablePlugins) != 0 && !p.podMatchesEvent(pInfo, event) {
continue
}
moved = true
pod := pInfo.Pod
if p.isPodBackingoff(pInfo) {
if err := p.podBackoffQ.Add(pInfo); err != nil {
@ -528,7 +550,9 @@ func (p *PriorityQueue) movePodsToActiveOrBackoffQueue(podInfoList []*framework.
}
}
p.moveRequestCycle = p.schedulingCycle
p.cond.Broadcast()
if moved {
p.cond.Broadcast()
}
}
// getUnschedulablePodsWithMatchingAffinityTerm returns unschedulable pods which have
@ -625,12 +649,13 @@ func (p *PriorityQueue) NumUnschedulablePods() int {
}
// newQueuedPodInfo builds a QueuedPodInfo object.
func (p *PriorityQueue) newQueuedPodInfo(pod *v1.Pod) *framework.QueuedPodInfo {
func (p *PriorityQueue) newQueuedPodInfo(pod *v1.Pod, plugins ...string) *framework.QueuedPodInfo {
now := p.clock.Now()
return &framework.QueuedPodInfo{
PodInfo: framework.NewPodInfo(pod),
Timestamp: now,
InitialAttemptTimestamp: now,
UnschedulablePlugins: sets.NewString(plugins...),
}
}
@ -820,3 +845,46 @@ func MakeNextPodFunc(queue SchedulingQueue) func() *framework.QueuedPodInfo {
func podInfoKeyFunc(obj interface{}) (string, error) {
return cache.MetaNamespaceKeyFunc(obj.(*framework.QueuedPodInfo).Pod)
}
// podMatchesEvent checks if the Pod may become schedulable upon the event,
// by looking up the global clusterEventMap registry.
func (p *PriorityQueue) podMatchesEvent(podInfo *framework.QueuedPodInfo, event string) bool {
clusterEvent, ok := clusterEventReg[event]
if !ok {
return false
}
if clusterEvent == framework.WildCardEvent {
return true
}
for evt, nameSet := range p.clusterEventMap {
// First, verify whether the two ClusterEvents match:
// - either the registered event from the plugin side is a WildCardEvent,
// - or the two events have identical Resource fields and *compatible* ActionTypes.
// Note that the ActionTypes don't need to be *identical*: we check whether the
// ANDed value is zero or not. This makes it easy to tell that Update & Delete are
// not compatible, while Update & All are.
evtMatch := evt == framework.WildCardEvent ||
(evt.Resource == clusterEvent.Resource && evt.ActionType&clusterEvent.ActionType != 0)
// Second, verify whether the plugin names intersect; if they don't, move on to
// check the next registered event.
if evtMatch && intersect(nameSet, podInfo.UnschedulablePlugins) {
return true
}
}
return false
}
func intersect(x, y sets.String) bool {
if len(x) > len(y) {
x, y = y, x
}
for v := range x {
if y.Has(v) {
return true
}
}
return false
}
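
And a minimal sketch of the intersect helper's semantics:

x := sets.NewString("foo", "bar")
y := sets.NewString("bar", "baz")
_ = intersect(x, y)                     // true: both contain "bar"
_ = intersect(x, sets.NewString("qux")) // false: no common element
// The swap at the top keeps the loop iterating over the smaller set,
// so the cost is O(min(|x|, |y|)).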

View File

@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/types"
ktypes "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/component-base/metrics/testutil"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/scheduler/framework"
@ -361,40 +362,56 @@ func TestPriorityQueue_Delete(t *testing.T) {
func TestPriorityQueue_MoveAllToActiveOrBackoffQueue(t *testing.T) {
c := clock.NewFakeClock(time.Now())
q := NewPriorityQueue(newDefaultQueueSort(), WithClock(c))
m := map[framework.ClusterEvent]sets.String{
{Resource: framework.Node, ActionType: framework.Add}: sets.NewString("fooPlugin"),
}
q := NewPriorityQueue(newDefaultQueueSort(), WithClock(c), WithClusterEventMap(m))
q.Add(medPriorityPodInfo.Pod)
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(unschedulablePodInfo.Pod), q.SchedulingCycle())
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(highPriorityPodInfo.Pod), q.SchedulingCycle())
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(unschedulablePodInfo.Pod, "fooPlugin"), q.SchedulingCycle())
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(highPriorityPodInfo.Pod, "fooPlugin"), q.SchedulingCycle())
// Construct a Pod, but don't associate its scheduling failure with any plugin.
hpp1 := highPriorityPodInfo.Pod.DeepCopy()
hpp1.Name = "hpp1"
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(hpp1), q.SchedulingCycle())
// Construct another Pod, and associate its scheduling failure with plugin "barPlugin".
hpp2 := highPriorityPodInfo.Pod.DeepCopy()
hpp2.Name = "hpp2"
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(hpp2, "barPlugin"), q.SchedulingCycle())
// The pods are still backing off, so the move puts them into the backoffQ.
q.MoveAllToActiveOrBackoffQueue("test")
q.MoveAllToActiveOrBackoffQueue(NodeAdd)
if q.activeQ.Len() != 1 {
t.Errorf("Expected 1 item to be in activeQ, but got: %v", q.activeQ.Len())
}
if q.podBackoffQ.Len() != 2 {
t.Errorf("Expected 2 items to be in podBackoffQ, but got: %v", q.podBackoffQ.Len())
// hpp2 won't be moved.
if q.podBackoffQ.Len() != 3 {
t.Fatalf("Expected 3 items to be in podBackoffQ, but got: %v", q.podBackoffQ.Len())
}
// Pop out all the pods in the backoffQ.
q.podBackoffQ.Pop()
q.podBackoffQ.Pop()
for q.podBackoffQ.Len() != 0 {
q.podBackoffQ.Pop()
}
q.schedulingCycle++
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(unschedulablePodInfo.Pod), q.SchedulingCycle())
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(highPriorityPodInfo.Pod), q.SchedulingCycle())
if q.unschedulableQ.get(unschedulablePodInfo.Pod) == nil || q.unschedulableQ.get(highPriorityPodInfo.Pod) == nil {
t.Errorf("Expected %v and %v in the unschedulableQ", unschedulablePodInfo.Pod.Name, highPriorityPodInfo.Pod.Name)
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(unschedulablePodInfo.Pod, "fooPlugin"), q.SchedulingCycle())
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(highPriorityPodInfo.Pod, "fooPlugin"), q.SchedulingCycle())
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(hpp1), q.SchedulingCycle())
for _, pod := range []*v1.Pod{unschedulablePodInfo.Pod, highPriorityPodInfo.Pod, hpp1, hpp2} {
if q.unschedulableQ.get(pod) == nil {
t.Errorf("Expected %v in the unschedulableQ", pod.Name)
}
}
// Move the clock by podInitialBackoffDuration, so that pods in the unschedulableQ
// complete their backoff and get moved into activeQ.
c.Step(q.podInitialBackoffDuration)
q.MoveAllToActiveOrBackoffQueue("test")
if q.activeQ.Len() != 3 {
t.Errorf("Expected 3 items to be in activeQ, but got: %v", q.activeQ.Len())
q.MoveAllToActiveOrBackoffQueue(NodeAdd)
// hpp2 won't be moved regardless of its backoff timer.
if q.activeQ.Len() != 4 {
t.Errorf("Expected 4 items to be in activeQ, but got: %v", q.activeQ.Len())
}
if q.podBackoffQ.Len() != 0 {
t.Errorf("Expected 0 item to be in podBackoffQ, but got: %v", q.podBackoffQ.Len())
}
}
// TestPriorityQueue_AssignedPodAdded tests AssignedPodAdded. It checks that
@ -434,11 +451,14 @@ func TestPriorityQueue_AssignedPodAdded(t *testing.T) {
}
c := clock.NewFakeClock(time.Now())
q := NewPriorityQueue(newDefaultQueueSort(), WithClock(c))
m := map[framework.ClusterEvent]sets.String{
{Resource: framework.Pod, ActionType: framework.Add}: sets.NewString("fakePlugin"),
}
q := NewPriorityQueue(newDefaultQueueSort(), WithClock(c), WithClusterEventMap(m))
q.Add(medPriorityPodInfo.Pod)
// Add a couple of pods to the unschedulableQ.
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(unschedulablePodInfo.Pod), q.SchedulingCycle())
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(affinityPod), q.SchedulingCycle())
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(unschedulablePodInfo.Pod, "fakePlugin"), q.SchedulingCycle())
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(affinityPod, "fakePlugin"), q.SchedulingCycle())
// Move clock to make the unschedulable pods complete backoff.
c.Step(DefaultPodInitialBackoffDuration + time.Second)
@ -647,35 +667,35 @@ func TestUnschedulablePodsMap(t *testing.T) {
name: "create, update, delete subset of pods",
podsToAdd: []*v1.Pod{pods[0], pods[1], pods[2], pods[3]},
expectedMapAfterAdd: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(pods[0])},
util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(pods[1])},
util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2])},
util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3])},
util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(pods[0]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(pods[1]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3]), UnschedulablePlugins: sets.NewString()},
},
podsToUpdate: []*v1.Pod{updatedPods[0]},
expectedMapAfterUpdate: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(updatedPods[0])},
util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(pods[1])},
util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2])},
util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3])},
util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(updatedPods[0]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(pods[1]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3]), UnschedulablePlugins: sets.NewString()},
},
podsToDelete: []*v1.Pod{pods[0], pods[1]},
expectedMapAfterDelete: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2])},
util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3])},
util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3]), UnschedulablePlugins: sets.NewString()},
},
},
{
name: "create, update, delete all",
podsToAdd: []*v1.Pod{pods[0], pods[3]},
expectedMapAfterAdd: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(pods[0])},
util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3])},
util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(pods[0]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(pods[3]), UnschedulablePlugins: sets.NewString()},
},
podsToUpdate: []*v1.Pod{updatedPods[3]},
expectedMapAfterUpdate: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(pods[0])},
util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(updatedPods[3])},
util.GetPodFullName(pods[0]): {PodInfo: framework.NewPodInfo(pods[0]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[3]): {PodInfo: framework.NewPodInfo(updatedPods[3]), UnschedulablePlugins: sets.NewString()},
},
podsToDelete: []*v1.Pod{pods[0], pods[3]},
expectedMapAfterDelete: map[string]*framework.QueuedPodInfo{},
@ -684,17 +704,17 @@ func TestUnschedulablePodsMap(t *testing.T) {
name: "delete non-existing and existing pods",
podsToAdd: []*v1.Pod{pods[1], pods[2]},
expectedMapAfterAdd: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(pods[1])},
util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2])},
util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(pods[1]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2]), UnschedulablePlugins: sets.NewString()},
},
podsToUpdate: []*v1.Pod{updatedPods[1]},
expectedMapAfterUpdate: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(updatedPods[1])},
util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2])},
util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(updatedPods[1]), UnschedulablePlugins: sets.NewString()},
util.GetPodFullName(pods[2]): {PodInfo: framework.NewPodInfo(pods[2]), UnschedulablePlugins: sets.NewString()},
},
podsToDelete: []*v1.Pod{pods[2], pods[3]},
expectedMapAfterDelete: map[string]*framework.QueuedPodInfo{
util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(updatedPods[1])},
util.GetPodFullName(pods[1]): {PodInfo: framework.NewPodInfo(updatedPods[1]), UnschedulablePlugins: sets.NewString()},
},
},
}
@ -796,7 +816,7 @@ func TestRecentlyTriedPodsGoBack(t *testing.T) {
q.AddUnschedulableIfNotPresent(p1, q.SchedulingCycle())
c.Step(DefaultPodInitialBackoffDuration)
// Move all unschedulable pods to the active queue.
q.MoveAllToActiveOrBackoffQueue("test")
q.MoveAllToActiveOrBackoffQueue(UnschedulableTimeout)
// Simulation is over. Now let's pop all pods. The pod popped first should be
// the last one we pop here.
for i := 0; i < 5; i++ {
@ -848,7 +868,7 @@ func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) {
// Move clock to make the unschedulable pods complete backoff.
c.Step(DefaultPodInitialBackoffDuration + time.Second)
// Move all unschedulable pods to the active queue.
q.MoveAllToActiveOrBackoffQueue("test")
q.MoveAllToActiveOrBackoffQueue(UnschedulableTimeout)
// Simulate a pod being popped by the scheduler,
// At this time, unschedulable pod should be popped.
@ -891,7 +911,7 @@ func TestPodFailedSchedulingMultipleTimesDoesNotBlockNewerPod(t *testing.T) {
// Move clock to make the unschedulable pods complete backoff.
c.Step(DefaultPodInitialBackoffDuration + time.Second)
// Move all unschedulable pods to the active queue.
q.MoveAllToActiveOrBackoffQueue("test")
q.MoveAllToActiveOrBackoffQueue(UnschedulableTimeout)
// At this time, newerPod should be popped
// because it is the oldest tried pod.
@ -971,7 +991,10 @@ func TestHighPriorityBackoff(t *testing.T) {
// activeQ after one minute if it is in unschedulableQ
func TestHighPriorityFlushUnschedulableQLeftover(t *testing.T) {
c := clock.NewFakeClock(time.Now())
q := NewPriorityQueue(newDefaultQueueSort(), WithClock(c))
m := map[framework.ClusterEvent]sets.String{
{Resource: framework.Node, ActionType: framework.Add}: sets.NewString("fakePlugin"),
}
q := NewPriorityQueue(newDefaultQueueSort(), WithClock(c), WithClusterEventMap(m))
midPod := v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test-midpod",
@ -1015,8 +1038,8 @@ func TestHighPriorityFlushUnschedulableQLeftover(t *testing.T) {
Message: "fake scheduling failure",
})
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(&highPod), q.SchedulingCycle())
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(&midPod), q.SchedulingCycle())
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(&highPod, "fakePlugin"), q.SchedulingCycle())
q.AddUnschedulableIfNotPresent(q.newQueuedPodInfo(&midPod, "fakePlugin"), q.SchedulingCycle())
c.Step(unschedulableQTimeInterval + time.Second)
q.flushUnschedulableQLeftover()
@ -1060,7 +1083,7 @@ var (
queue.podBackoffQ.Add(pInfo)
}
moveAllToActiveOrBackoffQ = func(queue *PriorityQueue, _ *framework.QueuedPodInfo) {
queue.MoveAllToActiveOrBackoffQueue("test")
queue.MoveAllToActiveOrBackoffQueue(UnschedulableTimeout)
}
flushBackoffQ = func(queue *PriorityQueue, _ *framework.QueuedPodInfo) {
queue.clock.(*clock.FakeClock).Step(2 * time.Second)
@ -1440,7 +1463,7 @@ func TestIncomingPodsMetrics(t *testing.T) {
moveAllToActiveOrBackoffQ,
},
want: ` scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="unschedulable"} 3
scheduler_queue_incoming_pods_total{event="test",queue="backoff"} 3
scheduler_queue_incoming_pods_total{event="UnschedulableTimeout",queue="backoff"} 3
`,
},
{
@ -1451,7 +1474,7 @@ func TestIncomingPodsMetrics(t *testing.T) {
moveAllToActiveOrBackoffQ,
},
want: ` scheduler_queue_incoming_pods_total{event="ScheduleAttemptFailure",queue="unschedulable"} 3
scheduler_queue_incoming_pods_total{event="test",queue="active"} 3
scheduler_queue_incoming_pods_total{event="UnschedulableTimeout",queue="active"} 3
`,
},
{
@ -1540,7 +1563,7 @@ func TestBackOffFlow(t *testing.T) {
}
// An event happens.
q.MoveAllToActiveOrBackoffQueue("deleted pod")
q.MoveAllToActiveOrBackoffQueue(UnschedulableTimeout)
if _, ok, _ := q.podBackoffQ.Get(podInfo); !ok {
t.Errorf("pod %v is not in the backoff queue", podID)
@ -1570,6 +1593,100 @@ func TestBackOffFlow(t *testing.T) {
}
}
func TestPodMatchesEvent(t *testing.T) {
tests := []struct {
name string
podInfo *framework.QueuedPodInfo
event string
clusterEventMap map[framework.ClusterEvent]sets.String
want bool
}{
{
name: "event not registered",
podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}),
event: "ClusterTearDown",
clusterEventMap: map[framework.ClusterEvent]sets.String{
{Resource: framework.Node, ActionType: framework.All}: sets.NewString("foo"),
},
want: false,
},
{
name: "pod's failed plugin matches but event does not match",
podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}, "bar"),
event: PodAdd,
clusterEventMap: map[framework.ClusterEvent]sets.String{
{Resource: framework.Node, ActionType: framework.All}: sets.NewString("foo", "bar"),
},
want: false,
},
{
name: "wildcard event wins regardless of event matching",
podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}, "bar"),
event: UnschedulableTimeout,
clusterEventMap: map[framework.ClusterEvent]sets.String{
{Resource: framework.Node, ActionType: framework.All}: sets.NewString("foo"),
},
want: true,
},
{
name: "pod's failed plugin and event both match",
podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}, "bar"),
event: NodeTaintChange,
clusterEventMap: map[framework.ClusterEvent]sets.String{
{Resource: framework.Node, ActionType: framework.All}: sets.NewString("foo", "bar"),
},
want: true,
},
{
name: "pod's failed plugin registers fine-grained event",
podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}, "bar"),
event: NodeTaintChange,
clusterEventMap: map[framework.ClusterEvent]sets.String{
{Resource: framework.Node, ActionType: framework.All}: sets.NewString("foo"),
{Resource: framework.Node, ActionType: framework.UpdateNodeTaint}: sets.NewString("bar"),
},
want: true,
},
{
name: "if pod failed by multiple plugins, a single match gets a final match",
podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}, "foo", "bar"),
event: NodeAdd,
clusterEventMap: map[framework.ClusterEvent]sets.String{
{Resource: framework.Node, ActionType: framework.All}: sets.NewString("bar"),
},
want: true,
},
{
name: "plugin returns WildCardEvent and plugin name matches",
podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}, "foo"),
event: PvAdd,
clusterEventMap: map[framework.ClusterEvent]sets.String{
framework.WildCardEvent: sets.NewString("foo"),
},
want: true,
},
{
name: "plugin returns WildCardEvent but plugin name not match",
podInfo: newQueuedPodInfoForLookup(&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "p"}}, "foo"),
event: PvAdd,
clusterEventMap: map[framework.ClusterEvent]sets.String{
framework.WildCardEvent: sets.NewString("bar"),
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
q := NewPriorityQueue(newDefaultQueueSort())
q.clusterEventMap = tt.clusterEventMap
if got := q.podMatchesEvent(tt.podInfo, tt.event); got != tt.want {
t.Errorf("Want %v, but got %v", tt.want, got)
}
})
}
}
func makeQueuedPodInfos(num int, timestamp time.Time) []*framework.QueuedPodInfo {
var pInfos = make([]*framework.QueuedPodInfo, 0, num)
for i := 1; i <= num; i++ {