mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-07 19:23:40 +00:00)
Merge pull request #127088 from sanposhiho/fix-test

fix: run a test for requeueing with PreFilterResult correctly

commit 17bae91f20
@@ -35,6 +35,7 @@ import (
 	"time"
 
 	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -117,14 +118,18 @@ type SchedulingQueue interface {
 	MoveAllToActiveOrBackoffQueue(logger klog.Logger, event framework.ClusterEvent, oldObj, newObj interface{}, preCheck PreEnqueueCheck)
 	AssignedPodAdded(logger klog.Logger, pod *v1.Pod)
 	AssignedPodUpdated(logger klog.Logger, oldPod, newPod *v1.Pod, event framework.ClusterEvent)
-	PendingPods() ([]*v1.Pod, string)
-	PodsInActiveQ() []*v1.Pod
-	InFlightPods() []*v1.Pod
 
 	// Close closes the SchedulingQueue so that the goroutine which is
 	// waiting to pop items can exit gracefully.
 	Close()
 	// Run starts the goroutines managing the queue.
 	Run(logger klog.Logger)
 
+	// The following functions are supposed to be used only for testing or debugging.
+	GetPod(name, namespace string) (*framework.QueuedPodInfo, bool)
+	PendingPods() ([]*v1.Pod, string)
+	InFlightPods() []*v1.Pod
+	PodsInActiveQ() []*v1.Pod
 }
 
 // NewSchedulingQueue initializes a priority queue as a new scheduling queue.
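
Note: the interface change above gathers the test-only accessors behind the trailing comment and introduces GetPod, which the integration-test fix at the bottom of this commit relies on. Any other implementation of SchedulingQueue has to grow the new method; a minimal sketch of a stub (the fakeQueue type here is hypothetical, not part of this commit):

    // GetPod on a hypothetical fake queue: a linear scan is enough for tests.
    func (f *fakeQueue) GetPod(name, namespace string) (*framework.QueuedPodInfo, bool) {
    	for _, pInfo := range f.pods {
    		if pInfo.Pod.Name == name && pInfo.Pod.Namespace == namespace {
    			return pInfo, true
    		}
    	}
    	return nil, false
    }
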
@@ -1149,6 +1154,34 @@ func (p *PriorityQueue) PodsInActiveQ() []*v1.Pod {
 
 var pendingPodsSummary = "activeQ:%v; backoffQ:%v; unschedulablePods:%v"
 
+// GetPod searches for a pod in the activeQ, backoffQ, and unschedulablePods.
+func (p *PriorityQueue) GetPod(name, namespace string) (pInfo *framework.QueuedPodInfo, ok bool) {
+	p.lock.RLock()
+	defer p.lock.RUnlock()
+
+	pInfoLookup := &framework.QueuedPodInfo{
+		PodInfo: &framework.PodInfo{
+			Pod: &v1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      name,
+					Namespace: namespace,
+				},
+			},
+		},
+	}
+	if pInfo, ok = p.podBackoffQ.Get(pInfoLookup); ok {
+		return pInfo, true
+	}
+	if pInfo = p.unschedulablePods.get(pInfoLookup.Pod); pInfo != nil {
+		return pInfo, true
+	}
+
+	p.activeQ.underRLock(func(unlockedActiveQ unlockedActiveQueueReader) {
+		pInfo, ok = unlockedActiveQ.Get(pInfoLookup)
+	})
+	return
+}
+
 // PendingPods returns all the pending pods in the queue; accompanied by a debugging string
 // showing the number of pods in each queue respectively.
 // This function is used for debugging purposes in the scheduler cache dumper and comparer.
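
Note: GetPod probes the three internal structures in order (backoffQ, unschedulablePods, then activeQ under its own reader lock), so a pending pod is found wherever it currently sits. A sketch of a debugging call site (queue setup assumed, names illustrative):

    // Ask the queue why a pod is still pending.
    if pInfo, ok := q.GetPod("my-pod", "default"); ok {
    	// QueuedPodInfo carries scheduling metadata next to the Pod itself,
    	// e.g. the set of plugins that rejected it on the last attempt.
    	fmt.Printf("pod %s/%s rejected by: %v\n",
    		pInfo.Pod.Namespace, pInfo.Pod.Name, pInfo.UnschedulablePlugins.UnsortedList())
    }
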
@@ -3964,6 +3964,92 @@ func Test_queuedPodInfo_gatedSetUponCreationAndUnsetUponUpdate(t *testing.T) {
 	}
 }
+
+func TestPriorityQueue_GetPod(t *testing.T) {
+	activeQPod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "pod1",
+			Namespace: "default",
+		},
+	}
+	backoffQPod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "pod2",
+			Namespace: "default",
+		},
+	}
+	unschedPod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "pod3",
+			Namespace: "default",
+		},
+	}
+
+	_, ctx := ktesting.NewTestContext(t)
+	q := NewTestQueue(ctx, newDefaultQueueSort())
+	q.activeQ.underLock(func(unlockedActiveQ unlockedActiveQueuer) {
+		unlockedActiveQ.AddOrUpdate(newQueuedPodInfoForLookup(activeQPod))
+	})
+	q.podBackoffQ.AddOrUpdate(newQueuedPodInfoForLookup(backoffQPod))
+	q.unschedulablePods.addOrUpdate(newQueuedPodInfoForLookup(unschedPod))
+
+	tests := []struct {
+		name        string
+		podName     string
+		namespace   string
+		expectedPod *v1.Pod
+		expectedOK  bool
+	}{
+		{
+			name:        "pod is found in activeQ",
+			podName:     "pod1",
+			namespace:   "default",
+			expectedPod: activeQPod,
+			expectedOK:  true,
+		},
+		{
+			name:        "pod is found in backoffQ",
+			podName:     "pod2",
+			namespace:   "default",
+			expectedPod: backoffQPod,
+			expectedOK:  true,
+		},
+		{
+			name:        "pod is found in unschedulablePods",
+			podName:     "pod3",
+			namespace:   "default",
+			expectedPod: unschedPod,
+			expectedOK:  true,
+		},
+		{
+			name:        "pod is not found",
+			podName:     "pod4",
+			namespace:   "default",
+			expectedPod: nil,
+			expectedOK:  false,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			pInfo, ok := q.GetPod(tt.podName, tt.namespace)
+			if ok != tt.expectedOK {
+				t.Errorf("Expected ok=%v, but got ok=%v", tt.expectedOK, ok)
+			}
+
+			if tt.expectedPod == nil {
+				if pInfo == nil {
+					return
+				}
+				t.Fatalf("Expected pod is empty, but got pod=%v", pInfo.Pod)
+			}
+
+			if !cmp.Equal(pInfo.Pod, tt.expectedPod) {
+				t.Errorf("Expected pod=%v, but got pod=%v", tt.expectedPod, pInfo.Pod)
+			}
+		})
+	}
+}
 
 func attemptQueuedPodInfo(podInfo *framework.QueuedPodInfo) *framework.QueuedPodInfo {
 	podInfo.Attempts++
 	return podInfo
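
Note: the table above exercises each internal queue plus a plain miss. One nearby case it does not cover is a matching name in a different namespace, which must also miss because the lookup keys on namespace plus name; a hypothetical extra entry (not in this commit) would look like:

    {
    	// Hypothetical addition: pod1 exists, but only in "default",
    	// so a lookup in another namespace must return false.
    	name:        "pod exists in another namespace",
    	podName:     "pod1",
    	namespace:   "other-ns",
    	expectedPod: nil,
    	expectedOK:  false,
    },
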
@@ -129,7 +129,7 @@ func TestPreCheckForNode(t *testing.T) {
 				st.MakePod().Name("p1").Req(cpu4).Obj(),
 				st.MakePod().Name("p2").Req(cpu16).Obj(),
 				st.MakePod().Name("p3").Req(cpu4).Req(cpu8).Obj(),
-				st.MakePod().Name("p4").NodeAffinityIn("hostname", []string{"fake-node"}).Obj(),
+				st.MakePod().Name("p4").NodeAffinityIn("hostname", []string{"fake-node"}, st.NodeSelectorTypeMatchExpressions).Obj(),
 				st.MakePod().Name("p5").NodeAffinityNotIn("hostname", []string{"fake-node"}).Obj(),
 				st.MakePod().Name("p6").Obj(),
 				st.MakePod().Name("p7").Node("invalid-node").Obj(),
@@ -150,7 +150,7 @@ func TestPreCheckForNode(t *testing.T) {
 				st.MakePod().Name("p1").Req(cpu4).Obj(),
 				st.MakePod().Name("p2").Req(cpu16).Obj(),
 				st.MakePod().Name("p3").Req(cpu4).Req(cpu8).Obj(),
-				st.MakePod().Name("p4").NodeAffinityIn("hostname", []string{"fake-node"}).Obj(),
+				st.MakePod().Name("p4").NodeAffinityIn("hostname", []string{"fake-node"}, st.NodeSelectorTypeMatchExpressions).Obj(),
 				st.MakePod().Name("p5").NodeAffinityNotIn("hostname", []string{"fake-node"}).Obj(),
 				st.MakePod().Name("p6").Obj(),
 				st.MakePod().Name("p7").Node("invalid-node").Obj(),
@@ -188,10 +188,10 @@ func TestPreCheckForNode(t *testing.T) {
 			},
 			pods: []*v1.Pod{
 				st.MakePod().Name("p1").Req(cpu4).NodeAffinityNotIn("hostname", []string{"fake-node"}).Obj(),
-				st.MakePod().Name("p2").Req(cpu16).NodeAffinityIn("hostname", []string{"fake-node"}).Obj(),
-				st.MakePod().Name("p3").Req(cpu8).NodeAffinityIn("hostname", []string{"fake-node"}).Obj(),
+				st.MakePod().Name("p2").Req(cpu16).NodeAffinityIn("hostname", []string{"fake-node"}, st.NodeSelectorTypeMatchExpressions).Obj(),
+				st.MakePod().Name("p3").Req(cpu8).NodeAffinityIn("hostname", []string{"fake-node"}, st.NodeSelectorTypeMatchExpressions).Obj(),
 				st.MakePod().Name("p4").HostPort(8080).Node("invalid-node").Obj(),
-				st.MakePod().Name("p5").Req(cpu4).NodeAffinityIn("hostname", []string{"fake-node"}).HostPort(80).Obj(),
+				st.MakePod().Name("p5").Req(cpu4).NodeAffinityIn("hostname", []string{"fake-node"}, st.NodeSelectorTypeMatchExpressions).HostPort(80).Obj(),
 			},
 			want: []bool{false, false, true, false, false},
 		},
@@ -137,11 +137,7 @@ var (
 			}},
 		},
 		NodeSelector: func() *v1.NodeSelector {
-			// Label selector...
-			nodeSelector := st.MakeNodeSelector().In("metadata.name", []string{nodeName}).Obj()
-			// ... but we need a field selector, so let's swap.
-			nodeSelector.NodeSelectorTerms[0].MatchExpressions, nodeSelector.NodeSelectorTerms[0].MatchFields = nodeSelector.NodeSelectorTerms[0].MatchFields, nodeSelector.NodeSelectorTerms[0].MatchExpressions
-			return nodeSelector
+			return st.MakeNodeSelector().In("metadata.name", []string{nodeName}, st.NodeSelectorTypeMatchFields).Obj()
 		}(),
 	}
 	deallocatingClaim = st.FromResourceClaim(pendingClaim).
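
Note: before this commit the wrapper could only emit MatchExpressions, so a field selector had to be faked by swapping the two slices after construction; the new NodeSelectorType argument states the intent directly. Roughly, the removed and added forms should build the same selector (a side-by-side sketch, not code from this diff):

    // Removed form: build a label selector, then swap slices to turn it into a field selector.
    sel := st.MakeNodeSelector().In("metadata.name", []string{nodeName}).Obj()
    term := &sel.NodeSelectorTerms[0]
    term.MatchExpressions, term.MatchFields = term.MatchFields, term.MatchExpressions

    // Added form: request MatchFields up front.
    sel = st.MakeNodeSelector().In("metadata.name", []string{nodeName}, st.NodeSelectorTypeMatchFields).Obj()
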
@@ -160,10 +156,10 @@ var (
 			Obj()
 
 	allocatedClaimWithWrongTopology = st.FromResourceClaim(allocatedClaim).
-					Allocation(&resourceapi.AllocationResult{Controller: controller, NodeSelector: st.MakeNodeSelector().In("no-such-label", []string{"no-such-value"}).Obj()}).
+					Allocation(&resourceapi.AllocationResult{Controller: controller, NodeSelector: st.MakeNodeSelector().In("no-such-label", []string{"no-such-value"}, st.NodeSelectorTypeMatchExpressions).Obj()}).
 					Obj()
 	allocatedClaimWithGoodTopology = st.FromResourceClaim(allocatedClaim).
-					Allocation(&resourceapi.AllocationResult{Controller: controller, NodeSelector: st.MakeNodeSelector().In("kubernetes.io/hostname", []string{nodeName}).Obj()}).
+					Allocation(&resourceapi.AllocationResult{Controller: controller, NodeSelector: st.MakeNodeSelector().In("kubernetes.io/hostname", []string{nodeName}, st.NodeSelectorTypeMatchExpressions).Obj()}).
 					Obj()
 	otherClaim = st.MakeResourceClaim(controller).
 			Name("not-my-claim").
@@ -1233,7 +1233,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 }
 
 func Test_isSchedulableAfterNodeChange(t *testing.T) {
-	podWithNodeAffinity := st.MakePod().NodeAffinityIn("foo", []string{"bar"})
+	podWithNodeAffinity := st.MakePod().NodeAffinityIn("foo", []string{"bar"}, st.NodeSelectorTypeMatchExpressions)
 	testcases := map[string]struct {
 		args *config.NodeAffinityArgs
 		pod  *v1.Pod
@@ -806,7 +806,7 @@ func TestPreFilterState(t *testing.T) {
 		{
 			name: "NodeAffinityPolicy honored with nodeAffinity",
 			pod: st.MakePod().Name("p").Label("foo", "").
-				NodeAffinityIn("foo", []string{""}).
+				NodeAffinityIn("foo", []string{""}, st.NodeSelectorTypeMatchExpressions).
 				SpreadConstraint(1, "node", v1.DoNotSchedule, barSelector, nil, nil, nil, nil).
 				Obj(),
 			nodes: []*v1.Node{
@@ -845,7 +845,7 @@ func TestPreFilterState(t *testing.T) {
 		{
 			name: "NodeAffinityPolicy ignored with nodeAffinity",
 			pod: st.MakePod().Name("p").Label("foo", "").
-				NodeAffinityIn("foo", []string{""}).
+				NodeAffinityIn("foo", []string{""}, st.NodeSelectorTypeMatchExpressions).
 				SpreadConstraint(1, "node", v1.DoNotSchedule, barSelector, nil, &ignorePolicy, nil, nil).
 				Obj(),
 			nodes: []*v1.Node{
@@ -2708,7 +2708,7 @@ func TestSingleConstraint(t *testing.T) {
 			// the fact that node-a fits can prove the underlying logic works
 			name: "incoming pod has nodeAffinity, pods spread as 2/~1~/~0~/3, hence node-a fits",
 			pod: st.MakePod().Name("p").Label("foo", "").
-				NodeAffinityIn("node", []string{"node-a", "node-y"}).
+				NodeAffinityIn("node", []string{"node-a", "node-y"}, st.NodeSelectorTypeMatchExpressions).
 				SpreadConstraint(1, "node", v1.DoNotSchedule, fooSelector, nil, nil, nil, nil).
 				Obj(),
 			nodes: []*v1.Node{
@@ -2951,7 +2951,7 @@ func TestSingleConstraint(t *testing.T) {
 			// pods spread across node as 1/1/0/~0~
 			name: "NodeAffinityPolicy honored with nodeAffinity",
 			pod: st.MakePod().Name("p").Label("foo", "").
-				NodeAffinityIn("foo", []string{""}).
+				NodeAffinityIn("foo", []string{""}, st.NodeSelectorTypeMatchExpressions).
 				SpreadConstraint(1, "node", v1.DoNotSchedule, fooSelector, nil, nil, nil, nil).
 				Obj(),
 			nodes: []*v1.Node{
@@ -2977,7 +2977,7 @@ func TestSingleConstraint(t *testing.T) {
 			// pods spread across node as 1/1/0/~1~
 			name: "NodeAffinityPolicy ignored with labelSelectors",
 			pod: st.MakePod().Name("p").Label("foo", "").
-				NodeAffinityIn("foo", []string{""}).
+				NodeAffinityIn("foo", []string{""}, st.NodeSelectorTypeMatchExpressions).
 				SpreadConstraint(1, "node", v1.DoNotSchedule, fooSelector, nil, &ignorePolicy, nil, nil).
 				Obj(),
 			nodes: []*v1.Node{
@@ -439,7 +439,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 		{
 			name: "NodeAffinityPolicy honored with nodeAffinity",
 			pod: st.MakePod().Name("p").Label("foo", "").
-				NodeAffinityIn("foo", []string{""}).
+				NodeAffinityIn("foo", []string{""}, st.NodeSelectorTypeMatchExpressions).
 				SpreadConstraint(1, "zone", v1.ScheduleAnyway, barSelector, nil, nil, nil, nil).
 				Obj(),
 			nodes: []*v1.Node{
@@ -473,7 +473,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 		{
 			name: "NodeAffinityPolicy ignored with nodeAffinity",
 			pod: st.MakePod().Name("p").Label("foo", "").
-				NodeAffinityIn("foo", []string{""}).
+				NodeAffinityIn("foo", []string{""}, st.NodeSelectorTypeMatchExpressions).
 				SpreadConstraint(1, "zone", v1.ScheduleAnyway, barSelector, nil, &ignorePolicy, nil, nil).
 				Obj(),
 			nodes: []*v1.Node{
@@ -1164,7 +1164,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
 		{
 			name: "NodeAffinityPolicy honored with nodeAffinity",
 			pod: st.MakePod().Name("p").Label("foo", "").
-				NodeAffinityIn("foo", []string{""}).
+				NodeAffinityIn("foo", []string{""}, st.NodeSelectorTypeMatchExpressions).
 				SpreadConstraint(1, "node", v1.ScheduleAnyway, fooSelector, nil, nil, nil, nil).
 				Obj(),
 			nodes: []*v1.Node{
@@ -1188,7 +1188,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
 		{
 			name: "NodeAffinityPolicy ignored with nodeAffinity",
 			pod: st.MakePod().Name("p").Label("foo", "").
-				NodeAffinityIn("foo", []string{""}).
+				NodeAffinityIn("foo", []string{""}, st.NodeSelectorTypeMatchExpressions).
 				SpreadConstraint(1, "node", v1.ScheduleAnyway, fooSelector, nil, &ignorePolicy, nil, nil).
 				Obj(),
 			nodes: []*v1.Node{
@@ -40,17 +40,28 @@ func MakeNodeSelector() *NodeSelectorWrapper {
 	return &NodeSelectorWrapper{v1.NodeSelector{}}
 }
 
+type NodeSelectorType int
+
+const (
+	NodeSelectorTypeMatchExpressions NodeSelectorType = iota
+	NodeSelectorTypeMatchFields
+)
+
 // In injects a matchExpression (with an operator IN) as a selectorTerm
 // to the inner nodeSelector.
 // NOTE: appended selectorTerms are ORed.
-func (s *NodeSelectorWrapper) In(key string, vals []string) *NodeSelectorWrapper {
+func (s *NodeSelectorWrapper) In(key string, vals []string, t NodeSelectorType) *NodeSelectorWrapper {
 	expression := v1.NodeSelectorRequirement{
 		Key:      key,
 		Operator: v1.NodeSelectorOpIn,
 		Values:   vals,
 	}
 	selectorTerm := v1.NodeSelectorTerm{}
-	selectorTerm.MatchExpressions = append(selectorTerm.MatchExpressions, expression)
+	if t == NodeSelectorTypeMatchExpressions {
+		selectorTerm.MatchExpressions = append(selectorTerm.MatchExpressions, expression)
+	} else {
+		selectorTerm.MatchFields = append(selectorTerm.MatchFields, expression)
+	}
 	s.NodeSelectorTerms = append(s.NodeSelectorTerms, selectorTerm)
 	return s
 }
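
Note: with the enum in place, every call site names which of the two NodeSelectorTerm slices it targets (the zero value of the enum is NodeSelectorTypeMatchExpressions). Both variants in one sketch, with illustrative keys and values:

    // Label-based requirement: lands in MatchExpressions.
    labelSel := MakeNodeSelector().In("zone", []string{"zone-a"}, NodeSelectorTypeMatchExpressions).Obj()

    // Field-based requirement: lands in MatchFields (e.g. metadata.name).
    fieldSel := MakeNodeSelector().In("metadata.name", []string{"node-1"}, NodeSelectorTypeMatchFields).Obj()
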
@@ -320,19 +331,19 @@ func (p *PodWrapper) NodeSelector(m map[string]string) *PodWrapper {
 
 // NodeAffinityIn creates a HARD node affinity (with the operator In)
 // and injects into the inner pod.
-func (p *PodWrapper) NodeAffinityIn(key string, vals []string) *PodWrapper {
+func (p *PodWrapper) NodeAffinityIn(key string, vals []string, t NodeSelectorType) *PodWrapper {
 	if p.Spec.Affinity == nil {
 		p.Spec.Affinity = &v1.Affinity{}
 	}
 	if p.Spec.Affinity.NodeAffinity == nil {
 		p.Spec.Affinity.NodeAffinity = &v1.NodeAffinity{}
 	}
-	nodeSelector := MakeNodeSelector().In(key, vals).Obj()
+	nodeSelector := MakeNodeSelector().In(key, vals, t).Obj()
 	p.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = nodeSelector
 	return p
 }
 
-// NodeAffinityNotIn creates a HARD node affinity (with the operator NotIn)
+// NodeAffinityNotIn creates a HARD node affinity (with MatchExpressions and the operator NotIn)
 // and injects into the inner pod.
 func (p *PodWrapper) NodeAffinityNotIn(key string, vals []string) *PodWrapper {
 	if p.Spec.Affinity == nil {
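
Note: NodeAffinityIn simply forwards the new argument to In, so pod-level call sites choose the slice as well, while NodeAffinityNotIn stays MatchExpressions-only. An illustrative call, mirroring the test updates elsewhere in this commit:

    // A pod that must land on the node named "node-1" (field selector).
    pod := MakePod().Name("p").
    	NodeAffinityIn("metadata.name", []string{"node-1"}, NodeSelectorTypeMatchFields).
    	Obj()
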
@@ -882,7 +893,7 @@ func (p *PersistentVolumeWrapper) HostPathVolumeSource(src *v1.HostPathVolumeSou
 	return p
 }
 
-// NodeAffinityIn creates a HARD node affinity (with the operator In)
+// NodeAffinityIn creates a HARD node affinity (with MatchExpressions and the operator In)
 // and injects into the pv.
 func (p *PersistentVolumeWrapper) NodeAffinityIn(key string, vals []string) *PersistentVolumeWrapper {
 	if p.Spec.NodeAffinity == nil {
@@ -891,7 +902,7 @@ func (p *PersistentVolumeWrapper) NodeAffinityIn(key string, vals []string) *Per
 	if p.Spec.NodeAffinity.Required == nil {
 		p.Spec.NodeAffinity.Required = &v1.NodeSelector{}
 	}
-	nodeSelector := MakeNodeSelector().In(key, vals).Obj()
+	nodeSelector := MakeNodeSelector().In(key, vals, NodeSelectorTypeMatchExpressions).Obj()
 	p.Spec.NodeAffinity.Required.NodeSelectorTerms = append(p.Spec.NodeAffinity.Required.NodeSelectorTerms, nodeSelector.NodeSelectorTerms...)
 	return p
 }
@@ -1400,7 +1400,7 @@ func TestPodTopologySpreadFilter(t *testing.T) {
 		{
 			name: "pod is required to be placed on zone0, so only node-1 fits",
 			incomingPod: st.MakePod().Name("p").Label("foo", "").Container(pause).
-				NodeAffinityIn("zone", []string{"zone-0"}).
+				NodeAffinityIn("zone", []string{"zone-0"}, st.NodeSelectorTypeMatchExpressions).
 				SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj(), nil, nil, nil, nil).
 				Obj(),
 			existingPods: []*v1.Pod{
@@ -1583,7 +1583,7 @@ func TestPodTopologySpreadFilter(t *testing.T) {
 		{
 			name: "NodeAffinityPolicy ignored with nodeAffinity, pods spread across zone as 1/~2~",
 			incomingPod: st.MakePod().Name("p").Label("foo", "").Container(pause).
-				NodeAffinityIn("foo", []string{""}).
+				NodeAffinityIn("foo", []string{""}, st.NodeSelectorTypeMatchExpressions).
 				SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj(), nil, &ignorePolicy, nil, nil).
 				Obj(),
 			existingPods: []*v1.Pod{
@@ -202,6 +202,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
 		// wantRequeuedPods is the map of Pods that are expected to be requeued after triggerFn.
 		wantRequeuedPods sets.Set[string]
 		// enableSchedulingQueueHint indicates which feature gate value(s) the test case should run with.
+		// By default, it's {true, false}
 		enableSchedulingQueueHint []bool
 	}{
 		{
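
Note: leaving enableSchedulingQueueHint unset now means "run under both feature-gate values", which is why the per-case []bool{false, true} lines disappear in the hunks below. A case that genuinely needs one value would pin it, roughly like this hypothetical entry:

    {
    	// Hypothetical case: only meaningful when QueueingHints is enabled,
    	// so restrict the run instead of relying on the {true, false} default.
    	name:                      "some QueueingHints-only behavior",
    	enableSchedulingQueueHint: []bool{true},
    	// ... pods, triggerFn, wantRequeuedPods as usual ...
    },
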
@@ -223,8 +224,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
 				}
 				return nil
 			},
-			wantRequeuedPods:          sets.New("pod2"),
-			enableSchedulingQueueHint: []bool{false, true},
+			wantRequeuedPods: sets.New("pod2"),
 		},
 		{
 			name: "Pod rejected by the PodAffinity plugin is requeued when a new Node is created and turned to ready",
@@ -255,17 +255,16 @@ func TestCoreResourceEnqueue(t *testing.T) {
 				}
 				return nil
 			},
-			wantRequeuedPods:          sets.New("pod2"),
-			enableSchedulingQueueHint: []bool{false, true},
+			wantRequeuedPods: sets.New("pod2"),
 		},
 		{
 			name:         "Pod rejected by the NodeAffinity plugin is requeued when a Node is updated",
 			initialNodes: []*v1.Node{st.MakeNode().Name("fake-node1").Label("group", "a").Obj()},
 			pods: []*v1.Pod{
 				// - Pod1 will be rejected by the NodeAffinity plugin.
-				st.MakePod().Name("pod1").NodeAffinityIn("group", []string{"b"}).Container("image").Obj(),
+				st.MakePod().Name("pod1").NodeAffinityIn("group", []string{"b"}, st.NodeSelectorTypeMatchExpressions).Container("image").Obj(),
 				// - Pod2 will be rejected by the NodeAffinity plugin.
-				st.MakePod().Name("pod2").NodeAffinityIn("group", []string{"c"}).Container("image").Obj(),
+				st.MakePod().Name("pod2").NodeAffinityIn("group", []string{"c"}, st.NodeSelectorTypeMatchExpressions).Container("image").Obj(),
 			},
 			triggerFn: func(testCtx *testutils.TestContext) error {
 				// Trigger a NodeUpdate event to change label.
@@ -276,8 +275,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
 				}
 				return nil
 			},
-			wantRequeuedPods:          sets.New("pod1"),
-			enableSchedulingQueueHint: []bool{false, true},
+			wantRequeuedPods: sets.New("pod1"),
 		},
 		{
 			name: "Pod updated with toleration requeued to activeQ",
@@ -294,8 +292,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
 				}
 				return nil
 			},
-			wantRequeuedPods:          sets.New("pod1"),
-			enableSchedulingQueueHint: []bool{false, true},
+			wantRequeuedPods: sets.New("pod1"),
 		},
 		{
 			name: "Pod got resource scaled down requeued to activeQ",
@@ -312,8 +309,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
 				}
 				return nil
 			},
-			wantRequeuedPods:          sets.New("pod1"),
-			enableSchedulingQueueHint: []bool{false, true},
+			wantRequeuedPods: sets.New("pod1"),
 		},
 		{
 			name: "Updating pod condition doesn't retry scheduling if the Pod was rejected by TaintToleration",
@@ -358,29 +354,29 @@ func TestCoreResourceEnqueue(t *testing.T) {
 			},
 			pods: []*v1.Pod{
 				// - Pod1 will be rejected by the NodeAffinity plugin and NodeResourcesFit plugin.
-				st.MakePod().Label("unscheduled", "plugins").Name("pod1").NodeAffinityIn("metadata.name", []string{"fake-node"}).Req(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Obj(),
+				st.MakePod().Label("unscheduled", "plugins").Name("pod1").NodeAffinityIn("metadata.name", []string{"fake-node"}, st.NodeSelectorTypeMatchFields).Req(map[v1.ResourceName]string{v1.ResourceCPU: "4"}).Obj(),
 			},
 			triggerFn: func(testCtx *testutils.TestContext) error {
-				// Trigger a NodeDeleted event.
-				// It will not requeue pod1 to activeQ,
-				// because both NodeAffinity and NodeResourceFit don't register Node/delete event.
-				err := testCtx.ClientSet.CoreV1().Nodes().Delete(testCtx.Ctx, "fake-node", metav1.DeleteOptions{})
-				if err != nil {
-					return fmt.Errorf("failed to delete fake-node node")
+				// Because of preCheck, we cannot determine which unschedulable plugins are registered for pod1.
+				// So, not the ideal way as the integration test though,
+				// here we check the unschedulable plugins by directly using the SchedulingQueue function for now.
+				// We can change here to assess unschedPlugin by triggering cluster events like other test cases
+				// after QHint is graduated and preCheck is removed.
+				pInfo, ok := testCtx.Scheduler.SchedulingQueue.GetPod("pod1", testCtx.NS.Name)
+				if !ok || pInfo == nil {
+					return fmt.Errorf("pod1 is not found in the scheduling queue")
 				}
 
-				// Trigger a NodeCreated event.
-				// It will requeue pod1 to activeQ, because QHint of NodeAffinity return Queue.
-				// It makes sure PreFilter plugins returned PreFilterResult takes an effect for sure,
-				// because NodeResourceFit QHint returns QueueSkip for this event actually.
-				node := st.MakeNode().Name("fake-node").Label("node", "fake-node").Capacity(map[v1.ResourceName]string{v1.ResourceCPU: "2"}).Obj()
-				if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(testCtx.Ctx, node, metav1.CreateOptions{}); err != nil {
-					return fmt.Errorf("failed to create a new node: %w", err)
+				if pInfo.Pod.Name != "pod1" {
+					return fmt.Errorf("unexpected pod info: %#v", pInfo)
 				}
 
+				if pInfo.UnschedulablePlugins.Difference(sets.New(names.NodeAffinity, names.NodeResourcesFit)).Len() != 0 {
+					return fmt.Errorf("unexpected unschedulable plugin(s) is registered in pod1: %v", pInfo.UnschedulablePlugins.UnsortedList())
+				}
+
 				return nil
 			},
 			wantRequeuedPods: sets.New("pod1"),
 		},
 		{
 			name: "Pods with PodTopologySpread should be requeued when a Pod with matching label is scheduled",
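
Note: this hunk is the heart of the fix. Rather than deleting and recreating the node (events the scheduler's preCheck can swallow before queueing hints ever run), the test now asks the queue directly, via the new GetPod, which plugins rejected pod1. The set assertion reads as "nothing outside {NodeAffinity, NodeResourcesFit} may appear"; restated compactly as a sketch using the same names as the diff:

    // want is the only set of plugins allowed to have rejected pod1.
    want := sets.New(names.NodeAffinity, names.NodeResourcesFit)
    // Difference keeps plugins recorded on the pod that are not in want;
    // an empty result means no unexpected plugin rejected it.
    if extra := pInfo.UnschedulablePlugins.Difference(want); extra.Len() != 0 {
    	return fmt.Errorf("unexpected unschedulable plugin(s): %v", extra.UnsortedList())
    }
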
@@ -401,8 +397,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
 
 				return nil
 			},
-			wantRequeuedPods:          sets.New("pod2"),
-			enableSchedulingQueueHint: []bool{false, true},
+			wantRequeuedPods: sets.New("pod2"),
 		},
 		{
 			name: "Pod rejected by the PodAffinity plugin is requeued when deleting the existed pod's label to make it match the podAntiAffinity",
@@ -515,6 +510,10 @@ func TestCoreResourceEnqueue(t *testing.T) {
 	}
 
 	for _, tt := range tests {
+		if len(tt.enableSchedulingQueueHint) == 0 {
+			tt.enableSchedulingQueueHint = []bool{true, false}
+		}
+
 		for _, featureEnabled := range tt.enableSchedulingQueueHint {
 			t.Run(fmt.Sprintf("%s [SchedulerQueueingHints enabled: %v]", tt.name, featureEnabled), func(t *testing.T) {
 				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SchedulerQueueingHints, featureEnabled)
@@ -617,7 +617,7 @@ func TestNodeEvents(t *testing.T) {
 	// make sure the scheduler received the node add event by creating a pod that only fits node2
 	plugPod := st.MakePod().Name("plug-pod").Namespace(testCtx.NS.Name).Container("pause").
 		Req(map[v1.ResourceName]string{v1.ResourceCPU: "40m"}).
-		NodeAffinityIn("affinity-key", []string{"affinity-value"}).
+		NodeAffinityIn("affinity-key", []string{"affinity-value"}, st.NodeSelectorTypeMatchExpressions).
 		Toleration("taint-key").Obj()
 	plugPod, err = testCtx.ClientSet.CoreV1().Pods(plugPod.Namespace).Create(testCtx.Ctx, plugPod, metav1.CreateOptions{})
 	if err != nil {