Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-07 19:23:40 +00:00
Merge pull request #118608 from utam0k/podtopologyspread-prescore-skip
Return Skip in PodTopologySpread#PreScore under specific conditions
This commit is contained in: c95b16b280
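In the scheduler framework, a PreScore plugin that returns a Status with the Skip code is telling the framework that its Score (and NormalizeScore) would be a no-op for this pod, so the Score phase can be bypassed for that plugin; returning nil still means plain success. Below is a minimal, self-contained sketch of that contract, not the scheduler's actual runner code: the mini Status type here only mirrors the two pieces of real API this commit leans on, framework.NewStatus(framework.Skip) and Status.IsSkip.

package main

import "fmt"

// Code and Status model a small slice of the scheduler framework's status
// API; the real types live in k8s.io/kubernetes/pkg/scheduler/framework.
type Code int

const (
	Success Code = iota
	Skip
)

type Status struct{ code Code }

func NewStatus(code Code) *Status { return &Status{code: code} }

// IsSkip reports whether the plugin asked to be skipped for this pod.
// A nil status means success, matching the framework's convention.
func (s *Status) IsSkip() bool { return s != nil && s.code == Skip }

func main() {
	// A PreScore with nothing to evaluate returns Skip...
	s := NewStatus(Skip)
	if s.IsSkip() {
		// ...and the framework then leaves this plugin out of the
		// Score phase for the current scheduling cycle.
		fmt.Println("Score bypassed for this plugin")
	}
}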
@@ -120,9 +120,9 @@ func (pl *PodTopologySpread) PreScore(
 		return framework.AsStatus(fmt.Errorf("getting all nodes: %w", err))
 	}
 
-	if len(filteredNodes) == 0 || len(allNodes) == 0 {
-		// No nodes to score.
-		return nil
+	if len(allNodes) == 0 {
+		// No need to score.
+		return framework.NewStatus(framework.Skip)
 	}
 
 	state := &preScoreState{
@@ -138,10 +138,9 @@ func (pl *PodTopologySpread) PreScore(
 		return framework.AsStatus(fmt.Errorf("calculating preScoreState: %w", err))
 	}
 
-	// return if incoming pod doesn't have soft topology spread Constraints.
+	// return Skip if incoming pod doesn't have soft topology spread Constraints.
 	if len(state.Constraints) == 0 {
-		cycleState.Write(preScoreStateKey, state)
-		return nil
+		return framework.NewStatus(framework.Skip)
 	}
 
 	// Ignore parsing errors for backwards compatibility.
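Taken together, the two hunks above replace silent-success returns with explicit Skips: an empty node snapshot and a pod without soft topology spread constraints both mean Score would be a no-op, and the old path of writing an empty preScoreState just so Score could find it is gone. A hedged sketch of that plugin-side pattern follows, using the real framework types but an invented plugin and a deliberately simplified constraint check (the real plugin only counts ScheduleAnyway constraints, including defaulted ones):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// skipAwarePlugin is illustrative only; it is not part of this commit.
type skipAwarePlugin struct{}

// PreScore follows the shape this diff converges on: compute whether
// scoring has any work to do, and return Skip when it does not.
func (pl *skipAwarePlugin) PreScore(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
	if len(nodes) == 0 {
		// Nothing to score: bypass Score for this plugin and pod.
		return framework.NewStatus(framework.Skip)
	}
	// Simplified stand-in for "no soft topology spread Constraints".
	if len(pod.Spec.TopologySpreadConstraints) == 0 {
		return framework.NewStatus(framework.Skip)
	}
	// A real plugin would compute and write its preScoreState here.
	return nil
}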
@@ -42,6 +42,74 @@ import (
 
 var podTopologySpreadFunc = frameworkruntime.FactoryAdapter(feature.Features{}, New)
 
+// TestPreScoreSkip tests the cases that TopologySpread#PreScore returns the Skip status.
+func TestPreScoreSkip(t *testing.T) {
+	tests := []struct {
+		name   string
+		pod    *v1.Pod
+		nodes  []*v1.Node
+		objs   []runtime.Object
+		config config.PodTopologySpreadArgs
+	}{
+		{
+			name: "the pod doesn't have soft topology spread Constraints",
+			pod:  st.MakePod().Name("p").Namespace("default").Obj(),
+			config: config.PodTopologySpreadArgs{
+				DefaultingType: config.ListDefaulting,
+			},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-a").Label(v1.LabelHostname, "node-a").Obj(),
+				st.MakeNode().Name("node-b").Label(v1.LabelHostname, "node-b").Obj(),
+			},
+		},
+		{
+			name: "default constraints and a replicaset that doesn't match",
+			pod:  st.MakePod().Name("p").Namespace("default").Label("foo", "bar").Label("baz", "sup").OwnerReference("rs2", appsv1.SchemeGroupVersion.WithKind("ReplicaSet")).Obj(),
+			config: config.PodTopologySpreadArgs{
+				DefaultConstraints: []v1.TopologySpreadConstraint{
+					{
+						MaxSkew:           2,
+						TopologyKey:       "planet",
+						WhenUnsatisfiable: v1.ScheduleAnyway,
+					},
+				},
+				DefaultingType: config.ListDefaulting,
+			},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-a").Label("planet", "mars").Obj(),
+			},
+			objs: []runtime.Object{
+				&appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "rs1"}, Spec: appsv1.ReplicaSetSpec{Selector: st.MakeLabelSelector().Exists("tar").Obj()}},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			_, ctx := ktesting.NewTestContext(t)
+			ctx, cancel := context.WithCancel(ctx)
+			defer cancel()
+			informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(tt.objs...), 0)
+			f, err := frameworkruntime.NewFramework(ctx, nil, nil,
+				frameworkruntime.WithSnapshotSharedLister(cache.NewSnapshot(nil, tt.nodes)),
+				frameworkruntime.WithInformerFactory(informerFactory))
+			if err != nil {
+				t.Fatalf("Failed creating framework runtime: %v", err)
+			}
+			pl, err := New(&tt.config, f, feature.Features{})
+			if err != nil {
+				t.Fatalf("Failed creating plugin: %v", err)
+			}
+			informerFactory.Start(ctx.Done())
+			informerFactory.WaitForCacheSync(ctx.Done())
+			p := pl.(*PodTopologySpread)
+			cs := framework.NewCycleState()
+			if s := p.PreScore(context.Background(), cs, tt.pod, tt.nodes); !s.IsSkip() {
+				t.Fatalf("Expected skip but got %v", s.AsError())
+			}
+		})
+	}
+}
+
 func TestPreScoreStateEmptyNodes(t *testing.T) {
 	tests := []struct {
 		name string
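To exercise just the new cases, something like "go test -run TestPreScoreSkip ./pkg/scheduler/framework/plugins/podtopologyspread/" from the repository root should work; the package path is inferred from the plugin and hunk context rather than stated in this diff.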
@@ -258,29 +326,6 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 				TopologyNormalizingWeight: []float64{topologyNormalizingWeight(1), topologyNormalizingWeight(1)},
 			},
 		},
-		{
-			name: "default constraints and a replicaset that doesn't match",
-			pod:  st.MakePod().Name("p").Namespace("default").Label("foo", "bar").Label("baz", "sup").OwnerReference("rs2", appsv1.SchemeGroupVersion.WithKind("ReplicaSet")).Obj(),
-			config: config.PodTopologySpreadArgs{
-				DefaultConstraints: []v1.TopologySpreadConstraint{
-					{
-						MaxSkew:           2,
-						TopologyKey:       "planet",
-						WhenUnsatisfiable: v1.ScheduleAnyway,
-					},
-				},
-				DefaultingType: config.ListDefaulting,
-			},
-			nodes: []*v1.Node{
-				st.MakeNode().Name("node-a").Label("planet", "mars").Obj(),
-			},
-			objs: []runtime.Object{
-				&appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "rs1"}, Spec: appsv1.ReplicaSetSpec{Selector: st.MakeLabelSelector().Exists("tar").Obj()}},
-			},
-			want: &preScoreState{
-				TopologyPairToPodCounts: make(map[topologyPair]*int64),
-			},
-		},
 		{
 			name: "default constraints and a replicaset, but pod has constraints",
 			pod: st.MakePod().Name("p").Namespace("default").Label("foo", "bar").Label("baz", "sup").
@@ -545,7 +590,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 			informerFactory.WaitForCacheSync(ctx.Done())
 			p := pl.(*PodTopologySpread)
 			cs := framework.NewCycleState()
-			if s := p.PreScore(context.Background(), cs, tt.pod, tt.nodes); !s.IsSuccess() {
+			if s := p.PreScore(ctx, cs, tt.pod, tt.nodes); !s.IsSuccess() {
 				t.Fatal(s.AsError())
 			}
 
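The hunk above is a small cleanup to the pre-existing test: the PreScore call now uses the subtest's cancellable ctx (the hunk context shows it already in scope via ctx.Done()) instead of a fresh context.Background(), tying the call to the subtest's lifetime. The removed test case, meanwhile, is the one that moved into TestPreScoreSkip: with this commit it expects a Skip status rather than an empty preScoreState.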