Merge pull request #118608 from utam0k/podtopologyspread-prescore-skip

Return Skip in PodTopologySpread#PreScore under specific conditions
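Background for reviewers: in the scheduler framework, a PreScore plugin that returns the Skip status is excluded from the Score (and NormalizeScore) phase for that pod, instead of returning Success (nil) and leaving Score to cope with empty state. Below is a minimal sketch of the contract this PR adopts; `ExamplePlugin` and its no-op check are invented for illustration, while `framework.Skip`, `framework.NewStatus`, and the PreScore signature are the real API.

```go
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// ExamplePlugin is a hypothetical PreScore plugin used only to
// illustrate the Skip contract.
type ExamplePlugin struct{}

func (pl *ExamplePlugin) Name() string { return "ExamplePlugin" }

// PreScore opts the plugin out of scoring when it has nothing to do.
func (pl *ExamplePlugin) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
	if len(pod.Spec.TopologySpreadConstraints) == 0 {
		// Nothing to spread: the framework will skip this plugin's Score.
		return framework.NewStatus(framework.Skip)
	}
	// ... compute state, cycleState.Write(...), and return nil (Success) ...
	return nil
}
```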
commit c95b16b280
Kubernetes Prow Robot authored 2023-07-10 09:27:07 -07:00; committed by GitHub
2 changed files with 74 additions and 30 deletions

pkg/scheduler/framework/plugins/podtopologyspread/scoring.go

@@ -120,9 +120,9 @@ func (pl *PodTopologySpread) PreScore(
 		return framework.AsStatus(fmt.Errorf("getting all nodes: %w", err))
 	}
-	if len(filteredNodes) == 0 || len(allNodes) == 0 {
-		// No nodes to score.
-		return nil
+	if len(allNodes) == 0 {
+		// No need to score.
+		return framework.NewStatus(framework.Skip)
 	}
 
 	state := &preScoreState{
@@ -138,10 +138,9 @@ func (pl *PodTopologySpread) PreScore(
 		return framework.AsStatus(fmt.Errorf("calculating preScoreState: %w", err))
 	}
 
-	// return if incoming pod doesn't have soft topology spread Constraints.
+	// return Skip if incoming pod doesn't have soft topology spread Constraints.
 	if len(state.Constraints) == 0 {
-		cycleState.Write(preScoreStateKey, state)
-		return nil
+		return framework.NewStatus(framework.Skip)
 	}
 
 	// Ignore parsing errors for backwards compatibility.

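For context, the sketch below shows roughly what the framework runtime gains from a Skip status. The function body and the skipped-set bookkeeping are assumptions written for illustration, not the actual frameworkruntime code; `framework.PreScorePlugin`, `Status.IsSkip`/`IsSuccess`, `framework.AsStatus`, and `sets.New` are real APIs.

```go
package example

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// runPreScorePlugins is an assumed-shape sketch of the runtime loop.
func runPreScorePlugins(ctx context.Context, plugins []framework.PreScorePlugin, state *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
	skipped := sets.New[string]()
	for _, pl := range plugins {
		status := pl.PreScore(ctx, state, pod, nodes)
		if status.IsSkip() {
			// Remember this plugin; its Score and NormalizeScore are
			// not invoked for the pod.
			skipped.Insert(pl.Name())
			continue
		}
		if !status.IsSuccess() {
			return framework.AsStatus(fmt.Errorf("running PreScore plugin %q: %w", pl.Name(), status.AsError()))
		}
	}
	// ... stash `skipped` where the Score phase can consult it ...
	return nil
}
```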
pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go

@@ -42,6 +42,74 @@ import (
 var podTopologySpreadFunc = frameworkruntime.FactoryAdapter(feature.Features{}, New)
 
+// TestPreScoreSkip tests the cases that TopologySpread#PreScore returns the Skip status.
+func TestPreScoreSkip(t *testing.T) {
+	tests := []struct {
+		name   string
+		pod    *v1.Pod
+		nodes  []*v1.Node
+		objs   []runtime.Object
+		config config.PodTopologySpreadArgs
+	}{
+		{
+			name: "the pod doesn't have soft topology spread Constraints",
+			pod:  st.MakePod().Name("p").Namespace("default").Obj(),
+			config: config.PodTopologySpreadArgs{
+				DefaultingType: config.ListDefaulting,
+			},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-a").Label(v1.LabelHostname, "node-a").Obj(),
+				st.MakeNode().Name("node-b").Label(v1.LabelHostname, "node-b").Obj(),
+			},
+		},
+		{
+			name: "default constraints and a replicaset that doesn't match",
+			pod:  st.MakePod().Name("p").Namespace("default").Label("foo", "bar").Label("baz", "sup").OwnerReference("rs2", appsv1.SchemeGroupVersion.WithKind("ReplicaSet")).Obj(),
+			config: config.PodTopologySpreadArgs{
+				DefaultConstraints: []v1.TopologySpreadConstraint{
+					{
+						MaxSkew:           2,
+						TopologyKey:       "planet",
+						WhenUnsatisfiable: v1.ScheduleAnyway,
+					},
+				},
+				DefaultingType: config.ListDefaulting,
+			},
+			nodes: []*v1.Node{
+				st.MakeNode().Name("node-a").Label("planet", "mars").Obj(),
+			},
+			objs: []runtime.Object{
+				&appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "rs1"}, Spec: appsv1.ReplicaSetSpec{Selector: st.MakeLabelSelector().Exists("tar").Obj()}},
+			},
+		},
+	}
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			_, ctx := ktesting.NewTestContext(t)
+			ctx, cancel := context.WithCancel(ctx)
+			defer cancel()
+			informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(tt.objs...), 0)
+			f, err := frameworkruntime.NewFramework(ctx, nil, nil,
+				frameworkruntime.WithSnapshotSharedLister(cache.NewSnapshot(nil, tt.nodes)),
+				frameworkruntime.WithInformerFactory(informerFactory))
+			if err != nil {
+				t.Fatalf("Failed creating framework runtime: %v", err)
+			}
+			pl, err := New(&tt.config, f, feature.Features{})
+			if err != nil {
+				t.Fatalf("Failed creating plugin: %v", err)
+			}
+			informerFactory.Start(ctx.Done())
+			informerFactory.WaitForCacheSync(ctx.Done())
+			p := pl.(*PodTopologySpread)
+			cs := framework.NewCycleState()
+			if s := p.PreScore(context.Background(), cs, tt.pod, tt.nodes); !s.IsSkip() {
+				t.Fatalf("Expected skip but got %v", s.AsError())
+			}
+		})
+	}
+}
+
 func TestPreScoreStateEmptyNodes(t *testing.T) {
 	tests := []struct {
 		name string
@@ -258,29 +326,6 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 				TopologyNormalizingWeight: []float64{topologyNormalizingWeight(1), topologyNormalizingWeight(1)},
 			},
 		},
-		{
-			name: "default constraints and a replicaset that doesn't match",
-			pod:  st.MakePod().Name("p").Namespace("default").Label("foo", "bar").Label("baz", "sup").OwnerReference("rs2", appsv1.SchemeGroupVersion.WithKind("ReplicaSet")).Obj(),
-			config: config.PodTopologySpreadArgs{
-				DefaultConstraints: []v1.TopologySpreadConstraint{
-					{
-						MaxSkew:           2,
-						TopologyKey:       "planet",
-						WhenUnsatisfiable: v1.ScheduleAnyway,
-					},
-				},
-				DefaultingType: config.ListDefaulting,
-			},
-			nodes: []*v1.Node{
-				st.MakeNode().Name("node-a").Label("planet", "mars").Obj(),
-			},
-			objs: []runtime.Object{
-				&appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "rs1"}, Spec: appsv1.ReplicaSetSpec{Selector: st.MakeLabelSelector().Exists("tar").Obj()}},
-			},
-			want: &preScoreState{
-				TopologyPairToPodCounts: make(map[topologyPair]*int64),
-			},
-		},
 		{
 			name: "default constraints and a replicaset, but pod has constraints",
 			pod: st.MakePod().Name("p").Namespace("default").Label("foo", "bar").Label("baz", "sup").
@@ -545,7 +590,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
 			informerFactory.WaitForCacheSync(ctx.Done())
 			p := pl.(*PodTopologySpread)
 			cs := framework.NewCycleState()
-			if s := p.PreScore(context.Background(), cs, tt.pod, tt.nodes); !s.IsSuccess() {
+			if s := p.PreScore(ctx, cs, tt.pod, tt.nodes); !s.IsSuccess() {
 				t.Fatal(s.AsError())
 			}
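A consequence worth noting: because the framework never calls Score after PreScore returns Skip, Score can treat a missing preScoreState as an internal error rather than a normal empty case. Below is a hedged sketch of that read path, meant to be read against the file above rather than compiled standalone; `getPreScoreState` matches the helper this plugin already uses (to the best of my reading), while `scoreForNode` is invented for the sketch.

```go
// Sketch: Score can assume PreScore ran and wrote its state, since a
// Skip from PreScore would have kept Score from being called at all.
func (pl *PodTopologySpread) Score(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
	s, err := getPreScoreState(cycleState)
	if err != nil {
		// With the Skip contract this is a real bug, not "nothing to do".
		return 0, framework.AsStatus(err)
	}
	return scoreForNode(s, nodeName), nil // scoreForNode is hypothetical
}
```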