diff --git a/pkg/scheduler/algorithmprovider/registry.go b/pkg/scheduler/algorithmprovider/registry.go
index cd38604f9f2..8d3e978aa01 100644
--- a/pkg/scheduler/algorithmprovider/registry.go
+++ b/pkg/scheduler/algorithmprovider/registry.go
@@ -169,7 +169,9 @@ func applyFeatureGates(config *Config) {
 	// Prioritizes nodes that satisfy pod's resource limits
 	if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) {
 		klog.Infof("Registering resourcelimits priority function")
-		s := schedulerapi.Plugin{Name: noderesources.ResourceLimitsName, Weight: 1}
+		s := schedulerapi.Plugin{Name: noderesources.ResourceLimitsName}
+		config.FrameworkPlugins.PostFilter.Enabled = append(config.FrameworkPlugins.PostFilter.Enabled, s)
+		s = schedulerapi.Plugin{Name: noderesources.ResourceLimitsName, Weight: 1}
 		config.FrameworkPlugins.Score.Enabled = append(config.FrameworkPlugins.Score.Enabled, s)
 	}
 }
diff --git a/pkg/scheduler/algorithmprovider/registry_test.go b/pkg/scheduler/algorithmprovider/registry_test.go
index 46be6b3d5a3..281a6f33ed4 100644
--- a/pkg/scheduler/algorithmprovider/registry_test.go
+++ b/pkg/scheduler/algorithmprovider/registry_test.go
@@ -208,6 +208,7 @@ func TestApplyFeatureGates(t *testing.T) {
 					{Name: interpodaffinity.Name},
 					{Name: tainttoleration.Name},
 					{Name: podtopologyspread.Name},
+					{Name: noderesources.ResourceLimitsName},
 				},
 			},
 			Score: &schedulerapi.PluginSet{
diff --git a/pkg/scheduler/framework/plugins/noderesources/resource_limits.go b/pkg/scheduler/framework/plugins/noderesources/resource_limits.go
index 435c659410a..e2136275609 100644
--- a/pkg/scheduler/framework/plugins/noderesources/resource_limits.go
+++ b/pkg/scheduler/framework/plugins/noderesources/resource_limits.go
@@ -22,9 +22,7 @@ import (
 	"fmt"
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/klog"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
@@ -84,11 +82,10 @@ func (rl *ResourceLimits) PostFilter(
 	return nil
 }
 
-func getPodResource(cycleState *framework.CycleState) (*nodeinfo.Resource, error) {
+func getPodResource(cycleState *framework.CycleState) (*schedulernodeinfo.Resource, error) {
 	c, err := cycleState.Read(postFilterStateKey)
 	if err != nil {
-		klog.V(5).Infof("Error reading %q from cycleState: %v", postFilterStateKey, err)
-		return nil, nil
+		return nil, fmt.Errorf("Error reading %q from cycleState: %v", postFilterStateKey, err)
 	}
 
 	s, ok := c.(*postFilterState)
diff --git a/pkg/scheduler/framework/plugins/noderesources/resource_limits_test.go b/pkg/scheduler/framework/plugins/noderesources/resource_limits_test.go
index 778fa3b192b..894308ca4e9 100644
--- a/pkg/scheduler/framework/plugins/noderesources/resource_limits_test.go
+++ b/pkg/scheduler/framework/plugins/noderesources/resource_limits_test.go
@@ -18,7 +18,6 @@ package noderesources
 
 import (
 	"context"
-	"reflect"
 	"testing"
 
 	v1 "k8s.io/api/core/v1"
@@ -100,9 +99,10 @@ func TestResourceLimits(t *testing.T) {
 	tests := []struct {
 		// input pod
-		pod          *v1.Pod
-		nodes        []*v1.Node
-		expectedList framework.NodeScoreList
-		name         string
+		pod            *v1.Pod
+		nodes          []*v1.Node
+		expectedList   framework.NodeScoreList
+		name           string
+		skipPostFilter bool
 	}{
 		{
 			pod: &v1.Pod{Spec: noResources},
@@ -135,6 +135,13 @@ func TestResourceLimits(t *testing.T) {
 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}},
 			name:         "node does not advertise its allocatables",
 		},
+		{
+			pod:            &v1.Pod{Spec: cpuAndMemory},
+			nodes:          []*v1.Node{makeNode("machine1", 0, 0)},
+			expectedList:   []framework.NodeScore{{Name: "machine1", Score: 0}},
+			skipPostFilter: true,
+			name:           "postFilter skipped",
+		},
 	}
 
 	for _, test := range tests {
@@ -141,19 +148,26 @@ func TestResourceLimits(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			p := &ResourceLimits{handle: fh}
-			state := framework.NewCycleState()
-			status := p.PostFilter(context.Background(), state, test.pod, test.nodes, nil)
-			if !status.IsSuccess() {
-				t.Errorf("unexpected error: %v", status)
-			}
 			for i := range test.nodes {
-				hostResult, err := p.Score(context.Background(), state, test.pod, test.nodes[i].Name)
-				if err != nil {
+				state := framework.NewCycleState()
+				if !test.skipPostFilter {
+					status := p.PostFilter(context.Background(), state, test.pod, test.nodes, nil)
+					if !status.IsSuccess() {
+						t.Errorf("unexpected error: %v", status)
+					}
+				}
+
+				gotScore, err := p.Score(context.Background(), state, test.pod, test.nodes[i].Name)
+				if test.skipPostFilter {
+					if err == nil {
+						t.Errorf("expected error")
+					}
+				} else if err != nil {
 					t.Errorf("unexpected error: %v", err)
 				}
-				if !reflect.DeepEqual(test.expectedList[i].Score, hostResult) {
-					t.Errorf("expected %#v, got %#v", test.expectedList[i].Score, hostResult)
+				if test.expectedList[i].Score != gotScore {
+					t.Errorf("gotScore %v, wantScore %v", gotScore, test.expectedList[i].Score)
 				}
 			}
 		})