diff --git a/cmd/kube-scheduler/app/server_test.go b/cmd/kube-scheduler/app/server_test.go index 1024bc501ea..c170ce481f6 100644 --- a/cmd/kube-scheduler/app/server_test.go +++ b/cmd/kube-scheduler/app/server_test.go @@ -402,6 +402,7 @@ leaderElection: {Name: "NodePorts"}, } plugins.PreScore.Enabled = []config.Plugin{ + {Name: "NodeResourcesFit"}, {Name: "InterPodAffinity"}, {Name: "TaintToleration"}, } @@ -431,6 +432,7 @@ leaderElection: {Name: "NodePorts"}, } plugins.PreScore.Enabled = []config.Plugin{ + {Name: "NodeResourcesFit"}, {Name: "InterPodAffinity"}, {Name: "TaintToleration"}, } diff --git a/pkg/scheduler/apis/config/testing/defaults/defaults.go b/pkg/scheduler/apis/config/testing/defaults/defaults.go index c092e11c0e7..112f7cde0f7 100644 --- a/pkg/scheduler/apis/config/testing/defaults/defaults.go +++ b/pkg/scheduler/apis/config/testing/defaults/defaults.go @@ -75,6 +75,8 @@ var PluginsV1beta2 = &config.Plugins{ {Name: names.PodTopologySpread}, {Name: names.TaintToleration}, {Name: names.NodeAffinity}, + {Name: names.NodeResourcesFit}, + {Name: names.NodeResourcesBalancedAllocation}, }, }, Score: config.PluginSet{ @@ -238,8 +240,10 @@ var ExpandedPluginsV1beta3 = &config.Plugins{ Enabled: []config.Plugin{ {Name: names.TaintToleration}, {Name: names.NodeAffinity}, + {Name: names.NodeResourcesFit}, {Name: names.PodTopologySpread}, {Name: names.InterPodAffinity}, + {Name: names.NodeResourcesBalancedAllocation}, }, }, Score: config.PluginSet{ @@ -415,8 +419,10 @@ var ExpandedPluginsV1 = &config.Plugins{ Enabled: []config.Plugin{ {Name: names.TaintToleration}, {Name: names.NodeAffinity}, + {Name: names.NodeResourcesFit}, {Name: names.PodTopologySpread}, {Name: names.InterPodAffinity}, + {Name: names.NodeResourcesBalancedAllocation}, }, }, Score: config.PluginSet{ diff --git a/pkg/scheduler/apis/config/v1beta2/default_plugins.go b/pkg/scheduler/apis/config/v1beta2/default_plugins.go index 70758fab0b9..6bd76b704ae 100644 --- 
a/pkg/scheduler/apis/config/v1beta2/default_plugins.go +++ b/pkg/scheduler/apis/config/v1beta2/default_plugins.go @@ -76,6 +76,8 @@ func getDefaultPlugins() *v1beta2.Plugins { {Name: names.PodTopologySpread}, {Name: names.TaintToleration}, {Name: names.NodeAffinity}, + {Name: names.NodeResourcesFit}, + {Name: names.NodeResourcesBalancedAllocation}, }, }, Score: v1beta2.PluginSet{ diff --git a/pkg/scheduler/apis/config/v1beta2/default_plugins_test.go b/pkg/scheduler/apis/config/v1beta2/default_plugins_test.go index a1bd23b7ded..e7cf21003a5 100644 --- a/pkg/scheduler/apis/config/v1beta2/default_plugins_test.go +++ b/pkg/scheduler/apis/config/v1beta2/default_plugins_test.go @@ -88,6 +88,8 @@ func TestApplyFeatureGates(t *testing.T) { {Name: names.PodTopologySpread}, {Name: names.TaintToleration}, {Name: names.NodeAffinity}, + {Name: names.NodeResourcesFit}, + {Name: names.NodeResourcesBalancedAllocation}, }, }, Score: v1beta2.PluginSet{ @@ -176,6 +178,8 @@ func TestApplyFeatureGates(t *testing.T) { {Name: names.PodTopologySpread}, {Name: names.TaintToleration}, {Name: names.NodeAffinity}, + {Name: names.NodeResourcesFit}, + {Name: names.NodeResourcesBalancedAllocation}, }, }, Score: v1beta2.PluginSet{ diff --git a/pkg/scheduler/apis/config/v1beta2/defaults_test.go b/pkg/scheduler/apis/config/v1beta2/defaults_test.go index 1c0caba5d26..9abf5ecbebb 100644 --- a/pkg/scheduler/apis/config/v1beta2/defaults_test.go +++ b/pkg/scheduler/apis/config/v1beta2/defaults_test.go @@ -378,6 +378,8 @@ func TestSchedulerDefaults(t *testing.T) { {Name: names.PodTopologySpread}, {Name: names.TaintToleration}, {Name: names.NodeAffinity}, + {Name: names.NodeResourcesFit}, + {Name: names.NodeResourcesBalancedAllocation}, }, }, Score: v1beta2.PluginSet{ diff --git a/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go b/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go index 4c0438bbc3d..ef6b8723b65 100644 --- 
a/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go +++ b/pkg/scheduler/framework/plugins/noderesources/balanced_allocation.go @@ -37,10 +37,51 @@ type BalancedAllocation struct { resourceAllocationScorer } +var _ framework.PreScorePlugin = &BalancedAllocation{} var _ framework.ScorePlugin = &BalancedAllocation{} // BalancedAllocationName is the name of the plugin used in the plugin registry and configurations. -const BalancedAllocationName = names.NodeResourcesBalancedAllocation +const ( + BalancedAllocationName = names.NodeResourcesBalancedAllocation + + // balancedAllocationPreScoreStateKey is the key in CycleState to NodeResourcesBalancedAllocation pre-computed data for Scoring. + balancedAllocationPreScoreStateKey = "PreScore" + BalancedAllocationName +) + +// balancedAllocationPreScoreState computed at PreScore and used at Score. +type balancedAllocationPreScoreState struct { + // podRequests have the same order of the resources defined in NodeResourcesFitArgs.Resources, + // same for other place we store a list like that. + podRequests []int64 +} + +// Clone implements the mandatory Clone interface. We don't really copy the data since +// there is no need for that. +func (s *balancedAllocationPreScoreState) Clone() framework.StateData { + return s +} + +// PreScore calculates incoming pod's resource requests and writes them to the cycle state used. 
+func (ba *BalancedAllocation) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status { + state := &balancedAllocationPreScoreState{ + podRequests: ba.calculatePodResourceRequestList(pod, ba.resources), + } + cycleState.Write(balancedAllocationPreScoreStateKey, state) + return nil +} + +func getBalancedAllocationPreScoreState(cycleState *framework.CycleState) (*balancedAllocationPreScoreState, error) { + c, err := cycleState.Read(balancedAllocationPreScoreStateKey) + if err != nil { + return nil, fmt.Errorf("reading %q from cycleState: %w", balancedAllocationPreScoreStateKey, err) + } + + s, ok := c.(*balancedAllocationPreScoreState) + if !ok { + return nil, fmt.Errorf("invalid PreScore state, got type %T", c) + } + return s, nil +} // Name returns name of the plugin. It is used in logs, etc. func (ba *BalancedAllocation) Name() string { @@ -54,12 +95,17 @@ func (ba *BalancedAllocation) Score(ctx context.Context, state *framework.CycleS return 0, framework.AsStatus(fmt.Errorf("getting node %q from Snapshot: %w", nodeName, err)) } + s, err := getBalancedAllocationPreScoreState(state) + if err != nil { + s = &balancedAllocationPreScoreState{podRequests: ba.calculatePodResourceRequestList(pod, ba.resources)} + } + // ba.score favors nodes with balanced resource usage rate. // It calculates the standard deviation for those resources and prioritizes the node based on how close the usage of those resources is to each other. // Detail: score = (1 - std) * MaxNodeScore, where std is calculated by the root square of Σ((fraction(i)-mean)^2)/len(resources) // The algorithm is partly inspired by: // "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization" - return ba.score(pod, nodeInfo) + return ba.score(pod, nodeInfo, s.podRequests) } // ScoreExtensions of the Score plugin. 
diff --git a/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go b/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go index 97a22283855..3d43b92410e 100644 --- a/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go @@ -123,6 +123,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) { expectedList framework.NodeScoreList name string args config.NodeResourcesBalancedAllocationArgs + runPreScore bool }{ { // Node1 scores (remaining resources) on 0-MaxNodeScore scale @@ -138,6 +139,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) { expectedList: []framework.NodeScore{{Name: "node1", Score: framework.MaxNodeScore}, {Name: "node2", Score: framework.MaxNodeScore}}, name: "nothing scheduled, nothing requested", args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + runPreScore: true, }, { // Node1 scores on 0-MaxNodeScore scale @@ -155,6 +157,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) { expectedList: []framework.NodeScore{{Name: "node1", Score: 87}, {Name: "node2", Score: framework.MaxNodeScore}}, name: "nothing scheduled, resources requested, differently sized nodes", args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + runPreScore: true, }, { // Node1 scores on 0-MaxNodeScore scale @@ -177,7 +180,8 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) { st.MakePod().Node("node2").Labels(labels1).Obj(), st.MakePod().Node("node2").Labels(labels1).Obj(), }, - args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + runPreScore: true, }, { // Node1 scores on 0-MaxNodeScore scale @@ -198,7 +202,8 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) { 
st.MakePod().Node("node1").Obj(), st.MakePod().Node("node1").Obj(), }, - args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + runPreScore: true, }, { // Node1 scores on 0-MaxNodeScore scale @@ -221,7 +226,8 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) { {Spec: cpuOnly2, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, {Spec: cpuAndMemory, ObjectMeta: metav1.ObjectMeta{Labels: labels1}}, }, - args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + runPreScore: true, }, { // Node1 scores on 0-MaxNodeScore scale @@ -242,7 +248,8 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) { {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, - args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + runPreScore: true, }, { // Node1 scores on 0-MaxNodeScore scale @@ -263,7 +270,8 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) { {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, - args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + runPreScore: true, }, { // Node1 scores on 0-MaxNodeScore scale @@ -285,7 +293,8 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) { {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, - args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + runPreScore: true, }, { pod: st.MakePod().Obj(), @@ -296,7 +305,8 @@ func 
TestNodeResourcesBalancedAllocation(t *testing.T) { {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, - args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + runPreScore: true, }, // Node1 scores on 0-MaxNodeScore scale // CPU Fraction: 3000 / 3500 = 85.71% @@ -327,6 +337,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) { {Name: string(v1.ResourceMemory), Weight: 1}, {Name: "nvidia.com/gpu", Weight: 1}, }}, + runPreScore: true, }, // Only one node (node1) has the scalar resource, pod doesn't request the scalar resource and the scalar resource should be skipped for consideration. // Node1: std = 0, score = 100 @@ -344,6 +355,29 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) { {Name: string(v1.ResourceCPU), Weight: 1}, {Name: "nvidia.com/gpu", Weight: 1}, }}, + runPreScore: true, + }, + { + // Node1 scores on 0-MaxNodeScore scale + // CPU Fraction: 6000 / 10000 = 60% + // Memory Fraction: 5000 / 20000 = 25% + // Node1 std: (0.6 - 0.25) / 2 = 0.175 + // Node1 Score: (1 - 0.175)*MaxNodeScore = 82 + // Node2 scores on 0-MaxNodeScore scale + // CPU Fraction: 6000 / 10000 = 60% + // Memory Fraction: 10000 / 20000 = 50% + // Node2 std: (0.6 - 0.5) / 2 = 0.05 + // Node2 Score: (1 - 0.05)*MaxNodeScore = 95 + pod: &v1.Pod{Spec: cpuAndMemory}, + nodes: []*v1.Node{makeNode("node1", 10000, 20000, nil), makeNode("node2", 10000, 20000, nil)}, + expectedList: []framework.NodeScore{{Name: "node1", Score: 82}, {Name: "node2", Score: 95}}, + name: "resources requested, pods scheduled with resources if PreScore not called", + pods: []*v1.Pod{ + {Spec: cpuOnly}, + {Spec: cpuAndMemory}, + }, + args: config.NodeResourcesBalancedAllocationArgs{Resources: defaultResourceBalancedAllocationSet}, + runPreScore: false, }, } @@ -354,10 +388,17 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) { defer cancel() fh, _ := 
runtime.NewFramework(nil, nil, ctx.Done(), runtime.WithSnapshotSharedLister(snapshot)) p, _ := NewBalancedAllocation(&test.args, fh, feature.Features{}) + state := framework.NewCycleState() for i := range test.nodes { - hostResult, err := p.(framework.ScorePlugin).Score(ctx, nil, test.pod, test.nodes[i].Name) - if err != nil { - t.Errorf("unexpected error: %v", err) + if test.runPreScore { + status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, test.nodes) + if !status.IsSuccess() { + t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status) + } + } + hostResult, status := p.(framework.ScorePlugin).Score(ctx, state, test.pod, test.nodes[i].Name) + if !status.IsSuccess() { + t.Errorf("Score is expected to return success, but didn't. Got status: %v", status) } if !reflect.DeepEqual(test.expectedList[i].Score, hostResult) { t.Errorf("got score %v for host %v, expected %v", hostResult, test.nodes[i].Name, test.expectedList[i].Score) diff --git a/pkg/scheduler/framework/plugins/noderesources/fit.go b/pkg/scheduler/framework/plugins/noderesources/fit.go index 0f309294acd..81edad6e277 100644 --- a/pkg/scheduler/framework/plugins/noderesources/fit.go +++ b/pkg/scheduler/framework/plugins/noderesources/fit.go @@ -37,6 +37,7 @@ import ( var _ framework.PreFilterPlugin = &Fit{} var _ framework.FilterPlugin = &Fit{} var _ framework.EnqueueExtensions = &Fit{} +var _ framework.PreScorePlugin = &Fit{} var _ framework.ScorePlugin = &Fit{} const ( @@ -46,6 +47,9 @@ const ( // preFilterStateKey is the key in CycleState to NodeResourcesFit pre-computed data. // Using the name of the plugin will likely help us avoid collisions with other plugins. preFilterStateKey = "PreFilter" + Name + + // preScoreStateKey is the key in CycleState to NodeResourcesFit pre-computed data for Scoring. 
+ preScoreStateKey = "PreScore" + Name ) // nodeResourceStrategyTypeMap maps strategy to scorer implementation @@ -100,6 +104,41 @@ func (s *preFilterState) Clone() framework.StateData { return s } +// preScoreState computed at PreScore and used at Score. +type preScoreState struct { + // podRequests have the same order as the resources defined in NodeResourcesBalancedAllocationArgs.Resources, + // same for other place we store a list like that. + podRequests []int64 +} + +// Clone implements the mandatory Clone interface. We don't really copy the data since +// there is no need for that. +func (s *preScoreState) Clone() framework.StateData { + return s +} + +// PreScore calculates incoming pod's resource requests and writes them to the cycle state used. +func (f *Fit) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status { + state := &preScoreState{ + podRequests: f.calculatePodResourceRequestList(pod, f.resources), + } + cycleState.Write(preScoreStateKey, state) + return nil +} + +func getPreScoreState(cycleState *framework.CycleState) (*preScoreState, error) { + c, err := cycleState.Read(preScoreStateKey) + if err != nil { + return nil, fmt.Errorf("reading %q from cycleState: %w", preScoreStateKey, err) + } + + s, ok := c.(*preScoreState) + if !ok { + return nil, fmt.Errorf("invalid PreScore state, got type %T", c) + } + return s, nil +} + // Name returns name of the plugin. It is used in logs, etc. 
func (f *Fit) Name() string { return Name @@ -335,5 +374,12 @@ func (f *Fit) Score(ctx context.Context, state *framework.CycleState, pod *v1.Po return 0, framework.AsStatus(fmt.Errorf("getting node %q from Snapshot: %w", nodeName, err)) } - return f.score(pod, nodeInfo) + s, err := getPreScoreState(state) + if err != nil { + s = &preScoreState{ + podRequests: f.calculatePodResourceRequestList(pod, f.resources), + } + } + + return f.score(pod, nodeInfo, s.podRequests) } diff --git a/pkg/scheduler/framework/plugins/noderesources/fit_test.go b/pkg/scheduler/framework/plugins/noderesources/fit_test.go index 819e1bfe07d..644dfb37da3 100644 --- a/pkg/scheduler/framework/plugins/noderesources/fit_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/fit_test.go @@ -647,6 +647,7 @@ func TestFitScore(t *testing.T) { existingPods []*v1.Pod expectedPriorities framework.NodeScoreList nodeResourcesFitArgs config.NodeResourcesFitArgs + runPreScore bool }{ { name: "test case for ScoringStrategy RequestedToCapacityRatio case1", @@ -674,6 +675,7 @@ func TestFitScore(t *testing.T) { }, }, }, + runPreScore: true, }, { name: "test case for ScoringStrategy RequestedToCapacityRatio case2", @@ -701,6 +703,7 @@ func TestFitScore(t *testing.T) { }, }, }, + runPreScore: true, }, { name: "test case for ScoringStrategy MostAllocated", @@ -722,6 +725,7 @@ func TestFitScore(t *testing.T) { Resources: defaultResources, }, }, + runPreScore: true, }, { name: "test case for ScoringStrategy LeastAllocated", @@ -743,6 +747,79 @@ func TestFitScore(t *testing.T) { Resources: defaultResources, }, }, + runPreScore: true, + }, + { + name: "test case for ScoringStrategy RequestedToCapacityRatio case1 if PreScore is not called", + requestedPod: st.MakePod(). + Req(map[v1.ResourceName]string{"cpu": "3000", "memory": "5000"}). 
+ Obj(), + nodes: []*v1.Node{ + st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(), + st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(), + }, + existingPods: []*v1.Pod{ + st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(), + st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(), + }, + expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 10}, {Name: "node2", Score: 32}}, + nodeResourcesFitArgs: config.NodeResourcesFitArgs{ + ScoringStrategy: &config.ScoringStrategy{ + Type: config.RequestedToCapacityRatio, + Resources: defaultResources, + RequestedToCapacityRatio: &config.RequestedToCapacityRatioParam{ + Shape: []config.UtilizationShapePoint{ + {Utilization: 0, Score: 10}, + {Utilization: 100, Score: 0}, + }, + }, + }, + }, + runPreScore: false, + }, + { + name: "test case for ScoringStrategy MostAllocated if PreScore is not called", + requestedPod: st.MakePod(). + Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}). 
+ Obj(), + nodes: []*v1.Node{ + st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(), + st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(), + }, + existingPods: []*v1.Pod{ + st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(), + st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(), + }, + expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 67}, {Name: "node2", Score: 36}}, + nodeResourcesFitArgs: config.NodeResourcesFitArgs{ + ScoringStrategy: &config.ScoringStrategy{ + Type: config.MostAllocated, + Resources: defaultResources, + }, + }, + runPreScore: false, + }, + { + name: "test case for ScoringStrategy LeastAllocated if PreScore is not called", + requestedPod: st.MakePod(). + Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}). + Obj(), + nodes: []*v1.Node{ + st.MakeNode().Name("node1").Capacity(map[v1.ResourceName]string{"cpu": "4000", "memory": "10000"}).Obj(), + st.MakeNode().Name("node2").Capacity(map[v1.ResourceName]string{"cpu": "6000", "memory": "10000"}).Obj(), + }, + existingPods: []*v1.Pod{ + st.MakePod().Node("node1").Req(map[v1.ResourceName]string{"cpu": "2000", "memory": "4000"}).Obj(), + st.MakePod().Node("node2").Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj(), + }, + expectedPriorities: []framework.NodeScore{{Name: "node1", Score: 32}, {Name: "node2", Score: 63}}, + nodeResourcesFitArgs: config.NodeResourcesFitArgs{ + ScoringStrategy: &config.ScoringStrategy{ + Type: config.LeastAllocated, + Resources: defaultResources, + }, + }, + runPreScore: false, }, } @@ -762,9 +839,15 @@ func TestFitScore(t *testing.T) { var gotPriorities framework.NodeScoreList for _, n := range test.nodes { + if test.runPreScore { + status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, test.nodes) + if 
!status.IsSuccess() { + t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status) + } + } score, status := p.(framework.ScorePlugin).Score(ctx, state, test.requestedPod, n.Name) if !status.IsSuccess() { - t.Errorf("unexpected error: %v", status) + t.Errorf("Score is expected to return success, but didn't. Got status: %v", status) } gotPriorities = append(gotPriorities, framework.NodeScore{Name: n.Name, Score: score}) } @@ -879,6 +962,7 @@ func BenchmarkTestFitScore(b *testing.B) { requestedPod := st.MakePod().Req(map[v1.ResourceName]string{"cpu": "1000", "memory": "2000"}).Obj() for i := 0; i < b.N; i++ { + _, status := p.Score(context.Background(), state, requestedPod, nodes[0].Name) if !status.IsSuccess() { b.Errorf("unexpected status: %v", status) diff --git a/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go b/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go index 43f52bf6004..9c8558d01e5 100644 --- a/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go @@ -407,11 +407,16 @@ func TestLeastAllocatedScoringStrategy(t *testing.T) { return } + status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, test.nodes) + if !status.IsSuccess() { + t.Errorf("PreScore is expected to return success, but didn't. 
Got status: %v", status) + } + var gotScores framework.NodeScoreList for _, n := range test.nodes { score, status := p.(framework.ScorePlugin).Score(ctx, state, test.requestedPod, n.Name) if status.Code() != test.wantStatusCode { - t.Errorf("unexpected status code, want: %v, got: %v", test.wantStatusCode, status) + t.Errorf("unexpected status code, want: %v, got: %v", test.wantStatusCode, status.Code()) } gotScores = append(gotScores, framework.NodeScore{Name: n.Name, Score: score}) } diff --git a/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go b/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go index 28c7cd49011..1b9d3b784e2 100644 --- a/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go @@ -364,6 +364,11 @@ func TestMostAllocatedScoringStrategy(t *testing.T) { return } + status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, test.nodes) + if !status.IsSuccess() { + t.Errorf("PreScore is expected to return success, but didn't. 
Got status: %v", status) + } + var gotScores framework.NodeScoreList for _, n := range test.nodes { score, status := p.(framework.ScorePlugin).Score(ctx, state, test.requestedPod, n.Name) diff --git a/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio_test.go b/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio_test.go index 1d8fb08fca9..2a84d8a91a5 100644 --- a/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio_test.go @@ -129,9 +129,13 @@ func TestRequestedToCapacityRatioScoringStrategy(t *testing.T) { var gotScores framework.NodeScoreList for _, n := range test.nodes { + status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.requestedPod, test.nodes) + if !status.IsSuccess() { + t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status) + } score, status := p.(framework.ScorePlugin).Score(ctx, state, test.requestedPod, n.Name) if !status.IsSuccess() { - t.Errorf("unexpected error: %v", status) + t.Errorf("Score is expected to return success, but didn't. Got status: %v", status) } gotScores = append(gotScores, framework.NodeScore{Name: n.Name, Score: score}) } @@ -321,9 +325,13 @@ func TestResourceBinPackingSingleExtended(t *testing.T) { var gotList framework.NodeScoreList for _, n := range test.nodes { + status := p.(framework.PreScorePlugin).PreScore(context.Background(), state, test.pod, test.nodes) + if !status.IsSuccess() { + t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status) + } score, status := p.(framework.ScorePlugin).Score(context.Background(), state, test.pod, n.Name) if !status.IsSuccess() { - t.Errorf("unexpected error: %v", status) + t.Errorf("Score is expected to return success, but didn't. 
Got status: %v", status) } gotList = append(gotList, framework.NodeScore{Name: n.Name, Score: score}) } @@ -542,11 +550,16 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) { t.Fatalf("unexpected error: %v", err) } + status := p.(framework.PreScorePlugin).PreScore(context.Background(), state, test.pod, test.nodes) + if !status.IsSuccess() { + t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status) + } + var gotScores framework.NodeScoreList for _, n := range test.nodes { score, status := p.(framework.ScorePlugin).Score(context.Background(), state, test.pod, n.Name) if !status.IsSuccess() { - t.Errorf("unexpected error: %v", status) + t.Errorf("Score is expected to return success, but didn't. Got status: %v", status) } gotScores = append(gotScores, framework.NodeScore{Name: n.Name, Score: score}) } diff --git a/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go b/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go index eaa8a86dec1..68e4433f918 100644 --- a/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go +++ b/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go @@ -45,7 +45,8 @@ type resourceAllocationScorer struct { // score will use `scorer` function to calculate the score. 
func (r *resourceAllocationScorer) score( pod *v1.Pod, - nodeInfo *framework.NodeInfo) (int64, *framework.Status) { + nodeInfo *framework.NodeInfo, + podRequests []int64) (int64, *framework.Status) { node := nodeInfo.Node() if node == nil { return 0, framework.NewStatus(framework.Error, "node not found") @@ -58,7 +59,7 @@ func (r *resourceAllocationScorer) score( requested := make([]int64, len(r.resources)) allocatable := make([]int64, len(r.resources)) for i := range r.resources { - alloc, req := r.calculateResourceAllocatableRequest(nodeInfo, pod, v1.ResourceName(r.resources[i].Name)) + alloc, req := r.calculateResourceAllocatableRequest(nodeInfo, v1.ResourceName(r.resources[i].Name), podRequests[i]) // Only fill the extended resource entry when it's non-zero. if alloc == 0 { continue @@ -83,13 +84,12 @@ func (r *resourceAllocationScorer) score( // - 1st param: quantity of allocatable resource on the node. // - 2nd param: aggregated quantity of requested resource on the node. // Note: if it's an extended resource, and the pod doesn't request it, (0, 0) is returned. -func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.Pod, resource v1.ResourceName) (int64, int64) { +func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, resource v1.ResourceName, podRequest int64) (int64, int64) { requested := nodeInfo.NonZeroRequested if r.useRequested { requested = nodeInfo.Requested } - podRequest := r.calculatePodResourceRequest(pod, resource) // If it's an extended resource, and the pod doesn't request it. We return (0, 0) // as an implication to bypass scoring on this resource. 
if podRequest == 0 && schedutil.IsScalarResourceName(resource) { @@ -133,3 +133,11 @@ func (r *resourceAllocationScorer) calculatePodResourceRequest(pod *v1.Pod, reso } return quantity.Value() } + +func (r *resourceAllocationScorer) calculatePodResourceRequestList(pod *v1.Pod, resources []config.ResourceSpec) []int64 { + podRequests := make([]int64, len(resources)) + for i := range resources { + podRequests[i] = r.calculatePodResourceRequest(pod, v1.ResourceName(resources[i].Name)) + } + return podRequests +}