Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 03:41:45 +00:00)
Merge pull request #86913 from ahg-g/ahg-nodelimit

fix a bug in scheduler's node resource limits score

Commit c7af1da206
```diff
@@ -169,7 +169,9 @@ func applyFeatureGates(config *Config) {
+	// Prioritizes nodes that satisfy pod's resource limits
 	if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) {
 		klog.Infof("Registering resourcelimits priority function")
-		s := schedulerapi.Plugin{Name: noderesources.ResourceLimitsName, Weight: 1}
+		s := schedulerapi.Plugin{Name: noderesources.ResourceLimitsName}
 		config.FrameworkPlugins.PostFilter.Enabled = append(config.FrameworkPlugins.PostFilter.Enabled, s)
+		s = schedulerapi.Plugin{Name: noderesources.ResourceLimitsName, Weight: 1}
 		config.FrameworkPlugins.Score.Enabled = append(config.FrameworkPlugins.Score.Enabled, s)
 	}
 }
```
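The hunk above is the registration half of the fix: the single plugin entry that carried `Weight: 1` into both extension points is split, so the PostFilter list gets a weightless entry and only the Score list keeps `Weight: 1` (the scheduling framework only consults a plugin's weight when combining node scores). Below is a minimal, self-contained sketch of that idea; the `Plugin` and `PluginSet` types are stand-ins modeled on the `schedulerapi` shapes visible in the diff, and the name constant is hypothetical, not the real `noderesources.ResourceLimitsName`.

```go
package main

import "fmt"

// Plugin is a stand-in for schedulerapi.Plugin: a name plus a weight
// that the scheduling framework only consults for Score plugins.
type Plugin struct {
	Name   string
	Weight int32
}

// PluginSet is a stand-in for schedulerapi.PluginSet: the enabled
// plugins of one extension point (PostFilter, Score, ...).
type PluginSet struct {
	Enabled []Plugin
}

func main() {
	const name = "NodeResourceLimits" // hypothetical stand-in for noderesources.ResourceLimitsName

	var postFilter, score PluginSet

	// The PostFilter entry carries no weight: weight has no meaning there.
	postFilter.Enabled = append(postFilter.Enabled, Plugin{Name: name})

	// Only the Score entry carries Weight: 1, which scales this plugin's
	// node score when the framework sums up all score plugins.
	score.Enabled = append(score.Enabled, Plugin{Name: name, Weight: 1})

	fmt.Printf("PostFilter: %+v\n", postFilter.Enabled)
	fmt.Printf("Score:      %+v\n", score.Enabled)
}
```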
```diff
@@ -208,6 +208,7 @@ func TestApplyFeatureGates(t *testing.T) {
 						{Name: interpodaffinity.Name},
 						{Name: tainttoleration.Name},
 						{Name: podtopologyspread.Name},
+						{Name: noderesources.ResourceLimitsName},
 					},
 				},
 				Score: &schedulerapi.PluginSet{
```
```diff
@@ -22,9 +22,7 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/klog"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
```
```diff
@@ -84,11 +82,10 @@ func (rl *ResourceLimits) PostFilter(
 	return nil
 }
 
-func getPodResource(cycleState *framework.CycleState) (*nodeinfo.Resource, error) {
+func getPodResource(cycleState *framework.CycleState) (*schedulernodeinfo.Resource, error) {
 	c, err := cycleState.Read(postFilterStateKey)
 	if err != nil {
-		klog.V(5).Infof("Error reading %q from cycleState: %v", postFilterStateKey, err)
-		return nil, nil
+		return nil, fmt.Errorf("Error reading %q from cycleState: %v", postFilterStateKey, err)
 	}
 
 	s, ok := c.(*postFilterState)
```
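This is the core of the bug fix: the old `getPodResource` logged the cycle-state read failure at verbosity 5 and returned `(nil, nil)`, so a caller such as `Score` could carry on with a nil resource and score the node without ever having seen the pod's limits; the new version surfaces the failure as an error. A toy sketch of the behavioral difference, with stand-in types replacing `framework.CycleState` and `schedulernodeinfo.Resource` (`readState` and `errNotFound` are hypothetical helpers, not scheduler API):

```go
package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for the error cycleState.Read returns when
// PostFilter never wrote the pod's resource summary.
var errNotFound = errors.New("postFilterState not found")

// Resource is a stand-in for schedulernodeinfo.Resource.
type Resource struct{ MilliCPU, Memory int64 }

// readState simulates cycleState.Read failing because PostFilter was skipped.
func readState() (*Resource, error) { return nil, errNotFound }

// getPodResource, as after the fix: the read error is propagated to the
// caller instead of being logged and replaced by (nil, nil).
func getPodResource() (*Resource, error) {
	r, err := readState()
	if err != nil {
		return nil, fmt.Errorf("error reading state: %v", err)
	}
	return r, nil
}

func main() {
	// A Score-style caller can now distinguish "no data" from a genuine
	// zero score; with the old (nil, nil) behavior it would have proceeded
	// with a nil *Resource and reported nothing.
	if _, err := getPodResource(); err != nil {
		fmt.Println("Score fails loudly:", err)
	}
}
```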
```diff
@@ -18,7 +18,6 @@ package noderesources
 
 import (
 	"context"
-	"reflect"
 	"testing"
 
 	v1 "k8s.io/api/core/v1"
```
```diff
@@ -100,10 +99,11 @@ func TestResourceLimits(t *testing.T) {
 
 	tests := []struct {
 		// input pod
-		pod          *v1.Pod
-		nodes        []*v1.Node
-		expectedList framework.NodeScoreList
-		name         string
+		pod            *v1.Pod
+		nodes          []*v1.Node
+		expectedList   framework.NodeScoreList
+		name           string
+		skipPostFilter bool
 	}{
 		{
 			pod: &v1.Pod{Spec: noResources},
```
```diff
@@ -135,6 +135,13 @@ func TestResourceLimits(t *testing.T) {
 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}},
 			name:         "node does not advertise its allocatables",
 		},
+		{
+			pod:            &v1.Pod{Spec: cpuAndMemory},
+			nodes:          []*v1.Node{makeNode("machine1", 0, 0)},
+			expectedList:   []framework.NodeScore{{Name: "machine1", Score: 0}},
+			skipPostFilter: true,
+			name:           "postFilter skipped",
+		},
 	}
 
 	for _, test := range tests {
```
```diff
@@ -142,18 +149,25 @@ func TestResourceLimits(t *testing.T) {
 			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			p := &ResourceLimits{handle: fh}
-			state := framework.NewCycleState()
-			status := p.PostFilter(context.Background(), state, test.pod, test.nodes, nil)
-			if !status.IsSuccess() {
-				t.Errorf("unexpected error: %v", status)
-			}
 			for i := range test.nodes {
-				hostResult, err := p.Score(context.Background(), state, test.pod, test.nodes[i].Name)
-				if err != nil {
+				state := framework.NewCycleState()
+				if !test.skipPostFilter {
+					status := p.PostFilter(context.Background(), state, test.pod, test.nodes, nil)
+					if !status.IsSuccess() {
+						t.Errorf("unexpected error: %v", status)
+					}
+				}
+
+				gotScore, err := p.Score(context.Background(), state, test.pod, test.nodes[i].Name)
+				if test.skipPostFilter {
+					if err == nil {
+						t.Errorf("expected error")
+					}
+				} else if err != nil {
 					t.Errorf("unexpected error: %v", err)
 				}
-				if !reflect.DeepEqual(test.expectedList[i].Score, hostResult) {
-					t.Errorf("expected %#v, got %#v", test.expectedList[i].Score, hostResult)
+				if test.expectedList[i].Score != gotScore {
+					t.Errorf("gotScore %v, wantScore %v", gotScore, test.expectedList[i].Score)
 				}
 			}
 		})
```
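The reworked loop creates a fresh `CycleState` per node and, for `skipPostFilter` cases, asserts that `Score` now surfaces an error when `PostFilter` never wrote its state. Below is a stripped-down model of that table-driven pattern: a plain map stands in for `framework.CycleState` and a toy `score` function replaces the plugin, so every name here is illustrative rather than scheduler API.

```go
package scratch

import (
	"errors"
	"testing"
)

// score stands in for the plugin's Score method: it fails when the
// per-cycle state (normally written by PostFilter) is absent.
func score(state map[string]int64) (int64, error) {
	v, ok := state["podResource"]
	if !ok {
		return 0, errors.New("reading podResource from state")
	}
	return v, nil
}

func TestScoreStatePropagation(t *testing.T) {
	tests := []struct {
		name           string
		skipPostFilter bool // when true, Score must surface an error
		wantScore      int64
	}{
		{name: "postFilter ran", wantScore: 42},
		{name: "postFilter skipped", skipPostFilter: true},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			state := map[string]int64{}
			if !test.skipPostFilter {
				state["podResource"] = 42 // simulate PostFilter writing state
			}
			gotScore, err := score(state)
			if test.skipPostFilter {
				if err == nil {
					t.Errorf("expected error")
				}
			} else if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			if gotScore != test.wantScore {
				t.Errorf("gotScore %v, wantScore %v", gotScore, test.wantScore)
			}
		})
	}
}
```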