Merge pull request #86913 from ahg-g/ahg-nodelimit

fix a bug in scheduler's node resource limits score
commit c7af1da206 by Kubernetes Prow Robot, 2020-01-07 12:34:25 -08:00 (committed by GitHub)
4 changed files with 34 additions and 20 deletions


@@ -169,7 +169,9 @@ func applyFeatureGates(config *Config) {
 	// Prioritizes nodes that satisfy pod's resource limits
 	if utilfeature.DefaultFeatureGate.Enabled(features.ResourceLimitsPriorityFunction) {
 		klog.Infof("Registering resourcelimits priority function")
-		s := schedulerapi.Plugin{Name: noderesources.ResourceLimitsName, Weight: 1}
+		s := schedulerapi.Plugin{Name: noderesources.ResourceLimitsName}
+		config.FrameworkPlugins.PostFilter.Enabled = append(config.FrameworkPlugins.PostFilter.Enabled, s)
+		s = schedulerapi.Plugin{Name: noderesources.ResourceLimitsName, Weight: 1}
		config.FrameworkPlugins.Score.Enabled = append(config.FrameworkPlugins.Score.Enabled, s)
 	}
 }
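
This hunk is the core of the fix: the ResourceLimits plugin's Score phase reads cycle state that its PostFilter phase writes, but the feature gate previously enabled the plugin only at the Score extension point, so that state was never produced. The registration now appends the plugin to both PostFilter.Enabled and Score.Enabled, with a weight only on the Score entry. A minimal, self-contained sketch of the idea, using stand-in Plugin/PluginSet/Plugins types rather than the real schedulerapi ones:

package main

import "fmt"

// Stand-in types for illustration only; the real ones live in the scheduler's API package.
type Plugin struct {
	Name   string
	Weight int32
}

type PluginSet struct {
	Enabled []Plugin
}

type Plugins struct {
	PostFilter PluginSet
	Score      PluginSet
}

func main() {
	var cfg Plugins

	// Enable the plugin at PostFilter so it can write its cycle state...
	cfg.PostFilter.Enabled = append(cfg.PostFilter.Enabled, Plugin{Name: "NodeResourceLimits"})
	// ...and at Score (with a weight) so that state is actually consumed when scoring nodes.
	cfg.Score.Enabled = append(cfg.Score.Enabled, Plugin{Name: "NodeResourceLimits", Weight: 1})

	fmt.Printf("%+v\n", cfg)
}

As in the hunk above, the weight matters only for the Score registration; the PostFilter entry just has to be present so the plugin runs there.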


@@ -208,6 +208,7 @@ func TestApplyFeatureGates(t *testing.T) {
 				{Name: interpodaffinity.Name},
 				{Name: tainttoleration.Name},
 				{Name: podtopologyspread.Name},
+				{Name: noderesources.ResourceLimitsName},
 			},
 		},
 		Score: &schedulerapi.PluginSet{


@@ -22,9 +22,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/klog"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
@@ -84,11 +82,10 @@ func (rl *ResourceLimits) PostFilter(
 	return nil
 }

-func getPodResource(cycleState *framework.CycleState) (*nodeinfo.Resource, error) {
+func getPodResource(cycleState *framework.CycleState) (*schedulernodeinfo.Resource, error) {
 	c, err := cycleState.Read(postFilterStateKey)
 	if err != nil {
-		klog.V(5).Infof("Error reading %q from cycleState: %v", postFilterStateKey, err)
-		return nil, nil
+		return nil, fmt.Errorf("Error reading %q from cycleState: %v", postFilterStateKey, err)
 	}

 	s, ok := c.(*postFilterState)
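
getPodResource previously logged the failed cycle-state read at V(5) and returned nil, nil, which hid exactly the misconfiguration fixed above; it now returns the error so the Score phase fails loudly. A rough sketch of that error-propagation pattern, with a toy state store standing in for framework.CycleState and an illustrative state key (not the real postFilterStateKey value):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// cycleState is a toy key/value store standing in for the framework's CycleState.
type cycleState map[string]interface{}

func (c cycleState) Read(key string) (interface{}, error) {
	v, ok := c[key]
	if !ok {
		return nil, fmt.Errorf("%q: %w", key, errNotFound)
	}
	return v, nil
}

type podResource struct{ MilliCPU, Memory int64 }

// getPodResource returns the state written by an earlier phase, or an error.
// Returning (nil, nil) on a failed read, as the old code effectively did, would
// make a missing value indistinguishable from a real result for the caller.
func getPodResource(c cycleState, key string) (*podResource, error) {
	v, err := c.Read(key)
	if err != nil {
		return nil, fmt.Errorf("reading %q from cycle state: %w", key, err)
	}
	r, ok := v.(*podResource)
	if !ok {
		return nil, fmt.Errorf("%q has unexpected type %T", key, v)
	}
	return r, nil
}

func main() {
	state := cycleState{} // nothing was written by the earlier phase
	if _, err := getPodResource(state, "podResourceLimits"); err != nil {
		fmt.Println("error surfaced to the caller:", err) // instead of silently proceeding
	}
}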


@@ -18,7 +18,6 @@ package noderesources
 import (
 	"context"
-	"reflect"
 	"testing"
 	v1 "k8s.io/api/core/v1"
@@ -100,10 +99,11 @@ func TestResourceLimits(t *testing.T) {
 	tests := []struct {
 		// input pod
 		pod            *v1.Pod
 		nodes          []*v1.Node
 		expectedList   framework.NodeScoreList
 		name           string
+		skipPostFilter bool
 	}{
 		{
 			pod: &v1.Pod{Spec: noResources},
@@ -135,6 +135,13 @@ func TestResourceLimits(t *testing.T) {
 			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}},
 			name:         "node does not advertise its allocatables",
 		},
+		{
+			pod:            &v1.Pod{Spec: cpuAndMemory},
+			nodes:          []*v1.Node{makeNode("machine1", 0, 0)},
+			expectedList:   []framework.NodeScore{{Name: "machine1", Score: 0}},
+			skipPostFilter: true,
+			name:           "postFilter skipped",
+		},
 	}

 	for _, test := range tests {
@@ -142,18 +149,25 @@ func TestResourceLimits(t *testing.T) {
 			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			p := &ResourceLimits{handle: fh}
-			state := framework.NewCycleState()
-			status := p.PostFilter(context.Background(), state, test.pod, test.nodes, nil)
-			if !status.IsSuccess() {
-				t.Errorf("unexpected error: %v", status)
-			}
 			for i := range test.nodes {
-				hostResult, err := p.Score(context.Background(), state, test.pod, test.nodes[i].Name)
-				if err != nil {
+				state := framework.NewCycleState()
+				if !test.skipPostFilter {
+					status := p.PostFilter(context.Background(), state, test.pod, test.nodes, nil)
+					if !status.IsSuccess() {
+						t.Errorf("unexpected error: %v", status)
+					}
+				}
+				gotScore, err := p.Score(context.Background(), state, test.pod, test.nodes[i].Name)
+				if test.skipPostFilter {
+					if err == nil {
+						t.Errorf("expected error")
+					}
+				} else if err != nil {
 					t.Errorf("unexpected error: %v", err)
 				}
-				if !reflect.DeepEqual(test.expectedList[i].Score, hostResult) {
-					t.Errorf("expected %#v, got %#v", test.expectedList[i].Score, hostResult)
+				if test.expectedList[i].Score != gotScore {
+					t.Errorf("gotScore %v, wantScore %v", gotScore, test.expectedList[i].Score)
 				}
 			}
 		})
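
The test now builds a fresh CycleState per node, adds a skipPostFilter case that expects Score to fail when the PostFilter state is missing, and compares the int64 scores directly instead of going through reflect.DeepEqual. A stand-alone sketch of that table-driven pattern, with a toy score function standing in for the scheduler framework:

package scoring

import (
	"errors"
	"testing"
)

// score returns a node score computed from previously prepared state,
// or an error when the preparation step never ran.
func score(prepared bool) (int64, error) {
	if !prepared {
		return 0, errors.New("no prepared state")
	}
	return 0, nil
}

func TestScore(t *testing.T) {
	tests := []struct {
		name        string
		skipPrepare bool
		want        int64
	}{
		{name: "prepared", want: 0},
		{name: "prepare skipped", skipPrepare: true},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			got, err := score(!test.skipPrepare)
			if test.skipPrepare {
				// The case that skips preparation asserts on the error instead of the score.
				if err == nil {
					t.Fatalf("expected error")
				}
				return
			}
			if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}
			// Scores are plain integers, so == is enough; reflect.DeepEqual is unnecessary.
			if got != test.want {
				t.Errorf("got %v, want %v", got, test.want)
			}
		})
	}
}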