Merge pull request #105906 from kerthcet/feature/remove-scheduler-plugin-node-label

remove scheduler NodeLabel plugin
Kubernetes Prow Robot 2021-10-27 07:45:25 -07:00 committed by GitHub
commit a0a79e3c91
12 changed files with 3 additions and 643 deletions

View File

@@ -40,7 +40,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&KubeSchedulerConfiguration{},
&DefaultPreemptionArgs{},
&InterPodAffinityArgs{},
&NodeLabelArgs{},
&NodeResourcesFitArgs{},
&PodTopologySpreadArgs{},
&RequestedToCapacityRatioArgs{},

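For context, addKnownTypes registers every plugin-args type with the runtime.Scheme, which is what lets the component-config decoder turn an apiVersion/kind pair into a concrete struct; once &NodeLabelArgs{} is dropped from this list, NodeLabelArgs configs simply fail to decode. A minimal, self-contained sketch of that registration pattern follows; FooArgs is a hypothetical stand-in, not a real scheduler type.

package main

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// FooArgs is a hypothetical plugin-args type. Embedding metav1.TypeMeta gives
// it the GetObjectKind method that runtime.Object requires.
type FooArgs struct {
	metav1.TypeMeta
}

// DeepCopyObject satisfies runtime.Object; FooArgs has no reference fields,
// so a value copy is a deep copy.
func (in *FooArgs) DeepCopyObject() runtime.Object {
	out := *in
	return &out
}

func main() {
	scheme := runtime.NewScheme()
	gv := schema.GroupVersion{Group: "kubescheduler.config.k8s.io", Version: "v1beta2"}
	// Mirrors addKnownTypes above: only types registered here can be
	// decoded from a KubeSchedulerConfiguration file.
	scheme.AddKnownTypes(gv, &FooArgs{})
	metav1.AddToGroupVersion(scheme, gv)
}
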
View File

@@ -56,26 +56,6 @@ type InterPodAffinityArgs struct {
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeLabelArgs holds arguments used to configure the NodeLabel plugin.
//
// This plugin has been deprecated and is only configurable through the
// scheduler policy API and the v1beta1 component config API. It is recommended
// to use the NodeAffinity plugin instead.
type NodeLabelArgs struct {
metav1.TypeMeta
// PresentLabels should be present for the node to be considered a fit for hosting the pod
PresentLabels []string
// AbsentLabels should be absent for the node to be considered a fit for hosting the pod
AbsentLabels []string
// Nodes that have labels in the list will get a higher score.
PresentLabelsPreference []string
// Nodes that don't have labels in the list will get a higher score.
AbsentLabelsPreference []string
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// NodeResourcesFitArgs holds arguments used to configure the NodeResourcesFit plugin.
type NodeResourcesFitArgs struct {
metav1.TypeMeta

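The note above recommends NodeAffinity as the replacement. As a hedged sketch of the migration for the two hard-filter fields, PresentLabels maps to an Exists requirement and AbsentLabels to DoesNotExist on the core v1 API; nodeAffinityFor and the label keys below are illustrative, not part of the scheduler.

package main

import v1 "k8s.io/api/core/v1"

// nodeAffinityFor mirrors NodeLabelArgs' hard constraints: every "present"
// label must exist on the node, every "absent" label must not.
func nodeAffinityFor(presentLabels, absentLabels []string) *v1.Affinity {
	var reqs []v1.NodeSelectorRequirement
	for _, l := range presentLabels {
		reqs = append(reqs, v1.NodeSelectorRequirement{Key: l, Operator: v1.NodeSelectorOpExists})
	}
	for _, l := range absentLabels {
		reqs = append(reqs, v1.NodeSelectorRequirement{Key: l, Operator: v1.NodeSelectorOpDoesNotExist})
	}
	return &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				// One term ANDs all requirements together, matching the
				// all-must-hold semantics of the NodeLabel filter.
				NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: reqs}},
			},
		},
	}
}

func main() {
	_ = nodeAffinityFor([]string{"region"}, []string{"retiring"})
}

The preference fields translate the same way onto PreferredDuringSchedulingIgnoredDuringExecution terms, each carrying a weight.
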
View File

@@ -129,7 +129,6 @@ var removedPluginsByVersion = []removedPlugins{
{
schemeGroupVersion: v1beta2.SchemeGroupVersion.String(),
plugins: []string{
"NodeLabel",
"NodeResourcesLeastAllocated",
"NodeResourcesMostAllocated",
"RequestedToCapacityRatio",
@@ -182,7 +181,6 @@ func validatePluginConfig(path *field.Path, apiVersion string, profile *config.K
"DefaultPreemption": ValidateDefaultPreemptionArgs,
"InterPodAffinity": ValidateInterPodAffinityArgs,
"NodeAffinity": ValidateNodeAffinityArgs,
"NodeLabel": ValidateNodeLabelArgs,
"NodeResourcesBalancedAllocation": ValidateNodeResourcesBalancedAllocationArgs,
"NodeResourcesFitArgs": ValidateNodeResourcesFitArgs,
"NodeResourcesLeastAllocated": ValidateNodeResourcesLeastAllocatedArgs,

View File

@@ -87,35 +87,6 @@ func validateHardPodAffinityWeight(path *field.Path, w int32) error {
return nil
}
// ValidateNodeLabelArgs validates that NodeLabelArgs are correct.
func ValidateNodeLabelArgs(path *field.Path, args *config.NodeLabelArgs) error {
var allErrs field.ErrorList
allErrs = append(allErrs, validateNoConflict(args.PresentLabels, args.AbsentLabels,
path.Child("presentLabels"), path.Child("absentLabels"))...)
allErrs = append(allErrs, validateNoConflict(args.PresentLabelsPreference, args.AbsentLabelsPreference,
path.Child("presentLabelsPreference"), path.Child("absentLabelsPreference"))...)
return allErrs.ToAggregate()
}
// validateNoConflict validates that presentLabels and absentLabels do not conflict.
func validateNoConflict(presentLabels, absentLabels []string, presentPath, absentPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
m := make(map[string]int, len(presentLabels)) // label -> index
for i, l := range presentLabels {
m[l] = i
}
for i, l := range absentLabels {
if j, ok := m[l]; ok {
allErrs = append(allErrs, field.Invalid(presentPath.Index(j), l,
fmt.Sprintf("conflict with %v", absentPath.Index(i).String())))
}
}
return allErrs
}
// ValidatePodTopologySpreadArgs validates that PodTopologySpreadArgs are correct.
// It replicates the validation from pkg/apis/core/validation.validateTopologySpreadConstraints
// with an additional check for .labelSelector to be nil.

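The check above is an index-map scan: record each present label's position, then flag any absent label that hits the map. A dependency-free sketch of the same technique (conflicts is a hypothetical helper):

package main

import "fmt"

// conflicts returns a description of every label that appears in both lists,
// keeping the index in each list the way validateNoConflict does.
func conflicts(present, absent []string) []string {
	idx := make(map[string]int, len(present)) // label -> index in present
	for i, l := range present {
		idx[l] = i
	}
	var out []string
	for i, l := range absent {
		if j, ok := idx[l]; ok {
			out = append(out, fmt.Sprintf("present[%d] conflicts with absent[%d]: %s", j, i, l))
		}
	}
	return out
}

func main() {
	fmt.Println(conflicts([]string{"label", "label3"}, []string{"label", "label2", "label3"}))
	// present[0] conflicts with absent[0]: label
	// present[1] conflicts with absent[2]: label3
}
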
View File

@@ -167,87 +167,6 @@ func TestValidateInterPodAffinityArgs(t *testing.T) {
}
}
func TestValidateNodeLabelArgs(t *testing.T) {
cases := map[string]struct {
args config.NodeLabelArgs
wantErrs field.ErrorList
}{
"valid config": {
args: config.NodeLabelArgs{
PresentLabels: []string{"present"},
AbsentLabels: []string{"absent"},
PresentLabelsPreference: []string{"present-preference"},
AbsentLabelsPreference: []string{"absent-preference"},
},
},
"labels conflict": {
args: config.NodeLabelArgs{
PresentLabels: []string{"label"},
AbsentLabels: []string{"label"},
},
wantErrs: field.ErrorList{
&field.Error{
Type: field.ErrorTypeInvalid,
Field: "presentLabels[0]",
},
},
},
"multiple labels conflict": {
args: config.NodeLabelArgs{
PresentLabels: []string{"label", "label3"},
AbsentLabels: []string{"label", "label2", "label3"},
},
wantErrs: field.ErrorList{
&field.Error{
Type: field.ErrorTypeInvalid,
Field: "presentLabels[0]",
},
&field.Error{
Type: field.ErrorTypeInvalid,
Field: "presentLabels[1]",
},
},
},
"labels preference conflict": {
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"label"},
AbsentLabelsPreference: []string{"label"},
},
wantErrs: field.ErrorList{
&field.Error{
Type: field.ErrorTypeInvalid,
Field: "presentLabelsPreference[0]",
},
},
},
"multiple labels preference conflict": {
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"label", "label3"},
AbsentLabelsPreference: []string{"label", "label2", "label3"},
},
wantErrs: field.ErrorList{
&field.Error{
Type: field.ErrorTypeInvalid,
Field: "presentLabelsPreference[0]",
},
&field.Error{
Type: field.ErrorTypeInvalid,
Field: "presentLabelsPreference[1]",
},
},
},
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
err := ValidateNodeLabelArgs(nil, &tc.args)
if diff := cmp.Diff(tc.wantErrs.ToAggregate(), err, ignoreBadValueDetail); diff != "" {
t.Errorf("ValidateNodeLabelArgs returned err (-want,+got):\n%s", diff)
}
})
}
}
func TestValidatePodTopologySpreadArgs(t *testing.T) {
cases := map[string]struct {
args *config.PodTopologySpreadArgs

View File

@@ -199,7 +199,7 @@ func TestValidateKubeSchedulerConfigurationV1beta2(t *testing.T) {
})
badRemovedPlugins1 := validConfig.DeepCopy()
-badRemovedPlugins1.Profiles[0].Plugins.Score.Enabled = append(badRemovedPlugins1.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "NodeLabel", Weight: 2})
+badRemovedPlugins1.Profiles[0].Plugins.Score.Enabled = append(badRemovedPlugins1.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "NodeResourcesLeastAllocated", Weight: 2})
badRemovedPlugins3 := validConfig.DeepCopy()
badRemovedPlugins3.Profiles[0].Plugins.Score.Enabled = append(badRemovedPlugins3.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "NodeResourcesMostAllocated", Weight: 2})
@@ -498,7 +498,7 @@ func TestValidateKubeSchedulerConfigurationV1beta3(t *testing.T) {
})
badRemovedPlugins1 := validConfig.DeepCopy()
-badRemovedPlugins1.Profiles[0].Plugins.Score.Enabled = append(badRemovedPlugins1.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "NodeLabel", Weight: 2})
+badRemovedPlugins1.Profiles[0].Plugins.Score.Enabled = append(badRemovedPlugins1.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "NodeResourcesLeastAllocated", Weight: 2})
badRemovedPlugins2 := validConfig.DeepCopy()
badRemovedPlugins2.Profiles[0].Plugins.Score.Enabled = append(badRemovedPlugins2.Profiles[0].Plugins.Score.Enabled, config.Plugin{Name: "RequestedToCapacityRatio", Weight: 2})

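These test failures are driven by the removedPluginsByVersion table shown earlier: validation walks the table and rejects any enabled plugin that was removed at or before the profile's API version. A simplified, stdlib-only sketch of that lookup (isRemoved is hypothetical, and the real validation also honors version ordering so older API versions keep working):

package main

import "fmt"

// removedPlugins mirrors the shape of removedPluginsByVersion.
type removedPlugins struct {
	version string
	plugins []string
}

var removedByVersion = []removedPlugins{
	{
		version: "kubescheduler.config.k8s.io/v1beta2",
		plugins: []string{"NodeLabel", "NodeResourcesLeastAllocated", "NodeResourcesMostAllocated", "RequestedToCapacityRatio"},
	},
}

// isRemoved reports whether a plugin name is rejected for the given apiVersion.
func isRemoved(apiVersion, name string) bool {
	for _, rp := range removedByVersion {
		if rp.version != apiVersion {
			continue
		}
		for _, p := range rp.plugins {
			if p == name {
				return true
			}
		}
	}
	return false
}

func main() {
	fmt.Println(isRemoved("kubescheduler.config.k8s.io/v1beta2", "NodeLabel")) // true
}
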
View File

@@ -250,51 +250,6 @@ func (in *NodeAffinityArgs) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeLabelArgs) DeepCopyInto(out *NodeLabelArgs) {
*out = *in
out.TypeMeta = in.TypeMeta
if in.PresentLabels != nil {
in, out := &in.PresentLabels, &out.PresentLabels
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.AbsentLabels != nil {
in, out := &in.AbsentLabels, &out.AbsentLabels
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.PresentLabelsPreference != nil {
in, out := &in.PresentLabelsPreference, &out.PresentLabelsPreference
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.AbsentLabelsPreference != nil {
in, out := &in.AbsentLabelsPreference, &out.AbsentLabelsPreference
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeLabelArgs.
func (in *NodeLabelArgs) DeepCopy() *NodeLabelArgs {
if in == nil {
return nil
}
out := new(NodeLabelArgs)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *NodeLabelArgs) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeResourcesBalancedAllocationArgs) DeepCopyInto(out *NodeResourcesBalancedAllocationArgs) {
*out = *in

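The generated copies above spell out a general rule: assigning a struct copies only the slice headers, so each slice field needs a fresh make plus copy to avoid aliasing. A dependency-free illustration (args and deepCopyInto are hypothetical):

package main

import "fmt"

type args struct {
	labels []string
}

// deepCopyInto follows the generated pattern: value-copy the struct, then
// replace each non-nil slice with a freshly allocated copy.
func (in *args) deepCopyInto(out *args) {
	*out = *in // slice header copied; still aliases in's backing array
	if in.labels != nil {
		out.labels = make([]string, len(in.labels))
		copy(out.labels, in.labels)
	}
}

func main() {
	a := args{labels: []string{"foo"}}
	var b args
	a.deepCopyInto(&b)
	b.labels[0] = "bar"
	fmt.Println(a.labels[0]) // "foo": mutating the copy leaves the original intact
}
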
View File

@@ -23,7 +23,6 @@ const (
ImageLocality = "ImageLocality"
InterPodAffinity = "InterPodAffinity"
NodeAffinity = "NodeAffinity"
NodeLabel = "NodeLabel"
NodeName = "NodeName"
NodePorts = "NodePorts"
NodeResourcesBalancedAllocation = "NodeResourcesBalancedAllocation"

View File

@@ -1,164 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This plugin has been deprecated and is only configurable through the
// scheduler policy API and the v1beta1 component config API. It is recommended
// to use the NodeAffinity plugin instead.
package nodelabel
import (
"context"
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/names"
)
// Name of this plugin.
const Name = names.NodeLabel
const (
// ErrReasonPresenceViolated is used for CheckNodeLabelPresence predicate error.
ErrReasonPresenceViolated = "node(s) didn't have the requested labels"
)
// New initializes a new plugin and returns it.
func New(plArgs runtime.Object, handle framework.Handle) (framework.Plugin, error) {
args, err := getArgs(plArgs)
if err != nil {
return nil, err
}
if err := validation.ValidateNodeLabelArgs(nil, &args); err != nil {
return nil, err
}
klog.InfoS("NodeLabel plugin is deprecated and will be removed in a future version, use NodeAffinity instead")
return &NodeLabel{
handle: handle,
args: args,
}, nil
}
func getArgs(obj runtime.Object) (config.NodeLabelArgs, error) {
ptr, ok := obj.(*config.NodeLabelArgs)
if !ok {
return config.NodeLabelArgs{}, fmt.Errorf("want args to be of type NodeLabelArgs, got %T", obj)
}
return *ptr, nil
}
// NodeLabel checks whether a pod can fit based on the node labels which match a filter that it requests.
type NodeLabel struct {
handle framework.Handle
args config.NodeLabelArgs
}
var _ framework.FilterPlugin = &NodeLabel{}
var _ framework.ScorePlugin = &NodeLabel{}
var _ framework.EnqueueExtensions = &NodeLabel{}
// Name returns name of the plugin. It is used in logs, etc.
func (pl *NodeLabel) Name() string {
return Name
}
// Filter invoked at the filter extension point.
// It checks whether all of the specified labels exist on a node, regardless of their value
//
// Consider the cases where the nodes are placed in regions/zones/racks and these are identified by labels
// In some cases, it is required that only nodes that are part of ANY of the defined regions/zones/racks be selected
//
// Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
// A node may have a label with "retiring" as key and the date as the value
// and it may be desirable to avoid scheduling new pods on this node.
func (pl *NodeLabel) Filter(ctx context.Context, _ *framework.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
node := nodeInfo.Node()
if node == nil {
return framework.NewStatus(framework.Error, "node not found")
}
size := int64(len(pl.args.PresentLabels) + len(pl.args.AbsentLabels))
if size == 0 {
return nil
}
nodeLabels := labels.Set(node.Labels)
check := func(labels []string, presence bool) bool {
for _, label := range labels {
exists := nodeLabels.Has(label)
if exists != presence {
return false
}
}
return true
}
if check(pl.args.PresentLabels, true) && check(pl.args.AbsentLabels, false) {
return nil
}
return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonPresenceViolated)
}
// Score invoked at the score extension point.
func (pl *NodeLabel) Score(ctx context.Context, _ *framework.CycleState, _ *v1.Pod, nodeName string) (int64, *framework.Status) {
nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
if err != nil {
return 0, framework.AsStatus(fmt.Errorf("getting node %q from Snapshot: %w", nodeName, err))
}
node := nodeInfo.Node()
size := int64(len(pl.args.PresentLabelsPreference) + len(pl.args.AbsentLabelsPreference))
if size == 0 {
return 0, nil
}
score := int64(0)
for _, label := range pl.args.PresentLabelsPreference {
if labels.Set(node.Labels).Has(label) {
score += framework.MaxNodeScore
}
}
for _, label := range pl.args.AbsentLabelsPreference {
if !labels.Set(node.Labels).Has(label) {
score += framework.MaxNodeScore
}
}
// Take average score for each label to ensure the score doesn't exceed MaxNodeScore.
score /= size
return score, nil
}
// ScoreExtensions of the Score plugin.
func (pl *NodeLabel) ScoreExtensions() framework.ScoreExtensions {
return nil
}
// EventsToRegister returns the possible events that may make a Pod
// failed by this plugin schedulable.
func (pl *NodeLabel) EventsToRegister() []framework.ClusterEvent {
return []framework.ClusterEvent{
{Resource: framework.Node, ActionType: framework.Add | framework.UpdateNodeLabel},
}
}

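The Score method above grants MaxNodeScore per satisfied preference and then divides by the total number of preferences. For a node labeled {foo, bar}, the preferences PresentLabelsPreference={foo, somelabel} and AbsentLabelsPreference={somelabel1, somelabel2} satisfy three of four checks, so the node scores 3*100/4 = 75, which is the 3 * framework.MaxNodeScore / 4 case in the test file below. A dependency-free restatement of that arithmetic (score is a hypothetical helper, with framework.MaxNodeScore inlined as 100):

package main

import "fmt"

const maxNodeScore = 100 // framework.MaxNodeScore

// score reproduces the plugin's preference scoring: each satisfied preference
// adds maxNodeScore, and the sum is averaged over all preferences so the
// result never exceeds maxNodeScore.
func score(nodeLabels map[string]string, presentPref, absentPref []string) int64 {
	size := int64(len(presentPref) + len(absentPref))
	if size == 0 {
		return 0
	}
	var s int64
	for _, l := range presentPref {
		if _, ok := nodeLabels[l]; ok {
			s += maxNodeScore
		}
	}
	for _, l := range absentPref {
		if _, ok := nodeLabels[l]; !ok {
			s += maxNodeScore
		}
	}
	return s / size
}

func main() {
	node := map[string]string{"foo": "", "bar": ""}
	fmt.Println(score(node, []string{"foo", "somelabel"}, []string{"somelabel1", "somelabel2"})) // 75
}
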
View File

@@ -1,294 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package nodelabel
import (
"context"
"testing"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
)
func TestNodeLabelFilter(t *testing.T) {
label := map[string]string{"foo": "any value", "bar": "any value"}
var pod *v1.Pod
tests := []struct {
name string
args config.NodeLabelArgs
res framework.Code
}{
{
name: "present label does not match",
args: config.NodeLabelArgs{
PresentLabels: []string{"baz"},
},
res: framework.UnschedulableAndUnresolvable,
},
{
name: "absent label does not match",
args: config.NodeLabelArgs{
AbsentLabels: []string{"baz"},
},
res: framework.Success,
},
{
name: "one of two present labels matches",
args: config.NodeLabelArgs{
PresentLabels: []string{"foo", "baz"},
},
res: framework.UnschedulableAndUnresolvable,
},
{
name: "one of two absent labels matches",
args: config.NodeLabelArgs{
AbsentLabels: []string{"foo", "baz"},
},
res: framework.UnschedulableAndUnresolvable,
},
{
name: "all present labels match",
args: config.NodeLabelArgs{
PresentLabels: []string{"foo", "bar"},
},
res: framework.Success,
},
{
name: "all absent labels match",
args: config.NodeLabelArgs{
AbsentLabels: []string{"foo", "bar"},
},
res: framework.UnschedulableAndUnresolvable,
},
{
name: "both present and absent label matches",
args: config.NodeLabelArgs{
PresentLabels: []string{"foo"},
AbsentLabels: []string{"bar"},
},
res: framework.UnschedulableAndUnresolvable,
},
{
name: "neither present nor absent label matches",
args: config.NodeLabelArgs{
PresentLabels: []string{"foz"},
AbsentLabels: []string{"baz"},
},
res: framework.UnschedulableAndUnresolvable,
},
{
name: "present label matches and absent label doesn't match",
args: config.NodeLabelArgs{
PresentLabels: []string{"foo"},
AbsentLabels: []string{"baz"},
},
res: framework.Success,
},
{
name: "present label doesn't match and absent label matches",
args: config.NodeLabelArgs{
PresentLabels: []string{"foz"},
AbsentLabels: []string{"bar"},
},
res: framework.UnschedulableAndUnresolvable,
},
{
name: "no label",
args: config.NodeLabelArgs{},
res: framework.Success,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
node := v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: label}}
nodeInfo := framework.NewNodeInfo()
nodeInfo.SetNode(&node)
p, err := New(&test.args, nil)
if err != nil {
t.Fatalf("Failed to create plugin: %v", err)
}
status := p.(framework.FilterPlugin).Filter(context.TODO(), nil, pod, nodeInfo)
if status.Code() != test.res {
t.Errorf("Status mismatch. got: %v, want: %v", status.Code(), test.res)
}
})
}
}
func TestNodeLabelScore(t *testing.T) {
tests := []struct {
args config.NodeLabelArgs
want int64
name string
}{
{
want: framework.MaxNodeScore,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"foo"},
},
name: "one present label match",
},
{
want: 0,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"somelabel"},
},
name: "one present label mismatch",
},
{
want: framework.MaxNodeScore,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"foo", "bar"},
},
name: "two present labels match",
},
{
want: 0,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"somelabel1", "somelabel2"},
},
name: "two present labels mismatch",
},
{
want: framework.MaxNodeScore / 2,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"foo", "somelabel"},
},
name: "two present labels only one matches",
},
{
want: 0,
args: config.NodeLabelArgs{
AbsentLabelsPreference: []string{"foo"},
},
name: "one absent label match",
},
{
want: framework.MaxNodeScore,
args: config.NodeLabelArgs{
AbsentLabelsPreference: []string{"somelabel"},
},
name: "one absent label mismatch",
},
{
want: 0,
args: config.NodeLabelArgs{
AbsentLabelsPreference: []string{"foo", "bar"},
},
name: "two absent labels match",
},
{
want: framework.MaxNodeScore,
args: config.NodeLabelArgs{
AbsentLabelsPreference: []string{"somelabel1", "somelabel2"},
},
name: "two absent labels mismatch",
},
{
want: framework.MaxNodeScore / 2,
args: config.NodeLabelArgs{
AbsentLabelsPreference: []string{"foo", "somelabel"},
},
name: "two absent labels only one matches",
},
{
want: framework.MaxNodeScore,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"foo", "bar"},
AbsentLabelsPreference: []string{"somelabel1", "somelabel2"},
},
name: "two present labels match, two absent labels mismatch",
},
{
want: 0,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"somelabel1", "somelabel2"},
AbsentLabelsPreference: []string{"foo", "bar"},
},
name: "two present labels both mismatch, two absent labels both match",
},
{
want: 3 * framework.MaxNodeScore / 4,
args: config.NodeLabelArgs{
PresentLabelsPreference: []string{"foo", "somelabel"},
AbsentLabelsPreference: []string{"somelabel1", "somelabel2"},
},
name: "two present labels one matches, two absent labels mismatch",
},
{
want: 0,
args: config.NodeLabelArgs{},
name: "no label preference",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
state := framework.NewCycleState()
node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: map[string]string{"foo": "", "bar": ""}}}
fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(cache.NewSnapshot(nil, []*v1.Node{node})))
p, err := New(&test.args, fh)
if err != nil {
t.Fatalf("Failed to create plugin: %+v", err)
}
nodeName := node.ObjectMeta.Name
score, status := p.(framework.ScorePlugin).Score(context.Background(), state, nil, nodeName)
if !status.IsSuccess() {
t.Errorf("unexpected error: %v", status)
}
if test.want != score {
t.Errorf("Wrong score. got %#v, want %#v", score, test.want)
}
})
}
}
func TestNodeLabelFilterWithoutNode(t *testing.T) {
var pod *v1.Pod
t.Run("node does not exist", func(t *testing.T) {
nodeInfo := framework.NewNodeInfo()
p, err := New(&config.NodeLabelArgs{}, nil)
if err != nil {
t.Fatalf("Failed to create plugin: %v", err)
}
status := p.(framework.FilterPlugin).Filter(context.TODO(), nil, pod, nodeInfo)
if status.Code() != framework.Error {
t.Errorf("Status mismatch. got: %v, want: %v", status.Code(), framework.Error)
}
})
}
func TestNodeLabelScoreWithoutNode(t *testing.T) {
t.Run("node does not exist", func(t *testing.T) {
fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(cache.NewEmptySnapshot()))
p, err := New(&config.NodeLabelArgs{}, fh)
if err != nil {
t.Fatalf("Failed to create plugin: %+v", err)
}
_, status := p.(framework.ScorePlugin).Score(context.Background(), nil, nil, "")
if status.Code() != framework.Error {
t.Errorf("Status mismatch. got: %v, want: %v", status.Code(), framework.Error)
}
})
}

View File

@@ -25,7 +25,6 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/imagelocality"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
@@ -78,7 +77,6 @@ func NewInTreeRegistry() runtime.Registry {
nodevolumelimits.AzureDiskName: runtime.FactoryAdapter(fts, nodevolumelimits.NewAzureDisk),
nodevolumelimits.CinderName: runtime.FactoryAdapter(fts, nodevolumelimits.NewCinder),
interpodaffinity.Name: runtime.FactoryAdapter(fts, interpodaffinity.New),
nodelabel.Name: nodelabel.New,
queuesort.Name: queuesort.New,
defaultbinder.Name: defaultbinder.New,
defaultpreemption.Name: runtime.FactoryAdapter(fts, defaultpreemption.New),

View File

@@ -34,7 +34,6 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeunschedulable"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
@@ -95,7 +94,6 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
"node1": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeaffinity.ErrReasonPod),
"node2": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodename.ErrReason),
"node3": framework.NewStatus(framework.UnschedulableAndUnresolvable, tainttoleration.ErrReasonNotMatch),
"node4": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodelabel.ErrReasonPresenceViolated),
"node4": framework.NewStatus(framework.UnschedulableAndUnresolvable, interpodaffinity.ErrReasonAffinityRulesNotMatch),
},
expected: sets.NewString(),
},