Merge pull request #86446 from ahg-g/ahg1-nodelabel

Move NodeLabel priority logic to its Score and Filter plugin
Kubernetes Prow Robot committed 2019-12-19 15:37:21 -08:00 (via GitHub)
commit b632eaddba
7 changed files with 51 additions and 213 deletions
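
In short, this PR deletes the NodeLabelChecker predicate and the NodeLabelPrioritizer priority function and reimplements both checks inline in the NodeLabel plugin's Filter and Score extension points. The resulting plugin shape, condensed from the diff below:

```go
// Condensed from this diff: the decoded Args are now embedded in the plugin,
// so Filter and Score read pl.PresentLabels etc. directly instead of calling
// into predicate/prioritizer closures.
type NodeLabel struct {
	handle framework.FrameworkHandle
	Args   // PresentLabels, AbsentLabels, PresentLabelsPreference, AbsentLabelsPreference
}
```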

View File

@ -939,59 +939,6 @@ func PodFitsHost(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInf
return false, []PredicateFailureReason{ErrPodNotMatchHostName}, nil
}
// NodeLabelChecker contains information to check node labels for a predicate.
type NodeLabelChecker struct {
// presentLabels should be present for the node to be considered a fit for hosting the pod
presentLabels []string
// absentLabels should be absent for the node to be considered a fit for hosting the pod
absentLabels []string
}
// NewNodeLabelPredicate creates a predicate which evaluates whether a pod can fit based on the
// node labels which match a filter that it requests.
func NewNodeLabelPredicate(presentLabels []string, absentLabels []string) FitPredicate {
labelChecker := &NodeLabelChecker{
presentLabels: presentLabels,
absentLabels: absentLabels,
}
return labelChecker.CheckNodeLabelPresence
}
// CheckNodeLabelPresence checks whether all of the specified labels exist on a node, regardless of their values.
// If "presence" is false, it returns false if any of the requested labels matches any of the node's labels,
// otherwise it returns true.
// If "presence" is true, it returns false if any of the requested labels does not match any of the node's labels,
// otherwise it returns true.
//
// Consider the cases where nodes are placed in regions/zones/racks and these are identified by labels.
// In some cases, it is required that only nodes that are part of ANY of the defined regions/zones/racks be selected.
//
// Alternately, eliminating nodes that have a certain label, regardless of value, is also useful.
// A node may have a label with "retiring" as the key and the date as the value,
// and it may be desirable to avoid scheduling new pods on this node.
func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return false, nil, fmt.Errorf("node not found")
}
nodeLabels := labels.Set(node.Labels)
check := func(labels []string, presence bool) bool {
for _, label := range labels {
exists := nodeLabels.Has(label)
if (exists && !presence) || (!exists && presence) {
return false
}
}
return true
}
if check(n.presentLabels, true) && check(n.absentLabels, false) {
return true, nil, nil
}
return false, []PredicateFailureReason{ErrNodeLabelPresenceViolated}, nil
}
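
To make the presence check concrete, a minimal standalone sketch of the same logic (the node labels below are illustrative, not from this PR):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// A node in us-east-1 that also carries a "retiring" marker label.
	nodeLabels := labels.Set{"region": "us-east-1", "retiring": "2020-01-31"}

	// Equivalent to the check closure above: every key must match the
	// required presence state; label values are never inspected.
	check := func(keys []string, presence bool) bool {
		for _, k := range keys {
			if nodeLabels.Has(k) != presence {
				return false
			}
		}
		return true
	}

	fmt.Println(check([]string{"region"}, true))    // true: the key exists, value ignored
	fmt.Println(check([]string{"retiring"}, false)) // false: "retiring" is present, so this node is rejected
}
```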
// PodFitsHostPorts is a wrapper around PodFitsHostPortsPredicate. This is needed until
// we are able to get rid of the FitPredicate function signature.
// TODO(#85822): remove this function once predicate registration logic is deleted.

View File

@ -1651,69 +1651,6 @@ func TestPodFitsSelector(t *testing.T) {
}
}
func TestNodeLabelPresence(t *testing.T) {
label := map[string]string{"foo": "bar", "bar": "foo"}
tests := []struct {
pod *v1.Pod
presentLabels []string
absentLabels []string
fits bool
name string
}{
{
presentLabels: []string{"baz"},
fits: false,
name: "label does not match, presence true",
},
{
absentLabels: []string{"baz"},
fits: true,
name: "label does not match, presence false",
},
{
presentLabels: []string{"foo", "baz"},
fits: false,
name: "one label matches, presence true",
},
{
absentLabels: []string{"foo", "baz"},
fits: false,
name: "one label matches, presence false",
},
{
presentLabels: []string{"foo", "bar"},
fits: true,
name: "all labels match, presence true",
},
{
absentLabels: []string{"foo", "bar"},
fits: false,
name: "all labels match, presence false",
},
}
expectedFailureReasons := []PredicateFailureReason{ErrNodeLabelPresenceViolated}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
node := v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: label}}
nodeInfo := schedulernodeinfo.NewNodeInfo()
nodeInfo.SetNode(&node)
labelChecker := NodeLabelChecker{test.presentLabels, test.absentLabels}
fits, reasons, err := labelChecker.CheckNodeLabelPresence(test.pod, nil, nodeInfo)
if err != nil {
t.Errorf("unexpected error: %v", err)
}
if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
t.Errorf("unexpected failure reasons: %v, want: %v", reasons, expectedFailureReasons)
}
if fits != test.fits {
t.Errorf("expected: %v got %v", test.fits, fits)
}
})
}
}
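
With the predicate gone, equivalent coverage moves to the plugin level. A hedged sketch of the "one label matches, presence true" case rewritten against the plugin's Filter (the test function name is hypothetical, and it assumes the nodelabel plugin package's NodeLabel/Args types shown later in this diff):

```go
// Hypothetical plugin-level equivalent of one removed case. Filter ignores
// the cycle state, and the handle is unused by Filter, so both can be nil here.
func TestNodeLabelFilter(t *testing.T) {
	node := v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}}}
	nodeInfo := nodeinfo.NewNodeInfo()
	nodeInfo.SetNode(&node)

	pl := &NodeLabel{Args: Args{PresentLabels: []string{"foo", "baz"}}}
	status := pl.Filter(context.Background(), nil, &v1.Pod{}, nodeInfo)
	if status.IsSuccess() {
		t.Errorf("expected Filter to fail: required label %q is absent", "baz")
	}
}
```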
func newPodWithPort(hostPorts ...int) *v1.Pod {
networkPorts := []v1.ContainerPort{}
for _, port := range hostPorts {

View File

@ -16,7 +16,6 @@ go_library(
"metadata.go",
"most_requested.go",
"node_affinity.go",
"node_label.go",
"node_prefer_avoid_pods.go",
"priorities.go",
"reduce.go",

View File

@ -1,70 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package priorities
import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
// NodeLabelPrioritizer contains information to calculate node label priority.
type NodeLabelPrioritizer struct {
presentLabelsPreference []string
absentLabelsPreference []string
}
// NewNodeLabelPriority creates a NodeLabelPrioritizer.
func NewNodeLabelPriority(presentLabelsPreference []string, absentLabelsPreference []string) (PriorityMapFunction, PriorityReduceFunction) {
labelPrioritizer := &NodeLabelPrioritizer{
presentLabelsPreference: presentLabelsPreference,
absentLabelsPreference: absentLabelsPreference,
}
return labelPrioritizer.CalculateNodeLabelPriorityMap, nil
}
// CalculateNodeLabelPriorityMap checks whether a particular label exists on a node or not, regardless of its value.
// If presence is true, prioritizes nodes that have the specified label, regardless of value.
// If presence is false, prioritizes nodes that do not have the specified label.
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
node := nodeInfo.Node()
if node == nil {
return framework.NodeScore{}, fmt.Errorf("node not found")
}
score := int64(0)
for _, label := range n.presentLabelsPreference {
if labels.Set(node.Labels).Has(label) {
score += framework.MaxNodeScore
}
}
for _, label := range n.absentLabelsPreference {
if !labels.Set(node.Labels).Has(label) {
score += framework.MaxNodeScore
}
}
// Take average score for each label to ensure the score doesn't exceed MaxNodeScore.
score /= int64(len(n.presentLabelsPreference) + len(n.absentLabelsPreference))
return framework.NodeScore{
Name: node.Name,
Score: score,
}, nil
}
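
A self-contained worked example of the averaging above (assuming framework.MaxNodeScore is 100, as it was at the time, and assuming at least one preference label is configured so the divisor is non-zero):

```go
package main

import "fmt"

func main() {
	const maxNodeScore = 100 // assumed value of framework.MaxNodeScore

	nodeLabels := map[string]string{"region": "us-east-1"} // illustrative
	present := []string{"region", "zone"}                  // presentLabelsPreference
	absent := []string{"retiring"}                         // absentLabelsPreference

	var score int64
	for _, l := range present {
		if _, ok := nodeLabels[l]; ok {
			score += maxNodeScore // "region" hits; "zone" does not
		}
	}
	for _, l := range absent {
		if _, ok := nodeLabels[l]; !ok {
			score += maxNodeScore // "retiring" is absent
		}
	}
	// (100 + 0 + 100) / 3 = 66 with integer division, capped at 100 by construction.
	score /= int64(len(present) + len(absent))
	fmt.Println(score) // 66
}
```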

View File

@ -278,10 +278,7 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy, pluginArgs
pluginArgs.NodeLabelArgs.AbsentLabels = append(pluginArgs.NodeLabelArgs.AbsentLabels, policy.Argument.LabelsPresence.Labels...)
}
predicateFactory = func(_ AlgorithmFactoryArgs) predicates.FitPredicate {
-return predicates.NewNodeLabelPredicate(
-pluginArgs.NodeLabelArgs.PresentLabels,
-pluginArgs.NodeLabelArgs.AbsentLabels,
-)
+return nil
}
}
} else if predicateFactory, ok = fitPredicateMap[policyName]; ok {
@ -399,10 +396,7 @@ func RegisterCustomPriorityFunction(policy schedulerapi.PriorityPolicy, configPr
schedulerFactoryMutex.RUnlock()
pcf = &PriorityConfigFactory{
MapReduceFunction: func(_ AlgorithmFactoryArgs) (priorities.PriorityMapFunction, priorities.PriorityReduceFunction) {
-return priorities.NewNodeLabelPriority(
-configProducerArgs.NodeLabelArgs.PresentLabelsPreference,
-configProducerArgs.NodeLabelArgs.AbsentLabelsPreference,
-)
+return nil, nil
},
Weight: weight,
}

View File

@ -7,11 +7,11 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/algorithm/priorities:go_default_library",
"//pkg/scheduler/framework/plugins/migration:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
)

View File

@ -21,9 +21,9 @@ import (
"fmt"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@ -60,8 +60,8 @@ func validateNoConflict(presentLabels []string, absentLabels []string) error {
// New initializes a new plugin and returns it.
func New(plArgs *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
-args := &Args{}
-if err := framework.DecodeInto(plArgs, args); err != nil {
+args := Args{}
+if err := framework.DecodeInto(plArgs, &args); err != nil {
return nil, err
}
if err := validateNoConflict(args.PresentLabels, args.AbsentLabels); err != nil {
@ -70,20 +70,16 @@ func New(plArgs *runtime.Unknown, handle framework.FrameworkHandle) (framework.P
if err := validateNoConflict(args.PresentLabelsPreference, args.AbsentLabelsPreference); err != nil {
return nil, err
}
-// Note that the reduce function is always nil therefore it's ignored.
-prioritize, _ := priorities.NewNodeLabelPriority(args.PresentLabelsPreference, args.AbsentLabelsPreference)
return &NodeLabel{
-handle: handle,
-predicate: predicates.NewNodeLabelPredicate(args.PresentLabels, args.AbsentLabels),
-prioritize: prioritize,
+handle: handle,
+Args: args,
}, nil
}
// NodeLabel checks whether a pod can fit based on the node labels which match a filter that it requests.
type NodeLabel struct {
-handle framework.FrameworkHandle
-predicate predicates.FitPredicate
-prioritize priorities.PriorityMapFunction
+handle framework.FrameworkHandle
+Args
}
var _ framework.FilterPlugin = &NodeLabel{}
@ -95,10 +91,31 @@ func (pl *NodeLabel) Name() string {
}
// Filter invoked at the filter extension point.
// It checks whether all of the specified labels exist on a node, regardless of their values.
+//
+// Consider the cases where nodes are placed in regions/zones/racks and these are identified by labels.
+// In some cases, it is required that only nodes that are part of ANY of the defined regions/zones/racks be selected.
+//
+// Alternately, eliminating nodes that have a certain label, regardless of value, is also useful.
+// A node may have a label with "retiring" as the key and the date as the value,
+// and it may be desirable to avoid scheduling new pods on this node.
func (pl *NodeLabel) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
-// Note that NodeLabelPredicate doesn't use predicate metadata, hence passing nil here.
-_, reasons, err := pl.predicate(pod, nil, nodeInfo)
-return migration.PredicateResultToFrameworkStatus(reasons, err)
+node := nodeInfo.Node()
+nodeLabels := labels.Set(node.Labels)
+check := func(labels []string, presence bool) bool {
+for _, label := range labels {
+exists := nodeLabels.Has(label)
+if (exists && !presence) || (!exists && presence) {
+return false
+}
+}
+return true
+}
+if check(pl.PresentLabels, true) && check(pl.AbsentLabels, false) {
+return nil
+}
+return migration.PredicateResultToFrameworkStatus([]predicates.PredicateFailureReason{predicates.ErrNodeLabelPresenceViolated}, nil)
}
// Score invoked at the score extension point.
@ -107,9 +124,23 @@ func (pl *NodeLabel) Score(ctx context.Context, state *framework.CycleState, pod
if err != nil {
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
}
-// Note that node label priority function doesn't use metadata, hence passing nil here.
-s, err := pl.prioritize(pod, nil, nodeInfo)
-return s.Score, migration.ErrorToFrameworkStatus(err)
+node := nodeInfo.Node()
+score := int64(0)
+for _, label := range pl.PresentLabelsPreference {
+if labels.Set(node.Labels).Has(label) {
+score += framework.MaxNodeScore
+}
+}
+for _, label := range pl.AbsentLabelsPreference {
+if !labels.Set(node.Labels).Has(label) {
+score += framework.MaxNodeScore
+}
+}
+// Take average score for each label to ensure the score doesn't exceed MaxNodeScore.
+score /= int64(len(pl.PresentLabelsPreference) + len(pl.AbsentLabelsPreference))
+return score, nil
}
// ScoreExtensions of the Score plugin.
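
Putting it together, a hedged sketch of constructing the plugin from raw args and invoking Filter (the JSON field names are assumptions about Args' json tags, it assumes DecodeInto accepts raw JSON here, and the handle can be nil because Filter never touches it):

```go
// Sketch: build the plugin via New and run the filter extension point once.
raw := &runtime.Unknown{Raw: []byte(`{"presentLabels":["region"],"absentLabels":["retiring"]}`)}
p, err := New(raw, nil)
if err != nil {
	panic(err)
}
pl := p.(*NodeLabel)

ni := nodeinfo.NewNodeInfo()
ni.SetNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"region": "us-east-1"}}})

status := pl.Filter(context.Background(), nil, &v1.Pod{}, ni)
fmt.Println(status.IsSuccess()) // true: "region" is present and "retiring" is absent
```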