mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-28 14:07:14 +00:00)
move Taint and toleration predicate to its Score plugin
parent 39d0710a4b
commit 21c916c3d2
@@ -22,7 +22,6 @@ go_library(
         "resource_allocation.go",
         "resource_limits.go",
         "selector_spreading.go",
-        "taint_toleration.go",
         "test_util.go",
         "types.go",
     ],
@@ -62,7 +61,6 @@ go_test(
         "resource_limits_test.go",
         "selector_spreading_test.go",
         "spreading_perf_test.go",
-        "taint_toleration_test.go",
         "types_test.go",
     ],
     embed = [":go_default_library"],
@@ -57,7 +57,6 @@ func NewMetadataFactory(
 // priorityMetadata is a type that is passed as metadata for priority functions
 type priorityMetadata struct {
     podLimits      *schedulernodeinfo.Resource
-    podTolerations []v1.Toleration
     affinity       *v1.Affinity
     podSelector    labels.Selector
     controllerRef  *metav1.OwnerReference
@@ -91,7 +90,6 @@ func (pmf *MetadataFactory) PriorityMetadata(
     }
     return &priorityMetadata{
         podLimits:      getResourceLimits(pod),
-        podTolerations: getAllTolerationPreferNoSchedule(pod.Spec.Tolerations),
         affinity:       pod.Spec.Affinity,
         podSelector:    getSelector(pod, pmf.serviceLister, pmf.controllerLister, pmf.replicaSetLister, pmf.statefulSetLister),
         controllerRef:  metav1.GetControllerOf(pod),
@@ -138,30 +138,27 @@ func TestPriorityMetadata(t *testing.T) {
         {
             pod: podWithTolerationsAndAffinity,
             expected: &priorityMetadata{
-                podLimits:      nonPodLimits,
-                podTolerations: tolerations,
-                affinity:       podAffinity,
-                podSelector:    labels.NewSelector(),
+                podLimits:   nonPodLimits,
+                affinity:    podAffinity,
+                podSelector: labels.NewSelector(),
             },
             name: "Produce a priorityMetadata with default requests",
         },
         {
             pod: podWithTolerationsAndRequests,
             expected: &priorityMetadata{
-                podLimits:      nonPodLimits,
-                podTolerations: tolerations,
-                affinity:       nil,
-                podSelector:    labels.NewSelector(),
+                podLimits:   nonPodLimits,
+                affinity:    nil,
+                podSelector: labels.NewSelector(),
             },
             name: "Produce a priorityMetadata with tolerations and requests",
         },
         {
             pod: podWithAffinityAndRequests,
             expected: &priorityMetadata{
-                podLimits:      specifiedPodLimits,
-                podTolerations: nil,
-                affinity:       podAffinity,
-                podSelector:    labels.NewSelector(),
+                podLimits:   specifiedPodLimits,
+                affinity:    podAffinity,
+                podSelector: labels.NewSelector(),
             },
             name: "Produce a priorityMetadata with affinity and requests",
         },
@@ -1,76 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package priorities
-
-import (
-    "fmt"
-
-    v1 "k8s.io/api/core/v1"
-    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-)
-
-// CountIntolerableTaintsPreferNoSchedule gives the count of intolerable taints of a pod with effect PreferNoSchedule
-func countIntolerableTaintsPreferNoSchedule(taints []v1.Taint, tolerations []v1.Toleration) (intolerableTaints int) {
-    for _, taint := range taints {
-        // check only on taints that have effect PreferNoSchedule
-        if taint.Effect != v1.TaintEffectPreferNoSchedule {
-            continue
-        }
-
-        if !v1helper.TolerationsTolerateTaint(tolerations, &taint) {
-            intolerableTaints++
-        }
-    }
-    return
-}
-
-// getAllTolerationEffectPreferNoSchedule gets the list of all Tolerations with Effect PreferNoSchedule or with no effect.
-func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationList []v1.Toleration) {
-    for _, toleration := range tolerations {
-        // Empty effect means all effects which includes PreferNoSchedule, so we need to collect it as well.
-        if len(toleration.Effect) == 0 || toleration.Effect == v1.TaintEffectPreferNoSchedule {
-            tolerationList = append(tolerationList, toleration)
-        }
-    }
-    return
-}
-
-// ComputeTaintTolerationPriorityMap prepares the priority list for all the nodes based on the number of intolerable taints on the node
-func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
-    node := nodeInfo.Node()
-    if node == nil {
-        return framework.NodeScore{}, fmt.Errorf("node not found")
-    }
-    // To hold all the tolerations with Effect PreferNoSchedule
-    var tolerationsPreferNoSchedule []v1.Toleration
-    if priorityMeta, ok := meta.(*priorityMetadata); ok {
-        tolerationsPreferNoSchedule = priorityMeta.podTolerations
-
-    } else {
-        tolerationsPreferNoSchedule = getAllTolerationPreferNoSchedule(pod.Spec.Tolerations)
-    }
-
-    return framework.NodeScore{
-        Name:  node.Name,
-        Score: int64(countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, tolerationsPreferNoSchedule)),
-    }, nil
-}
-
-// ComputeTaintTolerationPriorityReduce calculates the source of each node based on the number of intolerable taints on the node
-var ComputeTaintTolerationPriorityReduce = NormalizeReduce(framework.MaxNodeScore, true)
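For context on the reduce step that this commit keeps (as `priorities.NormalizeReduce(framework.MaxNodeScore, true)` in the plugin's NormalizeScore): the map step produces a raw count of intolerable PreferNoSchedule taints per node, and the reduce step reverses and rescales those counts so that the node with the fewest intolerable taints gets the highest score. The standalone sketch below re-implements only that reverse-and-scale arithmetic for illustration; `nodeScore` and `normalizeReversed` are hypothetical names, not the scheduler's API, and MaxNodeScore is taken to be 100 as in this release.

```go
package main

import "fmt"

// nodeScore is a toy stand-in for the framework's NodeScore type.
type nodeScore struct {
	Name  string
	Score int64
}

// normalizeReversed mimics what NormalizeReduce(maxScore, true) does for this
// priority: rescale raw scores into [0, maxScore] and reverse them, so a node
// with zero intolerable taints ends up with maxScore and the worst node with 0.
func normalizeReversed(maxScore int64, scores []nodeScore) {
	var highest int64
	for _, s := range scores {
		if s.Score > highest {
			highest = s.Score
		}
	}
	if highest == 0 {
		// Every node is equally good; reversing yields the maximum score for all.
		for i := range scores {
			scores[i].Score = maxScore
		}
		return
	}
	for i := range scores {
		scores[i].Score = maxScore - scores[i].Score*maxScore/highest
	}
}

func main() {
	// Raw scores are counts of intolerable PreferNoSchedule taints per node,
	// matching the "nodeA/nodeB/nodeC" case in the test file below.
	scores := []nodeScore{{"nodeA", 0}, {"nodeB", 1}, {"nodeC", 2}}
	normalizeReversed(100, scores)
	fmt.Println(scores) // [{nodeA 100} {nodeB 50} {nodeC 0}]
}
```

Run against the counts from the deleted test's "more intolerable taints" case, this reproduces the expected 100/50/0 spread.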
@@ -1,241 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package priorities
-
-import (
-    "reflect"
-    "testing"
-
-    v1 "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
-)
-
-func nodeWithTaints(nodeName string, taints []v1.Taint) *v1.Node {
-    return &v1.Node{
-        ObjectMeta: metav1.ObjectMeta{
-            Name: nodeName,
-        },
-        Spec: v1.NodeSpec{
-            Taints: taints,
-        },
-    }
-}
-
-func podWithTolerations(tolerations []v1.Toleration) *v1.Pod {
-    return &v1.Pod{
-        Spec: v1.PodSpec{
-            Tolerations: tolerations,
-        },
-    }
-}
-
-// This function will create a set of nodes and pods and test the priority
-// Nodes with zero,one,two,three,four and hundred taints are created
-// Pods with zero,one,two,three,four and hundred tolerations are created
-
-func TestTaintAndToleration(t *testing.T) {
-    tests := []struct {
-        pod          *v1.Pod
-        nodes        []*v1.Node
-        expectedList framework.NodeScoreList
-        name         string
-    }{
-        // basic test case
-        {
-            name: "node with taints tolerated by the pod, gets a higher score than those node with intolerable taints",
-            pod: podWithTolerations([]v1.Toleration{{
-                Key:      "foo",
-                Operator: v1.TolerationOpEqual,
-                Value:    "bar",
-                Effect:   v1.TaintEffectPreferNoSchedule,
-            }}),
-            nodes: []*v1.Node{
-                nodeWithTaints("nodeA", []v1.Taint{{
-                    Key:    "foo",
-                    Value:  "bar",
-                    Effect: v1.TaintEffectPreferNoSchedule,
-                }}),
-                nodeWithTaints("nodeB", []v1.Taint{{
-                    Key:    "foo",
-                    Value:  "blah",
-                    Effect: v1.TaintEffectPreferNoSchedule,
-                }}),
-            },
-            expectedList: []framework.NodeScore{
-                {Name: "nodeA", Score: framework.MaxNodeScore},
-                {Name: "nodeB", Score: 0},
-            },
-        },
-        // the count of taints that are tolerated by pod, does not matter.
-        {
-            name: "the nodes that all of their taints are tolerated by the pod, get the same score, no matter how many tolerable taints a node has",
-            pod: podWithTolerations([]v1.Toleration{
-                {
-                    Key:      "cpu-type",
-                    Operator: v1.TolerationOpEqual,
-                    Value:    "arm64",
-                    Effect:   v1.TaintEffectPreferNoSchedule,
-                }, {
-                    Key:      "disk-type",
-                    Operator: v1.TolerationOpEqual,
-                    Value:    "ssd",
-                    Effect:   v1.TaintEffectPreferNoSchedule,
-                },
-            }),
-            nodes: []*v1.Node{
-                nodeWithTaints("nodeA", []v1.Taint{}),
-                nodeWithTaints("nodeB", []v1.Taint{
-                    {
-                        Key:    "cpu-type",
-                        Value:  "arm64",
-                        Effect: v1.TaintEffectPreferNoSchedule,
-                    },
-                }),
-                nodeWithTaints("nodeC", []v1.Taint{
-                    {
-                        Key:    "cpu-type",
-                        Value:  "arm64",
-                        Effect: v1.TaintEffectPreferNoSchedule,
-                    }, {
-                        Key:    "disk-type",
-                        Value:  "ssd",
-                        Effect: v1.TaintEffectPreferNoSchedule,
-                    },
-                }),
-            },
-            expectedList: []framework.NodeScore{
-                {Name: "nodeA", Score: framework.MaxNodeScore},
-                {Name: "nodeB", Score: framework.MaxNodeScore},
-                {Name: "nodeC", Score: framework.MaxNodeScore},
-            },
-        },
-        // the count of taints on a node that are not tolerated by pod, matters.
-        {
-            name: "the more intolerable taints a node has, the lower score it gets.",
-            pod: podWithTolerations([]v1.Toleration{{
-                Key:      "foo",
-                Operator: v1.TolerationOpEqual,
-                Value:    "bar",
-                Effect:   v1.TaintEffectPreferNoSchedule,
-            }}),
-            nodes: []*v1.Node{
-                nodeWithTaints("nodeA", []v1.Taint{}),
-                nodeWithTaints("nodeB", []v1.Taint{
-                    {
-                        Key:    "cpu-type",
-                        Value:  "arm64",
-                        Effect: v1.TaintEffectPreferNoSchedule,
-                    },
-                }),
-                nodeWithTaints("nodeC", []v1.Taint{
-                    {
-                        Key:    "cpu-type",
-                        Value:  "arm64",
-                        Effect: v1.TaintEffectPreferNoSchedule,
-                    }, {
-                        Key:    "disk-type",
-                        Value:  "ssd",
-                        Effect: v1.TaintEffectPreferNoSchedule,
-                    },
-                }),
-            },
-            expectedList: []framework.NodeScore{
-                {Name: "nodeA", Score: framework.MaxNodeScore},
-                {Name: "nodeB", Score: 50},
-                {Name: "nodeC", Score: 0},
-            },
-        },
-        // taints-tolerations priority only takes care about the taints and tolerations that have effect PreferNoSchedule
-        {
-            name: "only taints and tolerations that have effect PreferNoSchedule are checked by taints-tolerations priority function",
-            pod: podWithTolerations([]v1.Toleration{
-                {
-                    Key:      "cpu-type",
-                    Operator: v1.TolerationOpEqual,
-                    Value:    "arm64",
-                    Effect:   v1.TaintEffectNoSchedule,
-                }, {
-                    Key:      "disk-type",
-                    Operator: v1.TolerationOpEqual,
-                    Value:    "ssd",
-                    Effect:   v1.TaintEffectNoSchedule,
-                },
-            }),
-            nodes: []*v1.Node{
-                nodeWithTaints("nodeA", []v1.Taint{}),
-                nodeWithTaints("nodeB", []v1.Taint{
-                    {
-                        Key:    "cpu-type",
-                        Value:  "arm64",
-                        Effect: v1.TaintEffectNoSchedule,
-                    },
-                }),
-                nodeWithTaints("nodeC", []v1.Taint{
-                    {
-                        Key:    "cpu-type",
-                        Value:  "arm64",
-                        Effect: v1.TaintEffectPreferNoSchedule,
-                    }, {
-                        Key:    "disk-type",
-                        Value:  "ssd",
-                        Effect: v1.TaintEffectPreferNoSchedule,
-                    },
-                }),
-            },
-            expectedList: []framework.NodeScore{
-                {Name: "nodeA", Score: framework.MaxNodeScore},
-                {Name: "nodeB", Score: framework.MaxNodeScore},
-                {Name: "nodeC", Score: 0},
-            },
-        },
-        {
-            name: "Default behaviour No taints and tolerations, lands on node with no taints",
-            //pod without tolerations
-            pod: podWithTolerations([]v1.Toleration{}),
-            nodes: []*v1.Node{
-                //Node without taints
-                nodeWithTaints("nodeA", []v1.Taint{}),
-                nodeWithTaints("nodeB", []v1.Taint{
-                    {
-                        Key:    "cpu-type",
-                        Value:  "arm64",
-                        Effect: v1.TaintEffectPreferNoSchedule,
-                    },
-                }),
-            },
-            expectedList: []framework.NodeScore{
-                {Name: "nodeA", Score: framework.MaxNodeScore},
-                {Name: "nodeB", Score: 0},
-            },
-        },
-    }
-    for _, test := range tests {
-        t.Run(test.name, func(t *testing.T) {
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
-            list, err := runMapReducePriority(ComputeTaintTolerationPriorityMap, ComputeTaintTolerationPriorityReduce, nil, test.pod, snapshot, test.nodes)
-            if err != nil {
-                t.Errorf("unexpected error: %v", err)
-            }
-
-            if !reflect.DeepEqual(test.expectedList, list) {
-                t.Errorf("expected:\n\t%+v,\ngot:\n\t%+v", test.expectedList, list)
-            }
-        })
-    }
-}
@@ -126,6 +126,7 @@ func TestCompatibility(t *testing.T) {
                 },
                 "PostFilterPlugin": {
                     {Name: "InterPodAffinity"},
+                    {Name: "TaintToleration"},
                 },
                 "ScorePlugin": {
                     {Name: "NodeResourcesBalancedAllocation", Weight: 1},
@@ -166,6 +167,7 @@ func TestCompatibility(t *testing.T) {
                 },
                 "PostFilterPlugin": {
                     {Name: "InterPodAffinity"},
+                    {Name: "TaintToleration"},
                 },
                 "ScorePlugin": {
                     {Name: "NodeResourcesBalancedAllocation", Weight: 1},
@@ -206,6 +208,7 @@ func TestCompatibility(t *testing.T) {
                 },
                 "PostFilterPlugin": {
                     {Name: "InterPodAffinity"},
+                    {Name: "TaintToleration"},
                 },
                 "ScorePlugin": {
                     {Name: "NodeResourcesBalancedAllocation", Weight: 1},
@@ -86,7 +86,7 @@ func init() {
     scheduler.RegisterPriorityMapReduceFunction(priorities.NodeAffinityPriority, priorities.CalculateNodeAffinityPriorityMap, priorities.CalculateNodeAffinityPriorityReduce, 1)

     // Prioritizes nodes that marked with taint which pod can tolerate.
-    scheduler.RegisterPriorityMapReduceFunction(priorities.TaintTolerationPriority, priorities.ComputeTaintTolerationPriorityMap, priorities.ComputeTaintTolerationPriorityReduce, 1)
+    scheduler.RegisterPriorityMapReduceFunction(priorities.TaintTolerationPriority, nil, nil, 1)

     // ImageLocalityPriority prioritizes nodes that have images requested by the pod present.
     scheduler.RegisterPriorityMapReduceFunction(priorities.ImageLocalityPriority, nil, nil, 1)
@@ -315,6 +315,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
                 },
                 "PostFilterPlugin": {
                     {Name: "InterPodAffinity"},
+                    {Name: "TaintToleration"},
                 },
                 "ScorePlugin": {
                     {Name: "NodeResourcesBalancedAllocation", Weight: 2},
@@ -388,6 +389,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
                 },
                 "PostFilterPlugin": {
                     {Name: "InterPodAffinity"},
+                    {Name: "TaintToleration"},
                 },
                 "ScorePlugin": {
                     {Name: "NodeResourcesBalancedAllocation", Weight: 2},
@@ -472,6 +474,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
                 },
                 "PostFilterPlugin": {
                     {Name: "InterPodAffinity"},
+                    {Name: "TaintToleration"},
                 },
                 "ScorePlugin": {
                     {Name: "NodeResourcesBalancedAllocation", Weight: 2},
@@ -567,6 +570,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
                 },
                 "PostFilterPlugin": {
                     {Name: "InterPodAffinity"},
+                    {Name: "TaintToleration"},
                 },
                 "ScorePlugin": {
                     {Name: "NodeResourcesBalancedAllocation", Weight: 2},
@@ -664,6 +668,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
                 },
                 "PostFilterPlugin": {
                     {Name: "InterPodAffinity"},
+                    {Name: "TaintToleration"},
                 },
                 "ScorePlugin": {
                     {Name: "NodeResourcesBalancedAllocation", Weight: 2},
@@ -764,6 +769,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
                 },
                 "PostFilterPlugin": {
                     {Name: "InterPodAffinity"},
+                    {Name: "TaintToleration"},
                 },
                 "ScorePlugin": {
                     {Name: "NodeResourcesBalancedAllocation", Weight: 2},
@@ -876,6 +882,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
                 },
                 "PostFilterPlugin": {
                     {Name: "InterPodAffinity"},
+                    {Name: "TaintToleration"},
                 },
                 "ScorePlugin": {
                     {Name: "NodeResourcesBalancedAllocation", Weight: 2},
@@ -991,6 +998,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
                 },
                 "PostFilterPlugin": {
                     {Name: "InterPodAffinity"},
+                    {Name: "TaintToleration"},
                 },
                 "ScorePlugin": {
                     {Name: "NodeResourcesBalancedAllocation", Weight: 2},
@@ -1106,6 +1114,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
                 },
                 "PostFilterPlugin": {
                     {Name: "InterPodAffinity"},
+                    {Name: "TaintToleration"},
                 },
                 "ScorePlugin": {
                     {Name: "NodeResourcesBalancedAllocation", Weight: 2},
@@ -1225,6 +1234,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
                 },
                 "PostFilterPlugin": {
                     {Name: "InterPodAffinity"},
+                    {Name: "TaintToleration"},
                 },
                 "ScorePlugin": {
                     {Name: "NodeResourcesBalancedAllocation", Weight: 2},
@@ -241,6 +241,7 @@ func NewDefaultConfigProducerRegistry() *ConfigProducerRegistry {
         })
     registry.RegisterPriority(priorities.TaintTolerationPriority,
         func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
+            plugins.PostFilter = appendToPluginSet(plugins.PostFilter, tainttoleration.Name, nil)
             plugins.Score = appendToPluginSet(plugins.Score, tainttoleration.Name, &args.Weight)
             return
         })
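The registration above follows the config-producer pattern: each legacy priority name maps to a function that emits the framework plugins (and weights) replacing it, and with this commit TaintToleration contributes a PostFilter entry in addition to its weighted Score entry. Below is a small, self-contained sketch of that idea using toy types; `pluginEntry`, `pluginSet`, `producer`, and the registry map are hypothetical illustrations, not the scheduler's actual API.

```go
package main

import "fmt"

// pluginEntry and pluginSet are toy stand-ins for the scheduler's plugin config types.
type pluginEntry struct {
	Name   string
	Weight int32
}

type pluginSet struct {
	PostFilter []pluginEntry
	Score      []pluginEntry
}

// producer maps a legacy priority (identified by name, given a weight) to the
// framework plugins that replace it.
type producer func(weight int32) pluginSet

func main() {
	registry := map[string]producer{
		// TaintToleration now produces both a PostFilter plugin (which pre-computes
		// the pod's PreferNoSchedule tolerations into cycle state) and a weighted
		// Score plugin.
		"TaintToleration": func(weight int32) pluginSet {
			return pluginSet{
				PostFilter: []pluginEntry{{Name: "TaintToleration"}},
				Score:      []pluginEntry{{Name: "TaintToleration", Weight: weight}},
			}
		},
	}

	// A policy enabling the legacy TaintToleration priority with weight 1
	// translates into the plugin entries seen in the compatibility tests above.
	fmt.Printf("%+v\n", registry["TaintToleration"](1))
}
```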
@@ -6,6 +6,7 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration",
     visibility = ["//visibility:public"],
     deps = [
+        "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/scheduler/algorithm/priorities:go_default_library",
         "//pkg/scheduler/framework/plugins/migration:go_default_library",
@@ -22,6 +22,7 @@ import (

     v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/runtime"
+    v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
@@ -35,10 +36,15 @@ type TaintToleration struct {
 }

 var _ framework.FilterPlugin = &TaintToleration{}
+var _ framework.PostFilterPlugin = &TaintToleration{}
 var _ framework.ScorePlugin = &TaintToleration{}

-// Name is the name of the plugin used in the plugin registry and configurations.
-const Name = "TaintToleration"
+const (
+    // Name is the name of the plugin used in the plugin registry and configurations.
+    Name = "TaintToleration"
+    // postFilterStateKey is the key in CycleState to InterPodAffinity pre-computed data for Scoring.
+    postFilterStateKey = "PostFilter" + Name
+)

 // Name returns name of the plugin. It is used in logs, etc.
 func (pl *TaintToleration) Name() string {
@@ -52,21 +58,91 @@ func (pl *TaintToleration) Filter(ctx context.Context, state *framework.CycleSta
     return migration.PredicateResultToFrameworkStatus(reasons, err)
 }

+// postFilterState computed at PostFilter and used at Score.
+type postFilterState struct {
+    tolerationsPreferNoSchedule []v1.Toleration
+}
+
+// Clone implements the mandatory Clone interface. We don't really copy the data since
+// there is no need for that.
+func (s *postFilterState) Clone() framework.StateData {
+    return s
+}
+
+// getAllTolerationEffectPreferNoSchedule gets the list of all Tolerations with Effect PreferNoSchedule or with no effect.
+func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationList []v1.Toleration) {
+    for _, toleration := range tolerations {
+        // Empty effect means all effects which includes PreferNoSchedule, so we need to collect it as well.
+        if len(toleration.Effect) == 0 || toleration.Effect == v1.TaintEffectPreferNoSchedule {
+            tolerationList = append(tolerationList, toleration)
+        }
+    }
+    return
+}
+
+// PostFilter builds and writes cycle state used by Score and NormalizeScore.
+func (pl *TaintToleration) PostFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node, _ framework.NodeToStatusMap) *framework.Status {
+    if len(nodes) == 0 {
+        return nil
+    }
+    tolerationsPreferNoSchedule := getAllTolerationPreferNoSchedule(pod.Spec.Tolerations)
+    state := &postFilterState{
+        tolerationsPreferNoSchedule: tolerationsPreferNoSchedule,
+    }
+    cycleState.Write(postFilterStateKey, state)
+    return nil
+}
+
+func getPostFilterState(cycleState *framework.CycleState) (*postFilterState, error) {
+    c, err := cycleState.Read(postFilterStateKey)
+    if err != nil {
+        return nil, fmt.Errorf("Error reading %q from cycleState: %v", postFilterStateKey, err)
+    }
+
+    s, ok := c.(*postFilterState)
+    if !ok {
+        return nil, fmt.Errorf("%+v convert to tainttoleration.postFilterState error", c)
+    }
+    return s, nil
+}
+
+// CountIntolerableTaintsPreferNoSchedule gives the count of intolerable taints of a pod with effect PreferNoSchedule
+func countIntolerableTaintsPreferNoSchedule(taints []v1.Taint, tolerations []v1.Toleration) (intolerableTaints int) {
+    for _, taint := range taints {
+        // check only on taints that have effect PreferNoSchedule
+        if taint.Effect != v1.TaintEffectPreferNoSchedule {
+            continue
+        }
+
+        if !v1helper.TolerationsTolerateTaint(tolerations, &taint) {
+            intolerableTaints++
+        }
+    }
+    return
+}
+
 // Score invoked at the Score extension point.
 func (pl *TaintToleration) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
     nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
-    if err != nil {
+    if err != nil || nodeInfo.Node() == nil {
         return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
     }
-    meta := migration.PriorityMetadata(state)
-    s, err := priorities.ComputeTaintTolerationPriorityMap(pod, meta, nodeInfo)
-    return s.Score, migration.ErrorToFrameworkStatus(err)
+    node := nodeInfo.Node()
+
+    s, err := getPostFilterState(state)
+    if err != nil {
+        return 0, framework.NewStatus(framework.Error, err.Error())
+    }
+
+    score := int64(countIntolerableTaintsPreferNoSchedule(node.Spec.Taints, s.tolerationsPreferNoSchedule))
+    return score, nil
 }

 // NormalizeScore invoked after scoring all nodes.
 func (pl *TaintToleration) NormalizeScore(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
-    // Note that ComputeTaintTolerationPriorityReduce doesn't use priority metadata, hence passing nil here.
-    err := priorities.ComputeTaintTolerationPriorityReduce(pod, nil, pl.handle.SnapshotSharedLister(), scores)
+    normalizeFun := priorities.NormalizeReduce(framework.MaxNodeScore, true)
+    err := normalizeFun(pod, nil, pl.handle.SnapshotSharedLister(), scores)
     return migration.ErrorToFrameworkStatus(err)
 }
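The PostFilter/Score split above uses CycleState as per-scheduling-cycle scratch space: PostFilter computes the pod's PreferNoSchedule tolerations once and writes them under postFilterStateKey, and every per-node Score call reads them back instead of recomputing from the pod spec. The sketch below mimics that hand-off with toy types so it can run standalone; the plain-map `cycleState`, the simplified `taint`/`toleration` structs, and the equality-only toleration matching are stand-ins for illustration, not the framework's API.

```go
package main

import "fmt"

// Simplified stand-ins for v1.Taint / v1.Toleration with only the fields the
// PreferNoSchedule logic looks at (real matching also honors the Operator).
type taint struct{ Key, Value, Effect string }
type toleration struct{ Key, Value, Effect string }

// postFilterState is the data PostFilter pre-computes for Score.
type postFilterState struct{ tolerationsPreferNoSchedule []toleration }

// cycleState stands in for framework.CycleState: keyed scratch data for one scheduling cycle.
type cycleState map[string]interface{}

const postFilterStateKey = "PostFilterTaintToleration"

// postFilter keeps only tolerations with effect PreferNoSchedule (or no effect) and caches them.
func postFilter(state cycleState, podTolerations []toleration) {
	var kept []toleration
	for _, t := range podTolerations {
		if t.Effect == "" || t.Effect == "PreferNoSchedule" {
			kept = append(kept, t)
		}
	}
	state[postFilterStateKey] = &postFilterState{tolerationsPreferNoSchedule: kept}
}

// score reads the cached tolerations and counts the node's intolerable PreferNoSchedule taints.
func score(state cycleState, nodeTaints []taint) int64 {
	s := state[postFilterStateKey].(*postFilterState)
	var intolerable int64
	for _, tn := range nodeTaints {
		if tn.Effect != "PreferNoSchedule" {
			continue
		}
		tolerated := false
		for _, tl := range s.tolerationsPreferNoSchedule {
			if tl.Key == tn.Key && tl.Value == tn.Value {
				tolerated = true
				break
			}
		}
		if !tolerated {
			intolerable++
		}
	}
	return intolerable
}

func main() {
	state := cycleState{}
	postFilter(state, []toleration{{Key: "foo", Value: "bar", Effect: "PreferNoSchedule"}})
	// nodeA's taint is tolerated (raw score 0); nodeB's is not (raw score 1).
	fmt.Println(score(state, []taint{{Key: "foo", Value: "bar", Effect: "PreferNoSchedule"}}))
	fmt.Println(score(state, []taint{{Key: "foo", Value: "blah", Effect: "PreferNoSchedule"}}))
}
```

Lower raw scores are better at this stage; the plugin's NormalizeScore then reverses and rescales them into [0, MaxNodeScore], as illustrated earlier.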
@@ -234,6 +234,10 @@ func TestTaintTolerationScore(t *testing.T) {
             fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))

             p, _ := New(nil, fh)
+            status := p.(framework.PostFilterPlugin).PostFilter(context.Background(), state, test.pod, test.nodes, nil)
+            if !status.IsSuccess() {
+                t.Errorf("unexpected error: %v", status)
+            }
             var gotList framework.NodeScoreList
             for _, n := range test.nodes {
                 nodeName := n.ObjectMeta.Name
@@ -244,7 +248,7 @@ func TestTaintTolerationScore(t *testing.T) {
                 gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
             }

-            status := p.(framework.ScorePlugin).ScoreExtensions().NormalizeScore(context.Background(), state, test.pod, gotList)
+            status = p.(framework.ScorePlugin).ScoreExtensions().NormalizeScore(context.Background(), state, test.pod, gotList)
             if !status.IsSuccess() {
                 t.Errorf("unexpected error: %v", status)
             }
@@ -148,6 +148,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
                 },
                 "PostFilterPlugin": {
                     {Name: "InterPodAffinity"},
+                    {Name: "TaintToleration"},
                 },
                 "ScorePlugin": {
                     {Name: "NodeResourcesBalancedAllocation", Weight: 1},
@@ -227,6 +228,7 @@ kind: Policy
                 },
                 "PostFilterPlugin": {
                     {Name: "InterPodAffinity"},
+                    {Name: "TaintToleration"},
                 },
                 "ScorePlugin": {
                     {Name: "NodeResourcesBalancedAllocation", Weight: 1},