Merge pull request #83601 from ahg-g/ahg-migration-priority
Implemented taints and tolerations priority function as a Score plugin
This commit is contained in: c1a735c642
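The change migrates the TaintToleration priority function into the scheduler framework's ScorePlugin extension point, whose Score method (see the interface.go hunk further down) now returns an int64. For orientation only, a minimal skeleton of a plugin implementing that interface is sketched below; the package and type names are illustrative and not part of this commit.

// Illustrative sketch, not part of this commit's diff.
package example

import (
	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// ExamplePlugin is a hypothetical plugin implementing framework.ScorePlugin.
type ExamplePlugin struct{}

func (e *ExamplePlugin) Name() string { return "Example" }

// Score is called once per filtered node; higher values rank the node higher.
func (e *ExamplePlugin) Score(state *framework.CycleState, p *v1.Pod, nodeName string) (int64, *framework.Status) {
	return 1, nil
}

// ScoreExtensions may return nil when no NormalizeScore pass is needed.
func (e *ExamplePlugin) ScoreExtensions() framework.ScoreExtensions {
	return nil
}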
@@ -16,6 +16,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/client-go/informers:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//vendor/github.com/google/go-cmp/cmp:go_default_library",
     ],
 )
@@ -19,6 +19,8 @@ package compatibility
 import (
 	"testing"
 
+	"github.com/google/go-cmp/cmp"
+
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -37,11 +39,11 @@ import (
 func TestCompatibility_v1_Scheduler(t *testing.T) {
 	// Add serialized versions of scheduler config that exercise available options to ensure compatibility between releases
 	schedulerFiles := map[string]struct {
 		JSON             string
 		wantPredicates   sets.String
 		wantPrioritizers sets.String
-		wantFilterPlugins sets.String
+		wantPlugins      map[string][]kubeschedulerconfig.Plugin
 		wantExtenders    []schedulerapi.ExtenderConfig
 	}{
 		// Do not change this JSON after the corresponding release has been tagged.
 		// A failure indicates backwards compatibility with the specified release was broken.
@@ -231,12 +233,12 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"BalancedResourceAllocation",
 				"SelectorSpreadPriority",
 				"NodeAffinityPriority",
-				"TaintTolerationPriority",
 				"InterPodAffinityPriority",
 			),
-			wantFilterPlugins: sets.NewString(
-				"TaintToleration",
-			),
+			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {{Name: "TaintToleration"}},
+				"ScorePlugin":  {{Name: "TaintToleration", Weight: 2}},
+			},
 		},
 
 		// Do not change this JSON after the corresponding release has been tagged.
@@ -300,13 +302,13 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"SelectorSpreadPriority",
 				"NodePreferAvoidPodsPriority",
 				"NodeAffinityPriority",
-				"TaintTolerationPriority",
 				"InterPodAffinityPriority",
 				"MostRequestedPriority",
 			),
-			wantFilterPlugins: sets.NewString(
-				"TaintToleration",
-			),
+			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {{Name: "TaintToleration"}},
+				"ScorePlugin":  {{Name: "TaintToleration", Weight: 2}},
+			},
 		},
 		// Do not change this JSON after the corresponding release has been tagged.
 		// A failure indicates backwards compatibility with the specified release was broken.
@@ -379,13 +381,13 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"SelectorSpreadPriority",
 				"NodePreferAvoidPodsPriority",
 				"NodeAffinityPriority",
-				"TaintTolerationPriority",
 				"InterPodAffinityPriority",
 				"MostRequestedPriority",
 			),
-			wantFilterPlugins: sets.NewString(
-				"TaintToleration",
-			),
+			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {{Name: "TaintToleration"}},
+				"ScorePlugin":  {{Name: "TaintToleration", Weight: 2}},
+			},
 			wantExtenders: []schedulerapi.ExtenderConfig{{
 				URLPrefix:  "/prefix",
 				FilterVerb: "filter",
@@ -471,13 +473,13 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"SelectorSpreadPriority",
 				"NodePreferAvoidPodsPriority",
 				"NodeAffinityPriority",
-				"TaintTolerationPriority",
 				"InterPodAffinityPriority",
 				"MostRequestedPriority",
 			),
-			wantFilterPlugins: sets.NewString(
-				"TaintToleration",
-			),
+			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {{Name: "TaintToleration"}},
+				"ScorePlugin":  {{Name: "TaintToleration", Weight: 2}},
+			},
 			wantExtenders: []schedulerapi.ExtenderConfig{{
 				URLPrefix:  "/prefix",
 				FilterVerb: "filter",
@@ -565,13 +567,13 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"SelectorSpreadPriority",
 				"NodePreferAvoidPodsPriority",
 				"NodeAffinityPriority",
-				"TaintTolerationPriority",
 				"InterPodAffinityPriority",
 				"MostRequestedPriority",
 			),
-			wantFilterPlugins: sets.NewString(
-				"TaintToleration",
-			),
+			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {{Name: "TaintToleration"}},
+				"ScorePlugin":  {{Name: "TaintToleration", Weight: 2}},
+			},
 			wantExtenders: []schedulerapi.ExtenderConfig{{
 				URLPrefix:  "/prefix",
 				FilterVerb: "filter",
@@ -664,13 +666,13 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"SelectorSpreadPriority",
 				"NodePreferAvoidPodsPriority",
 				"NodeAffinityPriority",
-				"TaintTolerationPriority",
 				"InterPodAffinityPriority",
 				"MostRequestedPriority",
 			),
-			wantFilterPlugins: sets.NewString(
-				"TaintToleration",
-			),
+			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {{Name: "TaintToleration"}},
+				"ScorePlugin":  {{Name: "TaintToleration", Weight: 2}},
+			},
 			wantExtenders: []schedulerapi.ExtenderConfig{{
 				URLPrefix:  "/prefix",
 				FilterVerb: "filter",
@@ -775,14 +777,14 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"SelectorSpreadPriority",
 				"NodePreferAvoidPodsPriority",
 				"NodeAffinityPriority",
-				"TaintTolerationPriority",
 				"InterPodAffinityPriority",
 				"MostRequestedPriority",
 				"RequestedToCapacityRatioPriority",
 			),
-			wantFilterPlugins: sets.NewString(
-				"TaintToleration",
-			),
+			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {{Name: "TaintToleration"}},
+				"ScorePlugin":  {{Name: "TaintToleration", Weight: 2}},
+			},
 			wantExtenders: []schedulerapi.ExtenderConfig{{
 				URLPrefix:  "/prefix",
 				FilterVerb: "filter",
@@ -889,14 +891,14 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"SelectorSpreadPriority",
 				"NodePreferAvoidPodsPriority",
 				"NodeAffinityPriority",
-				"TaintTolerationPriority",
 				"InterPodAffinityPriority",
 				"MostRequestedPriority",
 				"RequestedToCapacityRatioPriority",
 			),
-			wantFilterPlugins: sets.NewString(
-				"TaintToleration",
-			),
+			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {{Name: "TaintToleration"}},
+				"ScorePlugin":  {{Name: "TaintToleration", Weight: 2}},
+			},
 			wantExtenders: []schedulerapi.ExtenderConfig{{
 				URLPrefix:  "/prefix",
 				FilterVerb: "filter",
@@ -1003,14 +1005,14 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"SelectorSpreadPriority",
 				"NodePreferAvoidPodsPriority",
 				"NodeAffinityPriority",
-				"TaintTolerationPriority",
 				"InterPodAffinityPriority",
 				"MostRequestedPriority",
 				"RequestedToCapacityRatioPriority",
 			),
-			wantFilterPlugins: sets.NewString(
-				"TaintToleration",
-			),
+			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {{Name: "TaintToleration"}},
+				"ScorePlugin":  {{Name: "TaintToleration", Weight: 2}},
+			},
 			wantExtenders: []schedulerapi.ExtenderConfig{{
 				URLPrefix:  "/prefix",
 				FilterVerb: "filter",
@@ -1121,14 +1123,14 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"SelectorSpreadPriority",
 				"NodePreferAvoidPodsPriority",
 				"NodeAffinityPriority",
-				"TaintTolerationPriority",
 				"InterPodAffinityPriority",
 				"MostRequestedPriority",
 				"RequestedToCapacityRatioPriority",
 			),
-			wantFilterPlugins: sets.NewString(
-				"TaintToleration",
-			),
+			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {{Name: "TaintToleration"}},
+				"ScorePlugin":  {{Name: "TaintToleration", Weight: 2}},
+			},
 			wantExtenders: []schedulerapi.ExtenderConfig{{
 				URLPrefix:  "/prefix",
 				FilterVerb: "filter",
@@ -1152,6 +1154,9 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 	filterToPredicateMap := map[string]string{
 		"TaintToleration": "PodToleratesNodeTaints",
 	}
+	scoreToPriorityMap := map[string]string{
+		"TaintToleration": "TaintTolerationPriority",
+	}
 
 	for v, tc := range schedulerFiles {
 		t.Run(v, func(t *testing.T) {
@@ -1208,15 +1213,17 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				t.Errorf("Got prioritizers %v, want %v", gotPrioritizers, tc.wantPrioritizers)
 			}
 
-			gotFilterPlugins := sets.NewString()
-			plugins := sched.Framework.ListPlugins()
-			for _, p := range plugins["FilterPlugin"] {
-				gotFilterPlugins.Insert(p)
-				seenPredicates.Insert(filterToPredicateMap[p])
-
+			gotPlugins := sched.Framework.ListPlugins()
+			for _, p := range gotPlugins["FilterPlugin"] {
+				seenPredicates.Insert(filterToPredicateMap[p.Name])
 			}
-			if !gotFilterPlugins.Equal(tc.wantFilterPlugins) {
-				t.Errorf("Got filter plugins %v, want %v", gotFilterPlugins, tc.wantFilterPlugins)
+			for _, p := range gotPlugins["FilterPlugin"] {
+				seenPriorities.Insert(scoreToPriorityMap[p.Name])
+			}
+
+			if diff := cmp.Diff(tc.wantPlugins, gotPlugins); diff != "" {
+				t.Errorf("unexpected plugins diff (-want, +got): %s", diff)
 			}
 
 			gotExtenders := sched.Algorithm.Extenders()
@@ -636,7 +636,7 @@ func (t *TestPlugin) Name() string {
 	return t.name
 }
 
-func (t *TestPlugin) Score(state *framework.CycleState, p *v1.Pod, nodeName string) (int, *framework.Status) {
+func (t *TestPlugin) Score(state *framework.CycleState, p *v1.Pod, nodeName string) (int64, *framework.Status) {
 	return 1, nil
 }
 
@@ -8,6 +8,7 @@ go_library(
     deps = [
         "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
+        "//pkg/scheduler/algorithm/priorities:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",
         "//pkg/scheduler/framework/plugins/tainttoleration:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
@@ -23,6 +23,7 @@ import (
 	storagelistersv1 "k8s.io/client-go/listers/storage/v1"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
+	"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
@@ -83,6 +84,12 @@ func NewDefaultConfigProducerRegistry() *ConfigProducerRegistry {
 			return
 		})
 
+	registry.RegisterPriority(priorities.TaintTolerationPriority,
+		func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
+			plugins.Score = appendToPluginSet(plugins.Score, tainttoleration.Name, &args.Weight)
+			return
+		})
+
 	return registry
 }
 
@@ -8,6 +8,7 @@ go_library(
     deps = [
         "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//vendor/k8s.io/klog:go_default_library",
     ],
 )
 
@@ -17,6 +17,7 @@ limitations under the License.
 package migration
 
 import (
+	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 )
@@ -88,3 +89,18 @@ func (p *PrioritiesStateData) Clone() framework.StateData {
 		Reference: p.Reference,
 	}
 }
+
+// PriorityMetadata returns priority metadata stored in CycleState.
+func PriorityMetadata(state *framework.CycleState) interface{} {
+	if state == nil {
+		return nil
+	}
+
+	var meta interface{}
+	if s, err := state.Read(PrioritiesStateKey); err == nil {
+		meta = s.(*PrioritiesStateData).Reference
+	} else {
+		klog.Errorf("reading key %q from CycleState, continuing without metadata: %v", PrioritiesStateKey, err)
+	}
+	return meta
+}
@@ -1,4 +1,4 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
 
 go_library(
     name = "go_default_library",
@@ -7,6 +7,7 @@ go_library(
     visibility = ["//visibility:public"],
    deps = [
         "//pkg/scheduler/algorithm/predicates:go_default_library",
+        "//pkg/scheduler/algorithm/priorities:go_default_library",
         "//pkg/scheduler/framework/plugins/migration:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
@@ -28,3 +29,16 @@ filegroup(
     tags = ["automanaged"],
     visibility = ["//visibility:public"],
 )
+
+go_test(
+    name = "go_default_test",
+    srcs = ["taint_toleration_test.go"],
+    embed = [":go_default_library"],
+    deps = [
+        "//pkg/scheduler/algorithm/predicates:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+    ],
+)
|
@ -17,18 +17,24 @@ limitations under the License.
|
|||||||
package tainttoleration
|
package tainttoleration
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"fmt"
|
||||||
|
|
||||||
"k8s.io/api/core/v1"
|
"k8s.io/api/core/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
|
||||||
|
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
|
||||||
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
|
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
|
||||||
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||||
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||||
)
|
)
|
||||||
|
|
||||||
// TaintToleration is a plugin that checks if a pod tolerates a node's taints.
|
// TaintToleration is a plugin that checks if a pod tolerates a node's taints.
|
||||||
type TaintToleration struct{}
|
type TaintToleration struct {
|
||||||
|
handle framework.FrameworkHandle
|
||||||
|
}
|
||||||
|
|
||||||
var _ = framework.FilterPlugin(&TaintToleration{})
|
var _ = framework.FilterPlugin(&TaintToleration{})
|
||||||
|
var _ = framework.ScorePlugin(&TaintToleration{})
|
||||||
|
|
||||||
// Name is the name of the plugin used in the plugin registry and configurations.
|
// Name is the name of the plugin used in the plugin registry and configurations.
|
||||||
const Name = "TaintToleration"
|
const Name = "TaintToleration"
|
||||||
@@ -39,12 +45,36 @@ func (pl *TaintToleration) Name() string {
 }
 
 // Filter invoked at the filter extension point.
-func (pl *TaintToleration) Filter(_ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *TaintToleration) Filter(state *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+	// Note that PodToleratesNodeTaints doesn't use predicate metadata, hence passing nil here.
 	_, reasons, err := predicates.PodToleratesNodeTaints(pod, nil, nodeInfo)
 	return migration.PredicateResultToFrameworkStatus(reasons, err)
 }
 
-// New initializes a new plugin and returns it.
-func New(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
-	return &TaintToleration{}, nil
+// Score invoked at the Score extension point.
+func (pl *TaintToleration) Score(state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
+	nodeInfo, exist := pl.handle.NodeInfoSnapshot().NodeInfoMap[nodeName]
+	if !exist {
+		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("node %q does not exist in NodeInfoSnapshot", nodeName))
+	}
+	meta := migration.PriorityMetadata(state)
+	s, err := priorities.ComputeTaintTolerationPriorityMap(pod, meta, nodeInfo)
+	return s.Score, migration.ErrorToFrameworkStatus(err)
+}
+
+// NormalizeScore invoked after scoring all nodes.
+func (pl *TaintToleration) NormalizeScore(_ *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
+	// Note that ComputeTaintTolerationPriorityReduce doesn't use priority metadata, hence passing nil here.
+	err := priorities.ComputeTaintTolerationPriorityReduce(pod, nil, pl.handle.NodeInfoSnapshot().NodeInfoMap, scores)
+	return migration.ErrorToFrameworkStatus(err)
+}
+
+// ScoreExtensions of the Score plugin.
+func (pl *TaintToleration) ScoreExtensions() framework.ScoreExtensions {
+	return pl
+}
+
+// New initializes a new plugin and returns it.
+func New(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) {
+	return &TaintToleration{handle: h}, nil
 }
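For orientation, the framework drives the new Score/NormalizeScore pair exactly the way the unit test added below does: Score once per filtered node, then one NormalizeScore pass over the collected list to map the raw values onto [0, framework.MaxNodeScore]. A minimal driver sketch, assuming a FrameworkHandle fh, a pod, and a nodeNames slice obtained elsewhere (all three are assumptions, not part of this commit):

// Illustrative driver only; fh, pod and nodeNames are assumed to exist.
p, _ := tainttoleration.New(nil, fh)
pl := p.(framework.ScorePlugin)

state := framework.NewCycleState()
var scores framework.NodeScoreList
for _, name := range nodeNames {
	s, status := pl.Score(state, pod, name)
	if !status.IsSuccess() {
		// handle the error status
	}
	scores = append(scores, framework.NodeScore{Name: name, Score: s})
}
// Map the per-node counts onto the [0, framework.MaxNodeScore] range.
pl.ScoreExtensions().NormalizeScore(state, pod, scores)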
@@ -0,0 +1,337 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package tainttoleration
+
+import (
+	"reflect"
+	"testing"
+
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+)
+
+func nodeWithTaints(nodeName string, taints []v1.Taint) *v1.Node {
+	return &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: nodeName,
+		},
+		Spec: v1.NodeSpec{
+			Taints: taints,
+		},
+	}
+}
+
+func podWithTolerations(podName string, tolerations []v1.Toleration) *v1.Pod {
+	return &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: podName,
+		},
+		Spec: v1.PodSpec{
+			Tolerations: tolerations,
+		},
+	}
+}
+
+func TestTaintTolerationScore(t *testing.T) {
+	tests := []struct {
+		name         string
+		pod          *v1.Pod
+		nodes        []*v1.Node
+		expectedList framework.NodeScoreList
+	}{
+		// basic test case
+		{
+			name: "node with taints tolerated by the pod, gets a higher score than those node with intolerable taints",
+			pod: podWithTolerations("pod1", []v1.Toleration{{
+				Key:      "foo",
+				Operator: v1.TolerationOpEqual,
+				Value:    "bar",
+				Effect:   v1.TaintEffectPreferNoSchedule,
+			}}),
+			nodes: []*v1.Node{
+				nodeWithTaints("nodeA", []v1.Taint{{
+					Key:    "foo",
+					Value:  "bar",
+					Effect: v1.TaintEffectPreferNoSchedule,
+				}}),
+				nodeWithTaints("nodeB", []v1.Taint{{
+					Key:    "foo",
+					Value:  "blah",
+					Effect: v1.TaintEffectPreferNoSchedule,
+				}}),
+			},
+			expectedList: []framework.NodeScore{
+				{Name: "nodeA", Score: framework.MaxNodeScore},
+				{Name: "nodeB", Score: 0},
+			},
+		},
+		// the count of taints that are tolerated by pod, does not matter.
+		{
+			name: "the nodes that all of their taints are tolerated by the pod, get the same score, no matter how many tolerable taints a node has",
+			pod: podWithTolerations("pod1", []v1.Toleration{
+				{
+					Key:      "cpu-type",
+					Operator: v1.TolerationOpEqual,
+					Value:    "arm64",
+					Effect:   v1.TaintEffectPreferNoSchedule,
+				}, {
+					Key:      "disk-type",
+					Operator: v1.TolerationOpEqual,
+					Value:    "ssd",
+					Effect:   v1.TaintEffectPreferNoSchedule,
+				},
+			}),
+			nodes: []*v1.Node{
+				nodeWithTaints("nodeA", []v1.Taint{}),
+				nodeWithTaints("nodeB", []v1.Taint{
+					{
+						Key:    "cpu-type",
+						Value:  "arm64",
+						Effect: v1.TaintEffectPreferNoSchedule,
+					},
+				}),
+				nodeWithTaints("nodeC", []v1.Taint{
+					{
+						Key:    "cpu-type",
+						Value:  "arm64",
+						Effect: v1.TaintEffectPreferNoSchedule,
+					}, {
+						Key:    "disk-type",
+						Value:  "ssd",
+						Effect: v1.TaintEffectPreferNoSchedule,
+					},
+				}),
+			},
+			expectedList: []framework.NodeScore{
+				{Name: "nodeA", Score: framework.MaxNodeScore},
+				{Name: "nodeB", Score: framework.MaxNodeScore},
+				{Name: "nodeC", Score: framework.MaxNodeScore},
+			},
+		},
+		// the count of taints on a node that are not tolerated by pod, matters.
+		{
+			name: "the more intolerable taints a node has, the lower score it gets.",
+			pod: podWithTolerations("pod1", []v1.Toleration{{
+				Key:      "foo",
+				Operator: v1.TolerationOpEqual,
+				Value:    "bar",
+				Effect:   v1.TaintEffectPreferNoSchedule,
+			}}),
+			nodes: []*v1.Node{
+				nodeWithTaints("nodeA", []v1.Taint{}),
+				nodeWithTaints("nodeB", []v1.Taint{
+					{
+						Key:    "cpu-type",
+						Value:  "arm64",
+						Effect: v1.TaintEffectPreferNoSchedule,
+					},
+				}),
+				nodeWithTaints("nodeC", []v1.Taint{
+					{
+						Key:    "cpu-type",
+						Value:  "arm64",
+						Effect: v1.TaintEffectPreferNoSchedule,
+					}, {
+						Key:    "disk-type",
+						Value:  "ssd",
+						Effect: v1.TaintEffectPreferNoSchedule,
+					},
+				}),
+			},
+			expectedList: []framework.NodeScore{
+				{Name: "nodeA", Score: framework.MaxNodeScore},
+				{Name: "nodeB", Score: 5},
+				{Name: "nodeC", Score: 0},
+			},
+		},
+		// taints-tolerations priority only takes care about the taints and tolerations that have effect PreferNoSchedule
+		{
+			name: "only taints and tolerations that have effect PreferNoSchedule are checked by taints-tolerations priority function",
+			pod: podWithTolerations("pod1", []v1.Toleration{
+				{
+					Key:      "cpu-type",
+					Operator: v1.TolerationOpEqual,
+					Value:    "arm64",
+					Effect:   v1.TaintEffectNoSchedule,
+				}, {
+					Key:      "disk-type",
+					Operator: v1.TolerationOpEqual,
+					Value:    "ssd",
+					Effect:   v1.TaintEffectNoSchedule,
+				},
+			}),
+			nodes: []*v1.Node{
+				nodeWithTaints("nodeA", []v1.Taint{}),
+				nodeWithTaints("nodeB", []v1.Taint{
+					{
+						Key:    "cpu-type",
+						Value:  "arm64",
+						Effect: v1.TaintEffectNoSchedule,
+					},
+				}),
+				nodeWithTaints("nodeC", []v1.Taint{
+					{
+						Key:    "cpu-type",
+						Value:  "arm64",
+						Effect: v1.TaintEffectPreferNoSchedule,
+					}, {
+						Key:    "disk-type",
+						Value:  "ssd",
+						Effect: v1.TaintEffectPreferNoSchedule,
+					},
+				}),
+			},
+			expectedList: []framework.NodeScore{
+				{Name: "nodeA", Score: framework.MaxNodeScore},
+				{Name: "nodeB", Score: framework.MaxNodeScore},
+				{Name: "nodeC", Score: 0},
+			},
+		},
+		{
+			name: "Default behaviour No taints and tolerations, lands on node with no taints",
+			//pod without tolerations
+			pod: podWithTolerations("pod1", []v1.Toleration{}),
+			nodes: []*v1.Node{
+				//Node without taints
+				nodeWithTaints("nodeA", []v1.Taint{}),
+				nodeWithTaints("nodeB", []v1.Taint{
+					{
+						Key:    "cpu-type",
+						Value:  "arm64",
+						Effect: v1.TaintEffectPreferNoSchedule,
+					},
+				}),
+			},
+			expectedList: []framework.NodeScore{
+				{Name: "nodeA", Score: framework.MaxNodeScore},
+				{Name: "nodeB", Score: 0},
+			},
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			state := framework.NewCycleState()
+
+			fh, _ := framework.NewFramework(nil, nil, nil)
+			snapshot := fh.NodeInfoSnapshot()
+			snapshot.NodeInfoMap = schedulernodeinfo.CreateNodeNameToInfoMap(nil, test.nodes)
+
+			p, _ := New(nil, fh)
+			var gotList framework.NodeScoreList
+			for _, n := range test.nodes {
+				nodeName := n.ObjectMeta.Name
+				score, status := p.(framework.ScorePlugin).Score(state, test.pod, nodeName)
+				if !status.IsSuccess() {
+					t.Errorf("unexpected error: %v", status)
+				}
+				gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
+			}
+
+			status := p.(framework.ScorePlugin).ScoreExtensions().NormalizeScore(state, test.pod, gotList)
+			if !status.IsSuccess() {
+				t.Errorf("unexpected error: %v", status)
+			}
+
+			if !reflect.DeepEqual(test.expectedList, gotList) {
+				t.Errorf("expected:\n\t%+v,\ngot:\n\t%+v", test.expectedList, gotList)
+			}
+		})
+	}
+}
+
+func TestTaintTolerationFilter(t *testing.T) {
+	unschedulable := framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrTaintsTolerationsNotMatch.GetReason())
+	tests := []struct {
+		name       string
+		pod        *v1.Pod
+		node       *v1.Node
+		wantStatus *framework.Status
+	}{
+		{
+			name:       "A pod having no tolerations can't be scheduled onto a node with nonempty taints",
+			pod:        podWithTolerations("pod1", []v1.Toleration{}),
+			node:       nodeWithTaints("nodeA", []v1.Taint{{Key: "dedicated", Value: "user1", Effect: "NoSchedule"}}),
+			wantStatus: unschedulable,
+		},
+		{
+			name: "A pod which can be scheduled on a dedicated node assigned to user1 with effect NoSchedule",
+			pod:  podWithTolerations("pod1", []v1.Toleration{{Key: "dedicated", Value: "user1", Effect: "NoSchedule"}}),
+			node: nodeWithTaints("nodeA", []v1.Taint{{Key: "dedicated", Value: "user1", Effect: "NoSchedule"}}),
+		},
+		{
+			name:       "A pod which can't be scheduled on a dedicated node assigned to user2 with effect NoSchedule",
+			pod:        podWithTolerations("pod1", []v1.Toleration{{Key: "dedicated", Operator: "Equal", Value: "user2", Effect: "NoSchedule"}}),
+			node:       nodeWithTaints("nodeA", []v1.Taint{{Key: "dedicated", Value: "user1", Effect: "NoSchedule"}}),
+			wantStatus: unschedulable,
+		},
+		{
+			name: "A pod can be scheduled onto the node, with a toleration uses operator Exists that tolerates the taints on the node",
+			pod:  podWithTolerations("pod1", []v1.Toleration{{Key: "foo", Operator: "Exists", Effect: "NoSchedule"}}),
+			node: nodeWithTaints("nodeA", []v1.Taint{{Key: "foo", Value: "bar", Effect: "NoSchedule"}}),
+		},
+		{
+			name: "A pod has multiple tolerations, node has multiple taints, all the taints are tolerated, pod can be scheduled onto the node",
+			pod: podWithTolerations("pod1", []v1.Toleration{
+				{Key: "dedicated", Operator: "Equal", Value: "user2", Effect: "NoSchedule"},
+				{Key: "foo", Operator: "Exists", Effect: "NoSchedule"},
+			}),
+			node: nodeWithTaints("nodeA", []v1.Taint{
+				{Key: "dedicated", Value: "user2", Effect: "NoSchedule"},
+				{Key: "foo", Value: "bar", Effect: "NoSchedule"},
+			}),
+		},
+		{
+			name: "A pod has a toleration that keys and values match the taint on the node, but (non-empty) effect doesn't match, " +
+				"can't be scheduled onto the node",
+			pod:        podWithTolerations("pod1", []v1.Toleration{{Key: "foo", Operator: "Equal", Value: "bar", Effect: "PreferNoSchedule"}}),
+			node:       nodeWithTaints("nodeA", []v1.Taint{{Key: "foo", Value: "bar", Effect: "NoSchedule"}}),
+			wantStatus: unschedulable,
+		},
+		{
+			name: "The pod has a toleration that keys and values match the taint on the node, the effect of toleration is empty, " +
+				"and the effect of taint is NoSchedule. Pod can be scheduled onto the node",
+			pod:  podWithTolerations("pod1", []v1.Toleration{{Key: "foo", Operator: "Equal", Value: "bar"}}),
+			node: nodeWithTaints("nodeA", []v1.Taint{{Key: "foo", Value: "bar", Effect: "NoSchedule"}}),
+		},
+		{
+			name: "The pod has a toleration that key and value don't match the taint on the node, " +
+				"but the effect of taint on node is PreferNochedule. Pod can be scheduled onto the node",
+			pod:  podWithTolerations("pod1", []v1.Toleration{{Key: "dedicated", Operator: "Equal", Value: "user2", Effect: "NoSchedule"}}),
+			node: nodeWithTaints("nodeA", []v1.Taint{{Key: "dedicated", Value: "user1", Effect: "PreferNoSchedule"}}),
+		},
+		{
+			name: "The pod has no toleration, " +
+				"but the effect of taint on node is PreferNochedule. Pod can be scheduled onto the node",
+			pod:  podWithTolerations("pod1", []v1.Toleration{}),
+			node: nodeWithTaints("nodeA", []v1.Taint{{Key: "dedicated", Value: "user1", Effect: "PreferNoSchedule"}}),
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			nodeInfo := schedulernodeinfo.NewNodeInfo()
+			nodeInfo.SetNode(test.node)
+			p, _ := New(nil, nil)
+			gotStatus := p.(framework.FilterPlugin).Filter(nil, test.pod, nodeInfo)
+			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
+				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
+			}
+		})
+	}
+}
@@ -553,18 +553,24 @@ func (f *framework) GetWaitingPod(uid types.UID) WaitingPod {
 
 // ListPlugins returns a map of extension point name to plugin names configured at each extension
 // point. Returns nil if no plugins where configred.
-func (f *framework) ListPlugins() map[string][]string {
-	m := make(map[string][]string)
+func (f *framework) ListPlugins() map[string][]config.Plugin {
+	m := make(map[string][]config.Plugin)
 
 	for _, e := range f.getExtensionPoints(&config.Plugins{}) {
 		plugins := reflect.ValueOf(e.slicePtr).Elem()
-		var names []string
+		extName := plugins.Type().Elem().Name()
+		var cfgs []config.Plugin
 		for i := 0; i < plugins.Len(); i++ {
 			name := plugins.Index(i).Interface().(Plugin).Name()
-			names = append(names, name)
+			p := config.Plugin{Name: name}
+			if extName == "ScorePlugin" {
+				// Weights apply only to score plugins.
+				p.Weight = int32(f.pluginNameToWeightMap[name])
+			}
+			cfgs = append(cfgs, p)
 		}
-		if len(names) > 0 {
-			extName := plugins.Type().Elem().Name()
-			m[extName] = names
+		if len(cfgs) > 0 {
+			m[extName] = cfgs
 		}
 	}
 	if len(m) > 0 {
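Because ListPlugins now returns full config.Plugin entries (name plus, for score plugins, weight) rather than bare names, callers compare the whole map; the compatibility test above does this with cmp.Diff. A minimal assertion sketch, where the framework instance fw and the expected map are assumptions for illustration, not part of this commit:

// Illustrative assertion against the new ListPlugins shape.
want := map[string][]config.Plugin{
	"FilterPlugin": {{Name: "TaintToleration"}},
	"ScorePlugin":  {{Name: "TaintToleration", Weight: 1}},
}
if diff := cmp.Diff(want, fw.ListPlugins()); diff != "" {
	t.Errorf("unexpected plugins (-want, +got): %s", diff)
}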
@@ -579,7 +585,7 @@ func (f *framework) ClientSet() clientset.Interface {
 }
 
 func (f *framework) pluginsNeeded(plugins *config.Plugins) map[string]config.Plugin {
-	pgMap := make(map[string]config.Plugin, 0)
+	pgMap := make(map[string]config.Plugin)
 
 	if plugins == nil {
 		return pgMap
@@ -85,7 +85,7 @@ func (pl *TestScoreWithNormalizePlugin) NormalizeScore(state *CycleState, pod *v
 	return injectNormalizeRes(pl.inj, scores)
 }
 
-func (pl *TestScoreWithNormalizePlugin) Score(state *CycleState, p *v1.Pod, nodeName string) (int, *Status) {
+func (pl *TestScoreWithNormalizePlugin) Score(state *CycleState, p *v1.Pod, nodeName string) (int64, *Status) {
 	return setScoreRes(pl.inj)
 }
 
@@ -103,7 +103,7 @@ func (pl *TestScorePlugin) Name() string {
 	return pl.name
 }
 
-func (pl *TestScorePlugin) Score(state *CycleState, p *v1.Pod, nodeName string) (int, *Status) {
+func (pl *TestScorePlugin) Score(state *CycleState, p *v1.Pod, nodeName string) (int64, *Status) {
 	return setScoreRes(pl.inj)
 }
 
@@ -523,13 +523,13 @@ func buildConfigWithWeights(weights map[string]int32, ps ...string) *config.Plug
 }
 
 type injectedResult struct {
-	ScoreRes     int   `json:"scoreRes,omitempty"`
+	ScoreRes     int64 `json:"scoreRes,omitempty"`
 	NormalizeRes int64 `json:"normalizeRes,omitempty"`
 	ScoreErr     bool  `json:"scoreErr,omitempty"`
 	NormalizeErr bool  `json:"normalizeErr,omitempty"`
 }
 
-func setScoreRes(inj injectedResult) (int, *Status) {
+func setScoreRes(inj injectedResult) (int64, *Status) {
 	if inj.ScoreErr {
 		return 0, NewStatus(Error, "injecting failure.")
 	}
@@ -26,6 +26,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -271,7 +272,7 @@ type ScorePlugin interface {
 	// Score is called on each filtered node. It must return success and an integer
 	// indicating the rank of the node. All scoring plugins must return success or
 	// the pod will be rejected.
-	Score(state *CycleState, p *v1.Pod, nodeName string) (int, *Status)
+	Score(state *CycleState, p *v1.Pod, nodeName string) (int64, *Status)
 
 	// ScoreExtensions returns a ScoreExtensions interface if it implements one, or nil if does not.
 	ScoreExtensions() ScoreExtensions
@@ -426,9 +427,8 @@ type Framework interface {
 	// code=4("skip") status.
 	RunBindPlugins(state *CycleState, pod *v1.Pod, nodeName string) *Status
 
-	// ListPlugins returns a map of extension point name to plugin names
-	// configured at each extension point.
-	ListPlugins() map[string][]string
+	// ListPlugins returns a map of extension point name to list of configured Plugins.
+	ListPlugins() map[string][]config.Plugin
 }
 
 // FrameworkHandle provides data and some tools that plugins can use. It is
|
@ -34,6 +34,7 @@ go_test(
|
|||||||
embed = [":go_default_library"],
|
embed = [":go_default_library"],
|
||||||
deps = [
|
deps = [
|
||||||
"//pkg/api/v1/pod:go_default_library",
|
"//pkg/api/v1/pod:go_default_library",
|
||||||
|
"//pkg/scheduler/apis/config:go_default_library",
|
||||||
"//pkg/scheduler/framework/v1alpha1:go_default_library",
|
"//pkg/scheduler/framework/v1alpha1:go_default_library",
|
||||||
"//pkg/scheduler/metrics:go_default_library",
|
"//pkg/scheduler/metrics:go_default_library",
|
||||||
"//pkg/scheduler/nodeinfo:go_default_library",
|
"//pkg/scheduler/nodeinfo:go_default_library",
|
||||||
|
@@ -31,6 +31,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/component-base/metrics/testutil"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	"k8s.io/kubernetes/pkg/scheduler/metrics"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
@@ -165,7 +166,7 @@ func (*fakeFramework) QueueSortFunc() framework.LessFunc {
 	}
 }
 
-func (f *fakeFramework) ListPlugins() map[string][]string {
+func (f *fakeFramework) ListPlugins() map[string][]config.Plugin {
 	return nil
 }
 
@@ -148,17 +148,17 @@ func (sp *ScorePlugin) reset() {
 }
 
 // Score returns the score of scheduling a pod on a specific node.
-func (sp *ScorePlugin) Score(state *framework.CycleState, p *v1.Pod, nodeName string) (int, *framework.Status) {
+func (sp *ScorePlugin) Score(state *framework.CycleState, p *v1.Pod, nodeName string) (int64, *framework.Status) {
 	sp.numScoreCalled++
 	if sp.failScore {
 		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("injecting failure for pod %v", p.Name))
 	}
 
-	score := 1
+	score := int64(1)
 	if sp.numScoreCalled == 1 {
 		// The first node is scored the highest, the rest is scored lower.
 		sp.highScoreNode = nodeName
-		score = int(framework.MaxNodeScore)
+		score = framework.MaxNodeScore
 	}
 	return score, nil
 }
@@ -179,9 +179,9 @@ func (sp *ScoreWithNormalizePlugin) reset() {
 }
 
 // Score returns the score of scheduling a pod on a specific node.
-func (sp *ScoreWithNormalizePlugin) Score(state *framework.CycleState, p *v1.Pod, nodeName string) (int, *framework.Status) {
+func (sp *ScoreWithNormalizePlugin) Score(state *framework.CycleState, p *v1.Pod, nodeName string) (int64, *framework.Status) {
 	sp.numScoreCalled++
-	score := 10
+	score := int64(10)
 	return score, nil
 }
 
@@ -95,7 +95,7 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
 		policy               string
 		expectedPredicates   sets.String
 		expectedPrioritizers sets.String
-		expectedPlugins      map[string][]string
+		expectedPlugins      map[string][]kubeschedulerconfig.Plugin
 	}{
 		{
 			policy: `{
@@ -147,11 +147,11 @@ func TestSchedulerCreationFromConfigMap(t *testing.T) {
 				"NodeAffinityPriority",
 				"NodePreferAvoidPodsPriority",
 				"SelectorSpreadPriority",
-				"TaintTolerationPriority",
 				"ImageLocalityPriority",
 			),
-			expectedPlugins: map[string][]string{
-				"FilterPlugin": {"TaintToleration"},
+			expectedPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {{Name: "TaintToleration"}},
+				"ScorePlugin":  {{Name: "TaintToleration", Weight: 1}},
 			},
 		},
 		{
@@ -214,11 +214,11 @@ kind: Policy
 				"NodeAffinityPriority",
 				"NodePreferAvoidPodsPriority",
 				"SelectorSpreadPriority",
-				"TaintTolerationPriority",
 				"ImageLocalityPriority",
 			),
-			expectedPlugins: map[string][]string{
-				"FilterPlugin": {"TaintToleration"},
+			expectedPlugins: map[string][]kubeschedulerconfig.Plugin{
+				"FilterPlugin": {{Name: "TaintToleration"}},
+				"ScorePlugin":  {{Name: "TaintToleration", Weight: 1}},
 			},
 		},
 		{