Merge pull request #86979 from ahg-g/ahg-cleanup2

Move TaintToleration predicate to its plugin
commit 32d8799ef1
Kubernetes Prow Robot authored 2020-01-09 06:53:45 -08:00 (committed by GitHub)
8 changed files with 30 additions and 260 deletions
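This PR moves the taint/toleration feasibility check out of the legacy predicates package and into the scheduler framework's TaintToleration plugin. In the framework, a predicate becomes a Filter plugin: rather than returning (bool, []PredicateFailureReason, error), it returns a per-node *framework.Status. A minimal sketch of that plugin shape, assuming the v1alpha1 framework API of this era (ExamplePlugin and its body are illustrative, not the code in this PR):

package main

import (
    "context"

    v1 "k8s.io/api/core/v1"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
    "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// ExamplePlugin shows the Filter extension point a legacy predicate migrates into.
type ExamplePlugin struct{}

var _ framework.FilterPlugin = &ExamplePlugin{}

// Name identifies the plugin in logs and scheduler configuration.
func (pl *ExamplePlugin) Name() string { return "Example" }

// Filter runs once per candidate node; returning nil means the node fits.
func (pl *ExamplePlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
    if nodeInfo.Node() == nil {
        return framework.NewStatus(framework.Error, "node not found")
    }
    return nil
}

func main() {} // compile check only

Returning nil from Filter marks the node feasible; an UnschedulableAndUnresolvable status both rejects the node and tells preemption not to bother, which is what the changes below rely on.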

pkg/scheduler/algorithm/predicates/error.go

@@ -33,8 +33,6 @@ var (
// ErrNodeSelectorNotMatch is used for MatchNodeSelector predicate error.
ErrNodeSelectorNotMatch = NewPredicateFailureError("MatchNodeSelector", "node(s) didn't match node selector")
-    // ErrTaintsTolerationsNotMatch is used for PodToleratesNodeTaints predicate error.
-    ErrTaintsTolerationsNotMatch = NewPredicateFailureError("PodToleratesNodeTaints", "node(s) had taints that the pod didn't tolerate")
// ErrPodNotMatchHostName is used for HostName predicate error.
ErrPodNotMatchHostName = NewPredicateFailureError("HostName", "node(s) didn't match the requested hostname")
// ErrPodNotFitsHostPorts is used for PodFitsHostPorts predicate error.
@@ -63,9 +61,8 @@ var (
)
var unresolvablePredicateFailureErrors = map[PredicateFailureReason]struct{}{
-    ErrNodeSelectorNotMatch:      {},
-    ErrPodNotMatchHostName:       {},
-    ErrTaintsTolerationsNotMatch: {},
+    ErrNodeSelectorNotMatch: {},
+    ErrPodNotMatchHostName:  {},
// Node conditions won't change when scheduler simulates removal of preemption victims.
// So, it is pointless to try nodes that have not been able to host the pod due to node
// conditions. These include ErrNodeNotReady, ErrNodeUnderPIDPressure, ErrNodeUnderMemoryPressure, ....
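As the comment notes, preemption only helps when a node's failure could change after removing victim pods; reasons in this map never do. A rough sketch of how such a set gates retry during preemption (the helper name is hypothetical; it assumes this package's PredicateFailureReason type and the map above):

// unresolvable reports whether a node's failure reasons make preemption
// pointless: reasons listed in unresolvablePredicateFailureErrors (node
// selector, hostname, taints, node conditions) don't change when pods
// are evicted from the node.
func unresolvable(reasons []PredicateFailureReason) bool {
    for _, r := range reasons {
        if _, ok := unresolvablePredicateFailureErrors[r]; ok {
            return true
        }
    }
    return false
}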

pkg/scheduler/algorithm/predicates/predicates.go

@@ -52,8 +52,6 @@ const (
PodToleratesNodeTaintsPred = "PodToleratesNodeTaints"
// CheckNodeUnschedulablePred defines the name of predicate CheckNodeUnschedulablePredicate.
CheckNodeUnschedulablePred = "CheckNodeUnschedulable"
-    // PodToleratesNodeNoExecuteTaintsPred defines the name of predicate PodToleratesNodeNoExecuteTaints.
-    PodToleratesNodeNoExecuteTaintsPred = "PodToleratesNodeNoExecuteTaints"
// CheckNodeLabelPresencePred defines the name of predicate CheckNodeLabelPresence.
CheckNodeLabelPresencePred = "CheckNodeLabelPresence"
// CheckServiceAffinityPred defines the name of predicate checkServiceAffinity.
@@ -99,7 +97,7 @@ var (
predicatesOrdering = []string{CheckNodeUnschedulablePred,
GeneralPred, HostNamePred, PodFitsHostPortsPred,
MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred,
-    PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred,
+    PodToleratesNodeTaintsPred, CheckNodeLabelPresencePred,
CheckServiceAffinityPred, MaxEBSVolumeCountPred, MaxGCEPDVolumeCountPred, MaxCSIVolumeCountPred,
MaxAzureDiskVolumeCountPred, MaxCinderVolumeCountPred, CheckVolumeBindingPred, NoVolumeZoneConflictPred,
EvenPodsSpreadPred, MatchInterPodAffinityPred}
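For context, this slice fixes the order in which the generic scheduler evaluates whichever predicates are registered; roughly like the sketch below (runOrdered and the registry map are illustrative, assuming this package's FitPredicate type):

// runOrdered is an illustrative helper, not the upstream code: it runs
// whichever predicates are registered, in the fixed order above, and
// stops at the first failure so cheap checks can short-circuit.
func runOrdered(registered map[string]FitPredicate, pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
    for _, name := range predicatesOrdering {
        pred, ok := registered[name]
        if !ok {
            continue // predicate not enabled for this scheduler
        }
        fits, reasons, err := pred(pod, meta, nodeInfo)
        if err != nil {
            return false, nil, err
        }
        if !fits {
            return false, reasons, nil
        }
    }
    return true, nil, nil
}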
@@ -303,34 +301,3 @@ func GeneralPredicates(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
return len(predicateFails) == 0, predicateFails, nil
}
-// PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints.
-func PodToleratesNodeTaints(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
-    if nodeInfo == nil || nodeInfo.Node() == nil {
-        return false, []PredicateFailureReason{ErrNodeUnknownCondition}, nil
-    }
-
-    return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool {
-        // PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints.
-        return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
-    })
-}
-
-// PodToleratesNodeNoExecuteTaints checks if a pod tolerations can tolerate the node's NoExecute taints.
-func PodToleratesNodeNoExecuteTaints(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
-    return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool {
-        return t.Effect == v1.TaintEffectNoExecute
-    })
-}
-
-func podToleratesNodeTaints(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, filter func(t *v1.Taint) bool) (bool, []PredicateFailureReason, error) {
-    taints, err := nodeInfo.Taints()
-    if err != nil {
-        return false, nil, err
-    }
-
-    if v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, filter) {
-        return true, nil, nil
-    }
-
-    return false, []PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil
-}
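The helpers deleted above wrap v1helper.TolerationsTolerateTaintsWithFilter; the underlying match is also exposed on the public API types: a toleration tolerates a taint when key, operator/value, and effect line up. A self-contained illustration using only k8s.io/api/core/v1 (Toleration.ToleratesTaint is the public helper; the values are made up):

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
)

func main() {
    taint := v1.Taint{Key: "dedicated", Value: "user1", Effect: v1.TaintEffectNoSchedule}

    // Tolerates: key, value, and effect all match.
    match := v1.Toleration{Key: "dedicated", Operator: v1.TolerationOpEqual, Value: "user1", Effect: v1.TaintEffectNoSchedule}
    // Does not tolerate: same key but a different value.
    noMatch := v1.Toleration{Key: "dedicated", Operator: v1.TolerationOpEqual, Value: "user2", Effect: v1.TaintEffectNoSchedule}

    fmt.Println(match.ToleratesTaint(&taint))   // true
    fmt.Println(noMatch.ToleratesTaint(&taint)) // false
}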

pkg/scheduler/algorithm/predicates/predicates_test.go

@@ -1515,206 +1515,3 @@ func TestRunGeneralPredicates(t *testing.T) {
})
}
}
-func TestPodToleratesTaints(t *testing.T) {
-    podTolerateTaintsTests := []struct {
-        pod  *v1.Pod
-        node v1.Node
-        fits bool
-        name string
-    }{
-        {
-            pod: &v1.Pod{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name: "pod0",
-                },
-            },
-            node: v1.Node{
-                Spec: v1.NodeSpec{
-                    Taints: []v1.Taint{{Key: "dedicated", Value: "user1", Effect: "NoSchedule"}},
-                },
-            },
-            fits: false,
-            name: "A pod having no tolerations can't be scheduled onto a node with nonempty taints",
-        },
-        {
-            pod: &v1.Pod{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name: "pod1",
-                },
-                Spec: v1.PodSpec{
-                    Containers:  []v1.Container{{Image: "pod1:V1"}},
-                    Tolerations: []v1.Toleration{{Key: "dedicated", Value: "user1", Effect: "NoSchedule"}},
-                },
-            },
-            node: v1.Node{
-                Spec: v1.NodeSpec{
-                    Taints: []v1.Taint{{Key: "dedicated", Value: "user1", Effect: "NoSchedule"}},
-                },
-            },
-            fits: true,
-            name: "A pod which can be scheduled on a dedicated node assigned to user1 with effect NoSchedule",
-        },
-        {
-            pod: &v1.Pod{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name: "pod2",
-                },
-                Spec: v1.PodSpec{
-                    Containers:  []v1.Container{{Image: "pod2:V1"}},
-                    Tolerations: []v1.Toleration{{Key: "dedicated", Operator: "Equal", Value: "user2", Effect: "NoSchedule"}},
-                },
-            },
-            node: v1.Node{
-                Spec: v1.NodeSpec{
-                    Taints: []v1.Taint{{Key: "dedicated", Value: "user1", Effect: "NoSchedule"}},
-                },
-            },
-            fits: false,
-            name: "A pod which can't be scheduled on a dedicated node assigned to user2 with effect NoSchedule",
-        },
-        {
-            pod: &v1.Pod{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name: "pod2",
-                },
-                Spec: v1.PodSpec{
-                    Containers:  []v1.Container{{Image: "pod2:V1"}},
-                    Tolerations: []v1.Toleration{{Key: "foo", Operator: "Exists", Effect: "NoSchedule"}},
-                },
-            },
-            node: v1.Node{
-                Spec: v1.NodeSpec{
-                    Taints: []v1.Taint{{Key: "foo", Value: "bar", Effect: "NoSchedule"}},
-                },
-            },
-            fits: true,
-            name: "A pod can be scheduled onto the node, with a toleration uses operator Exists that tolerates the taints on the node",
-        },
-        {
-            pod: &v1.Pod{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name: "pod2",
-                },
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{{Image: "pod2:V1"}},
-                    Tolerations: []v1.Toleration{
-                        {Key: "dedicated", Operator: "Equal", Value: "user2", Effect: "NoSchedule"},
-                        {Key: "foo", Operator: "Exists", Effect: "NoSchedule"},
-                    },
-                },
-            },
-            node: v1.Node{
-                Spec: v1.NodeSpec{
-                    Taints: []v1.Taint{
-                        {Key: "dedicated", Value: "user2", Effect: "NoSchedule"},
-                        {Key: "foo", Value: "bar", Effect: "NoSchedule"},
-                    },
-                },
-            },
-            fits: true,
-            name: "A pod has multiple tolerations, node has multiple taints, all the taints are tolerated, pod can be scheduled onto the node",
-        },
-        {
-            pod: &v1.Pod{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name: "pod2",
-                },
-                Spec: v1.PodSpec{
-                    Containers:  []v1.Container{{Image: "pod2:V1"}},
-                    Tolerations: []v1.Toleration{{Key: "foo", Operator: "Equal", Value: "bar", Effect: "PreferNoSchedule"}},
-                },
-            },
-            node: v1.Node{
-                Spec: v1.NodeSpec{
-                    Taints: []v1.Taint{
-                        {Key: "foo", Value: "bar", Effect: "NoSchedule"},
-                    },
-                },
-            },
-            fits: false,
-            name: "A pod has a toleration that keys and values match the taint on the node, but (non-empty) effect doesn't match, " +
-                "can't be scheduled onto the node",
-        },
-        {
-            pod: &v1.Pod{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name: "pod2",
-                },
-                Spec: v1.PodSpec{
-                    Containers:  []v1.Container{{Image: "pod2:V1"}},
-                    Tolerations: []v1.Toleration{{Key: "foo", Operator: "Equal", Value: "bar"}},
-                },
-            },
-            node: v1.Node{
-                Spec: v1.NodeSpec{
-                    Taints: []v1.Taint{
-                        {Key: "foo", Value: "bar", Effect: "NoSchedule"},
-                    },
-                },
-            },
-            fits: true,
-            name: "The pod has a toleration that keys and values match the taint on the node, the effect of toleration is empty, " +
-                "and the effect of taint is NoSchedule. Pod can be scheduled onto the node",
-        },
-        {
-            pod: &v1.Pod{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name: "pod2",
-                },
-                Spec: v1.PodSpec{
-                    Containers:  []v1.Container{{Image: "pod2:V1"}},
-                    Tolerations: []v1.Toleration{{Key: "dedicated", Operator: "Equal", Value: "user2", Effect: "NoSchedule"}},
-                },
-            },
-            node: v1.Node{
-                Spec: v1.NodeSpec{
-                    Taints: []v1.Taint{
-                        {Key: "dedicated", Value: "user1", Effect: "PreferNoSchedule"},
-                    },
-                },
-            },
-            fits: true,
-            name: "The pod has a toleration that key and value don't match the taint on the node, " +
-                "but the effect of taint on node is PreferNochedule. Pod can be scheduled onto the node",
-        },
-        {
-            pod: &v1.Pod{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name: "pod2",
-                },
-                Spec: v1.PodSpec{
-                    Containers: []v1.Container{{Image: "pod2:V1"}},
-                },
-            },
-            node: v1.Node{
-                Spec: v1.NodeSpec{
-                    Taints: []v1.Taint{
-                        {Key: "dedicated", Value: "user1", Effect: "PreferNoSchedule"},
-                    },
-                },
-            },
-            fits: true,
-            name: "The pod has no toleration, " +
-                "but the effect of taint on node is PreferNochedule. Pod can be scheduled onto the node",
-        },
-    }
-    expectedFailureReasons := []PredicateFailureReason{ErrTaintsTolerationsNotMatch}
-
-    for _, test := range podTolerateTaintsTests {
-        t.Run(test.name, func(t *testing.T) {
-            nodeInfo := schedulernodeinfo.NewNodeInfo()
-            nodeInfo.SetNode(&test.node)
-            fits, reasons, err := PodToleratesNodeTaints(test.pod, nil, nodeInfo)
-            if err != nil {
-                t.Errorf("unexpected error: %v", err)
-            }
-            if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
-                t.Errorf("unexpected failure reason: %v, want: %v", reasons, expectedFailureReasons)
-            }
-            if fits != test.fits {
-                t.Errorf("expected: %v got %v", test.fits, fits)
-            }
-        })
-    }
-}

pkg/scheduler/core/BUILD

@@ -58,6 +58,7 @@ go_test(
"//pkg/scheduler/framework/plugins/nodelabel:go_default_library",
"//pkg/scheduler/framework/plugins/noderesources:go_default_library",
"//pkg/scheduler/framework/plugins/podtopologyspread:go_default_library",
"//pkg/scheduler/framework/plugins/tainttoleration:go_default_library",
"//pkg/scheduler/framework/plugins/volumebinding:go_default_library",
"//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library",
"//pkg/scheduler/framework/plugins/volumezone:go_default_library",

pkg/scheduler/core/generic_scheduler_test.go

@@ -27,7 +27,7 @@ import (
"testing"
"time"
v1 "k8s.io/api/core/v1"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -47,6 +47,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodelabel"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/podtopologyspread"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone"
@@ -1878,7 +1879,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
nodesStatuses: framework.NodeToStatusMap{
"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeSelectorNotMatch.GetReason()),
"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrPodNotMatchHostName.GetReason()),
"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrTaintsTolerationsNotMatch.GetReason()),
"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, tainttoleration.ErrReasonNotMatch),
"machine4": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodelabel.ErrReasonPresenceViolated),
},
expected: map[string]bool{},

pkg/scheduler/framework/plugins/tainttoleration/BUILD

@@ -7,9 +7,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/algorithm/priorities:go_default_library",
"//pkg/scheduler/framework/plugins/migration:go_default_library",
"//pkg/scheduler/framework/plugins/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -36,7 +34,6 @@ go_test(
srcs = ["taint_toleration_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/algorithm/predicates:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/nodeinfo/snapshot:go_default_library",

pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go

@@ -23,9 +23,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)
@@ -44,6 +42,8 @@ const (
Name = "TaintToleration"
// postFilterStateKey is the key in CycleState to TaintToleration pre-computed data for Scoring.
postFilterStateKey = "PostFilter" + Name
+    // ErrReasonNotMatch is the Filter reason status when not matching.
+    ErrReasonNotMatch = "node(s) had taints that the pod didn't tolerate"
)
// Name returns name of the plugin. It is used in logs, etc.
@@ -53,9 +53,23 @@ func (pl *TaintToleration) Name() string {
// Filter invoked at the filter extension point.
func (pl *TaintToleration) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
-    // Note that PodToleratesNodeTaints doesn't use predicate metadata, hence passing nil here.
-    _, reasons, err := predicates.PodToleratesNodeTaints(pod, nil, nodeInfo)
-    return migration.PredicateResultToFrameworkStatus(reasons, err)
+    if nodeInfo == nil || nodeInfo.Node() == nil {
+        return framework.NewStatus(framework.Error, "invalid nodeInfo")
+    }
+
+    taints, err := nodeInfo.Taints()
+    if err != nil {
+        return framework.NewStatus(framework.Error, err.Error())
+    }
+
+    if v1helper.TolerationsTolerateTaintsWithFilter(pod.Spec.Tolerations, taints, func(t *v1.Taint) bool {
+        // PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints.
+        return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
+    }) {
+        return nil
+    }
+
+    return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonNotMatch)
}
// postFilterState computed at PostFilter and used at Score.
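With the logic inlined, the Filter above can be exercised without the legacy predicates machinery. A minimal usage sketch (it assumes this package's imports plus context and fmt, and that the zero-value plugin suffices for Filter, since only Score needs the framework handle):

// Illustrative only: exercising the new Filter directly.
func exampleFilterUsage() {
    node := &v1.Node{
        Spec: v1.NodeSpec{
            Taints: []v1.Taint{{Key: "dedicated", Value: "user1", Effect: v1.TaintEffectNoSchedule}},
        },
    }
    ni := nodeinfo.NewNodeInfo()
    ni.SetNode(node)

    pod := &v1.Pod{} // no tolerations

    p := &TaintToleration{}
    status := p.Filter(context.Background(), nil, pod, ni)
    fmt.Println(status.Message())
    // prints: node(s) had taints that the pod didn't tolerate
}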
@@ -140,10 +154,7 @@ func (pl *TaintToleration) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
// NormalizeScore invoked after scoring all nodes.
func (pl *TaintToleration) NormalizeScore(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
-    // Note that ComputeTaintTolerationPriorityReduce doesn't use priority metadata, hence passing nil here.
-    normalizeFun := priorities.NormalizeReduce(framework.MaxNodeScore, true)
-    err := normalizeFun(pod, nil, pl.handle.SnapshotSharedLister(), scores)
-    return migration.ErrorToFrameworkStatus(err)
+    return pluginhelper.DefaultNormalizeScore(framework.MaxNodeScore, true, scores)
}
// ScoreExtensions of the Score plugin.
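pluginhelper.DefaultNormalizeScore replaces the NormalizeReduce shim from the priorities package. A simplified standalone sketch of the behavior being relied on (an assumption, not the upstream code: the helper scales raw scores to [0, maxScore] and, with reverse=true, flips them so nodes with more intolerable PreferNoSchedule taints end up with lower scores):

// normalize scales raw counts into [0, maxScore] and optionally reverses
// them; for TaintToleration the raw score counts intolerable taints, so
// reverse=true makes "fewer taints" score higher.
func normalize(maxScore int64, reverse bool, scores []int64) {
    var highest int64
    for _, s := range scores {
        if s > highest {
            highest = s
        }
    }
    for i, s := range scores {
        if highest != 0 {
            scores[i] = s * maxScore / highest
        }
        if reverse {
            scores[i] = maxScore - scores[i]
        }
    }
}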

pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go

@@ -23,7 +23,6 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
@@ -261,7 +260,7 @@
}
func TestTaintTolerationFilter(t *testing.T) {
-    unschedulable := framework.NewStatus(framework.UnschedulableAndUnresolvable, predicates.ErrTaintsTolerationsNotMatch.GetReason())
+    unschedulable := framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonNotMatch)
tests := []struct {
name string
pod *v1.Pod