Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #60386 from k82cn/k8s_60163
Automatic merge from submit-queue (batch tested with PRs 60683, 60386). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Added unschedulable predicate.

Signed-off-by: Da K. Ma <madaxa@cn.ibm.com>

**Which issue(s) this PR fixes**: Fixes #60163

**Release note**:
```release-note
None
```
Commit: ae1fc13aee
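For orientation before the diff: the change introduces a dedicated `CheckNodeUnschedulable` predicate that rejects pods on nodes whose `Spec.Unschedulable` field is set (what `kubectl cordon` does), instead of folding that check into `PodToleratesNodeTaints`. Below is a minimal, self-contained sketch of the check using simplified stand-in types, not the real `k8s.io/api` ones; the actual implementation appears in the diff further down.

```go
package main

import "fmt"

// Node is a simplified stand-in; only the field the predicate reads is modeled.
type Node struct {
	Name          string
	Unschedulable bool // corresponds to node.Spec.Unschedulable, set by "kubectl cordon"
}

// checkNodeUnschedulable mirrors the shape of the new predicate:
// fail fast with a reason when the node is cordoned, fit otherwise.
func checkNodeUnschedulable(node *Node) (bool, string) {
	if node == nil {
		return false, "NodeUnknownCondition"
	}
	if node.Unschedulable {
		return false, "NodeUnschedulable"
	}
	return true, ""
}

func main() {
	fit, reason := checkNodeUnschedulable(&Node{Name: "test-node", Unschedulable: true})
	fmt.Printf("fit=%v reason=%q\n", fit, reason) // fit=false reason="NodeUnschedulable"
}
```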
In the DaemonSet controller tests, `TestNodeShouldRunDaemonPod` gains a `nodeUnschedulable` field and a case asserting that a daemon pod still runs on an unschedulable node:

```diff
@@ -1545,6 +1545,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 		predicateName                                    string
 		podsOnNode                                       []*v1.Pod
 		nodeCondition                                    []v1.NodeCondition
+		nodeUnschedulable                                bool
 		ds                                               *apps.DaemonSet
 		wantToRun, shouldSchedule, shouldContinueRunning bool
 		err                                              error
@@ -1800,6 +1801,24 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 			shouldSchedule:        true,
 			shouldContinueRunning: true,
 		},
+		{
+			predicateName: "ShouldRunDaemonPodOnUnscheduableNode",
+			ds: &apps.DaemonSet{
+				Spec: apps.DaemonSetSpec{
+					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
+					Template: v1.PodTemplateSpec{
+						ObjectMeta: metav1.ObjectMeta{
+							Labels: simpleDaemonSetLabel,
+						},
+						Spec: resourcePodSpec("", "50M", "0.5"),
+					},
+				},
+			},
+			nodeUnschedulable:     true,
+			wantToRun:             true,
+			shouldSchedule:        true,
+			shouldContinueRunning: true,
+		},
 	}
 
 	for i, c := range cases {
@@ -1807,6 +1826,7 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
 		node := newNode("test-node", simpleDaemonSetLabel)
 		node.Status.Conditions = append(node.Status.Conditions, c.nodeCondition...)
 		node.Status.Allocatable = allocatableResources("100M", "1")
+		node.Spec.Unschedulable = c.nodeUnschedulable
 		manager, _, _, err := newTestController()
 		if err != nil {
 			t.Fatalf("error creating DaemonSets controller: %v", err)
```
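The new case sets `nodeUnschedulable: true` yet expects `wantToRun`, `shouldSchedule`, and `shouldContinueRunning` to all be true: the DaemonSet controller's own predicate set does not apply the unschedulable check. A distilled, self-contained version of that table-driven assertion follows; `fits` is an illustrative stand-in, not one of the controller's real helpers.

```go
package daemon

import "testing"

// fits stands in for the fit decision: per the test case above, the
// unschedulable predicate rejects regular pods on cordoned nodes, while
// daemon pods still fit because the DaemonSet controller leaves it out.
func fits(nodeUnschedulable, isDaemonPod bool) bool {
	return !nodeUnschedulable || isDaemonPod
}

func TestUnschedulableNodeFit(t *testing.T) {
	cases := []struct {
		name              string
		nodeUnschedulable bool
		isDaemonPod       bool
		want              bool
	}{
		{"regular pod, schedulable node", false, false, true},
		{"regular pod, cordoned node", true, false, false},
		{"daemon pod, cordoned node", true, true, true},
	}
	for _, c := range cases {
		if got := fits(c.nodeUnschedulable, c.isDaemonPod); got != c.want {
			t.Errorf("%s: fits = %v, want %v", c.name, got, c.want)
		}
	}
}
```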
In the scheduler predicates, a `CheckNodeUnschedulable` predicate name is added, placed second in the predicate ordering, and the unschedulable check is split out of `PodToleratesNodeTaints` into its own `CheckNodeUnschedulablePredicate`:

```diff
@@ -69,6 +69,8 @@ const (
 	NoDiskConflictPred = "NoDiskConflict"
 	// PodToleratesNodeTaintsPred defines the name of predicate PodToleratesNodeTaints.
 	PodToleratesNodeTaintsPred = "PodToleratesNodeTaints"
+	// CheckNodeUnschedulablePred defines the name of predicate CheckNodeUnschedulablePredicate.
+	CheckNodeUnschedulablePred = "CheckNodeUnschedulable"
 	// PodToleratesNodeNoExecuteTaintsPred defines the name of predicate PodToleratesNodeNoExecuteTaints.
 	PodToleratesNodeNoExecuteTaintsPred = "PodToleratesNodeNoExecuteTaints"
 	// CheckNodeLabelPresencePred defines the name of predicate CheckNodeLabelPresence.
@@ -125,7 +127,7 @@ const (
 // The order is based on the restrictiveness & complexity of predicates.
 // Design doc: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/scheduling/predicates-ordering.md
 var (
-	predicatesOrdering = []string{CheckNodeConditionPred,
+	predicatesOrdering = []string{CheckNodeConditionPred, CheckNodeUnschedulablePred,
 		GeneralPred, HostNamePred, PodFitsHostPortsPred,
 		MatchNodeSelectorPred, PodFitsResourcesPred, NoDiskConflictPred,
 		PodToleratesNodeTaintsPred, PodToleratesNodeNoExecuteTaintsPred, CheckNodeLabelPresencePred,
@@ -1446,8 +1448,8 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, node
 	return nil, nil
 }
 
-// PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints
-func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+// CheckNodeUnschedulablePredicate checks if a pod can be scheduled on a node with Unschedulable spec.
+func CheckNodeUnschedulablePredicate(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	if nodeInfo == nil || nodeInfo.Node() == nil {
 		return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
 	}
@@ -1456,6 +1458,15 @@ func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeI
 		return false, []algorithm.PredicateFailureReason{ErrNodeUnschedulable}, nil
 	}
 
+	return true, nil, nil
+}
+
+// PodToleratesNodeTaints checks if a pod tolerations can tolerate the node taints
+func PodToleratesNodeTaints(pod *v1.Pod, meta algorithm.PredicateMetadata, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+	if nodeInfo == nil || nodeInfo.Node() == nil {
+		return false, []algorithm.PredicateFailureReason{ErrNodeUnknownCondition}, nil
+	}
+
 	return podToleratesNodeTaints(pod, nodeInfo, func(t *v1.Taint) bool {
 		// PodToleratesNodeTaints is only interested in NoSchedule and NoExecute taints.
 		return t.Effect == v1.TaintEffectNoSchedule || t.Effect == v1.TaintEffectNoExecute
```
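The ordering change puts `CheckNodeUnschedulablePred` immediately after `CheckNodeConditionPred`, in line with the cited design doc's rule of running restrictive, cheap predicates first. A rough sketch of why order matters, assuming evaluation can stop at the first failed predicate (which the ordered list is designed around); the types here are simplified stand-ins, not the real `algorithm.FitPredicate`:

```go
package main

import "fmt"

// node models only the attributes the sketched predicates inspect.
type node struct {
	ready         bool
	unschedulable bool
	taints        []string
}

// predicate pairs a name with a simplified fit function.
type predicate struct {
	name string
	fits func(n *node) bool
}

func main() {
	// Mirrors the predicatesOrdering change: the cheap unschedulable check
	// runs right after the node-condition check, ahead of costlier ones.
	ordering := []predicate{
		{"CheckNodeCondition", func(n *node) bool { return n.ready }},
		{"CheckNodeUnschedulable", func(n *node) bool { return !n.unschedulable }},
		{"PodToleratesNodeTaints", func(n *node) bool { return len(n.taints) == 0 }},
	}

	n := &node{ready: true, unschedulable: true, taints: []string{"example"}}
	for _, p := range ordering {
		if !p.fits(n) {
			// Short-circuits here; the taint check never runs for this node.
			fmt.Println("rejected by", p.name)
			return
		}
	}
	fmt.Println("node fits")
}
```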
In `ApplyFeatureGates`, the new predicate is registered as mandatory and its key inserted into every algorithm provider:

```diff
@@ -186,12 +186,15 @@ func ApplyFeatureGates() {
 	// if you just want remove specific provider, call func RemovePredicateKeyFromAlgoProvider()
 	factory.RemovePredicateKeyFromAlgorithmProviderMap(predicates.CheckNodeConditionPred)
 
+	// Fit is determined based on whether a node has Unschedulable spec
+	factory.RegisterMandatoryFitPredicate(predicates.CheckNodeUnschedulablePred, predicates.CheckNodeUnschedulablePredicate)
 	// Fit is determined based on whether a pod can tolerate all of the node's taints
 	factory.RegisterMandatoryFitPredicate(predicates.PodToleratesNodeTaintsPred, predicates.PodToleratesNodeTaints)
-	// Insert Key "PodToleratesNodeTaints" To All Algorithm Provider
+	// Insert Key "PodToleratesNodeTaints" and "CheckNodeUnschedulable" To All Algorithm Provider
 	// The key will insert to all providers which in algorithmProviderMap[]
 	// if you just want insert to specific provider, call func InsertPredicateKeyToAlgoProvider()
 	factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.PodToleratesNodeTaintsPred)
+	factory.InsertPredicateKeyToAlgorithmProviderMap(predicates.CheckNodeUnschedulablePred)
 
 	glog.Warningf("TaintNodesByCondition is enabled, PodToleratesNodeTaints predicate is mandatory")
 }
```
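`RegisterMandatoryFitPredicate` makes the new check non-optional, and inserting its key into every algorithm provider keeps each provider's predicate set consistent. A rough, self-contained sketch of that registration pattern, with hypothetical names standing in for the `factory` package internals:

```go
package main

import "fmt"

// fitPredicate is a simplified stand-in for the real predicate signature.
type fitPredicate func() bool

var (
	fitPredicateMap     = map[string]fitPredicate{} // name -> predicate
	mandatoryPredicates = map[string]bool{}         // cannot be removed by user policy
	providerKeys        = map[string]map[string]bool{
		"DefaultProvider": {},
	}
)

// registerMandatoryFitPredicate records the predicate and marks it mandatory,
// echoing factory.RegisterMandatoryFitPredicate.
func registerMandatoryFitPredicate(name string, p fitPredicate) {
	fitPredicateMap[name] = p
	mandatoryPredicates[name] = true
}

// insertPredicateKeyToAllProviders adds the key to every provider's set,
// echoing factory.InsertPredicateKeyToAlgorithmProviderMap.
func insertPredicateKeyToAllProviders(name string) {
	for _, keys := range providerKeys {
		keys[name] = true
	}
}

func main() {
	registerMandatoryFitPredicate("CheckNodeUnschedulable", func() bool { return true })
	insertPredicateKeyToAllProviders("CheckNodeUnschedulable")
	fmt.Println(providerKeys["DefaultProvider"]["CheckNodeUnschedulable"]) // true
}
```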