Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-27 05:27:21 +00:00
Improve for-loop in nodesWherePreemptionMightHelp function
commit d84a8c563e
parent a9db137737
@@ -62,6 +62,27 @@ const (
 	minFeasibleNodesPercentageToFind = 5
 )
 
+var unresolvablePredicateFailureErrors = map[predicates.PredicateFailureReason]struct{}{
+	predicates.ErrNodeSelectorNotMatch:      {},
+	predicates.ErrPodAffinityRulesNotMatch:  {},
+	predicates.ErrPodNotMatchHostName:       {},
+	predicates.ErrTaintsTolerationsNotMatch: {},
+	predicates.ErrNodeLabelPresenceViolated: {},
+	// Node conditions won't change when scheduler simulates removal of preemption victims.
+	// So, it is pointless to try nodes that have not been able to host the pod due to node
+	// conditions. These include ErrNodeNotReady, ErrNodeUnderPIDPressure, ErrNodeUnderMemoryPressure, ....
+	predicates.ErrNodeNotReady:            {},
+	predicates.ErrNodeNetworkUnavailable:  {},
+	predicates.ErrNodeUnderDiskPressure:   {},
+	predicates.ErrNodeUnderPIDPressure:    {},
+	predicates.ErrNodeUnderMemoryPressure: {},
+	predicates.ErrNodeUnschedulable:       {},
+	predicates.ErrNodeUnknownCondition:    {},
+	predicates.ErrVolumeZoneConflict:      {},
+	predicates.ErrVolumeNodeConflict:      {},
+	predicates.ErrVolumeBindConflict:      {},
+}
+
 // FailedPredicateMap declares a map[string][]algorithm.PredicateFailureReason type.
 type FailedPredicateMap map[string][]predicates.PredicateFailureReason
 
@@ -1111,44 +1132,28 @@ func selectVictimsOnNode(
 	return victims, numViolatingVictim, true
 }
 
+// unresolvablePredicateExists checks whether failedPredicates has unresolvable predicate.
+func unresolvablePredicateExists(failedPredicates []predicates.PredicateFailureReason) bool {
+	for _, failedPredicate := range failedPredicates {
+		if _, ok := unresolvablePredicateFailureErrors[failedPredicate]; ok {
+			return true
+		}
+	}
+	return false
+}
+
 // nodesWherePreemptionMightHelp returns a list of nodes with failed predicates
 // that may be satisfied by removing pods from the node.
 func nodesWherePreemptionMightHelp(nodes []*v1.Node, failedPredicatesMap FailedPredicateMap) []*v1.Node {
 	potentialNodes := []*v1.Node{}
 	for _, node := range nodes {
-		unresolvableReasonExist := false
 		failedPredicates, _ := failedPredicatesMap[node.Name]
 		// If we assume that scheduler looks at all nodes and populates the failedPredicateMap
 		// (which is the case today), the !found case should never happen, but we'd prefer
 		// to rely less on such assumptions in the code when checking does not impose
 		// significant overhead.
 		// Also, we currently assume all failures returned by extender as resolvable.
-		for _, failedPredicate := range failedPredicates {
-			switch failedPredicate {
-			case
-				predicates.ErrNodeSelectorNotMatch,
-				predicates.ErrPodAffinityRulesNotMatch,
-				predicates.ErrPodNotMatchHostName,
-				predicates.ErrTaintsTolerationsNotMatch,
-				predicates.ErrNodeLabelPresenceViolated,
-				// Node conditions won't change when scheduler simulates removal of preemption victims.
-				// So, it is pointless to try nodes that have not been able to host the pod due to node
-				// conditions. These include ErrNodeNotReady, ErrNodeUnderPIDPressure, ErrNodeUnderMemoryPressure, ....
-				predicates.ErrNodeNotReady,
-				predicates.ErrNodeNetworkUnavailable,
-				predicates.ErrNodeUnderDiskPressure,
-				predicates.ErrNodeUnderPIDPressure,
-				predicates.ErrNodeUnderMemoryPressure,
-				predicates.ErrNodeUnschedulable,
-				predicates.ErrNodeUnknownCondition,
-				predicates.ErrVolumeZoneConflict,
-				predicates.ErrVolumeNodeConflict,
-				predicates.ErrVolumeBindConflict:
-				unresolvableReasonExist = true
-				break
-			}
-		}
-		if !unresolvableReasonExist {
+		if !unresolvablePredicateExists(failedPredicates) {
 			klog.V(3).Infof("Node %v is a potential node for preemption.", node.Name)
 			potentialNodes = append(potentialNodes, node)
 		}
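The refactor replaces the inline switch inside the per-node loop with a package-level set (unresolvablePredicateFailureErrors) and a small helper (unresolvablePredicateExists). Below is a minimal standalone sketch of that pattern in Go, using a map with empty-struct values as a set; the identifiers in the sketch (failureReason, unresolvableFailures, hasUnresolvable) are illustrative only and are not taken from the Kubernetes codebase.

package main

import "fmt"

// failureReason stands in for predicates.PredicateFailureReason.
type failureReason string

// unresolvableFailures is a set: the map keys carry membership and the
// empty-struct values take no space.
var unresolvableFailures = map[failureReason]struct{}{
	"NodeSelectorNotMatch":      {},
	"TaintsTolerationsNotMatch": {},
	"NodeNotReady":              {},
}

// hasUnresolvable reports whether any reason in the slice is in the set,
// replacing a long switch statement inside the caller's loop.
func hasUnresolvable(reasons []failureReason) bool {
	for _, r := range reasons {
		if _, ok := unresolvableFailures[r]; ok {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasUnresolvable([]failureReason{"InsufficientCPU"}))         // false
	fmt.Println(hasUnresolvable([]failureReason{"NodeNotReady", "Unknown"})) // true
}

Two practical gains of this shape over the original loop: the set of unresolvable reasons is declared once at package scope, so it can be read and extended independently of the loop that consumes it, and the helper returns as soon as a match is found, whereas the original break only exited the switch rather than the surrounding for loop.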