Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-31 15:25:57 +00:00
Merge pull request #47566 from zhangxiaoyu-zidif/cleanup-add-comment-for-scheduler-predicates
Automatic merge from submit-queue

Cleanup predicates.go

**What this PR does / why we need it**: clean up some comments and replace the remaining `errors.New()` call with `fmt.Errorf()`.

**Special notes for your reviewer**: /cc @jayunit100

**Release note**:
```release-note
NONE
```
This commit is contained in commit c885c70c9b.
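The error-construction change is a Go style fix, not a behavior change: lint tools flag capitalized error strings because callers usually wrap them mid-sentence. A minimal stand-alone sketch of the before/after (standard library only, not the scheduler code itself):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// Before: errors.New with a capitalized message.
	before := errors.New("Empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity")

	// After: fmt.Errorf with a lowercase message, per the Go convention that
	// error strings are not capitalized, since they are often wrapped, e.g.
	//   fmt.Errorf("checking affinity: %v", err)
	after := fmt.Errorf("empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity")

	fmt.Println(before)
	fmt.Println(after)
}
```

Once the last `errors.New` call is gone, the `"errors"` import has to go as well, because Go refuses to compile a file with an unused import; that is what the first hunk below does.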
predicates.go

```diff
@@ -17,7 +17,6 @@ limitations under the License.
 package predicates
 
 import (
-	"errors"
 	"fmt"
 	"math/rand"
 	"strconv"
```
```diff
@@ -1008,7 +1007,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta interface
 // TODO: Do we really need any pod matching, or all pods matching? I think the latter.
 func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *v1.Pod, allPods []*v1.Pod, node *v1.Node, term *v1.PodAffinityTerm) (bool, bool, error) {
 	if len(term.TopologyKey) == 0 {
-		return false, false, errors.New("Empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity")
+		return false, false, fmt.Errorf("empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity")
 	}
 	matchingPodExists := false
 	namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, term)
```
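For readers unfamiliar with the field being validated: `TopologyKey` names the node label whose values partition nodes into topology domains (per host, per zone, and so on), which is what the affinity check groups pods by; with an empty key there is no domain to evaluate, hence the guard above. A minimal sketch of a well-formed term, assuming the current `k8s.io/api` and `k8s.io/apimachinery` import paths (the PR itself predates that repo split):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Co-locate with pods labeled app=web on the same node: the topology
	// key "kubernetes.io/hostname" makes each node its own domain.
	term := v1.PodAffinityTerm{
		LabelSelector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"app": "web"},
		},
		TopologyKey: "kubernetes.io/hostname",
	}

	// The guard from the hunk above: an empty key would be rejected.
	if len(term.TopologyKey) == 0 {
		fmt.Println("empty topologyKey is not allowed")
		return
	}
	fmt.Println("term is valid for topology key", term.TopologyKey)
}
```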
```diff
@@ -1248,7 +1247,7 @@ func PodToleratesNodeTaints(pod *v1.Pod, meta interface{}, nodeInfo *schedulerca
 	return false, []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil
 }
 
-// Determine if a pod is scheduled with best-effort QoS
+// isPodBestEffort checks if pod is scheduled with best-effort QoS
 func isPodBestEffort(pod *v1.Pod) bool {
 	return v1qos.GetPodQOS(pod) == v1.PodQOSBestEffort
 }
```
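The rewritten comment now follows godoc convention (start with the function's name). As background, BestEffort is the QoS class a pod gets when no container declares any resource requests or limits. A simplified, self-contained stand-in for that one rule (illustrative only; the real `v1qos.GetPodQOS` also distinguishes Guaranteed from Burstable and considers init containers):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// isBestEffort sketches only the BestEffort rule: a pod is BestEffort when
// none of its containers set resource requests or limits.
func isBestEffort(pod *v1.Pod) bool {
	for _, c := range pod.Spec.Containers {
		if len(c.Resources.Requests) != 0 || len(c.Resources.Limits) != 0 {
			return false
		}
	}
	return true
}

func main() {
	pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{Name: "app"}}}}
	fmt.Println(isBestEffort(pod)) // true: no requests or limits anywhere
}
```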
```diff
@@ -1268,7 +1267,7 @@ func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta interface{}, nodeInfo *s
 		return true, nil, nil
 	}
 
-	// is node under presure?
+	// check if node is under memory pressure
 	if nodeInfo.MemoryPressureCondition() == v1.ConditionTrue {
 		return false, []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil
 	}
```
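This predicate (and the disk-pressure one in the next hunk) follows the same return contract as every predicate in this file: `(fits, failureReasons, err)`, where a false fit with reasons filters the node out while a non-nil error aborts the scheduling attempt. A hypothetical, self-contained caller to make that contract concrete (the types here are stand-ins, not the scheduler's `algorithm` package):

```go
package main

import "fmt"

// failureReason stands in for algorithm.PredicateFailureReason.
type failureReason string

func (r failureReason) GetReason() string { return string(r) }

// checkMemoryPressure mirrors the shape of CheckNodeMemoryPressurePredicate:
// reject the node (without erroring) when it reports memory pressure.
func checkMemoryPressure(underPressure bool) (bool, []failureReason, error) {
	if underPressure {
		return false, []failureReason{"NodeUnderMemoryPressure"}, nil
	}
	return true, nil, nil
}

func main() {
	fits, reasons, err := checkMemoryPressure(true)
	switch {
	case err != nil:
		fmt.Println("scheduling aborted:", err) // internal error path
	case !fits:
		for _, r := range reasons {
			fmt.Println("node filtered out:", r.GetReason()) // normal filtering
		}
	default:
		fmt.Println("node fits")
	}
}
```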
```diff
@@ -1278,7 +1277,7 @@ func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta interface{}, nodeInfo *s
 // CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node
 // reporting disk pressure condition.
 func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-	// is node under presure?
+	// check if node is under disk pressure
 	if nodeInfo.DiskPressureCondition() == v1.ConditionTrue {
 		return false, []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}, nil
 	}
```