Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-25 04:33:26 +00:00
Moved node condition check into Predicates.
parent 7bc1c67685
commit c8ecd92269
@@ -1084,7 +1084,6 @@ func (dsc *DaemonSetsController) simulate(newPod *v1.Pod, node *v1.Node, ds *ext
 // running on that node.
 func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *extensions.DaemonSet) (wantToRun, shouldSchedule, shouldContinueRunning bool, err error) {
 	newPod := NewPod(ds, node.Name)
-	critical := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) && kubelettypes.IsCriticalPod(newPod)

 	// Because these bools require an && of all their required conditions, we start
 	// with all bools set to true and set a bool to false if a condition is not met.
@@ -1095,21 +1094,6 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
 		return false, false, false, nil
 	}

-	// TODO: Move it to the predicates
-	for _, c := range node.Status.Conditions {
-		if critical {
-			break
-		}
-		// TODO: There are other node status that the DaemonSet should ideally respect too,
-		// e.g. MemoryPressure, and DiskPressure
-		if c.Type == v1.NodeOutOfDisk && c.Status == v1.ConditionTrue {
-			// the kubelet will evict this pod if it needs to. Let kubelet
-			// decide whether to continue running this pod so leave shouldContinueRunning
-			// set to true
-			shouldSchedule = false
-		}
-	}
-
 	reasons, nodeInfo, err := dsc.simulate(newPod, node, ds)
 	if err != nil {
 		glog.Warningf("DaemonSet Predicates failed on node %s for ds '%s/%s' due to unexpected error: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, err)
@@ -1117,7 +1101,6 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
 	}

 	var insufficientResourceErr error
-
 	for _, r := range reasons {
 		glog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
 		switch reason := r.(type) {
@@ -1127,6 +1110,11 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
 			var emitEvent bool
 			// we try to partition predicates into two partitions here: intentional on the part of the operator and not.
 			switch reason {
+			case predicates.ErrNodeOutOfDisk:
+				// the kubelet will evict this pod if it needs to. Let kubelet
+				// decide whether to continue running this pod so leave shouldContinueRunning
+				// set to true
+				shouldSchedule = false
 			// intentional
 			case
 				predicates.ErrNodeSelectorNotMatch,
@@ -1203,13 +1191,15 @@ func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorit
 	if !fit {
 		predicateFails = append(predicateFails, reasons...)
 	}
-
 	if critical {
 		// If the pod is marked as critical and support for critical pod annotations is enabled,
 		// check predicates for critical pods only.
 		fit, reasons, err = predicates.EssentialPredicates(pod, nil, nodeInfo)
 	} else {
 		fit, reasons, err = predicates.GeneralPredicates(pod, nil, nodeInfo)
+		ncFit, ncReasons := NodeConditionPredicates(nodeInfo)
+		fit = ncFit && fit
+		reasons = append(reasons, ncReasons...)
 	}
 	if err != nil {
 		return false, predicateFails, err
@@ -1221,6 +1211,21 @@ func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorit
 	return len(predicateFails) == 0, predicateFails, nil
 }

+func NodeConditionPredicates(nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason) {
+	reasons := []algorithm.PredicateFailureReason{}
+
+	for _, c := range nodeInfo.Node().Status.Conditions {
+		// TODO: There are other node status that the DaemonSet should ideally respect too,
+		// e.g. MemoryPressure, and DiskPressure
+		if c.Type == v1.NodeOutOfDisk && c.Status == v1.ConditionTrue {
+			reasons = append(reasons, predicates.ErrNodeSelectorNotMatch)
+			break
+		}
+	}
+
+	return len(reasons) == 0, reasons
+}
+
 // newControllerRef creates a ControllerRef pointing to the given DaemonSet.
 func newControllerRef(ds *extensions.DaemonSet) *metav1.OwnerReference {
 	blockOwnerDeletion := true
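For context, a minimal sketch, not part of this commit, of how the new NodeConditionPredicates helper behaves in isolation. It assumes the v1 and schedulercache packages the controller file already imports, and the NewNodeInfo/SetNode helpers as they existed in this tree; imports are elided.

// Sketch only: a node reporting OutOfDisk=True should be rejected by the helper.
node := &v1.Node{
	Status: v1.NodeStatus{
		Conditions: []v1.NodeCondition{
			{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue},
		},
	},
}
nodeInfo := schedulercache.NewNodeInfo()
if err := nodeInfo.SetNode(node); err != nil {
	// handle the error in real code
}

fit, reasons := NodeConditionPredicates(nodeInfo)
// With the hunk above as written, fit is false and reasons holds a single
// failure reason for the out-of-disk node.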
@@ -37,6 +37,7 @@ var (
 	ErrMaxVolumeCountExceeded = newPredicateFailureError("MaxVolumeCount")
 	ErrNodeUnderMemoryPressure = newPredicateFailureError("NodeUnderMemoryPressure")
 	ErrNodeUnderDiskPressure = newPredicateFailureError("NodeUnderDiskPressure")
+	ErrNodeOutOfDisk = newPredicateFailureError("NodeOutOfDisk")
 	ErrVolumeNodeConflict = newPredicateFailureError("NoVolumeNodeConflict")
 	// ErrFakePredicate is used for test only. The fake predicates returning false also returns error
 	// as ErrFakePredicate.
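In the hunks shown, the new ErrNodeOutOfDisk is only defined here and matched by the new case in the DaemonSet controller; no predicate shown above produces it. As a purely hypothetical illustration, a producer could look like the sketch below, assuming the existing algorithm.PredicateFailureReason interface; the helper name is made up.

// Hypothetical helper (not in this commit): surfaces ErrNodeOutOfDisk for a
// node whose OutOfDisk condition is True, so the controller's new
// "case predicates.ErrNodeOutOfDisk" would match it.
func nodeOutOfDiskReasons(node *v1.Node) []algorithm.PredicateFailureReason {
	var reasons []algorithm.PredicateFailureReason
	for _, c := range node.Status.Conditions {
		if c.Type == v1.NodeOutOfDisk && c.Status == v1.ConditionTrue {
			reasons = append(reasons, ErrNodeOutOfDisk)
		}
	}
	return reasons
}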