Use node out of disk condition in the scheduler while scheduling pods.
Set the out-of-disk node condition to Unknown in the node controller if the kubelet has not reported its node condition for a long time, and update the node controller unit tests accordingly. Implement a node condition predicate function that checks whether a given node satisfies the conditions defined by the predicate and, if it does, uses that node for scheduling pods. The predicate function takes both NodeReady and NodeOutOfDisk into consideration to determine whether a node is fit for scheduling pods. The predicate is then passed to the node lister in the scheduler factory so that the node lister can run the predicate function on the nodes when scheduling pods, thereby omitting nodes that do not satisfy the predicate. Also update the listers test.
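The predicate's behavior is easiest to see in isolation. The following is a minimal, self-contained Go sketch that mirrors the logic of getNodeConditionPredicate with stand-in types (the real code uses the api package types visible in the diff below; these names are illustrative only). One subtlety worth noting: because the node controller sets NodeOutOfDisk to Unknown when the kubelet stops reporting, and Unknown != False, such nodes are filtered out as well.

package main

import "fmt"

// Stand-in types for illustration; the real code uses api.NodeConditionType,
// api.ConditionStatus, api.NodeCondition, and api.Node.
type NodeConditionType string
type ConditionStatus string

const (
	NodeReady     NodeConditionType = "Ready"
	NodeOutOfDisk NodeConditionType = "OutOfDisk"

	ConditionTrue    ConditionStatus = "True"
	ConditionFalse   ConditionStatus = "False"
	ConditionUnknown ConditionStatus = "Unknown"
)

type NodeCondition struct {
	Type   NodeConditionType
	Status ConditionStatus
}

type Node struct {
	Name       string
	Conditions []NodeCondition
}

// schedulable mirrors the predicate added by this commit: a node is kept
// only when NodeReady is True and NodeOutOfDisk is False; any other
// status (including Unknown) disqualifies the node.
func schedulable(node Node) bool {
	for _, cond := range node.Conditions {
		if cond.Type == NodeReady && cond.Status != ConditionTrue {
			return false
		} else if cond.Type == NodeOutOfDisk && cond.Status != ConditionFalse {
			return false
		}
	}
	return true
}

func main() {
	nodes := []Node{
		{Name: "healthy", Conditions: []NodeCondition{{NodeReady, ConditionTrue}, {NodeOutOfDisk, ConditionFalse}}},
		{Name: "out-of-disk", Conditions: []NodeCondition{{NodeReady, ConditionTrue}, {NodeOutOfDisk, ConditionTrue}}},
		{Name: "disk-unknown", Conditions: []NodeCondition{{NodeReady, ConditionTrue}, {NodeOutOfDisk, ConditionUnknown}}},
		{Name: "not-ready", Conditions: []NodeCondition{{NodeReady, ConditionFalse}, {NodeOutOfDisk, ConditionFalse}}},
	}
	for _, n := range nodes {
		fmt.Printf("%s: schedulable=%v\n", n.Name, schedulable(n)) // only "healthy" passes
	}
}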
@@ -164,7 +164,7 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String)
 		ServiceLister:    f.ServiceLister,
 		ControllerLister: f.ControllerLister,
 		// All fit predicates only need to consider schedulable nodes.
-		NodeLister: f.NodeLister.NodeCondition(api.NodeReady, api.ConditionTrue),
+		NodeLister: f.NodeLister.NodeCondition(getNodeConditionPredicate()),
 		NodeInfo:   f.NodeLister,
 	}
 	predicateFuncs, err := getFitPredicateFunctions(predicateKeys, pluginArgs)
@@ -212,7 +212,7 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String)
 	return &scheduler.Config{
 		Modeler: f.modeler,
 		// The scheduler only needs to consider schedulable nodes.
-		NodeLister: f.NodeLister.NodeCondition(api.NodeReady, api.ConditionTrue),
+		NodeLister: f.NodeLister.NodeCondition(getNodeConditionPredicate()),
 		Algorithm: algo,
 		Binder:    &binder{f.Client},
 		NextPod: func() *api.Pod {
@@ -226,6 +226,23 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String)
 	}, nil
 }
 
+func getNodeConditionPredicate() cache.NodeConditionPredicate {
+	return func(node api.Node) bool {
+		for _, cond := range node.Status.Conditions {
+			// We consider the node for scheduling only when its NodeReady condition status
+			// is ConditionTrue and its NodeOutOfDisk condition status is ConditionFalse.
+			if cond.Type == api.NodeReady && cond.Status != api.ConditionTrue {
+				glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+				return false
+			} else if cond.Type == api.NodeOutOfDisk && cond.Status != api.ConditionFalse {
+				glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
+				return false
+			}
+		}
+		return true
+	}
+}
+
 // Returns a cache.ListWatch that finds all pods that need to be
 // scheduled.
 func (factory *ConfigFactory) createUnassignedPodLW() *cache.ListWatch {
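For context, the consumer of this predicate is the node lister: as the diff shows, NodeCondition now takes a cache.NodeConditionPredicate rather than a fixed (condition type, status) pair. Below is a hedged sketch of that filtering-lister pattern with simplified stand-in types; the real lister is cache.StoreToNodeLister backed by a cache.Store, and the type names here are assumptions for illustration.

package main

import "fmt"

// Minimal stand-in for api.Node; only what the sketch needs.
type Node struct {
	Name  string
	Ready bool
}

// NodeConditionPredicate mirrors the cache package's callback type: it
// reports whether a node should be visible to the scheduler at all.
type NodeConditionPredicate func(node Node) bool

// conditionLister sketches what NodeCondition(predicate) returns: a view
// over the node store that filters through the predicate on every List.
type conditionLister struct {
	nodes     []Node
	predicate NodeConditionPredicate
}

func (l conditionLister) List() []Node {
	var out []Node
	for _, n := range l.nodes {
		if l.predicate(n) {
			out = append(out, n)
		}
	}
	return out
}

func main() {
	lister := conditionLister{
		nodes:     []Node{{"a", true}, {"b", false}, {"c", true}},
		predicate: func(n Node) bool { return n.Ready },
	}
	fmt.Println(lister.List()) // only a and c reach the scheduler
}

Filtering at the lister, rather than inside each fit predicate, means every consumer of the lister (the fit predicates and the scheduler loop alike) sees the same pre-filtered set of schedulable nodes, which is why both call sites in the diff change identically.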