Merge pull request #44115 from derekwaynecarr/reduce-logging-severity
Automatic merge from submit-queue (batch tested with PRs 47083, 44115, 46881, 47082, 46577)

Scheduler should not log an error when there is no fit

**What this PR does / why we need it**: The scheduler should not log an error when it is unable to find a fit for a pod, since this is an expected situation whenever the cluster lacks resources that satisfy the pod's requirements.
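To illustrate the pattern the patch applies, here is a minimal, self-contained sketch (not the scheduler's actual code): a type assertion against a `FitError`-style type routes the expected "no fit" case to verbose info logging, while unexpected failures keep error-level logging. The `FitError` type and the plain `fmt` prints below are simplified stand-ins for `core.FitError` and `glog`.

```go
package main

import (
	"errors"
	"fmt"
)

// FitError is a stand-in for core.FitError: returned when no node
// satisfies a pod's requirements (an expected condition, not a fault).
type FitError struct {
	PodName string
	Reasons []string
}

func (e *FitError) Error() string {
	return fmt.Sprintf("pod %q failed to fit: %v", e.PodName, e.Reasons)
}

// logSchedulingFailure mirrors the pattern in the patch: a type assertion
// selects a verbose info log for the expected "no fit" case and reserves
// error-level logging for everything else.
func logSchedulingFailure(err error) {
	if fitErr, ok := err.(*FitError); ok {
		fmt.Printf("V(4) info: unable to schedule: no fit: %v; waiting\n", fitErr)
	} else {
		fmt.Printf("ERROR: error scheduling: %v; retrying\n", err)
	}
}

func main() {
	logSchedulingFailure(&FitError{PodName: "web-0", Reasons: []string{"insufficient cpu"}})
	logSchedulingFailure(errors.New("node cache out of sync"))
}
```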
```diff
@@ -653,7 +653,11 @@ func (factory *ConfigFactory) MakeDefaultErrorFunc(backoff *util.PodBackoff, pod
 		if err == core.ErrNoNodesAvailable {
 			glog.V(4).Infof("Unable to schedule %v %v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name)
 		} else {
-			glog.Errorf("Error scheduling %v %v: %v; retrying", pod.Namespace, pod.Name, err)
+			if _, ok := err.(*core.FitError); ok {
+				glog.V(4).Infof("Unable to schedule %v %v: no fit: %v; waiting", pod.Namespace, pod.Name, err)
+			} else {
+				glog.Errorf("Error scheduling %v %v: %v; retrying", pod.Namespace, pod.Name, err)
+			}
 		}
 		backoff.Gc()
 		// Retry asynchronously.
```
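Note that glog messages at V(4) are suppressed by default, so on a running cluster the demoted "no fit" lines would only appear when the scheduler is started with verbosity 4 or higher (e.g. `--v=4`).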