Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-22 03:11:40 +00:00)

Move constant SchedulerError in scheduler to v1 package
Signed-off-by: kerthcet <kerthcet@gmail.com>

commit 71d0b89e3e (parent 48c8183fc4)
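
Taken together, the hunks below remove the scheduler-local SchedulerError constant, switch the reason passed to FailureHandler in schedulingCycle and bindingCycle to v1.PodReasonSchedulerError, and add the new constant next to PodReasonUnschedulable in the core v1 const block. As a rough illustration of what the move enables, here is a minimal consumer-side sketch (not part of this commit) that classifies a Pod's PodScheduled condition using only k8s.io/api/core/v1; the helper name explainPodScheduledReason is invented, and the sketch assumes an API module version that already ships this constant.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// explainPodScheduledReason is a made-up helper: it inspects the PodScheduled
// condition and distinguishes "the pod cannot be placed right now" from
// "the scheduler itself hit an internal error".
func explainPodScheduledReason(pod *v1.Pod) string {
	for _, cond := range pod.Status.Conditions {
		if cond.Type != v1.PodScheduled || cond.Status != v1.ConditionFalse {
			continue
		}
		switch cond.Reason {
		case v1.PodReasonUnschedulable:
			return "unschedulable: " + cond.Message
		case v1.PodReasonSchedulerError:
			return "internal scheduler error: " + cond.Message
		}
	}
	return "no failed PodScheduled condition"
}

func main() {
	// Invented example pod; the message is illustrative only.
	pod := &v1.Pod{
		Status: v1.PodStatus{
			Conditions: []v1.PodCondition{{
				Type:    v1.PodScheduled,
				Status:  v1.ConditionFalse,
				Reason:  v1.PodReasonSchedulerError,
				Message: "binding rejected: example error",
			}},
		},
	}
	fmt.Println(explainPodScheduledReason(pod))
}

Previously, code like this would have had to hard-code the "SchedulerError" string or depend on the scheduler package for the exported constant.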
@@ -45,8 +45,6 @@ import (
 )
 
 const (
-	// SchedulerError is the reason recorded for events when an error occurs during scheduling a pod.
-	SchedulerError = "SchedulerError"
 	// Percentage of plugin metrics to be sampled.
 	pluginMetricsSamplePercent = 10
 	// minFeasibleNodesToFind is the minimum number of nodes that would be scored
@@ -151,7 +149,7 @@ func (sched *Scheduler) schedulingCycle(ctx context.Context, state *framework.Cy
 			nominatingInfo = clearNominatedNode
 			klog.ErrorS(err, "Error selecting node for pod", "pod", klog.KObj(pod))
 			metrics.PodScheduleError(fwk.ProfileName(), metrics.SinceInSeconds(start))
-			reason = SchedulerError
+			reason = v1.PodReasonSchedulerError
 		}
 		sched.FailureHandler(ctx, fwk, podInfo, err, reason, nominatingInfo)
 		return ScheduleResult{}, nil
@@ -170,7 +168,7 @@ func (sched *Scheduler) schedulingCycle(ctx context.Context, state *framework.Cy
 		// This relies on the fact that Error will check if the pod has been bound
 		// to a node and if so will not add it back to the unscheduled pods queue
 		// (otherwise this would cause an infinite loop).
-		sched.FailureHandler(ctx, fwk, assumedPodInfo, err, SchedulerError, clearNominatedNode)
+		sched.FailureHandler(ctx, fwk, assumedPodInfo, err, v1.PodReasonSchedulerError, clearNominatedNode)
 		return ScheduleResult{}, nil
 	}
 
@@ -182,7 +180,7 @@ func (sched *Scheduler) schedulingCycle(ctx context.Context, state *framework.Cy
 		if forgetErr := sched.Cache.ForgetPod(assumedPod); forgetErr != nil {
 			klog.ErrorS(forgetErr, "Scheduler cache ForgetPod failed")
 		}
-		sched.FailureHandler(ctx, fwk, assumedPodInfo, sts.AsError(), SchedulerError, clearNominatedNode)
+		sched.FailureHandler(ctx, fwk, assumedPodInfo, sts.AsError(), v1.PodReasonSchedulerError, clearNominatedNode)
 		return ScheduleResult{}, nil
 	}
 
@@ -195,7 +193,7 @@ func (sched *Scheduler) schedulingCycle(ctx context.Context, state *framework.Cy
 			reason = v1.PodReasonUnschedulable
 		} else {
 			metrics.PodScheduleError(fwk.ProfileName(), metrics.SinceInSeconds(start))
-			reason = SchedulerError
+			reason = v1.PodReasonSchedulerError
 		}
 		// One of the plugins returned status different than success or wait.
 		fwk.RunReservePluginsUnreserve(ctx, state, assumedPod, scheduleResult.SuggestedHost)
@@ -228,7 +226,7 @@ func (sched *Scheduler) bindingCycle(ctx context.Context, state *framework.Cycle
 			reason = v1.PodReasonUnschedulable
 		} else {
 			metrics.PodScheduleError(fwk.ProfileName(), metrics.SinceInSeconds(start))
-			reason = SchedulerError
+			reason = v1.PodReasonSchedulerError
 		}
 		// trigger un-reserve plugins to clean up state associated with the reserved Pod
 		fwk.RunReservePluginsUnreserve(ctx, state, assumedPod, scheduleResult.SuggestedHost)
@@ -263,7 +261,7 @@ func (sched *Scheduler) bindingCycle(ctx context.Context, state *framework.Cycle
 			// TODO(#103853): de-duplicate the logic.
 			sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(internalqueue.AssignedPodDelete, nil)
 		}
-		sched.FailureHandler(ctx, fwk, assumedPodInfo, preBindStatus.AsError(), SchedulerError, clearNominatedNode)
+		sched.FailureHandler(ctx, fwk, assumedPodInfo, preBindStatus.AsError(), v1.PodReasonSchedulerError, clearNominatedNode)
 		return
 	}
 
@@ -280,7 +278,7 @@ func (sched *Scheduler) bindingCycle(ctx context.Context, state *framework.Cycle
 			// TODO(#103853): de-duplicate the logic.
 			sched.SchedulingQueue.MoveAllToActiveOrBackoffQueue(internalqueue.AssignedPodDelete, nil)
 		}
-		sched.FailureHandler(ctx, fwk, assumedPodInfo, fmt.Errorf("binding rejected: %w", err), SchedulerError, clearNominatedNode)
+		sched.FailureHandler(ctx, fwk, assumedPodInfo, fmt.Errorf("binding rejected: %w", err), v1.PodReasonSchedulerError, clearNominatedNode)
 		return
 	}
 	// Calculating nodeResourceString can be heavy. Avoid it if klog verbosity is below 2.
@@ -2665,6 +2665,10 @@ const (
 	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
 	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
 	PodReasonUnschedulable = "Unschedulable"
+
+	// PodReasonSchedulerError reason in PodScheduled PodCondition means that some internal error happens
+	// during scheduling, for example due to nodeAffinity parsing errors.
+	PodReasonSchedulerError = "SchedulerError"
 )
 
 // PodCondition contains details for the current condition of this pod.
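
Note that the moved constant keeps the same string value ("SchedulerError") that the scheduler already recorded, so anything matching on the raw reason string is unaffected; only Go code that referenced the old scheduler-local constant needs to switch to the v1 one. A tiny sanity-check sketch (not from the repository) of that observation:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// The relocation changes where the constants live, not the strings that end
	// up in the PodScheduled condition, so string-based consumers keep working.
	fmt.Println(v1.PodReasonSchedulerError == "SchedulerError") // true
	fmt.Println(v1.PodReasonUnschedulable == "Unschedulable")   // true
}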