Merge pull request #14614 from mesosphere/sttts-fix-pre-scheduled-termination
Auto commit by PR queue bot
commit 069b91d31f
@@ -440,11 +440,6 @@ func (q *queuer) Run(done <-chan struct{}) {
 			if recoverAssignedSlave(pod.Pod) != "" {
 				log.V(3).Infof("dequeuing assigned pod for scheduling: %v", pod.Pod.Name)
 				q.dequeue(pod.GetUID())
-			} else if pod.InGracefulTermination() {
-				// pods which are pre-scheduled (i.e. NodeName is set) may be gracefully deleted,
-				// even though they are not running yet.
-				log.V(3).Infof("dequeuing graceful deleted pre-scheduled pod for scheduling: %v", pod.Pod.Name)
-				q.dequeue(pod.GetUID())
 			} else {
 				// use ReplaceExisting because we are always pushing the latest state
 				now := time.Now()
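
For orientation, a minimal standalone sketch of the dequeue decision that remains in queuer.Run after this hunk. The types and helper below are hypothetical stand-ins; the real code works on the scheduler's Pod wrapper and recoverAssignedSlave.

package main

import "fmt"

// queuedPod is a hypothetical stand-in for the scheduler's Pod wrapper;
// assignedSlave models what recoverAssignedSlave would return for it.
type queuedPod struct {
	name          string
	assignedSlave string
}

// shouldDequeue mirrors the branch left in queuer.Run after this change:
// only pods that already have a slave assigned are dequeued for scheduling;
// everything else is requeued with the latest state (ReplaceExisting).
func shouldDequeue(p queuedPod) bool {
	return p.assignedSlave != ""
}

func main() {
	fmt.Println(shouldDequeue(queuedPod{name: "web-0", assignedSlave: "slave-1"})) // true: dequeue for scheduling
	fmt.Println(shouldDequeue(queuedPod{name: "web-0"}))                           // false: requeue latest state
}

Graceful deletion of pre-scheduled pods is no longer special-cased here; the next hunk moves that handling into scheduleOne.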
@@ -744,6 +739,16 @@ func (s *schedulingPlugin) Run(done <-chan struct{}) {
 // with the Modeler stuff removed since we don't use it because we have mesos.
 func (s *schedulingPlugin) scheduleOne() {
 	pod := s.config.NextPod()
+
+	// pods which are pre-scheduled (i.e. NodeName is set) are deleted by the kubelet
+	// in upstream. Not so in Mesos because the kubelet hasn't see that pod yet. Hence,
+	// the scheduler has to take care of this:
+	if pod.Spec.NodeName != "" && pod.DeletionTimestamp != nil {
+		log.V(3).Infof("deleting pre-scheduled, not yet running pod: %s/%s", pod.Namespace, pod.Name)
+		s.client.Pods(pod.Namespace).Delete(pod.Name, api.NewDeleteOptions(0))
+		return
+	}
+
 	log.V(3).Infof("Attempting to schedule: %+v", pod)
 	dest, err := s.config.Algorithm.Schedule(pod, s.config.NodeLister) // call kubeScheduler.Schedule
 	if err != nil {
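
A minimal sketch of the condition the new guard checks before deleting, using hypothetical trimmed-down types; the real code reads pod.Spec.NodeName and pod.DeletionTimestamp on an api.Pod and deletes through s.client.

package main

import (
	"fmt"
	"time"
)

// podStub is a hypothetical stand-in carrying only the two fields the new
// scheduleOne check reads from api.Pod.
type podStub struct {
	NodeName          string     // Spec.NodeName: set once the pod is bound to a node
	DeletionTimestamp *time.Time // metadata field set when deletion has been requested
}

// isPreScheduledAndDeleted mirrors the new guard in scheduleOne: the pod was
// already bound to a node by name, but a deletion was requested before the
// Mesos kubelet ever saw it, so the scheduler has to delete it itself.
func isPreScheduledAndDeleted(p podStub) bool {
	return p.NodeName != "" && p.DeletionTimestamp != nil
}

func main() {
	now := time.Now()
	fmt.Println(isPreScheduledAndDeleted(podStub{NodeName: "slave-1", DeletionTimestamp: &now})) // true: delete, skip scheduling
	fmt.Println(isPreScheduledAndDeleted(podStub{NodeName: "slave-1"}))                          // false: proceed to schedule
}

In the commit itself the delete is issued with api.NewDeleteOptions(0), i.e. a zero grace period, so the pre-scheduled pod is removed immediately instead of waiting on a kubelet that will never act on it.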
@@ -78,8 +78,3 @@ func (p *Pod) String() string {
 	}
 	return fmt.Sprintf("{pod:%v, deadline:%v, delay:%v}", p.Pod.Name, displayDeadline, p.GetDelay())
 }
-
-func (p *Pod) InGracefulTermination() bool {
-	return p.Pod.DeletionTimestamp != nil &&
-		p.Pod.DeletionGracePeriodSeconds != nil && *p.Pod.DeletionGracePeriodSeconds > 0
-}
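
For reference, the semantics of the removed helper as a standalone sketch with hypothetical stand-in types; the original read p.Pod.DeletionTimestamp and p.Pod.DeletionGracePeriodSeconds.

package main

import (
	"fmt"
	"time"
)

// objectMeta is a hypothetical stand-in for the metadata fields the removed
// InGracefulTermination helper inspected.
type objectMeta struct {
	DeletionTimestamp          *time.Time
	DeletionGracePeriodSeconds *int64
}

// inGracefulTermination reproduces the deleted helper's logic: deletion has
// been requested and a positive grace period is still pending.
func inGracefulTermination(m objectMeta) bool {
	return m.DeletionTimestamp != nil &&
		m.DeletionGracePeriodSeconds != nil && *m.DeletionGracePeriodSeconds > 0
}

func main() {
	now := time.Now()
	grace := int64(30)
	fmt.Println(inGracefulTermination(objectMeta{DeletionTimestamp: &now, DeletionGracePeriodSeconds: &grace})) // true
	fmt.Println(inGracefulTermination(objectMeta{DeletionTimestamp: &now}))                                     // false: no grace period pending
}

With the queuer special case removed in the first hunk, this helper appears to have no remaining callers in this change, hence its removal.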
|