Revert "Make scheduler optimistic about its bindings"
parent e968b6be81
commit 5fbe58b2c8
@@ -104,21 +104,20 @@ func (s *Scheduler) Run() {
 	go util.Until(s.scheduleOne, 0, s.config.StopEverything)
 }
 
-func (s *Scheduler) schedule() (executeBinding func()) {
+func (s *Scheduler) scheduleOne() {
 	pod := s.config.NextPod()
 	glog.V(3).Infof("Attempting to schedule: %v", pod)
 	start := time.Now()
-	recordTime := func() {
+	defer func() {
 		metrics.E2eSchedulingLatency.Observe(metrics.SinceInMicroseconds(start))
-	}
+	}()
 	dest, err := s.config.Algorithm.Schedule(pod, s.config.MinionLister)
 	metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start))
 	if err != nil {
 		glog.V(1).Infof("Failed to schedule: %v", pod)
 		s.config.Recorder.Eventf(pod, "failedScheduling", "Error scheduling: %v", err)
 		s.config.Error(pod, err)
-		recordTime()
-		return func() {}
+		return
 	}
 	b := &api.Binding{
 		ObjectMeta: api.ObjectMeta{Namespace: pod.Namespace, Name: pod.Name},
@@ -128,32 +127,22 @@ func (s *Scheduler) schedule() (executeBinding func()) {
 		},
 	}
 
-	// Actually do the binding asynchronously with respect to the scheduling queue.
-	return func() {
-		defer recordTime()
-		defer util.HandleCrash()
-
-		// Make an object representing our assumtion that the bind will succeed.
-		assumed := *pod
-		assumed.Spec.Host = dest
-		s.config.Modeler.AssumePod(&assumed)
-
+	// We want to add the pod to the model iff the bind succeeds, but we don't want to race
+	// with any deletions, which happen asyncronously.
+	s.config.Modeler.LockedAction(func() {
 		bindingStart := time.Now()
 		err := s.config.Binder.Bind(b)
 		metrics.BindingLatency.Observe(metrics.SinceInMicroseconds(bindingStart))
 		if err != nil {
-			// Remove our (now invalid) assumption
-			s.config.Modeler.ForgetPod(&assumed)
 			glog.V(1).Infof("Failed to bind pod: %v", err)
 			s.config.Recorder.Eventf(pod, "failedScheduling", "Binding rejected: %v", err)
 			s.config.Error(pod, err)
 			return
 		}
 		s.config.Recorder.Eventf(pod, "scheduled", "Successfully assigned %v to %v", pod.Name, dest)
-	}
-}
-
-func (s *Scheduler) scheduleOne() {
-	bind := s.schedule()
-	go bind()
+		// tell the model to assume that this binding took effect.
+		assumed := *pod
+		assumed.Spec.Host = dest
+		s.config.Modeler.AssumePod(&assumed)
+	})
 }
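The hunks above undo the optimistic-binding split: before the revert, the scheduler assumed the pod into its model up front, bound it asynchronously in a separate goroutine, and called ForgetPod if the bind was rejected; after the revert, the bind runs synchronously inside Modeler.LockedAction and the pod is assumed only once the bind has succeeded. The following is a minimal, self-contained Go sketch of that restored control flow, not the Kubernetes code itself; Pod, Binder, Modeler, and scheduleOne here are simplified stand-ins for illustration only.

// optimistic_revert_sketch.go — a hedged sketch of the post-revert flow:
// bind first, and only assume the pod into the model on success.
package main

import "fmt"

// Pod is a stand-in for the scheduler's pod object.
type Pod struct {
	Name string
	Host string
}

// Binder performs the actual bind call (stubbed below).
type Binder interface {
	Bind(pod *Pod, host string) error
}

// Modeler tracks pods the scheduler believes are already placed.
type Modeler struct {
	assumed map[string]*Pod
}

// LockedAction runs f as one critical section so the assumption cannot race
// with concurrent deletions (a real implementation would hold a sync.Mutex).
func (m *Modeler) LockedAction(f func()) { f() }

// AssumePod records a pod as placed in the scheduler's local model.
func (m *Modeler) AssumePod(p *Pod) { m.assumed[p.Name] = p }

// fakeBinder simulates a bind that can be rejected.
type fakeBinder struct{ fail bool }

func (b fakeBinder) Bind(pod *Pod, host string) error {
	if b.fail {
		return fmt.Errorf("binding rejected")
	}
	return nil
}

// scheduleOne mirrors the reverted shape: the bind and the model update happen
// inside one locked section; on failure nothing was assumed, so nothing to forget.
func scheduleOne(pod *Pod, dest string, binder Binder, model *Modeler) {
	model.LockedAction(func() {
		if err := binder.Bind(pod, dest); err != nil {
			fmt.Printf("failed to bind %s: %v\n", pod.Name, err)
			return
		}
		// Tell the model to assume this binding took effect.
		assumed := *pod
		assumed.Host = dest
		model.AssumePod(&assumed)
		fmt.Printf("assigned %s to %s\n", pod.Name, dest)
	})
}

func main() {
	model := &Modeler{assumed: map[string]*Pod{}}
	scheduleOne(&Pod{Name: "foo"}, "machine1", fakeBinder{}, model)
	scheduleOne(&Pod{Name: "bar"}, "machine1", fakeBinder{fail: true}, model)
}
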
@@ -113,11 +113,6 @@ func TestScheduler(t *testing.T) {
 			AssumePodFunc: func(pod *api.Pod) {
 				gotAssumedPod = pod
 			},
-			ForgetPodFunc: func(pod *api.Pod) {
-				if gotAssumedPod != nil && gotAssumedPod.Name == pod.Name && gotAssumedPod.Namespace == pod.Namespace {
-					gotAssumedPod = nil
-				}
-			},
 		},
 		MinionLister: algorithm.FakeMinionLister(
 			api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
@@ -144,7 +139,7 @@ func TestScheduler(t *testing.T) {
 		}
 		close(called)
 	})
-	s.schedule()()
+	s.scheduleOne()
 	if e, a := item.expectAssumedPod, gotAssumedPod; !reflect.DeepEqual(e, a) {
 		t.Errorf("%v: assumed pod: wanted %v, got %v", i, e, a)
 	}
@@ -234,7 +229,7 @@ func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
 	// scheduledPodStore: []
 	// assumedPods: []
 
-	s.schedule()()
+	s.scheduleOne()
 	// queuedPodStore: []
 	// scheduledPodStore: [foo:8080]
 	// assumedPods: [foo:8080]
@@ -288,7 +283,7 @@ func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
 		close(called)
 	})
 
-	s.schedule()()
+	s.scheduleOne()
 
 	expectBind = &api.Binding{
 		ObjectMeta: api.ObjectMeta{Name: "bar"},
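The test changes follow from the revert: the tests now call s.scheduleOne() directly instead of s.schedule()(), and the fake modeler's ForgetPodFunc hook is dropped because the scheduler no longer assumes a pod before binding, so there is no mid-flight assumption to undo. Below is a hedged sketch of that function-field fake pattern; fakeModeler and the surrounding code are illustrative stand-ins, not the actual test scaffolding.

// fake_modeler_sketch.go — recording which pod was assumed via a function field,
// as the diff's AssumePodFunc does.
package main

import "fmt"

// Pod is a stand-in for the scheduler's pod object.
type Pod struct{ Name string }

// fakeModeler lets a test inject behavior per call, like AssumePodFunc above.
type fakeModeler struct {
	AssumePodFunc func(pod *Pod)
}

func (f *fakeModeler) AssumePod(pod *Pod) {
	if f.AssumePodFunc != nil {
		f.AssumePodFunc(pod)
	}
}

func main() {
	var gotAssumedPod *Pod
	fm := &fakeModeler{AssumePodFunc: func(pod *Pod) { gotAssumedPod = pod }}

	// Simulate the scheduler assuming a pod after a successful bind.
	fm.AssumePod(&Pod{Name: "foo"})

	if gotAssumedPod == nil || gotAssumedPod.Name != "foo" {
		fmt.Println("assumed pod: wanted foo, got", gotAssumedPod)
	} else {
		fmt.Println("assumed pod recorded:", gotAssumedPod.Name)
	}
}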