Mirror of https://github.com/k3s-io/kubernetes.git
Automatic merge from submit-queue

Disambiguate schedule, assume, and bind in functions as well as in metrics

Addresses my comments in #45972 about how these metrics need to be disambiguated.

- separates schedule, assume, and bind.
- renames variables like `dest` to be explicit.
- moves the logging statement outside of the timed portion of the metric measurement.

Generally makes `scheduleOne` a happy function to read :)
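For illustration only (this sketch is not part of the file below): after the refactor, each of the three phases gets its own timed span, with logging kept outside the measured window. `timePhase` and the no-op phase functions here are hypothetical stand-ins for the real metric calls in `scheduleOne`:

```go
package main

import (
	"fmt"
	"time"
)

// timePhase measures a single scheduling phase in isolation, mirroring how
// scheduleOne now times schedule, assume, and bind separately rather than
// under one ambiguous timer.
func timePhase(name string, phase func() error) error {
	start := time.Now()
	err := phase()
	elapsed := time.Since(start)
	// Logging happens after the measurement, so it cannot inflate the latency.
	fmt.Printf("phase=%s latency=%v err=%v\n", name, elapsed, err)
	return err
}

func main() {
	_ = timePhase("schedule", func() error { return nil }) // find a fitting host
	_ = timePhase("assume", func() error { return nil })   // optimistically update the cache
	_ = timePhase("bind", func() error { return nil })     // write the binding (async in the real code)
}
```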
		
			
				
	
	
		
278 lines · 10 KiB · Go
/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduler

import (
	"fmt"
	"time"

	"github.com/golang/glog"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/core"
	"k8s.io/kubernetes/plugin/pkg/scheduler/metrics"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
	"k8s.io/kubernetes/plugin/pkg/scheduler/util"
)

// Binder knows how to write a binding.
type Binder interface {
	Bind(binding *v1.Binding) error
}

// PodConditionUpdater updates the condition of a pod based on the passed
// PodCondition.
type PodConditionUpdater interface {
	Update(pod *v1.Pod, podCondition *v1.PodCondition) error
}

// Scheduler watches for new unscheduled pods. It attempts to find
// nodes that they fit on and writes bindings back to the api server.
type Scheduler struct {
	config *Config
}

// StopEverything closes the scheduler config's StopEverything channel, to shut
// down the Scheduler.
func (sched *Scheduler) StopEverything() {
	close(sched.config.StopEverything)
}

// Configurator defines I/O, caching, and other functionality needed to
// construct a new scheduler. An implementation of this can be seen in
// factory.go.
type Configurator interface {
	GetPriorityFunctionConfigs(priorityKeys sets.String) ([]algorithm.PriorityConfig, error)
	GetPriorityMetadataProducer() (algorithm.MetadataProducer, error)
	GetPredicateMetadataProducer() (algorithm.MetadataProducer, error)
	GetPredicates(predicateKeys sets.String) (map[string]algorithm.FitPredicate, error)
	GetHardPodAffinitySymmetricWeight() int
	GetSchedulerName() string
	MakeDefaultErrorFunc(backoff *util.PodBackoff, podQueue *cache.FIFO) func(pod *v1.Pod, err error)

	// Probably doesn't need to be public, but exposed for now just in case.
	ResponsibleForPod(pod *v1.Pod) bool

	// Needs to be exposed for things like integration tests where we want to make fake nodes.
	GetNodeLister() corelisters.NodeLister
	GetClient() clientset.Interface
	GetScheduledPodLister() corelisters.PodLister
	Run()

	Create() (*Config, error)
	CreateFromProvider(providerName string) (*Config, error)
	CreateFromConfig(policy schedulerapi.Policy) (*Config, error)
	CreateFromKeys(predicateKeys, priorityKeys sets.String, extenders []algorithm.SchedulerExtender) (*Config, error)
}

// Config is an implementation of the Scheduler's configured input data.
// TODO over time we should make this struct a hidden implementation detail of the scheduler.
type Config struct {
	// It is expected that changes made via SchedulerCache will be observed
	// by NodeLister and Algorithm.
	SchedulerCache schedulercache.Cache
	// Ecache is used for optimistically invalidating affected cache items after
	// successfully binding a pod.
	Ecache     *core.EquivalenceCache
	NodeLister algorithm.NodeLister
	Algorithm  algorithm.ScheduleAlgorithm
	Binder     Binder
	// PodConditionUpdater is used only in case of scheduling errors. If we succeed
	// with scheduling, the PodScheduled condition will be updated in the apiserver by the
	// /bind handler so that binding and setting the PodCondition are atomic.
	PodConditionUpdater PodConditionUpdater

	// NextPod should be a function that blocks until the next pod
	// is available. We don't use a channel for this, because scheduling
	// a pod may take some amount of time and we don't want pods to get
	// stale while they sit in a channel.
	NextPod func() *v1.Pod

	// WaitForCacheSync waits for the scheduler cache to populate.
	// It returns true if it was successful, false if the controller should shut down.
	WaitForCacheSync func() bool

	// Error is called if there is an error. It is passed the pod in
	// question, and the error.
	Error func(*v1.Pod, error)

	// Recorder is the EventRecorder to use.
	Recorder record.EventRecorder

	// Close this to shut down the scheduler.
	StopEverything chan struct{}
}

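// For illustration (not part of the original file): NextPod is kept a blocking
// function rather than a channel so a pod never sits going stale in a buffer.
// A typical implementation pops from a FIFO, blocking until a pod arrives:
//
//	func() *v1.Pod {
//		return cache.Pop(podQueue).(*v1.Pod)
//	}
//
// where podQueue is a *cache.FIFO; factory.go wires up something similar.
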
// NewFromConfigurator returns a new scheduler that is created entirely by the Configurator. Assumes Create() is implemented.
// Supports intermediate Config mutation for now if you provide modifier functions which will run after Config is created.
func NewFromConfigurator(c Configurator, modifiers ...func(c *Config)) (*Scheduler, error) {
	cfg, err := c.Create()
	if err != nil {
		return nil, err
	}
	// Mutate it if any functions were provided; changes might be required for certain types of tests (e.g. to change the recorder).
	for _, modifier := range modifiers {
		modifier(cfg)
	}
	// From this point on the config is immutable to the outside.
	s := &Scheduler{
		config: cfg,
	}
	metrics.Register()
	return s, nil
}

// Run begins watching and scheduling. It waits for the cache to be synced, then starts a goroutine and returns immediately.
func (sched *Scheduler) Run() {
	if !sched.config.WaitForCacheSync() {
		return
	}

	go wait.Until(sched.scheduleOne, 0, sched.config.StopEverything)
}

// Config returns the scheduler's config pointer. It is exposed for testing purposes.
func (sched *Scheduler) Config() *Config {
	return sched.config
}

// schedule implements the scheduling algorithm and returns the suggested host.
func (sched *Scheduler) schedule(pod *v1.Pod) (string, error) {
	host, err := sched.config.Algorithm.Schedule(pod, sched.config.NodeLister)
	if err != nil {
		glog.V(1).Infof("Failed to schedule pod: %v/%v", pod.Namespace, pod.Name)
		sched.config.Error(pod, err)
		sched.config.Recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "%v", err)
		sched.config.PodConditionUpdater.Update(pod, &v1.PodCondition{
			Type:    v1.PodScheduled,
			Status:  v1.ConditionFalse,
			Reason:  v1.PodReasonUnschedulable,
			Message: err.Error(),
		})
		return "", err
	}
	return host, err
}

// assume signals to the cache that a pod is already in the cache, so that binding can be asynchronous.
func (sched *Scheduler) assume(pod *v1.Pod, host string) error {
	// Optimistically assume that the binding will succeed and send it to the apiserver
	// in the background.
	// If the binding fails, the scheduler will release the resources allocated to the
	// assumed pod immediately.
	assumed := *pod
	assumed.Spec.NodeName = host
	if err := sched.config.SchedulerCache.AssumePod(&assumed); err != nil {
		glog.Errorf("scheduler cache AssumePod failed: %v", err)
		// TODO: This means that a given pod is already in cache (which means it
		// is either assumed or already added). This is most probably the result of a
		// BUG in the retrying logic. As a temporary workaround (which doesn't fully
		// fix the problem, but should reduce its impact), we simply return here,
		// as binding doesn't make sense anyway.
		// This should be fixed properly though.
		return err
	}

	// Optimistically assume that the binding will succeed, so we need to invalidate affected
	// predicates in the equivalence cache.
	// If the binding fails, these invalidated items will not break anything.
	if sched.config.Ecache != nil {
		sched.config.Ecache.InvalidateCachedPredicateItemForPodAdd(pod, host)
	}
	return nil
}

// bind binds a pod to a given node defined in a binding object. We expect this to run asynchronously, so we
// handle binding metrics internally.
func (sched *Scheduler) bind(assumed *v1.Pod, b *v1.Binding) error {
	bindingStart := time.Now()
	// If binding succeeded then the PodScheduled condition will be updated in the apiserver so that
	// it's atomic with setting the host.
	err := sched.config.Binder.Bind(b)
	// Tell the cache that this binding attempt is over, whether it succeeded or
	// not; a failed binding is cleaned up below by forgetting the assumed pod.
	if err := sched.config.SchedulerCache.FinishBinding(assumed); err != nil {
		return fmt.Errorf("scheduler cache FinishBinding failed: %v", err)
	}
	if err != nil {
		glog.V(1).Infof("Failed to bind pod: %v/%v", assumed.Namespace, assumed.Name)
		if err := sched.config.SchedulerCache.ForgetPod(assumed); err != nil {
			return fmt.Errorf("scheduler cache ForgetPod failed: %v", err)
		}
		sched.config.Error(assumed, err)
		sched.config.Recorder.Eventf(assumed, v1.EventTypeWarning, "FailedScheduling", "Binding rejected: %v", err)
		sched.config.PodConditionUpdater.Update(assumed, &v1.PodCondition{
			Type:   v1.PodScheduled,
			Status: v1.ConditionFalse,
			Reason: "BindingRejected",
		})
		return err
	}
	metrics.BindingLatency.Observe(metrics.SinceInMicroseconds(bindingStart))
	sched.config.Recorder.Eventf(assumed, v1.EventTypeNormal, "Scheduled", "Successfully assigned %v to %v", assumed.Name, b.Target.Name)
	return nil
}

// scheduleOne does the entire scheduling workflow for a single pod. It is serialized on the scheduling algorithm's host fitting.
func (sched *Scheduler) scheduleOne() {
	pod := sched.config.NextPod()
	if pod.DeletionTimestamp != nil {
		sched.config.Recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name)
		glog.V(3).Infof("Skip schedule deleting pod: %v/%v", pod.Namespace, pod.Name)
		return
	}

	glog.V(3).Infof("Attempting to schedule pod: %v/%v", pod.Namespace, pod.Name)

	// Synchronously attempt to find a fit for the pod.
	start := time.Now()
	suggestedHost, err := sched.schedule(pod)
	metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start))
	if err != nil {
		return
	}

	// Tell the cache to assume that the pod is now running on a given node, even though it hasn't been bound yet.
	// This allows us to keep scheduling without waiting on binding to occur.
	err = sched.assume(pod, suggestedHost)
	if err != nil {
		return
	}

	// Bind the pod to its host asynchronously (we can do this because of the assumption step above).
	go func() {
		err := sched.bind(pod, &v1.Binding{
			ObjectMeta: metav1.ObjectMeta{Namespace: pod.Namespace, Name: pod.Name},
			Target: v1.ObjectReference{
				Kind: "Node",
				Name: suggestedHost,
			},
		})
		metrics.E2eSchedulingLatency.Observe(metrics.SinceInMicroseconds(start))
		if err != nil {
			glog.Errorf("Internal error binding pod: (%v)", err)
		}
	}()
}
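
As a closing illustration (not part of the file above): wiring a scheduler through `NewFromConfigurator`, using a modifier to swap the recorder as the comment on that function suggests tests might. `runScheduler` is hypothetical, and the `Configurator` passed in is assumed to come from factory.go or a test fake:

```go
package main

import (
	"github.com/golang/glog"

	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/plugin/pkg/scheduler"
)

// runScheduler is an illustrative sketch; the Configurator is assumed to be
// the factory.go implementation or a test double.
func runScheduler(c scheduler.Configurator) {
	sched, err := scheduler.NewFromConfigurator(c, func(cfg *scheduler.Config) {
		// Modifiers run after Create(); tests can use this hook to swap in
		// fakes, e.g. replacing the event recorder.
		cfg.Recorder = record.NewFakeRecorder(100)
	})
	if err != nil {
		glog.Fatalf("failed to create scheduler: %v", err)
	}
	sched.Run() // returns immediately; the loop stops when StopEverything is closed.
}

func main() {} // wiring of a real Configurator omitted
```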