Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #53793 from wojtek-t/separate_leader_election_in_scheduler
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Use separate client for leader election in scheduler.

Ref https://github.com/kubernetes/kubernetes/issues/53327

@kubernetes/sig-scheduling-bugs @bsalamat @davidopp

```release-note
Use separate client for leader election in scheduler to avoid starving leader election by regular scheduler operations.
```
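The mechanism is simple: the scheduler now builds two `Clientset`s from the same kubeconfig, tagged with the user agents `scheduler` and `leader-election`, so each gets its own client-side rate limiter and heavy scheduling traffic can no longer starve lock renewals. A rough standalone sketch of the same idea against the public client-go API (the helper name `buildClients` and its parameters are illustrative, not part of this patch):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

// buildClients is a hypothetical helper mirroring createClients from this patch:
// it derives two independent Clientsets from one kubeconfig so the
// leader-election client is not throttled by the scheduler's own traffic.
func buildClients(master, kubeconfigPath string, qps float32, burst int) (*kubernetes.Clientset, *kubernetes.Clientset, error) {
	config, err := clientcmd.BuildConfigFromFlags(master, kubeconfigPath)
	if err != nil {
		return nil, nil, fmt.Errorf("unable to build config from flags: %v", err)
	}
	config.QPS = qps
	config.Burst = burst

	// With no explicit RateLimiter on the config, each NewForConfig call
	// builds its own token bucket from QPS/Burst, so the two clients share
	// only the server-side identity, not a rate limiter.
	schedulerClient, err := kubernetes.NewForConfig(rest.AddUserAgent(config, "scheduler"))
	if err != nil {
		return nil, nil, fmt.Errorf("invalid API configuration: %v", err)
	}
	leaderElectionClient, err := kubernetes.NewForConfig(rest.AddUserAgent(config, "leader-election"))
	if err != nil {
		return nil, nil, fmt.Errorf("invalid API configuration: %v", err)
	}
	return schedulerClient, leaderElectionClient, nil
}
```

The distinct user agents also make the two traffic classes easy to tell apart in apiserver logs and metrics, which is a side benefit of splitting the clients rather than just raising the scheduler's QPS.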
Commit 9c1796a733
```diff
@@ -56,22 +56,22 @@ func createRecorder(kubecli *clientset.Clientset, s *options.SchedulerServer) record.EventRecorder {
 	return eventBroadcaster.NewRecorder(api.Scheme, v1.EventSource{Component: s.SchedulerName})
 }
 
-func createClient(s *options.SchedulerServer) (*clientset.Clientset, error) {
+func createClients(s *options.SchedulerServer) (*clientset.Clientset, *clientset.Clientset, error) {
 	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
 	if err != nil {
-		return nil, fmt.Errorf("unable to build config from flags: %v", err)
+		return nil, nil, fmt.Errorf("unable to build config from flags: %v", err)
 	}
 
 	kubeconfig.ContentType = s.ContentType
 	// Override kubeconfig qps/burst settings from flags
 	kubeconfig.QPS = s.KubeAPIQPS
 	kubeconfig.Burst = int(s.KubeAPIBurst)
 
-	cli, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "leader-election"))
+	kubeClient, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "scheduler"))
 	if err != nil {
-		return nil, fmt.Errorf("invalid API configuration: %v", err)
+		glog.Fatalf("Invalid API configuration: %v", err)
 	}
-	return cli, nil
+
+	leaderElectionClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election"))
+	return kubeClient, leaderElectionClient, nil
 }
 
 // CreateScheduler encapsulates the entire creation of a runnable scheduler.
@@ -70,23 +70,23 @@ func Run(s *options.SchedulerServer) error {
 	// To help debugging, immediately log version
 	glog.Infof("Version: %+v", version.Get())
 
-	kubecli, err := createClient(s)
+	kubeClient, leaderElectionClient, err := createClients(s)
 	if err != nil {
 		return fmt.Errorf("unable to create kube client: %v", err)
 	}
 
-	recorder := createRecorder(kubecli, s)
+	recorder := createRecorder(kubeClient, s)
 
-	informerFactory := informers.NewSharedInformerFactory(kubecli, 0)
+	informerFactory := informers.NewSharedInformerFactory(kubeClient, 0)
 	// cache only non-terminal pods
-	podInformer := factory.NewPodInformer(kubecli, 0)
+	podInformer := factory.NewPodInformer(kubeClient, 0)
 
 	// Apply algorithms based on feature gates.
 	algorithmprovider.ApplyFeatureGates()
 
 	sched, err := CreateScheduler(
 		s,
-		kubecli,
+		kubeClient,
 		informerFactory.Core().V1().Nodes(),
 		podInformer,
 		informerFactory.Core().V1().PersistentVolumes(),
@@ -131,7 +131,7 @@ func Run(s *options.SchedulerServer) error {
 	rl, err := resourcelock.New(s.LeaderElection.ResourceLock,
 		s.LockObjectNamespace,
 		s.LockObjectName,
-		kubecli.CoreV1(),
+		leaderElectionClient.CoreV1(),
 		resourcelock.ResourceLockConfig{
 			Identity:      id,
 			EventRecorder: recorder,
```
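For context, here is a hedged sketch of how a dedicated client like `leaderElectionClient` is typically wired into leader election. It is written against a recent client-go, where `resourcelock.New` also takes a CoordinationV1 client and `RunOrDie` takes a context; the 2017-era signatures used in the hunk above differ slightly, and the namespace/name values and the `runSchedulerLoop` callback are placeholders, not code from this PR:

```go
package main

import (
	"context"
	"os"
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog/v2"
)

// runWithLeaderElection acquires the scheduler lock through the dedicated
// leader-election client; ordinary scheduling traffic goes through kubeClient
// and therefore cannot exhaust the lock client's rate limits.
func runWithLeaderElection(ctx context.Context, kubeClient, leaderElectionClient *kubernetes.Clientset, runSchedulerLoop func(context.Context)) error {
	id, err := os.Hostname()
	if err != nil {
		return err
	}

	lock, err := resourcelock.New(
		resourcelock.LeasesResourceLock,
		"kube-system",    // lock object namespace (illustrative)
		"kube-scheduler", // lock object name (illustrative)
		leaderElectionClient.CoreV1(),
		leaderElectionClient.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: id},
	)
	if err != nil {
		return err
	}

	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second,
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: runSchedulerLoop,
			OnStoppedLeading: func() { klog.Info("lost leadership, exiting") },
		},
	})
	return nil
}
```

The only point that matters for this PR is that the lock traffic goes through `leaderElectionClient`, so lease renewals never queue behind the scheduler's regular API calls.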