move leader election configuration into component configuration
Signed-off-by: Mike Danese <mikedanese@google.com>
parent 7489cb6e79
commit daa7040195
@@ -62,3 +62,28 @@ const (
 	ProxyModeUserspace ProxyMode = "userspace"
 	ProxyModeIPTables ProxyMode = "iptables"
 )
+
+// LeaderElectionConfiguration defines the configuration of leader election
+// clients for components that can run with leader election enabled.
+type LeaderElectionConfiguration struct {
+	// leaderElect enables a leader election client to gain leadership
+	// before executing the main loop. Enable this when running replicated
+	// components for high availability.
+	LeaderElect bool `json:"leaderElect"`
+	// leaseDuration is the duration that non-leader candidates will wait
+	// after observing a leadership renewal until attempting to acquire
+	// leadership of a led but unrenewed leader slot. This is effectively the
+	// maximum duration that a leader can be stopped before it is replaced
+	// by another candidate. This is only applicable if leader election is
+	// enabled.
+	LeaseDuration unversioned.Duration `json:"leaseDuration"`
+	// renewDeadline is the interval between attempts by the acting master to
+	// renew a leadership slot before it stops leading. This must be less
+	// than or equal to the lease duration. This is only applicable if leader
+	// election is enabled.
+	RenewDeadline unversioned.Duration `json:"renewDeadline"`
+	// retryPeriod is the duration the clients should wait between attempting
+	// acquisition and renewal of a leadership. This is only applicable if
+	// leader election is enabled.
+	RetryPeriod unversioned.Duration `json:"retryPeriod"`
+}
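The hunk above adds the shared LeaderElectionConfiguration type to the componentconfig API group. Below is a minimal, self-contained sketch (not part of the commit) of why the fields use unversioned.Duration rather than time.Duration: a wrapper with custom JSON marshaling lets the serialized configuration carry human-readable durations while the rest of the code keeps using the embedded time.Duration. The Duration type here is a stand-in written for illustration, its string-based marshaling is an assumption about how unversioned.Duration behaves, and the 15s/10s/2s values are illustrative only.

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// Duration mimics the wrapper used by the API types; the real one lives in
// k8s.io/kubernetes/pkg/api/unversioned.
type Duration struct {
	time.Duration
}

// MarshalJSON emits the duration as a string such as "15s" (assumed behavior).
func (d Duration) MarshalJSON() ([]byte, error) {
	return json.Marshal(d.Duration.String())
}

// UnmarshalJSON parses a string duration back into the wrapper.
func (d *Duration) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	parsed, err := time.ParseDuration(s)
	if err != nil {
		return err
	}
	d.Duration = parsed
	return nil
}

// LeaderElectionConfiguration mirrors the struct added in the hunk above,
// with the stand-in Duration in place of unversioned.Duration.
type LeaderElectionConfiguration struct {
	LeaderElect   bool     `json:"leaderElect"`
	LeaseDuration Duration `json:"leaseDuration"`
	RenewDeadline Duration `json:"renewDeadline"`
	RetryPeriod   Duration `json:"retryPeriod"`
}

func main() {
	cfg := LeaderElectionConfiguration{
		LeaderElect:   true,
		LeaseDuration: Duration{15 * time.Second}, // illustrative values
		RenewDeadline: Duration{10 * time.Second},
		RetryPeriod:   Duration{2 * time.Second},
	}
	out, _ := json.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(out))
	// Prints the durations as strings, e.g. "leaseDuration": "15s".
}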
@@ -57,6 +57,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/apis/componentconfig"
 	"k8s.io/kubernetes/pkg/client/record"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/util"
@@ -331,41 +332,32 @@ func (l *LeaderElector) maybeReportTransition() {
 	}
 }
 
-func DefaultLeaderElectionCLIConfig() LeaderElectionCLIConfig {
-	return LeaderElectionCLIConfig{
+func DefaultLeaderElectionConfiguration() componentconfig.LeaderElectionConfiguration {
+	return componentconfig.LeaderElectionConfiguration{
 		LeaderElect:   false,
-		LeaseDuration: DefaultLeaseDuration,
-		RenewDeadline: DefaultRenewDeadline,
-		RetryPeriod:   DefaultRetryPeriod,
+		LeaseDuration: unversioned.Duration{DefaultLeaseDuration},
+		RenewDeadline: unversioned.Duration{DefaultRenewDeadline},
+		RetryPeriod:   unversioned.Duration{DefaultRetryPeriod},
 	}
 }
 
-// LeaderElectionCLIConfig is useful for embedding into component configuration objects
-// to maintain consistent command line flags.
-type LeaderElectionCLIConfig struct {
-	LeaderElect   bool
-	LeaseDuration time.Duration
-	RenewDeadline time.Duration
-	RetryPeriod   time.Duration
-}
-
 // BindFlags binds the common LeaderElectionCLIConfig flags to a flagset
-func (l *LeaderElectionCLIConfig) BindFlags(fs *pflag.FlagSet) {
+func BindFlags(l *componentconfig.LeaderElectionConfiguration, fs *pflag.FlagSet) {
 	fs.BoolVar(&l.LeaderElect, "leader-elect", l.LeaderElect, ""+
 		"Start a leader election client and gain leadership before "+
 		"executing scheduler loop. Enable this when running replicated "+
 		"schedulers.")
-	fs.DurationVar(&l.LeaseDuration, "leader-elect-lease-duration", l.LeaseDuration, ""+
+	fs.DurationVar(&l.LeaseDuration.Duration, "leader-elect-lease-duration", l.LeaseDuration.Duration, ""+
 		"The duration that non-leader candidates will wait after observing a leadership"+
 		"renewal until attempting to acquire leadership of a led but unrenewed leader "+
 		"slot. This is effectively the maximum duration that a leader can be stopped "+
 		"before it is replaced by another candidate. This is only applicable if leader "+
 		"election is enabled.")
-	fs.DurationVar(&l.RenewDeadline, "leader-elect-renew-deadline", l.RenewDeadline, ""+
+	fs.DurationVar(&l.RenewDeadline.Duration, "leader-elect-renew-deadline", l.RenewDeadline.Duration, ""+
 		"The interval between attempts by the acting master to renew a leadership slot "+
 		"before it stops leading. This must be less than or equal to the lease duration. "+
 		"This is only applicable if leader election is enabled.")
-	fs.DurationVar(&l.RetryPeriod, "leader-elect-retry-period", l.RetryPeriod, ""+
+	fs.DurationVar(&l.RetryPeriod.Duration, "leader-elect-retry-period", l.RetryPeriod.Duration, ""+
 		"The duration the clients should wait between attempting acquisition and renewal "+
 		"of a leadership. This is only applicable if leader election is enabled.")
 }
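The hunk above replaces the package-local LeaderElectionCLIConfig with the shared componentconfig type, so BindFlags now takes a *componentconfig.LeaderElectionConfiguration and must bind each duration flag to the embedded field (&l.LeaseDuration.Duration) rather than to the wrapper itself. A small stand-alone sketch of that binding pattern follows; it uses the standard library's flag package in place of pflag (the BoolVar/DurationVar signatures match) and stand-in types, so the names and defaults here are illustrative rather than the commit's code.

package main

import (
	"flag"
	"fmt"
	"time"
)

type Duration struct{ time.Duration } // stand-in for unversioned.Duration

type LeaderElectionConfiguration struct {
	LeaderElect   bool
	LeaseDuration Duration
	RenewDeadline Duration
	RetryPeriod   Duration
}

// bindFlags mirrors the pattern in the diff: flags are bound to the embedded
// time.Duration, which is why the new code passes &l.LeaseDuration.Duration.
func bindFlags(l *LeaderElectionConfiguration, fs *flag.FlagSet) {
	fs.BoolVar(&l.LeaderElect, "leader-elect", l.LeaderElect, "enable leader election")
	fs.DurationVar(&l.LeaseDuration.Duration, "leader-elect-lease-duration", l.LeaseDuration.Duration, "lease duration")
	fs.DurationVar(&l.RenewDeadline.Duration, "leader-elect-renew-deadline", l.RenewDeadline.Duration, "renew deadline")
	fs.DurationVar(&l.RetryPeriod.Duration, "leader-elect-retry-period", l.RetryPeriod.Duration, "retry period")
}

func main() {
	cfg := LeaderElectionConfiguration{
		LeaseDuration: Duration{15 * time.Second}, // illustrative defaults
		RenewDeadline: Duration{10 * time.Second},
		RetryPeriod:   Duration{2 * time.Second},
	}
	fs := flag.NewFlagSet("demo", flag.ExitOnError)
	bindFlags(&cfg, fs)
	// Command-line overrides land directly in the wrapped durations.
	_ = fs.Parse([]string{"--leader-elect=true", "--leader-elect-lease-duration=30s"})
	fmt.Printf("%+v\n", cfg)
}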
@@ -21,6 +21,7 @@ import (
 	"net"
 
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/apis/componentconfig"
 	"k8s.io/kubernetes/pkg/client/leaderelection"
 	"k8s.io/kubernetes/pkg/master/ports"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
@@ -42,7 +43,7 @@ type SchedulerServer struct {
 	KubeAPIQPS     float32
 	KubeAPIBurst   int
 	SchedulerName  string
-	LeaderElection leaderelection.LeaderElectionCLIConfig
+	LeaderElection componentconfig.LeaderElectionConfiguration
 }
 
 // NewSchedulerServer creates a new SchedulerServer with default parameters
@@ -56,7 +57,7 @@ func NewSchedulerServer() *SchedulerServer {
 		KubeAPIQPS:     50.0,
 		KubeAPIBurst:   100,
 		SchedulerName:  api.DefaultSchedulerName,
-		LeaderElection: leaderelection.DefaultLeaderElectionCLIConfig(),
+		LeaderElection: leaderelection.DefaultLeaderElectionConfiguration(),
 	}
 	return &s
 }
@@ -75,5 +76,5 @@ func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) {
 	fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
 	fs.IntVar(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
 	fs.StringVar(&s.SchedulerName, "scheduler-name", s.SchedulerName, "Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's annotation with key 'scheduler.alpha.kubernetes.io/name'")
-	s.LeaderElection.BindFlags(fs)
+	leaderelection.BindFlags(&s.LeaderElection, fs)
 }
@@ -136,9 +136,9 @@ func Run(s *options.SchedulerServer) error {
 		Client:        kubeClient,
 		Identity:      id,
 		EventRecorder: config.Recorder,
-		LeaseDuration: s.LeaderElection.LeaseDuration,
-		RenewDeadline: s.LeaderElection.RenewDeadline,
-		RetryPeriod:   s.LeaderElection.RetryPeriod,
+		LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
+		RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
+		RetryPeriod:   s.LeaderElection.RetryPeriod.Duration,
 		Callbacks: leaderelection.LeaderCallbacks{
 			OnStartedLeading: run,
 			OnStoppedLeading: func() {
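The final hunk unwraps the configured durations via .Duration when the scheduler hands them to the leader election client, while the callback wiring stays as it was. The sketch below illustrates that hand-off with stand-in types only: electorConfig and callbacks here are hypothetical substitutes for leaderelection.LeaderElectionConfig and leaderelection.LeaderCallbacks (the latter visible in the context lines), and the duration values are illustrative.

package main

import (
	"fmt"
	"os"
	"time"
)

type Duration struct{ time.Duration } // stand-in for unversioned.Duration

// callbacks and electorConfig are hypothetical stand-ins for the real
// leaderelection.LeaderCallbacks and leaderelection.LeaderElectionConfig.
type callbacks struct {
	OnStartedLeading func()
	OnStoppedLeading func()
}

type electorConfig struct {
	LeaseDuration, RenewDeadline, RetryPeriod time.Duration
	Callbacks                                 callbacks
}

func main() {
	// Wrapped durations as they would arrive from the component configuration.
	lease := Duration{15 * time.Second}
	renew := Duration{10 * time.Second}
	retry := Duration{2 * time.Second}

	run := func() { fmt.Println("became leader; running main loop") }

	// Mirrors the hunk: each configured duration is unwrapped via .Duration
	// before it reaches the leader election client.
	cfg := electorConfig{
		LeaseDuration: lease.Duration,
		RenewDeadline: renew.Duration,
		RetryPeriod:   retry.Duration,
		Callbacks: callbacks{
			OnStartedLeading: run,
			OnStoppedLeading: func() {
				fmt.Println("lost leadership; exiting")
				os.Exit(1)
			},
		},
	}
	fmt.Printf("lease=%v renew=%v retry=%v\n", cfg.LeaseDuration, cfg.RenewDeadline, cfg.RetryPeriod)
	cfg.Callbacks.OnStartedLeading()
}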