Remove wrapper around base LeaderElectionConfiguration

Signed-off-by: Aldo Culquicondor <acondor@google.com>
Author: Aldo Culquicondor
Date:   2020-04-28 14:54:34 -04:00
parent 7814f3aaf7
commit 29f5adee7f

11 changed files with 112 additions and 227 deletions
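
The change is easiest to see at the type level. Below is a rough before/after sketch in Go (not a single compiling file), assuming, as the test fixtures in this commit suggest, that the wrapper type carried nothing beyond the embedded component-base struct.

import componentbaseconfig "k8s.io/component-base/config"

// Before: kube-scheduler wrapped the shared leader election type.
type KubeSchedulerLeaderElectionConfiguration struct {
    componentbaseconfig.LeaderElectionConfiguration
}

type KubeSchedulerConfiguration struct {
    // ... other fields elided ...
    LeaderElection KubeSchedulerLeaderElectionConfiguration
}

// After: the shared type is used as the field type directly, so callers no
// longer reach through an extra .LeaderElectionConfiguration level.
type KubeSchedulerConfiguration struct {
    // ... other fields elided ...
    LeaderElection componentbaseconfig.LeaderElectionConfiguration
}

With the wrapper removed, flag binding and leader election setup can take the component-base type directly, which is what the diffs below do.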


@@ -159,7 +159,7 @@ func (o *Options) Flags() (nfs cliflag.NamedFlagSets) {
     o.Authorization.AddFlags(nfs.FlagSet("authorization"))
     o.Deprecated.AddFlags(nfs.FlagSet("deprecated"), &o.ComponentConfig)
-    leaderelectionconfig.BindFlags(&o.ComponentConfig.LeaderElection.LeaderElectionConfiguration, nfs.FlagSet("leader election"))
+    leaderelectionconfig.BindFlags(&o.ComponentConfig.LeaderElection, nfs.FlagSet("leader election"))
     utilfeature.DefaultMutableFeatureGate.AddFlag(nfs.FlagSet("feature gate"))
     o.Metrics.AddFlags(nfs.FlagSet("metrics"))
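
With the wrapper gone, leaderelectionconfig.BindFlags binds flags directly against the base struct instead of its embedded field. The sketch below shows roughly what such a helper looks like after the change; the function name BindLeaderElectionFlags, the flag names, and the help text are assumptions modeled on the usual --leader-elect* flags, not copied from the scheduler's leaderelectionconfig package.

import (
    "github.com/spf13/pflag"

    componentbaseconfig "k8s.io/component-base/config"
)

// BindLeaderElectionFlags registers leader election flags against a
// *componentbaseconfig.LeaderElectionConfiguration (sketch only).
func BindLeaderElectionFlags(l *componentbaseconfig.LeaderElectionConfiguration, fs *pflag.FlagSet) {
    fs.BoolVar(&l.LeaderElect, "leader-elect", l.LeaderElect,
        "Start a leader election client and gain leadership before running the main loop.")
    fs.DurationVar(&l.LeaseDuration.Duration, "leader-elect-lease-duration", l.LeaseDuration.Duration,
        "Duration that non-leader candidates wait before trying to acquire leadership.")
    fs.DurationVar(&l.RenewDeadline.Duration, "leader-elect-renew-deadline", l.RenewDeadline.Duration,
        "Interval within which the acting leader must renew the lease before losing it.")
    fs.DurationVar(&l.RetryPeriod.Duration, "leader-elect-retry-period", l.RetryPeriod.Duration,
        "Wait between attempts to acquire or renew leadership.")
    fs.StringVar(&l.ResourceLock, "leader-elect-resource-lock", l.ResourceLock,
        "Type of resource object used for locking, e.g. \"endpointsleases\" or \"leases\".")
    fs.StringVar(&l.ResourceName, "leader-elect-resource-name", l.ResourceName,
        "Name of the resource object used for locking.")
    fs.StringVar(&l.ResourceNamespace, "leader-elect-resource-namespace", l.ResourceNamespace,
        "Namespace of the resource object used for locking.")
}
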
@@ -274,7 +274,7 @@ func (o *Options) Config() (*schedulerappconfig.Config, error) {
 // makeLeaderElectionConfig builds a leader election configuration. It will
 // create a new resource lock associated with the configuration.
-func makeLeaderElectionConfig(config kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration, client clientset.Interface, recorder record.EventRecorder) (*leaderelection.LeaderElectionConfig, error) {
+func makeLeaderElectionConfig(config componentbaseconfig.LeaderElectionConfiguration, client clientset.Interface, recorder record.EventRecorder) (*leaderelection.LeaderElectionConfig, error) {
     hostname, err := os.Hostname()
     if err != nil {
         return nil, fmt.Errorf("unable to get hostname: %v", err)
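
For context, a hedged sketch of how the remainder of makeLeaderElectionConfig typically continues from here, building a resource lock and returning a client-go leaderelection.LeaderElectionConfig. It assumes imports of k8s.io/apimachinery/pkg/util/uuid, k8s.io/client-go/tools/leaderelection, and its resourcelock subpackage; the identity suffix and error text are illustrative rather than copied from the real file.

    // Add a uniquifier so two processes on the same host get distinct identities.
    id := hostname + "_" + string(uuid.NewUUID())

    rl, err := resourcelock.New(config.ResourceLock,
        config.ResourceNamespace,
        config.ResourceName,
        client.CoreV1(),
        client.CoordinationV1(),
        resourcelock.ResourceLockConfig{
            Identity:      id,
            EventRecorder: recorder,
        })
    if err != nil {
        return nil, fmt.Errorf("couldn't create resource lock: %v", err)
    }

    return &leaderelection.LeaderElectionConfig{
        Lock:          rl,
        LeaseDuration: config.LeaseDuration.Duration,
        RenewDeadline: config.RenewDeadline.Duration,
        RetryPeriod:   config.RetryPeriod.Duration,
        Name:          "kube-scheduler",
    }, nil

Note that the body only needs the plain leader election fields, which is why the narrower componentbaseconfig.LeaderElectionConfiguration parameter is sufficient.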


@@ -300,16 +300,14 @@ profiles:
             EnableProfiling:           true,
             EnableContentionProfiling: true,
         },
-        LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{
-            LeaderElectionConfiguration: componentbaseconfig.LeaderElectionConfiguration{
-                LeaderElect:       true,
-                LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
-                RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
-                RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
-                ResourceLock:      "endpointsleases",
-                ResourceNamespace: "kube-system",
-                ResourceName:      "kube-scheduler",
-            },
+        LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
+            LeaderElect:       true,
+            LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
+            RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
+            RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
+            ResourceLock:      "endpointsleases",
+            ResourceNamespace: "kube-system",
+            ResourceName:      "kube-scheduler",
         },
         ClientConnection: componentbaseconfig.ClientConnectionConfiguration{
             Kubeconfig: configKubeconfig,
@@ -392,16 +390,14 @@ profiles:
             EnableProfiling:           true,
             EnableContentionProfiling: true,
         },
-        LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{
-            LeaderElectionConfiguration: componentbaseconfig.LeaderElectionConfiguration{
-                LeaderElect:       true,
-                LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
-                RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
-                RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
-                ResourceLock:      "endpointsleases",
-                ResourceNamespace: "kube-system",
-                ResourceName:      "kube-scheduler",
-            },
+        LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
+            LeaderElect:       true,
+            LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
+            RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
+            RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
+            ResourceLock:      "endpointsleases",
+            ResourceNamespace: "kube-system",
+            ResourceName:      "kube-scheduler",
         },
         ClientConnection: componentbaseconfig.ClientConnectionConfiguration{
             Kubeconfig: flagKubeconfig,
@@ -458,16 +454,14 @@ profiles:
             EnableProfiling:           true,
             EnableContentionProfiling: true,
         },
-        LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{
-            LeaderElectionConfiguration: componentbaseconfig.LeaderElectionConfiguration{
-                LeaderElect:       true,
-                LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
-                RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
-                RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
-                ResourceLock:      "endpointsleases",
-                ResourceNamespace: "kube-system",
-                ResourceName:      "kube-scheduler",
-            },
+        LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
+            LeaderElect:       true,
+            LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
+            RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
+            RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
+            ResourceLock:      "endpointsleases",
+            ResourceNamespace: "kube-system",
+            ResourceName:      "kube-scheduler",
         },
         ClientConnection: componentbaseconfig.ClientConnectionConfiguration{
             Kubeconfig: flagKubeconfig,
@@ -499,16 +493,14 @@ profiles:
             EnableProfiling:           true,
             EnableContentionProfiling: true,
         },
-        LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{
-            LeaderElectionConfiguration: componentbaseconfig.LeaderElectionConfiguration{
-                LeaderElect:       true,
-                LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
-                RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
-                RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
-                ResourceLock:      "endpointsleases",
-                ResourceNamespace: "kube-system",
-                ResourceName:      "kube-scheduler",
-            },
+        LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
+            LeaderElect:       true,
+            LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
+            RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
+            RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
+            ResourceLock:      "endpointsleases",
+            ResourceNamespace: "kube-system",
+            ResourceName:      "kube-scheduler",
         },
         ClientConnection: componentbaseconfig.ClientConnectionConfiguration{
             Kubeconfig: configKubeconfig,
@@ -569,16 +561,14 @@ profiles:
             EnableProfiling:           true,
             EnableContentionProfiling: true,
         },
-        LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{
-            LeaderElectionConfiguration: componentbaseconfig.LeaderElectionConfiguration{
-                LeaderElect:       true,
-                LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
-                RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
-                RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
-                ResourceLock:      "endpointsleases",
-                ResourceNamespace: "kube-system",
-                ResourceName:      "kube-scheduler",
-            },
+        LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
+            LeaderElect:       true,
+            LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
+            RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
+            RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
+            ResourceLock:      "endpointsleases",
+            ResourceNamespace: "kube-system",
+            ResourceName:      "kube-scheduler",
         },
         ClientConnection: componentbaseconfig.ClientConnectionConfiguration{
             Kubeconfig: configKubeconfig,
@@ -643,16 +633,14 @@ profiles:
             EnableProfiling:           true,
             EnableContentionProfiling: true,
         },
-        LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{
-            LeaderElectionConfiguration: componentbaseconfig.LeaderElectionConfiguration{
-                LeaderElect:       true,
-                LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
-                RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
-                RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
-                ResourceLock:      "endpointsleases",
-                ResourceNamespace: "kube-system",
-                ResourceName:      "kube-scheduler",
-            },
+        LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
+            LeaderElect:       true,
+            LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
+            RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
+            RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
+            ResourceLock:      "endpointsleases",
+            ResourceNamespace: "kube-system",
+            ResourceName:      "kube-scheduler",
         },
         ClientConnection: componentbaseconfig.ClientConnectionConfiguration{
             Kubeconfig: flagKubeconfig,
@@ -699,16 +687,14 @@ profiles:
             EnableProfiling:           true,
             EnableContentionProfiling: true,
         },
-        LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{
-            LeaderElectionConfiguration: componentbaseconfig.LeaderElectionConfiguration{
-                LeaderElect:       true,
-                LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
-                RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
-                RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
-                ResourceLock:      "endpointsleases",
-                ResourceNamespace: "kube-system",
-                ResourceName:      "kube-scheduler",
-            },
+        LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
+            LeaderElect:       true,
+            LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
+            RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
+            RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
+            ResourceLock:      "endpointsleases",
+            ResourceNamespace: "kube-system",
+            ResourceName:      "kube-scheduler",
         },
         ClientConnection: componentbaseconfig.ClientConnectionConfiguration{
             Kubeconfig: flagKubeconfig,