Merge pull request #80681 from ricky1993/customize_resource_name_and_namespace
add options for name and namespace of leaderelection object
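Summary of the change (as the hunks below show): the scheduler-specific LockObjectNamespace/LockObjectName fields give way to ResourceNamespace/ResourceName on the shared componentbaseconfig.LeaderElectionConfiguration. The deprecated --lock-object-namespace/--lock-object-name flags now write into the new fields and their help text points to --leader-elect-resource-namespace/--leader-elect-resource-name as replacements; the lock built in makeLeaderElectionConfig reads the new fields; and the expected test fixtures move the namespace and name into the embedded configuration.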
@@ -18,6 +18,7 @@ package options
 
 import (
+	"fmt"
 
 	"github.com/spf13/pflag"
 
 	"k8s.io/apimachinery/pkg/util/validation/field"
@@ -58,8 +59,8 @@ func (o *DeprecatedOptions) AddFlags(fs *pflag.FlagSet, cfg *kubeschedulerconfig
 	fs.Float32Var(&cfg.ClientConnection.QPS, "kube-api-qps", cfg.ClientConnection.QPS, "DEPRECATED: QPS to use while talking with kubernetes apiserver")
 	fs.Int32Var(&cfg.ClientConnection.Burst, "kube-api-burst", cfg.ClientConnection.Burst, "DEPRECATED: burst to use while talking with kubernetes apiserver")
 	fs.StringVar(&cfg.SchedulerName, "scheduler-name", cfg.SchedulerName, "DEPRECATED: name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's \"spec.schedulerName\".")
-	fs.StringVar(&cfg.LeaderElection.LockObjectNamespace, "lock-object-namespace", cfg.LeaderElection.LockObjectNamespace, "DEPRECATED: define the namespace of the lock object.")
-	fs.StringVar(&cfg.LeaderElection.LockObjectName, "lock-object-name", cfg.LeaderElection.LockObjectName, "DEPRECATED: define the name of the lock object.")
+	fs.StringVar(&cfg.LeaderElection.ResourceNamespace, "lock-object-namespace", cfg.LeaderElection.ResourceNamespace, "DEPRECATED: define the namespace of the lock object. Will be removed in favor of leader-elect-resource-namespace.")
+	fs.StringVar(&cfg.LeaderElection.ResourceName, "lock-object-name", cfg.LeaderElection.ResourceName, "DEPRECATED: define the name of the lock object. Will be removed in favor of leader-elect-resource-name")
 
 	fs.Int32Var(&cfg.HardPodAffinitySymmetricWeight, "hard-pod-affinity-symmetric-weight", cfg.HardPodAffinitySymmetricWeight,
 		"RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding "+
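The hunk above keeps the deprecated flag names but binds them directly to the new config fields, so old command lines keep working while the help text steers users to the replacement flags. A minimal, self-contained sketch of that pattern (not scheduler code; the stand-in struct and flag values are illustrative):

	package main

	import (
		"fmt"

		"github.com/spf13/pflag"
	)

	// leaderElectionConfig stands in for the scheduler's config struct.
	type leaderElectionConfig struct {
		ResourceNamespace string
		ResourceName      string
	}

	func main() {
		cfg := leaderElectionConfig{ResourceNamespace: "kube-system", ResourceName: "kube-scheduler"}
		fs := pflag.NewFlagSet("demo", pflag.ExitOnError)
		// Deprecated flag names, bound to the *new* fields.
		fs.StringVar(&cfg.ResourceNamespace, "lock-object-namespace", cfg.ResourceNamespace,
			"DEPRECATED: define the namespace of the lock object.")
		fs.StringVar(&cfg.ResourceName, "lock-object-name", cfg.ResourceName,
			"DEPRECATED: define the name of the lock object.")

		// An old-style invocation still lands in the new fields.
		_ = fs.Parse([]string{"--lock-object-namespace=my-ns", "--lock-object-name=my-scheduler"})
		fmt.Println(cfg.ResourceNamespace, cfg.ResourceName) // my-ns my-scheduler
	}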
@@ -275,8 +275,8 @@ func makeLeaderElectionConfig(config kubeschedulerconfig.KubeSchedulerLeaderElec
 	id := hostname + "_" + string(uuid.NewUUID())
 
 	rl, err := resourcelock.New(config.ResourceLock,
-		config.LockObjectNamespace,
-		config.LockObjectName,
+		config.ResourceNamespace,
+		config.ResourceName,
 		client.CoreV1(),
 		client.CoordinationV1(),
 		resourcelock.ResourceLockConfig{
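For context, a hedged, self-contained sketch of the call this hunk rewires, against client-go of roughly this vintage: resourcelock.New takes the lock type followed by the namespace and name of the lock object, which now come from the shared ResourceNamespace/ResourceName fields. The kubeconfig path and identity below are illustrative assumptions, not values from the PR.

	package main

	import (
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/tools/clientcmd"
		"k8s.io/client-go/tools/leaderelection/resourcelock"
	)

	func main() {
		restCfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // hypothetical path
		if err != nil {
			panic(err)
		}
		client := kubernetes.NewForConfigOrDie(restCfg)

		rl, err := resourcelock.New(
			"endpoints",      // config.ResourceLock
			"kube-system",    // config.ResourceNamespace (previously config.LockObjectNamespace)
			"kube-scheduler", // config.ResourceName (previously config.LockObjectName)
			client.CoreV1(),
			client.CoordinationV1(),
			resourcelock.ResourceLockConfig{Identity: "host_example-uuid"}, // scheduler builds this as hostname + "_" + uuid
		)
		if err != nil {
			panic(err)
		}
		_ = rl // hand rl to leaderelection.LeaderElectionConfig{Lock: rl, ...}
	}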
@@ -234,14 +234,14 @@ pluginConfig:
 			MetricsBindAddress: "0.0.0.0:10251",
 			LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{
 				LeaderElectionConfiguration: componentbaseconfig.LeaderElectionConfiguration{
-					LeaderElect:   true,
-					LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
-					RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
-					RetryPeriod:   metav1.Duration{Duration: 2 * time.Second},
-					ResourceLock:  "endpoints",
+					LeaderElect:       true,
+					LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
+					RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
+					RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
+					ResourceLock:      "endpoints",
+					ResourceNamespace: "kube-system",
+					ResourceName:      "kube-scheduler",
 				},
-				LockObjectNamespace: "kube-system",
-				LockObjectName:      "kube-scheduler",
 			},
 			ClientConnection: componentbaseconfig.ClientConnectionConfiguration{
 				Kubeconfig: configKubeconfig,
@@ -314,14 +314,14 @@ pluginConfig:
 			MetricsBindAddress: "", // defaults empty when not running from config file
 			LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{
 				LeaderElectionConfiguration: componentbaseconfig.LeaderElectionConfiguration{
-					LeaderElect:   true,
-					LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
-					RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
-					RetryPeriod:   metav1.Duration{Duration: 2 * time.Second},
-					ResourceLock:  "endpoints",
+					LeaderElect:       true,
+					LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
+					RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
+					RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
+					ResourceLock:      "endpoints",
+					ResourceNamespace: "kube-system",
+					ResourceName:      "kube-scheduler",
 				},
-				LockObjectNamespace: "kube-system",
-				LockObjectName:      "kube-scheduler",
 			},
 			ClientConnection: componentbaseconfig.ClientConnectionConfiguration{
 				Kubeconfig: flagKubeconfig,
@@ -375,14 +375,14 @@ pluginConfig:
 			MetricsBindAddress: "0.0.0.0:10251",
 			LeaderElection: kubeschedulerconfig.KubeSchedulerLeaderElectionConfiguration{
 				LeaderElectionConfiguration: componentbaseconfig.LeaderElectionConfiguration{
-					LeaderElect:   true,
-					LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
-					RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
-					RetryPeriod:   metav1.Duration{Duration: 2 * time.Second},
-					ResourceLock:  "endpoints",
+					LeaderElect:       true,
+					LeaseDuration:     metav1.Duration{Duration: 15 * time.Second},
+					RenewDeadline:     metav1.Duration{Duration: 10 * time.Second},
+					RetryPeriod:       metav1.Duration{Duration: 2 * time.Second},
+					ResourceLock:      "endpoints",
+					ResourceNamespace: "kube-system",
+					ResourceName:      "kube-scheduler",
 				},
-				LockObjectNamespace: "kube-system",
-				LockObjectName:      "kube-scheduler",
 			},
 			ClientConnection: componentbaseconfig.ClientConnectionConfiguration{
 				Kubeconfig: configKubeconfig,