Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #84084 from wojtek-t/migrate_scheduler_to_endpoints_lease_lock

Migrate components to EndpointsLeases leader election lock

Commit badcd4af3f
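For readers unfamiliar with the lock types: "endpointsleases" selects client-go's multilock, which acquires and renews leadership on both the legacy Endpoints object and a coordination.k8s.io Lease, so existing Endpoints-based observers keep working while components move toward a pure Lease lock. The sketch below is illustrative only and is not part of this commit; the helper name runWithEndpointsLeasesLock, the identity parameter, and the hard-coded kube-system/kube-scheduler lock coordinates are assumptions chosen to mirror the defaults asserted in the diff below.

package leaderexample

import (
	"context"
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

// runWithEndpointsLeasesLock (hypothetical helper) runs `run` only while this
// process holds the leader lock. With the "endpointsleases" lock type,
// client-go writes the legacy Endpoints record and a coordination.k8s.io
// Lease together, which is what allows a later switch to a pure Lease lock
// without two leaders appearing during an upgrade.
func runWithEndpointsLeasesLock(ctx context.Context, client kubernetes.Interface, identity string, run func(context.Context)) error {
	lock, err := resourcelock.New(
		resourcelock.EndpointsLeasesResourceLock, // "endpointsleases"
		"kube-system",    // lock namespace, mirroring ResourceNamespace below
		"kube-scheduler", // lock name, mirroring ResourceName below
		client.CoreV1(),
		client.CoordinationV1(),
		resourcelock.ResourceLockConfig{Identity: identity},
	)
	if err != nil {
		return err
	}
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:          lock,
		LeaseDuration: 15 * time.Second, // mirrors the 15s/10s/2s defaults in the diff below
		RenewDeadline: 10 * time.Second,
		RetryPeriod:   2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: run,
			OnStoppedLeading: func() { /* leadership lost; a real component would exit */ },
		},
	})
	return nil
}

Once every participant in an election runs with "endpointsleases", switching the primary lock to "leases" and eventually dropping the Endpoints write becomes safe; that is the migration path the TODO comments in this change point at.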
@@ -49,7 +49,7 @@ func TestDefaultFlags(t *testing.T) {
             },
             ControllerStartInterval: metav1.Duration{Duration: 0},
             LeaderElection: componentbaseconfig.LeaderElectionConfiguration{
-                ResourceLock:  "endpoints",
+                ResourceLock:  "endpointsleases",
                 LeaderElect:   true,
                 LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
                 RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
@@ -266,7 +266,7 @@ pluginConfig:
                 LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
                 RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
                 RetryPeriod:   metav1.Duration{Duration: 2 * time.Second},
-                ResourceLock:      "endpoints",
+                ResourceLock:      "endpointsleases",
                 ResourceNamespace: "kube-system",
                 ResourceName:      "kube-scheduler",
             },
@@ -348,7 +348,7 @@ pluginConfig:
                 LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
                 RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
                 RetryPeriod:   metav1.Duration{Duration: 2 * time.Second},
-                ResourceLock:      "endpoints",
+                ResourceLock:      "endpointsleases",
                 ResourceNamespace: "kube-system",
                 ResourceName:      "kube-scheduler",
             },
@@ -411,7 +411,7 @@ pluginConfig:
                 LeaseDuration: metav1.Duration{Duration: 15 * time.Second},
                 RenewDeadline: metav1.Duration{Duration: 10 * time.Second},
                 RetryPeriod:   metav1.Duration{Duration: 2 * time.Second},
-                ResourceLock:      "endpoints",
+                ResourceLock:      "endpointsleases",
                 ResourceNamespace: "kube-system",
                 ResourceName:      "kube-scheduler",
             },
@@ -126,6 +126,10 @@ func RecommendedDefaultGenericControllerManagerConfiguration(obj *kubectrlmgrcon
         obj.Controllers = []string{"*"}
     }
 
+    if len(obj.LeaderElection.ResourceLock) == 0 {
+        obj.LeaderElection.ResourceLock = "endpointsleases"
+    }
+
     // Use the default ClientConnectionConfiguration and LeaderElectionConfiguration options
     componentbaseconfigv1alpha1.RecommendedDefaultClientConnectionConfiguration(&obj.ClientConnection)
     componentbaseconfigv1alpha1.RecommendedDefaultLeaderElectionConfiguration(&obj.LeaderElection)
@@ -73,6 +73,9 @@ func SetDefaults_KubeSchedulerConfiguration(obj *kubeschedulerconfigv1alpha1.Kub
         obj.MetricsBindAddress = net.JoinHostPort("0.0.0.0", strconv.Itoa(ports.InsecureSchedulerPort))
     }
 
+    if len(obj.LeaderElection.ResourceLock) == 0 {
+        obj.LeaderElection.ResourceLock = "endpointsleases"
+    }
     if len(obj.LeaderElection.LockObjectNamespace) == 0 && len(obj.LeaderElection.ResourceNamespace) == 0 {
         obj.LeaderElection.LockObjectNamespace = kubeschedulerconfigv1alpha1.SchedulerDefaultLockObjectNamespace
     }
@@ -45,6 +45,7 @@ const (
     autoscalingGroup = "autoscaling"
     batchGroup = "batch"
     certificatesGroup = "certificates.k8s.io"
+    coordinationGroup = "coordination.k8s.io"
     discoveryGroup = "discovery.k8s.io"
     extensionsGroup = "extensions"
     policyGroup = "policy"
@@ -172,7 +173,7 @@ func NodeRules() []rbacv1.PolicyRule {
 
     // Node leases
     if utilfeature.DefaultFeatureGate.Enabled(features.NodeLease) {
-        nodePolicyRules = append(nodePolicyRules, rbacv1helpers.NewRule("get", "create", "update", "patch", "delete").Groups("coordination.k8s.io").Resources("leases").RuleOrDie())
+        nodePolicyRules = append(nodePolicyRules, rbacv1helpers.NewRule("get", "create", "update", "patch", "delete").Groups(coordinationGroup).Resources("leases").RuleOrDie())
     }
 
     // RuntimeClass
@@ -394,10 +395,17 @@ func ClusterRoles() []rbacv1.ClusterRole {
             ObjectMeta: metav1.ObjectMeta{Name: "system:kube-controller-manager"},
             Rules: []rbacv1.PolicyRule{
                 eventsRule(),
-                rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(),
+                // Needed for leader election.
+                rbacv1helpers.NewRule("create").Groups(coordinationGroup).Resources("leases").RuleOrDie(),
+                rbacv1helpers.NewRule("get", "update").Groups(coordinationGroup).Resources("leases").Names("kube-controller-manager").RuleOrDie(),
+                // TODO: Remove once we fully migrate to lease in leader-election.
+                rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
+                rbacv1helpers.NewRule("get", "update").Groups(legacyGroup).Resources("endpoints").Names("kube-controller-manager").RuleOrDie(),
+                // Fundamental resources.
+                rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("secrets", "serviceaccounts").RuleOrDie(),
                 rbacv1helpers.NewRule("delete").Groups(legacyGroup).Resources("secrets").RuleOrDie(),
-                rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("endpoints", "namespaces", "secrets", "serviceaccounts", "configmaps").RuleOrDie(),
-                rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(),
+                rbacv1helpers.NewRule("get").Groups(legacyGroup).Resources("namespaces", "secrets", "serviceaccounts", "configmaps").RuleOrDie(),
+                rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("secrets", "serviceaccounts").RuleOrDie(),
                 // Needed to check API access. These creates are non-mutating
                 rbacv1helpers.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
                 rbacv1helpers.NewRule("create").Groups(authorizationGroup).Resources("subjectaccessreviews").RuleOrDie(),
@@ -471,8 +479,11 @@ func ClusterRoles() []rbacv1.ClusterRole {
                 eventsRule(),
                 // This is for leaderlease access
                 // TODO: scope this to the kube-system namespace
+                rbacv1helpers.NewRule("create").Groups(coordinationGroup).Resources("leases").RuleOrDie(),
+                rbacv1helpers.NewRule("get", "update").Groups(coordinationGroup).Resources("leases").Names("kube-scheduler").RuleOrDie(),
+                // TODO: Remove once we fully migrate to lease in leader-election.
                 rbacv1helpers.NewRule("create").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
-                rbacv1helpers.NewRule("get", "update", "patch", "delete").Groups(legacyGroup).Resources("endpoints").Names("kube-scheduler").RuleOrDie(),
+                rbacv1helpers.NewRule("get", "update").Groups(legacyGroup).Resources("endpoints").Names("kube-scheduler").RuleOrDie(),
 
                 // Fundamental resources
                 rbacv1helpers.NewRule(Read...).Groups(legacyGroup).Resources("nodes").RuleOrDie(),
@@ -510,10 +510,39 @@ items:
     - create
     - patch
     - update
+  - apiGroups:
+    - coordination.k8s.io
+    resources:
+    - leases
+    verbs:
+    - create
+  - apiGroups:
+    - coordination.k8s.io
+    resourceNames:
+    - kube-controller-manager
+    resources:
+    - leases
+    verbs:
+    - get
+    - update
   - apiGroups:
     - ""
     resources:
     - endpoints
+    verbs:
+    - create
+  - apiGroups:
+    - ""
+    resourceNames:
+    - kube-controller-manager
+    resources:
+    - endpoints
+    verbs:
+    - get
+    - update
+  - apiGroups:
+    - ""
+    resources:
     - secrets
     - serviceaccounts
     verbs:
@@ -528,7 +557,6 @@ items:
     - ""
     resources:
     - configmaps
-    - endpoints
     - namespaces
     - secrets
     - serviceaccounts
@@ -537,7 +565,6 @@ items:
   - apiGroups:
     - ""
     resources:
-    - endpoints
     - secrets
     - serviceaccounts
     verbs:
@@ -604,6 +631,21 @@ items:
     - create
     - patch
     - update
+  - apiGroups:
+    - coordination.k8s.io
+    resources:
+    - leases
+    verbs:
+    - create
+  - apiGroups:
+    - coordination.k8s.io
+    resourceNames:
+    - kube-scheduler
+    resources:
+    - leases
+    verbs:
+    - get
+    - update
   - apiGroups:
     - ""
     resources:
@@ -617,9 +659,7 @@ items:
     resources:
     - endpoints
     verbs:
-    - delete
     - get
-    - patch
     - update
   - apiGroups:
     - ""
@@ -44,6 +44,7 @@ func RecommendedDefaultLeaderElectionConfiguration(obj *LeaderElectionConfigurat
         obj.RetryPeriod = metav1.Duration{Duration: 2 * time.Second}
     }
     if obj.ResourceLock == "" {
+        // TODO: Migrate to LeaseLock.
         obj.ResourceLock = EndpointsResourceLock
     }
     if obj.LeaderElect == nil {