mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-28 05:57:25 +00:00)

commit c9bbd8532f (parent c70e3649e1)

    generalize lease controller
@@ -55,6 +55,7 @@ import (
 	"k8s.io/client-go/util/certificate"
 	"k8s.io/client-go/util/flowcontrol"
 	cloudprovider "k8s.io/cloud-provider"
+	"k8s.io/component-helpers/lease"
 	internalapi "k8s.io/cri-api/pkg/apis"
 	"k8s.io/klog/v2"
 	pluginwatcherapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
@@ -83,7 +84,6 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/metrics"
 	"k8s.io/kubernetes/pkg/kubelet/metrics/collectors"
 	"k8s.io/kubernetes/pkg/kubelet/network/dns"
-	"k8s.io/kubernetes/pkg/kubelet/nodelease"
 	oomwatcher "k8s.io/kubernetes/pkg/kubelet/oom"
 	"k8s.io/kubernetes/pkg/kubelet/pleg"
 	"k8s.io/kubernetes/pkg/kubelet/pluginmanager"
@@ -168,6 +168,9 @@ const (
 
 	// Minimum number of dead containers to keep in a pod
 	minDeadContainerInPod = 1
+
+	// nodeLeaseRenewIntervalFraction is the fraction of lease duration to renew the lease
+	nodeLeaseRenewIntervalFraction = 0.25
 )
 
 // SyncHandler is an interface implemented by Kubelet, for testability
@@ -780,7 +783,17 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewNoNewPrivsAdmitHandler(klet.containerRuntime))
 	klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewProcMountAdmitHandler(klet.containerRuntime))
 
-	klet.nodeLeaseController = nodelease.NewController(klet.clock, klet.heartbeatClient, string(klet.nodeName), kubeCfg.NodeLeaseDurationSeconds, klet.onRepeatedHeartbeatFailure)
+	leaseDuration := time.Duration(kubeCfg.NodeLeaseDurationSeconds) * time.Second
+	renewInterval := time.Duration(float64(leaseDuration) * nodeLeaseRenewIntervalFraction)
+	klet.nodeLeaseController = lease.NewController(
+		klet.clock,
+		klet.heartbeatClient,
+		string(klet.nodeName),
+		kubeCfg.NodeLeaseDurationSeconds,
+		klet.onRepeatedHeartbeatFailure,
+		renewInterval,
+		v1.NamespaceNodeLease,
+		util.SetNodeOwnerFunc(klet.heartbeatClient, string(klet.nodeName)))
 
 	// Finally, put the most recent version of the config on the Kubelet, so
 	// people can see how it was configured.
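The kubelet call above is the only production caller touched by this diff, but after the generalization any component can wire the controller the same way. The sketch below is a hypothetical illustration of the new NewController signature only; the helper name, lease duration, namespace, and package name are illustrative assumptions, not part of this commit.

package example

import (
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/clock"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/component-helpers/lease"
)

// startIdentityLease is a hypothetical helper: it claims and renews a lease
// for an arbitrary component identity using the generalized controller.
func startIdentityLease(client clientset.Interface, identity string, stopCh <-chan struct{}) {
	leaseDurationSeconds := int32(40)
	leaseDuration := time.Duration(leaseDurationSeconds) * time.Second
	// The renew interval is now chosen by the caller; 0.25 mirrors the kubelet's fraction.
	renewInterval := time.Duration(float64(leaseDuration) * 0.25)

	controller := lease.NewController(
		clock.RealClock{},
		client,
		identity,
		leaseDurationSeconds,
		nil, // no onRepeatedHeartbeatFailure callback
		renewInterval,
		corev1.NamespaceNodeLease, // illustrative; any namespace the caller owns would do
		nil,                       // no post-process hook, so no OwnerReference is attached
	)
	go controller.Run(stopCh)
}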
@@ -986,7 +999,7 @@ type Kubelet struct {
 	updateRuntimeMux sync.Mutex
 
 	// nodeLeaseController claims and renews the node lease for this Kubelet
-	nodeLeaseController nodelease.Controller
+	nodeLeaseController lease.Controller
 
 	// Generates pod events.
 	pleg pleg.PodLifecycleEventGenerator
pkg/kubelet/util/nodelease.go (new file, 55 lines)
@@ -0,0 +1,55 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package util
+
+import (
+	"context"
+
+	coordinationv1 "k8s.io/api/coordination/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	clientset "k8s.io/client-go/kubernetes"
+
+	"k8s.io/klog/v2"
+)
+
+// SetNodeOwnerFunc helps construct a newLeasePostProcessFunc which sets
+// a node OwnerReference to the given lease object
+func SetNodeOwnerFunc(c clientset.Interface, nodeName string) func(lease *coordinationv1.Lease) error {
+	return func(lease *coordinationv1.Lease) error {
+		// Setting owner reference needs node's UID. Note that it is different from
+		// kubelet.nodeRef.UID. When lease is initially created, it is possible that
+		// the connection between master and node is not ready yet. So try to set
+		// owner reference every time when renewing the lease, until successful.
+		if len(lease.OwnerReferences) == 0 {
+			if node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}); err == nil {
+				lease.OwnerReferences = []metav1.OwnerReference{
+					{
+						APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
+						Kind:       corev1.SchemeGroupVersion.WithKind("Node").Kind,
+						Name:       nodeName,
+						UID:        node.UID,
+					},
+				}
+			} else {
+				klog.Errorf("failed to get node %q when trying to set owner ref to the node lease: %v", nodeName, err)
+				return err
+			}
+		}
+		return nil
+	}
+}
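A quick, hypothetical test-style sketch (not part of this commit) of what the helper above does: it resolves the node's UID through the client and attaches it as an OwnerReference on the lease, so the lease can be garbage-collected when the node is deleted. The test name and fixture values are assumptions.

package util

import (
	"testing"

	coordinationv1 "k8s.io/api/coordination/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestSetNodeOwnerFunc is a hypothetical sketch, not part of this commit.
func TestSetNodeOwnerFunc(t *testing.T) {
	node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-a", UID: "uid-a"}}
	client := fake.NewSimpleClientset(node)

	l := &coordinationv1.Lease{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}}
	if err := SetNodeOwnerFunc(client, "node-a")(l); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(l.OwnerReferences) != 1 || l.OwnerReferences[0].UID != node.UID {
		t.Fatalf("owner reference not set as expected: %+v", l.OwnerReferences)
	}
}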
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package nodelease
+package lease
 
 import (
 	"context"
@@ -22,7 +22,6 @@ import (
 	"time"
 
 	coordinationv1 "k8s.io/api/coordination/v1"
-	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/clock"
@@ -35,9 +34,7 @@ import (
 )
 
 const (
-	// renewIntervalFraction is the fraction of lease duration to renew the lease
-	renewIntervalFraction = 0.25
-	// maxUpdateRetries is the number of immediate, successive retries the Kubelet will attempt
+	// maxUpdateRetries is the number of immediate, successive retries the controller will attempt
 	// when renewing the lease before it waits for the renewal interval before trying again,
 	// similar to what we do for node status retries
 	maxUpdateRetries = 5
@@ -45,46 +42,57 @@
 	maxBackoff = 7 * time.Second
 )
 
-// Controller manages creating and renewing the lease for this Kubelet
+// Controller manages creating and renewing the lease for this component (kube-apiserver, kubelet, etc.)
 type Controller interface {
 	Run(stopCh <-chan struct{})
 }
 
+// ProcessLeaseFunc processes the given lease in-place
+type ProcessLeaseFunc func(*coordinationv1.Lease) error
+
 type controller struct {
 	client                     clientset.Interface
 	leaseClient                coordclientset.LeaseInterface
 	holderIdentity             string
+	leaseNamespace             string
 	leaseDurationSeconds       int32
 	renewInterval              time.Duration
 	clock                      clock.Clock
 	onRepeatedHeartbeatFailure func()
 
-	// latestLease is the latest node lease which Kubelet updated or created
+	// latestLease is the latest lease which the controller updated or created
 	latestLease *coordinationv1.Lease
+
+	// newLeasePostProcessFunc allows customizing a lease object (e.g. setting OwnerReference)
+	// before every time the lease is created/refreshed(updated). Note that an error will block
+	// a lease CREATE, causing the controller to retry next time, but an error won't block a
+	// lease UPDATE.
+	newLeasePostProcessFunc ProcessLeaseFunc
 }
 
 // NewController constructs and returns a controller
-func NewController(clock clock.Clock, client clientset.Interface, holderIdentity string, leaseDurationSeconds int32, onRepeatedHeartbeatFailure func()) Controller {
+func NewController(clock clock.Clock, client clientset.Interface, holderIdentity string, leaseDurationSeconds int32, onRepeatedHeartbeatFailure func(), renewInterval time.Duration, leaseNamespace string, newLeasePostProcessFunc ProcessLeaseFunc) Controller {
 	var leaseClient coordclientset.LeaseInterface
 	if client != nil {
-		leaseClient = client.CoordinationV1().Leases(corev1.NamespaceNodeLease)
+		leaseClient = client.CoordinationV1().Leases(leaseNamespace)
 	}
-	leaseDuration := time.Duration(leaseDurationSeconds) * time.Second
 	return &controller{
 		client:                     client,
 		leaseClient:                leaseClient,
 		holderIdentity:             holderIdentity,
+		leaseNamespace:             leaseNamespace,
 		leaseDurationSeconds:       leaseDurationSeconds,
-		renewInterval:              time.Duration(float64(leaseDuration) * renewIntervalFraction),
+		renewInterval:              renewInterval,
 		clock:                      clock,
 		onRepeatedHeartbeatFailure: onRepeatedHeartbeatFailure,
+		newLeasePostProcessFunc:    newLeasePostProcessFunc,
 	}
 }
 
 // Run runs the controller
 func (c *controller) Run(stopCh <-chan struct{}) {
 	if c.leaseClient == nil {
-		klog.Infof("node lease controller has nil lease client, will not claim or renew leases")
+		klog.Infof("lease controller has nil lease client, will not claim or renew leases")
 		return
 	}
 	wait.Until(c.sync, c.renewInterval, stopCh)
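The new ProcessLeaseFunc hook is the extension point that replaces the previously hard-coded owner-reference logic. As a hedged illustration (the package name and label key are hypothetical, not part of this commit), a caller could use it to label its lease:

package example

import (
	coordinationv1 "k8s.io/api/coordination/v1"

	"k8s.io/component-helpers/lease"
)

// labelLeaseFunc is a hypothetical ProcessLeaseFunc that tags every lease with
// the owning component's name. A returned error would block a lease CREATE
// (retried on the next sync) but would not block a lease UPDATE.
func labelLeaseFunc(component string) lease.ProcessLeaseFunc {
	return func(l *coordinationv1.Lease) error {
		if l.Labels == nil {
			l.Labels = map[string]string{}
		}
		l.Labels["example.io/owned-by"] = component // hypothetical label key
		return nil
	}
}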
@@ -92,14 +100,14 @@ func (c *controller) Run(stopCh <-chan struct{}) {
 
 func (c *controller) sync() {
 	if c.latestLease != nil {
-		// As long as node lease is not (or very rarely) updated by any other agent than Kubelet,
+		// As long as the lease is not (or very rarely) updated by any other agent than the component itself,
 		// we can optimistically assume it didn't change since our last update and try updating
 		// based on the version from that time. Thanks to it we avoid GET call and reduce load
 		// on etcd and kube-apiserver.
 		// If at some point other agents will also be frequently updating the Lease object, this
 		// can result in performance degradation, because we will end up with calling additional
 		// GET/PUT - at this point this whole "if" should be removed.
-		err := c.retryUpdateLease(c.newLease(c.latestLease))
+		err := c.retryUpdateLease(c.latestLease)
 		if err == nil {
 			return
 		}
@@ -133,7 +141,7 @@ func (c *controller) backoffEnsureLease() (*coordinationv1.Lease, bool) {
 			break
 		}
 		sleep = minDuration(2*sleep, maxBackoff)
-		klog.Errorf("failed to ensure node lease exists, will retry in %v, error: %v", sleep, err)
+		klog.Errorf("failed to ensure lease exists, will retry in %v, error: %v", sleep, err)
 		// backoff wait
 		c.clock.Sleep(sleep)
 	}
@@ -146,11 +154,11 @@ func (c *controller) ensureLease() (*coordinationv1.Lease, bool, error) {
 	lease, err := c.leaseClient.Get(context.TODO(), c.holderIdentity, metav1.GetOptions{})
 	if apierrors.IsNotFound(err) {
 		// lease does not exist, create it.
-		leaseToCreate := c.newLease(nil)
-		if len(leaseToCreate.OwnerReferences) == 0 {
-			// We want to ensure that a lease will always have OwnerReferences set.
-			// Thus, given that we weren't able to set it correctly, we simply
-			// not create it this time - we will retry in the next iteration.
+		leaseToCreate, err := c.newLease(nil)
+		// An error occurred during allocating the new lease (likely from newLeasePostProcessFunc).
+		// Given that we weren't able to set the lease correctly, we simply
+		// not create it this time - we will retry in the next iteration.
+		if err != nil {
 			return nil, false, nil
 		}
 		lease, err := c.leaseClient.Create(context.TODO(), leaseToCreate, metav1.CreateOptions{})
@@ -170,12 +178,13 @@ func (c *controller) ensureLease() (*coordinationv1.Lease, bool, error) {
 // call this once you're sure the lease has been created
 func (c *controller) retryUpdateLease(base *coordinationv1.Lease) error {
 	for i := 0; i < maxUpdateRetries; i++ {
-		lease, err := c.leaseClient.Update(context.TODO(), c.newLease(base), metav1.UpdateOptions{})
+		leaseToUpdate, _ := c.newLease(base)
+		lease, err := c.leaseClient.Update(context.TODO(), leaseToUpdate, metav1.UpdateOptions{})
 		if err == nil {
 			c.latestLease = lease
 			return nil
 		}
-		klog.Errorf("failed to update node lease, error: %v", err)
+		klog.Errorf("failed to update lease, error: %v", err)
 		// OptimisticLockError requires getting the newer version of lease to proceed.
 		if apierrors.IsConflict(err) {
 			base, _ = c.backoffEnsureLease()
@@ -185,20 +194,22 @@ func (c *controller) retryUpdateLease(base *coordinationv1.Lease) error {
 			c.onRepeatedHeartbeatFailure()
 		}
 	}
-	return fmt.Errorf("failed %d attempts to update node lease", maxUpdateRetries)
+	return fmt.Errorf("failed %d attempts to update lease", maxUpdateRetries)
 }
 
 // newLease constructs a new lease if base is nil, or returns a copy of base
 // with desired state asserted on the copy.
-func (c *controller) newLease(base *coordinationv1.Lease) *coordinationv1.Lease {
+// Note that an error will block lease CREATE, causing the CREATE to be retried in
+// the next iteration; but the error won't block lease refresh (UPDATE).
+func (c *controller) newLease(base *coordinationv1.Lease) (*coordinationv1.Lease, error) {
 	// Use the bare minimum set of fields; other fields exist for debugging/legacy,
-	// but we don't need to make node heartbeats more complicated by using them.
+	// but we don't need to make component heartbeats more complicated by using them.
 	var lease *coordinationv1.Lease
 	if base == nil {
 		lease = &coordinationv1.Lease{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      c.holderIdentity,
-				Namespace: corev1.NamespaceNodeLease,
+				Namespace: c.leaseNamespace,
 			},
 			Spec: coordinationv1.LeaseSpec{
 				HolderIdentity: pointer.StringPtr(c.holderIdentity),
@@ -210,26 +221,12 @@ func (c *controller) newLease(base *coordinationv1.Lease) *coordinationv1.Lease
 	}
 	lease.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()}
 
-	// Setting owner reference needs node's UID. Note that it is different from
-	// kubelet.nodeRef.UID. When lease is initially created, it is possible that
-	// the connection between master and node is not ready yet. So try to set
-	// owner reference every time when renewing the lease, until successful.
-	if len(lease.OwnerReferences) == 0 {
-		if node, err := c.client.CoreV1().Nodes().Get(context.TODO(), c.holderIdentity, metav1.GetOptions{}); err == nil {
-			lease.OwnerReferences = []metav1.OwnerReference{
-				{
-					APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
-					Kind:       corev1.SchemeGroupVersion.WithKind("Node").Kind,
-					Name:       c.holderIdentity,
-					UID:        node.UID,
-				},
-			}
-		} else {
-			klog.Errorf("failed to get node %q when trying to set owner ref to the node lease: %v", c.holderIdentity, err)
-		}
+	if c.newLeasePostProcessFunc != nil {
+		err := c.newLeasePostProcessFunc(lease)
+		return lease, err
 	}
 
-	return lease
+	return lease, nil
 }
 
 func minDuration(a, b time.Duration) time.Duration {
@@ -14,9 +14,10 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package nodelease
+package lease
 
 import (
+	"context"
 	"errors"
 	"fmt"
 	"testing"
@@ -32,12 +33,15 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/clock"
 	"k8s.io/apimachinery/pkg/util/diff"
+	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/kubernetes/fake"
 	clienttesting "k8s.io/client-go/testing"
 	"k8s.io/utils/pointer"
+
+	"k8s.io/klog/v2"
 )
 
-func TestNewLease(t *testing.T) {
+func TestNewNodeLease(t *testing.T) {
 	fakeClock := clock.NewFakeClock(time.Now())
 	node := &corev1.Node{
 		ObjectMeta: metav1.ObjectMeta{
@@ -191,7 +195,9 @@ func TestNewLease(t *testing.T) {
 
 	for _, tc := range cases {
 		t.Run(tc.desc, func(t *testing.T) {
-			newLease := tc.controller.newLease(tc.base)
+			tc.controller.newLeasePostProcessFunc = setNodeOwnerFunc(tc.controller.client, node.Name)
+			tc.controller.leaseNamespace = corev1.NamespaceNodeLease
+			newLease, _ := tc.controller.newLease(tc.base)
 			if newLease == tc.base {
 				t.Fatalf("the new lease must be newly allocated, but got same address as base")
 			}
@@ -202,7 +208,7 @@ func TestNewLease(t *testing.T) {
 	}
 }
 
-func TestRetryUpdateLease(t *testing.T) {
+func TestRetryUpdateNodeLease(t *testing.T) {
 	node := &corev1.Node{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -274,8 +280,10 @@ func TestRetryUpdateLease(t *testing.T) {
 			client:                     cl,
 			leaseClient:                cl.CoordinationV1().Leases(corev1.NamespaceNodeLease),
 			holderIdentity:             node.Name,
+			leaseNamespace:             corev1.NamespaceNodeLease,
 			leaseDurationSeconds:       10,
 			onRepeatedHeartbeatFailure: tc.onRepeatedHeartbeatFailure,
+			newLeasePostProcessFunc:    setNodeOwnerFunc(cl, node.Name),
 		}
 		if err := c.retryUpdateLease(nil); tc.expectErr != (err != nil) {
 			t.Fatalf("got %v, expected %v", err != nil, tc.expectErr)
@@ -406,12 +414,14 @@ func TestUpdateUsingLatestLease(t *testing.T) {
 			cl.PrependReactor("create", "leases", tc.createReactor)
 		}
 		c := &controller{
 			clock:                clock.NewFakeClock(time.Now()),
 			client:               cl,
 			leaseClient:          cl.CoordinationV1().Leases(corev1.NamespaceNodeLease),
 			holderIdentity:       node.Name,
-			leaseDurationSeconds: 10,
-			latestLease:          tc.latestLease,
+			leaseNamespace:          corev1.NamespaceNodeLease,
+			leaseDurationSeconds:    10,
+			latestLease:             tc.latestLease,
+			newLeasePostProcessFunc: setNodeOwnerFunc(cl, node.Name),
 		}
 
 		c.sync()
@@ -428,3 +438,30 @@ func TestUpdateUsingLatestLease(t *testing.T) {
 		})
 	}
 }
+
+// setNodeOwnerFunc helps construct a newLeasePostProcessFunc which sets
+// a node OwnerReference to the given lease object
+func setNodeOwnerFunc(c clientset.Interface, nodeName string) func(lease *coordinationv1.Lease) error {
+	return func(lease *coordinationv1.Lease) error {
+		// Setting owner reference needs node's UID. Note that it is different from
+		// kubelet.nodeRef.UID. When lease is initially created, it is possible that
+		// the connection between master and node is not ready yet. So try to set
+		// owner reference every time when renewing the lease, until successful.
+		if len(lease.OwnerReferences) == 0 {
+			if node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}); err == nil {
+				lease.OwnerReferences = []metav1.OwnerReference{
+					{
+						APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
+						Kind:       corev1.SchemeGroupVersion.WithKind("Node").Kind,
+						Name:       nodeName,
+						UID:        node.UID,
+					},
+				}
+			} else {
+				klog.Errorf("failed to get node %q when trying to set owner ref to the node lease: %v", nodeName, err)
+				return err
+			}
+		}
+		return nil
+	}
+}