generalize lease controller

Haowei Cai 2020-10-22 11:30:58 -07:00
parent c70e3649e1
commit c9bbd8532f
4 changed files with 159 additions and 57 deletions
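In short: the kubelet's node-lease controller moves to the reusable k8s.io/component-helpers/lease package, and everything kubelet-specific - the lease namespace, the renew interval, and the owner-reference handling - is now injected by the caller. Below is a minimal sketch of the new constructor as this diff defines it; the wrapper function name and the nil callbacks are illustrative, not part of the commit. Note that with the kubelet's default 40-second lease duration and the 0.25 fraction kept in kubelet.go, the renew interval works out to 10 seconds.

    package leaseexample

    import (
    	"time"

    	corev1 "k8s.io/api/core/v1"
    	"k8s.io/apimachinery/pkg/util/clock"
    	clientset "k8s.io/client-go/kubernetes"
    	"k8s.io/component-helpers/lease"
    )

    // newNodeLeaseController mirrors the NewMainKubelet wiring below: the
    // caller now computes the renew interval and names the namespace itself.
    func newNodeLeaseController(client clientset.Interface, holder string, leaseDurationSeconds int32) lease.Controller {
    	leaseDuration := time.Duration(leaseDurationSeconds) * time.Second
    	renewInterval := time.Duration(float64(leaseDuration) * 0.25) // 10s for the default 40s lease
    	return lease.NewController(
    		clock.RealClock{},
    		client,
    		holder,
    		leaseDurationSeconds,
    		nil, // onRepeatedHeartbeatFailure (optional)
    		renewInterval,
    		corev1.NamespaceNodeLease,
    		nil, // newLeasePostProcessFunc (optional; see SetNodeOwnerFunc below)
    	)
    }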

View File

@@ -55,6 +55,7 @@ import (
"k8s.io/client-go/util/certificate"
"k8s.io/client-go/util/flowcontrol"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/component-helpers/lease"
internalapi "k8s.io/cri-api/pkg/apis"
"k8s.io/klog/v2"
pluginwatcherapi "k8s.io/kubelet/pkg/apis/pluginregistration/v1"
@@ -83,7 +84,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/metrics/collectors"
"k8s.io/kubernetes/pkg/kubelet/network/dns"
"k8s.io/kubernetes/pkg/kubelet/nodelease"
oomwatcher "k8s.io/kubernetes/pkg/kubelet/oom"
"k8s.io/kubernetes/pkg/kubelet/pleg"
"k8s.io/kubernetes/pkg/kubelet/pluginmanager"
@@ -168,6 +168,9 @@ const (
// Minimum number of dead containers to keep in a pod
minDeadContainerInPod = 1
// nodeLeaseRenewIntervalFraction is the fraction of the lease duration used as the lease renew interval
nodeLeaseRenewIntervalFraction = 0.25
)
// SyncHandler is an interface implemented by Kubelet, for testability
@@ -780,7 +783,17 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewNoNewPrivsAdmitHandler(klet.containerRuntime))
klet.softAdmitHandlers.AddPodAdmitHandler(lifecycle.NewProcMountAdmitHandler(klet.containerRuntime))
klet.nodeLeaseController = nodelease.NewController(klet.clock, klet.heartbeatClient, string(klet.nodeName), kubeCfg.NodeLeaseDurationSeconds, klet.onRepeatedHeartbeatFailure)
leaseDuration := time.Duration(kubeCfg.NodeLeaseDurationSeconds) * time.Second
renewInterval := time.Duration(float64(leaseDuration) * nodeLeaseRenewIntervalFraction)
klet.nodeLeaseController = lease.NewController(
klet.clock,
klet.heartbeatClient,
string(klet.nodeName),
kubeCfg.NodeLeaseDurationSeconds,
klet.onRepeatedHeartbeatFailure,
renewInterval,
v1.NamespaceNodeLease,
util.SetNodeOwnerFunc(klet.heartbeatClient, string(klet.nodeName)))
// Finally, put the most recent version of the config on the Kubelet, so
// people can see how it was configured.
@@ -986,7 +999,7 @@ type Kubelet struct {
updateRuntimeMux sync.Mutex
// nodeLeaseController claims and renews the node lease for this Kubelet
nodeLeaseController nodelease.Controller
nodeLeaseController lease.Controller
// Generates pod events.
pleg pleg.PodLifecycleEventGenerator

View File

@@ -0,0 +1,55 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package util
import (
"context"
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
)
// SetNodeOwnerFunc helps construct a newLeasePostProcessFunc which sets
// a node OwnerReference on the given lease object
func SetNodeOwnerFunc(c clientset.Interface, nodeName string) func(lease *coordinationv1.Lease) error {
return func(lease *coordinationv1.Lease) error {
// Setting the owner reference requires the node's UID. Note that it is different from
// kubelet.nodeRef.UID. When the lease is first created, the connection between
// master and node may not be ready yet, so try to set the owner reference on
// every renewal until it succeeds.
if len(lease.OwnerReferences) == 0 {
if node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}); err == nil {
lease.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
Kind: corev1.SchemeGroupVersion.WithKind("Node").Kind,
Name: nodeName,
UID: node.UID,
},
}
} else {
klog.Errorf("failed to get node %q when trying to set owner ref to the node lease: %v", nodeName, err)
return err
}
}
return nil
}
}
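
The error return of this hook matters downstream: as the controller code below notes, a non-nil error from newLeasePostProcessFunc blocks a lease CREATE (retried on the next sync) but does not block a lease UPDATE. A hypothetical test sketch of the failure mode, assuming this new file lives in the kubelet's util package and using only client-go's fake clientset:

    package util_test

    import (
    	"testing"

    	coordinationv1 "k8s.io/api/coordination/v1"
    	"k8s.io/client-go/kubernetes/fake"
    	"k8s.io/kubernetes/pkg/kubelet/util" // assumed import path for the file above
    )

    // With no "node-a" Node registered in the fake clientset, the hook returns
    // NotFound; the lease controller treats that as "skip this CREATE, retry on
    // the next sync" rather than creating a lease without an owner reference.
    func TestSetNodeOwnerFuncMissingNode(t *testing.T) {
    	cl := fake.NewSimpleClientset()
    	setOwner := util.SetNodeOwnerFunc(cl, "node-a")
    	if err := setOwner(&coordinationv1.Lease{}); err == nil {
    		t.Fatal("expected an error while the node does not exist yet")
    	}
    }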

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package nodelease
package lease
import (
"context"
@@ -22,7 +22,6 @@ import (
"time"
coordinationv1 "k8s.io/api/coordination/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
@@ -35,9 +34,7 @@ import (
)
const (
// renewIntervalFraction is the fraction of lease duration to renew the lease
renewIntervalFraction = 0.25
// maxUpdateRetries is the number of immediate, successive retries the Kubelet will attempt
// maxUpdateRetries is the number of immediate, successive retries the controller will attempt
// when renewing the lease before it waits for the renewal interval to try again,
// similar to what we do for node status retries
maxUpdateRetries = 5
@@ -45,46 +42,57 @@ const (
maxBackoff = 7 * time.Second
)
// Controller manages creating and renewing the lease for this Kubelet
// Controller manages creating and renewing the lease for this component (kube-apiserver, kubelet, etc.)
type Controller interface {
Run(stopCh <-chan struct{})
}
// ProcessLeaseFunc processes the given lease in-place
type ProcessLeaseFunc func(*coordinationv1.Lease) error
type controller struct {
client clientset.Interface
leaseClient coordclientset.LeaseInterface
holderIdentity string
leaseNamespace string
leaseDurationSeconds int32
renewInterval time.Duration
clock clock.Clock
onRepeatedHeartbeatFailure func()
// latestLease is the latest node lease which Kubelet updated or created
// latestLease is the latest lease which the controller updated or created
latestLease *coordinationv1.Lease
// newLeasePostProcessFunc allows customizing a lease object (e.g. setting OwnerReference)
// each time the lease is created or refreshed (updated). Note that an error will block
// a lease CREATE, causing the controller to retry next time, but an error won't block a
// lease UPDATE.
newLeasePostProcessFunc ProcessLeaseFunc
}
// NewController constructs and returns a controller
func NewController(clock clock.Clock, client clientset.Interface, holderIdentity string, leaseDurationSeconds int32, onRepeatedHeartbeatFailure func()) Controller {
func NewController(clock clock.Clock, client clientset.Interface, holderIdentity string, leaseDurationSeconds int32, onRepeatedHeartbeatFailure func(), renewInterval time.Duration, leaseNamespace string, newLeasePostProcessFunc ProcessLeaseFunc) Controller {
var leaseClient coordclientset.LeaseInterface
if client != nil {
leaseClient = client.CoordinationV1().Leases(corev1.NamespaceNodeLease)
leaseClient = client.CoordinationV1().Leases(leaseNamespace)
}
leaseDuration := time.Duration(leaseDurationSeconds) * time.Second
return &controller{
client: client,
leaseClient: leaseClient,
holderIdentity: holderIdentity,
leaseNamespace: leaseNamespace,
leaseDurationSeconds: leaseDurationSeconds,
renewInterval: time.Duration(float64(leaseDuration) * renewIntervalFraction),
renewInterval: renewInterval,
clock: clock,
onRepeatedHeartbeatFailure: onRepeatedHeartbeatFailure,
newLeasePostProcessFunc: newLeasePostProcessFunc,
}
}
// Run runs the controller
func (c *controller) Run(stopCh <-chan struct{}) {
if c.leaseClient == nil {
klog.Infof("node lease controller has nil lease client, will not claim or renew leases")
klog.Infof("lease controller has nil lease client, will not claim or renew leases")
return
}
wait.Until(c.sync, c.renewInterval, stopCh)
@@ -92,14 +100,14 @@ func (c *controller) Run(stopCh <-chan struct{}) {
func (c *controller) sync() {
if c.latestLease != nil {
// As long as node lease is not (or very rarely) updated by any other agent than Kubelet,
// As long as the lease is not (or only very rarely) updated by any agent other than the component itself,
// we can optimistically assume it didn't change since our last update and try updating
// based on the version from that time. Thanks to it we avoid GET call and reduce load
// on etcd and kube-apiserver.
// If at some point other agents also start updating the Lease object frequently, this
// can result in performance degradation, because we will end up calling additional
// GET/PUT - at that point this whole "if" should be removed.
err := c.retryUpdateLease(c.newLease(c.latestLease))
err := c.retryUpdateLease(c.latestLease)
if err == nil {
return
}
@@ -133,7 +141,7 @@ func (c *controller) backoffEnsureLease() (*coordinationv1.Lease, bool) {
break
}
sleep = minDuration(2*sleep, maxBackoff)
klog.Errorf("failed to ensure node lease exists, will retry in %v, error: %v", sleep, err)
klog.Errorf("failed to ensure lease exists, will retry in %v, error: %v", sleep, err)
// backoff wait
c.clock.Sleep(sleep)
}
@@ -146,11 +154,11 @@ func (c *controller) ensureLease() (*coordinationv1.Lease, bool, error) {
lease, err := c.leaseClient.Get(context.TODO(), c.holderIdentity, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
// lease does not exist, create it.
leaseToCreate := c.newLease(nil)
if len(leaseToCreate.OwnerReferences) == 0 {
// We want to ensure that a lease will always have OwnerReferences set.
// Thus, given that we weren't able to set it correctly, we simply
// not create it this time - we will retry in the next iteration.
leaseToCreate, err := c.newLease(nil)
// An error occurred while allocating the new lease (likely from newLeasePostProcessFunc).
// Given that we weren't able to set the lease correctly, we simply do
// not create it this time - we will retry in the next iteration.
if err != nil {
return nil, false, nil
}
lease, err := c.leaseClient.Create(context.TODO(), leaseToCreate, metav1.CreateOptions{})
@@ -170,12 +178,13 @@ func (c *controller) ensureLease() (*coordinationv1.Lease, bool, error) {
// call this once you're sure the lease has been created
func (c *controller) retryUpdateLease(base *coordinationv1.Lease) error {
for i := 0; i < maxUpdateRetries; i++ {
lease, err := c.leaseClient.Update(context.TODO(), c.newLease(base), metav1.UpdateOptions{})
leaseToUpdate, _ := c.newLease(base)
lease, err := c.leaseClient.Update(context.TODO(), leaseToUpdate, metav1.UpdateOptions{})
if err == nil {
c.latestLease = lease
return nil
}
klog.Errorf("failed to update node lease, error: %v", err)
klog.Errorf("failed to update lease, error: %v", err)
// OptimisticLockError requires getting the newer version of lease to proceed.
if apierrors.IsConflict(err) {
base, _ = c.backoffEnsureLease()
@@ -185,20 +194,22 @@ func (c *controller) retryUpdateLease(base *coordinationv1.Lease) error {
c.onRepeatedHeartbeatFailure()
}
}
return fmt.Errorf("failed %d attempts to update node lease", maxUpdateRetries)
return fmt.Errorf("failed %d attempts to update lease", maxUpdateRetries)
}
// newLease constructs a new lease if base is nil, or returns a copy of base
// with desired state asserted on the copy.
func (c *controller) newLease(base *coordinationv1.Lease) *coordinationv1.Lease {
// Note that an error will block lease CREATE, causing the CREATE to be retried in
// the next iteration; but the error won't block lease refresh (UPDATE).
func (c *controller) newLease(base *coordinationv1.Lease) (*coordinationv1.Lease, error) {
// Use the bare minimum set of fields; other fields exist for debugging/legacy,
// but we don't need to make node heartbeats more complicated by using them.
// but we don't need to make component heartbeats more complicated by using them.
var lease *coordinationv1.Lease
if base == nil {
lease = &coordinationv1.Lease{
ObjectMeta: metav1.ObjectMeta{
Name: c.holderIdentity,
Namespace: corev1.NamespaceNodeLease,
Namespace: c.leaseNamespace,
},
Spec: coordinationv1.LeaseSpec{
HolderIdentity: pointer.StringPtr(c.holderIdentity),
@@ -210,26 +221,12 @@ func (c *controller) newLease(base *coordinationv1.Lease) *coordinationv1.Lease
}
lease.Spec.RenewTime = &metav1.MicroTime{Time: c.clock.Now()}
// Setting owner reference needs node's UID. Note that it is different from
// kubelet.nodeRef.UID. When lease is initially created, it is possible that
// the connection between master and node is not ready yet. So try to set
// owner reference every time when renewing the lease, until successful.
if len(lease.OwnerReferences) == 0 {
if node, err := c.client.CoreV1().Nodes().Get(context.TODO(), c.holderIdentity, metav1.GetOptions{}); err == nil {
lease.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
Kind: corev1.SchemeGroupVersion.WithKind("Node").Kind,
Name: c.holderIdentity,
UID: node.UID,
},
}
} else {
klog.Errorf("failed to get node %q when trying to set owner ref to the node lease: %v", c.holderIdentity, err)
}
if c.newLeasePostProcessFunc != nil {
err := c.newLeasePostProcessFunc(lease)
return lease, err
}
return lease
return lease, nil
}
func minDuration(a, b time.Duration) time.Duration {

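Taken together, the hunks above are the generalization: the namespace, the renew interval, and the lease post-processing are all injected, so nothing in the controller mentions nodes anymore. As a hypothetical sketch of a non-kubelet consumer - none of these names, label keys, or numbers appear in this commit; only NewController and ProcessLeaseFunc do:

    package leaseexample

    import (
    	"time"

    	coordinationv1 "k8s.io/api/coordination/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/util/clock"
    	clientset "k8s.io/client-go/kubernetes"
    	"k8s.io/component-helpers/lease"
    )

    // labelLease is a hypothetical ProcessLeaseFunc: unlike SetNodeOwnerFunc
    // it never fails, so it can never block a lease CREATE.
    func labelLease(component string) lease.ProcessLeaseFunc {
    	return func(l *coordinationv1.Lease) error {
    		if l.Labels == nil {
    			l.Labels = map[string]string{}
    		}
    		l.Labels["example.io/component"] = component // hypothetical label key
    		return nil
    	}
    }

    // newIdentityLeaseController sketches an apiserver-style identity lease
    // kept in kube-system instead of kube-node-lease.
    func newIdentityLeaseController(client clientset.Interface, id string) lease.Controller {
    	return lease.NewController(
    		clock.RealClock{},
    		client,
    		id, // holderIdentity doubles as the Lease object's name
    		30, // lease duration in seconds
    		nil,
    		10*time.Second,
    		metav1.NamespaceSystem, // "kube-system"
    		labelLease("sample-component"))
    }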
View File

@@ -14,9 +14,10 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package nodelease
package lease
import (
"context"
"errors"
"fmt"
"testing"
@@ -32,12 +33,15 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/diff"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
clienttesting "k8s.io/client-go/testing"
"k8s.io/utils/pointer"
"k8s.io/klog/v2"
)
func TestNewLease(t *testing.T) {
func TestNewNodeLease(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now())
node := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
@@ -191,7 +195,9 @@ func TestNewLease(t *testing.T) {
for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) {
newLease := tc.controller.newLease(tc.base)
tc.controller.newLeasePostProcessFunc = setNodeOwnerFunc(tc.controller.client, node.Name)
tc.controller.leaseNamespace = corev1.NamespaceNodeLease
newLease, _ := tc.controller.newLease(tc.base)
if newLease == tc.base {
t.Fatalf("the new lease must be newly allocated, but got same address as base")
}
@@ -202,7 +208,7 @@ func TestNewLease(t *testing.T) {
}
}
func TestRetryUpdateLease(t *testing.T) {
func TestRetryUpdateNodeLease(t *testing.T) {
node := &corev1.Node{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
@@ -274,8 +280,10 @@ func TestRetryUpdateLease(t *testing.T) {
client: cl,
leaseClient: cl.CoordinationV1().Leases(corev1.NamespaceNodeLease),
holderIdentity: node.Name,
leaseNamespace: corev1.NamespaceNodeLease,
leaseDurationSeconds: 10,
onRepeatedHeartbeatFailure: tc.onRepeatedHeartbeatFailure,
newLeasePostProcessFunc: setNodeOwnerFunc(cl, node.Name),
}
if err := c.retryUpdateLease(nil); tc.expectErr != (err != nil) {
t.Fatalf("got %v, expected %v", err != nil, tc.expectErr)
@@ -406,12 +414,14 @@ func TestUpdateUsingLatestLease(t *testing.T) {
cl.PrependReactor("create", "leases", tc.createReactor)
}
c := &controller{
clock: clock.NewFakeClock(time.Now()),
client: cl,
leaseClient: cl.CoordinationV1().Leases(corev1.NamespaceNodeLease),
holderIdentity: node.Name,
leaseDurationSeconds: 10,
latestLease: tc.latestLease,
clock: clock.NewFakeClock(time.Now()),
client: cl,
leaseClient: cl.CoordinationV1().Leases(corev1.NamespaceNodeLease),
holderIdentity: node.Name,
leaseNamespace: corev1.NamespaceNodeLease,
leaseDurationSeconds: 10,
latestLease: tc.latestLease,
newLeasePostProcessFunc: setNodeOwnerFunc(cl, node.Name),
}
c.sync()
@@ -428,3 +438,30 @@ func TestUpdateUsingLatestLease(t *testing.T) {
})
}
}
// setNodeOwnerFunc helps construct a newLeasePostProcessFunc which sets
// a node OwnerReference on the given lease object
func setNodeOwnerFunc(c clientset.Interface, nodeName string) func(lease *coordinationv1.Lease) error {
return func(lease *coordinationv1.Lease) error {
// Setting the owner reference requires the node's UID. Note that it is different from
// kubelet.nodeRef.UID. When the lease is first created, the connection between
// master and node may not be ready yet, so try to set the owner reference on
// every renewal until it succeeds.
if len(lease.OwnerReferences) == 0 {
if node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{}); err == nil {
lease.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: corev1.SchemeGroupVersion.WithKind("Node").Version,
Kind: corev1.SchemeGroupVersion.WithKind("Node").Kind,
Name: nodeName,
UID: node.UID,
},
}
} else {
klog.Errorf("failed to get node %q when trying to set owner ref to the node lease: %v", nodeName, err)
return err
}
}
return nil
}
}
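
Because the controller's collaborators (clock, client, post-process hook) are all injected, the whole create-and-renew loop can be exercised synchronously, as the tests above do. A hypothetical sketch in the same package and with the same imports as this test file - only the test name is new:

    // The first sync creates the lease (including the owner reference via the
    // post-process hook); subsequent syncs renew it.
    func TestFirstSyncCreatesLease(t *testing.T) {
    	node := &corev1.Node{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: types.UID("foo-uid")}}
    	cl := fake.NewSimpleClientset(node)
    	c := &controller{
    		clock:                   clock.NewFakeClock(time.Now()),
    		client:                  cl,
    		leaseClient:             cl.CoordinationV1().Leases(corev1.NamespaceNodeLease),
    		holderIdentity:          node.Name,
    		leaseNamespace:          corev1.NamespaceNodeLease,
    		leaseDurationSeconds:    10,
    		newLeasePostProcessFunc: setNodeOwnerFunc(cl, node.Name),
    	}
    	c.sync()
    	l, err := c.leaseClient.Get(context.TODO(), node.Name, metav1.GetOptions{})
    	if err != nil || len(l.OwnerReferences) == 0 {
    		t.Fatalf("expected a lease with an owner reference, got lease=%v err=%v", l, err)
    	}
    }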