From 49b88f1d8a8d7dddea1713d2efe9800cd23abe6d Mon Sep 17 00:00:00 2001
From: Oksana Baranova
Date: Wed, 30 Oct 2024 14:35:31 +0200
Subject: [PATCH] kubelet: migrate clustertrustbundle, token to contextual
 logging

Signed-off-by: Oksana Baranova
---
 hack/golangci-hints.yaml                |  2 ++
 hack/golangci-strict.yaml               |  2 ++
 hack/golangci.yaml                      |  2 ++
 hack/logcheck.conf                      |  2 ++
 .../clustertrustbundle_manager.go       | 10 ++++++----
 .../clustertrustbundle_manager_test.go  | 20 ++++++++++++-------
 pkg/kubelet/kubelet.go                  |  2 +-
 pkg/kubelet/token/token_manager.go      | 12 +++++++----
 pkg/kubelet/token/token_manager_test.go |  4 +++-
 9 files changed, 39 insertions(+), 17 deletions(-)

diff --git a/hack/golangci-hints.yaml b/hack/golangci-hints.yaml
index 19c9ec89580..ffb1728261b 100644
--- a/hack/golangci-hints.yaml
+++ b/hack/golangci-hints.yaml
@@ -165,6 +165,8 @@ linters-settings: # please keep this alphabetized
       contextual k8s.io/kubernetes/test/e2e/dra/.*
       contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
       contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*
+      contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
+      contextual k8s.io/kubernetes/pkg/kubelet/token/.*
 
       # As long as contextual logging is alpha or beta, all WithName, WithValues,
       # NewContext calls have to go through klog. Once it is GA, we can lift
diff --git a/hack/golangci-strict.yaml b/hack/golangci-strict.yaml
index 13792a8e584..91de7d9d5f1 100644
--- a/hack/golangci-strict.yaml
+++ b/hack/golangci-strict.yaml
@@ -211,6 +211,8 @@ linters-settings: # please keep this alphabetized
       contextual k8s.io/kubernetes/test/e2e/dra/.*
       contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
       contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*
+      contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
+      contextual k8s.io/kubernetes/pkg/kubelet/token/.*
 
       # As long as contextual logging is alpha or beta, all WithName, WithValues,
       # NewContext calls have to go through klog. Once it is GA, we can lift
diff --git a/hack/golangci.yaml b/hack/golangci.yaml
index f542613ae20..9b93ed3c919 100644
--- a/hack/golangci.yaml
+++ b/hack/golangci.yaml
@@ -213,6 +213,8 @@ linters-settings: # please keep this alphabetized
       contextual k8s.io/kubernetes/test/e2e/dra/.*
       contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
       contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*
+      contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
+      contextual k8s.io/kubernetes/pkg/kubelet/token/.*
 
       # As long as contextual logging is alpha or beta, all WithName, WithValues,
       # NewContext calls have to go through klog. Once it is GA, we can lift
diff --git a/hack/logcheck.conf b/hack/logcheck.conf
index 01707811a3b..dda92fcaf25 100644
--- a/hack/logcheck.conf
+++ b/hack/logcheck.conf
@@ -49,6 +49,8 @@ contextual k8s.io/kubernetes/pkg/scheduler/.*
 contextual k8s.io/kubernetes/test/e2e/dra/.*
 contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
 contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*
+contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
+contextual k8s.io/kubernetes/pkg/kubelet/token/.*
 
 # As long as contextual logging is alpha or beta, all WithName, WithValues,
 # NewContext calls have to go through klog. Once it is GA, we can lift
diff --git a/pkg/kubelet/clustertrustbundle/clustertrustbundle_manager.go b/pkg/kubelet/clustertrustbundle/clustertrustbundle_manager.go
index 3f3337b7411..2cf6b3f1c9e 100644
--- a/pkg/kubelet/clustertrustbundle/clustertrustbundle_manager.go
+++ b/pkg/kubelet/clustertrustbundle/clustertrustbundle_manager.go
@@ -19,6 +19,7 @@ limitations under the License.
 package clustertrustbundle
 
 import (
+	"context"
 	"encoding/pem"
 	"fmt"
 	"math/rand"
@@ -58,7 +59,7 @@ type InformerManager struct {
 var _ Manager = (*InformerManager)(nil)
 
 // NewInformerManager returns an initialized InformerManager.
-func NewInformerManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer, cacheSize int, cacheTTL time.Duration) (*InformerManager, error) {
+func NewInformerManager(ctx context.Context, bundles certinformersv1alpha1.ClusterTrustBundleInformer, cacheSize int, cacheTTL time.Duration) (*InformerManager, error) {
 	// We need to call Informer() before calling start on the shared informer
 	// factory, or the informer won't be registered to be started.
 	m := &InformerManager{
@@ -68,6 +69,7 @@ func NewInformerManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer
 		cacheTTL:    cacheTTL,
 	}
 
+	logger := klog.FromContext(ctx)
 	// Have the informer bust cache entries when it sees updates that could
 	// apply to them.
 	_, err := m.ctbInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -76,7 +78,7 @@ func NewInformerManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer
 			if !ok {
 				return
 			}
-			klog.InfoS("Dropping all cache entries for signer", "signerName", ctb.Spec.SignerName)
+			logger.Info("Dropping all cache entries for signer", "signerName", ctb.Spec.SignerName)
 			m.dropCacheFor(ctb)
 		},
 		UpdateFunc: func(old, new any) {
@@ -84,7 +86,7 @@ func NewInformerManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer
 			if !ok {
 				return
 			}
-			klog.InfoS("Dropping cache for ClusterTrustBundle", "signerName", ctb.Spec.SignerName)
+			logger.Info("Dropping cache for ClusterTrustBundle", "signerName", ctb.Spec.SignerName)
 			m.dropCacheFor(new.(*certificatesv1alpha1.ClusterTrustBundle))
 		},
 		DeleteFunc: func(obj any) {
@@ -99,7 +101,7 @@ func NewInformerManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer
 					return
 				}
 			}
-			klog.InfoS("Dropping cache for ClusterTrustBundle", "signerName", ctb.Spec.SignerName)
+			logger.Info("Dropping cache for ClusterTrustBundle", "signerName", ctb.Spec.SignerName)
 			m.dropCacheFor(ctb)
 		},
 	})
diff --git a/pkg/kubelet/clustertrustbundle/clustertrustbundle_manager_test.go b/pkg/kubelet/clustertrustbundle/clustertrustbundle_manager_test.go
index eb778bdf8b5..7fe6a0914bc 100644
--- a/pkg/kubelet/clustertrustbundle/clustertrustbundle_manager_test.go
+++ b/pkg/kubelet/clustertrustbundle/clustertrustbundle_manager_test.go
@@ -37,15 +37,17 @@ import (
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/tools/cache"
+	"k8s.io/kubernetes/test/utils/ktesting"
 )
 
 func TestBeforeSynced(t *testing.T) {
+	tCtx := ktesting.Init(t)
 	kc := fake.NewSimpleClientset()
 
 	informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
 	ctbInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
 
-	ctbManager, _ := NewInformerManager(ctbInformer, 256, 5*time.Minute)
+	ctbManager, _ := NewInformerManager(tCtx, ctbInformer, 256, 5*time.Minute)
 
 	_, err := ctbManager.GetTrustAnchorsByName("foo", false)
 	if err == nil {
@@ -55,6 +57,7 @@ func TestBeforeSynced(t *testing.T) {
 
 func TestGetTrustAnchorsByName(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	tCtx := ktesting.Init(t)
 	defer cancel()
 
 	ctb1 := &certificatesv1alpha1.ClusterTrustBundle{
@@ -80,7 +83,7 @@ func TestGetTrustAnchorsByName(t *testing.T) {
 	informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
 	ctbInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
 
-	ctbManager, _ := NewInformerManager(ctbInformer, 256, 5*time.Minute)
+	ctbManager, _ := NewInformerManager(tCtx, ctbInformer, 256, 5*time.Minute)
 
 	informerFactory.Start(ctx.Done())
 	if !cache.WaitForCacheSync(ctx.Done(), ctbInformer.Informer().HasSynced) {
@@ -117,7 +120,8 @@ func TestGetTrustAnchorsByName(t *testing.T) {
 }
 
 func TestGetTrustAnchorsByNameCaching(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+	tCtx := ktesting.Init(t)
+	ctx, cancel := context.WithTimeout(tCtx, 20*time.Second)
 	defer cancel()
 
 	ctb1 := &certificatesv1alpha1.ClusterTrustBundle{
@@ -143,7 +147,7 @@ func TestGetTrustAnchorsByNameCaching(t *testing.T) {
 	informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
 	ctbInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
 
-	ctbManager, _ := NewInformerManager(ctbInformer, 256, 5*time.Minute)
+	ctbManager, _ := NewInformerManager(tCtx, ctbInformer, 256, 5*time.Minute)
 
 	informerFactory.Start(ctx.Done())
 	if !cache.WaitForCacheSync(ctx.Done(), ctbInformer.Informer().HasSynced) {
@@ -204,6 +208,7 @@ func TestGetTrustAnchorsByNameCaching(t *testing.T) {
 
 func TestGetTrustAnchorsBySignerName(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	tCtx := ktesting.Init(t)
 	defer cancel()
 
 	ctb1 := mustMakeCTB("signer-a-label-a-1", "foo.bar/a", map[string]string{"label": "a"}, mustMakeRoot(t, "0"))
@@ -217,7 +222,7 @@ func TestGetTrustAnchorsBySignerName(t *testing.T) {
 	informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
 	ctbInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
 
-	ctbManager, _ := NewInformerManager(ctbInformer, 256, 5*time.Minute)
+	ctbManager, _ := NewInformerManager(tCtx, ctbInformer, 256, 5*time.Minute)
 
 	informerFactory.Start(ctx.Done())
 	if !cache.WaitForCacheSync(ctx.Done(), ctbInformer.Informer().HasSynced) {
@@ -319,7 +324,8 @@ func TestGetTrustAnchorsBySignerName(t *testing.T) {
 }
 
 func TestGetTrustAnchorsBySignerNameCaching(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+	tCtx := ktesting.Init(t)
+	ctx, cancel := context.WithTimeout(tCtx, 20*time.Second)
 	defer cancel()
 
 	ctb1 := mustMakeCTB("signer-a-label-a-1", "foo.bar/a", map[string]string{"label": "a"}, mustMakeRoot(t, "0"))
@@ -330,7 +336,7 @@ func TestGetTrustAnchorsBySignerNameCaching(t *testing.T) {
 	informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
 	ctbInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
 
-	ctbManager, _ := NewInformerManager(ctbInformer, 256, 5*time.Minute)
+	ctbManager, _ := NewInformerManager(tCtx, ctbInformer, 256, 5*time.Minute)
 
 	informerFactory.Start(ctx.Done())
 	if !cache.WaitForCacheSync(ctx.Done(), ctbInformer.Informer().HasSynced) {
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 5c07e4367af..8c10db99cf7 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -827,7 +827,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	var clusterTrustBundleManager clustertrustbundle.Manager
 	if kubeDeps.KubeClient != nil && utilfeature.DefaultFeatureGate.Enabled(features.ClusterTrustBundleProjection) {
 		kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0)
-		clusterTrustBundleManager, err = clustertrustbundle.NewInformerManager(kubeInformers.Certificates().V1alpha1().ClusterTrustBundles(), 2*int(kubeCfg.MaxPods), 5*time.Minute)
+		clusterTrustBundleManager, err = clustertrustbundle.NewInformerManager(ctx, kubeInformers.Certificates().V1alpha1().ClusterTrustBundles(), 2*int(kubeCfg.MaxPods), 5*time.Minute)
 		if err != nil {
 			return nil, fmt.Errorf("while starting informer-based ClusterTrustBundle manager: %w", err)
 		}
diff --git a/pkg/kubelet/token/token_manager.go b/pkg/kubelet/token/token_manager.go
index 327d68f6b16..3accadef0b8 100644
--- a/pkg/kubelet/token/token_manager.go
+++ b/pkg/kubelet/token/token_manager.go
@@ -102,11 +102,13 @@ type Manager struct {
 // * If refresh fails and the old token is still valid, log an error and return the old token.
 // * If refresh fails and the old token is no longer valid, return an error
 func (m *Manager) GetServiceAccountToken(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
+	// TODO: pass ctx to GetServiceAccountToken after switching pkg/volume to contextual logging
+	ctx := context.TODO()
 	key := keyFunc(name, namespace, tr)
 
 	ctr, ok := m.get(key)
-	if ok && !m.requiresRefresh(ctr) {
+	if ok && !m.requiresRefresh(ctx, ctr) {
 		return ctr, nil
 	}
 
@@ -118,7 +120,8 @@ func (m *Manager) GetServiceAccountToken(namespace, name string, tr *authenticat
 		case m.expired(ctr):
 			return nil, fmt.Errorf("token %s expired and refresh failed: %v", key, err)
 		default:
-			klog.ErrorS(err, "Couldn't update token", "cacheKey", key)
+			logger := klog.FromContext(ctx)
+			logger.Error(err, "Couldn't update token", "cacheKey", key)
 			return ctr, nil
 		}
 	}
@@ -168,11 +171,12 @@ func (m *Manager) expired(t *authenticationv1.TokenRequest) bool {
 
 // requiresRefresh returns true if the token is older than 80% of its total
 // ttl, or if the token is older than 24 hours.
-func (m *Manager) requiresRefresh(tr *authenticationv1.TokenRequest) bool {
+func (m *Manager) requiresRefresh(ctx context.Context, tr *authenticationv1.TokenRequest) bool {
 	if tr.Spec.ExpirationSeconds == nil {
 		cpy := tr.DeepCopy()
 		cpy.Status.Token = ""
-		klog.ErrorS(nil, "Expiration seconds was nil for token request", "tokenRequest", cpy)
+		logger := klog.FromContext(ctx)
+		logger.Error(nil, "Expiration seconds was nil for token request", "tokenRequest", cpy)
 		return false
 	}
 	now := m.clock.Now()
diff --git a/pkg/kubelet/token/token_manager_test.go b/pkg/kubelet/token/token_manager_test.go
index fc42d048902..9512ccaf3f5 100644
--- a/pkg/kubelet/token/token_manager_test.go
+++ b/pkg/kubelet/token/token_manager_test.go
@@ -24,6 +24,7 @@ import (
 	authenticationv1 "k8s.io/api/authentication/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/kubernetes/test/utils/ktesting"
 	testingclock "k8s.io/utils/clock/testing"
 )
 
@@ -126,6 +127,7 @@ func TestTokenCachingAndExpiration(t *testing.T) {
 }
 
 func TestRequiresRefresh(t *testing.T) {
+	tCtx := ktesting.Init(t)
 	start := time.Now()
 	cases := []struct {
 		now, exp time.Time
@@ -183,7 +185,7 @@ func TestRequiresRefresh(t *testing.T) {
 		mgr := NewManager(nil)
 		mgr.clock = clock
 
-		rr := mgr.requiresRefresh(tr)
+		rr := mgr.requiresRefresh(tCtx, tr)
 		if rr != c.expectRefresh {
 			t.Fatalf("unexpected requiresRefresh result, got: %v, want: %v", rr, c.expectRefresh)
 		}
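
Note for reviewers (illustration, not part of the patch): the production-side changes follow the contextual logging pattern (KEP-3077): a constructor accepts a context.Context, derives a logger from it once via klog.FromContext, and logs through logr-style Info/Error calls instead of the global klog.InfoS/klog.ErrorS. A minimal self-contained sketch of that pattern; dropCache and the signer name are hypothetical stand-ins for the informer event handlers touched above:

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// dropCache mirrors the event-handler shape in clustertrustbundle_manager.go:
// the logger is taken from the context supplied by the caller, so log output
// inherits whatever names/values the caller attached to that context.
func dropCache(ctx context.Context, signerName string) {
	logger := klog.FromContext(ctx)
	logger.Info("Dropping all cache entries for signer", "signerName", signerName)
}

func main() {
	// Callers that have no context yet use context.TODO(), as
	// GetServiceAccountToken does above until pkg/volume is migrated.
	dropCache(context.TODO(), "example.com/my-signer")
}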
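On the test side, ktesting.Init replaces context.Background() so that per-test log output is attached to the right test. A hypothetical sketch of that usage, assuming only the k8s.io/kubernetes/test/utils/ktesting package already imported by the patch; TestContextPattern and doSomething are illustrative names:

package token

import (
	"context"
	"testing"
	"time"

	"k8s.io/kubernetes/test/utils/ktesting"
)

// TestContextPattern shows the pattern used by the migrated tests:
// ktesting.Init(t) returns a context carrying a per-test logger, so anything
// the code under test logs via klog.FromContext lands in this test's output.
func TestContextPattern(t *testing.T) {
	tCtx := ktesting.Init(t)

	// tCtx satisfies context.Context, so deadlines can be layered on top of
	// it, exactly as TestGetTrustAnchorsByNameCaching does above.
	ctx, cancel := context.WithTimeout(tCtx, 20*time.Second)
	defer cancel()

	doSomething(ctx)
}

// doSomething is a placeholder for the code under test, e.g. NewInformerManager.
func doSomething(ctx context.Context) {}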