kubelet: migrate clustertrustbundle, token to contextual logging

Signed-off-by: Oksana Baranova <oksana.baranova@intel.com>
Oksana Baranova, 2024-10-30 14:35:31 +02:00
commit 49b88f1d8a, parent 5f594f4215
9 changed files with 39 additions and 17 deletions


@@ -165,6 +165,8 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/kubernetes/test/e2e/dra/.*
 contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
 contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*
+contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
+contextual k8s.io/kubernetes/pkg/kubelet/token/.*
 # As long as contextual logging is alpha or beta, all WithName, WithValues,
 # NewContext calls have to go through klog. Once it is GA, we can lift


@@ -211,6 +211,8 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/kubernetes/test/e2e/dra/.*
 contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
 contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*
+contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
+contextual k8s.io/kubernetes/pkg/kubelet/token/.*
 # As long as contextual logging is alpha or beta, all WithName, WithValues,
 # NewContext calls have to go through klog. Once it is GA, we can lift


@@ -213,6 +213,8 @@ linters-settings: # please keep this alphabetized
 contextual k8s.io/kubernetes/test/e2e/dra/.*
 contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
 contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*
+contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
+contextual k8s.io/kubernetes/pkg/kubelet/token/.*
 # As long as contextual logging is alpha or beta, all WithName, WithValues,
 # NewContext calls have to go through klog. Once it is GA, we can lift


@@ -49,6 +49,8 @@ contextual k8s.io/kubernetes/pkg/scheduler/.*
 contextual k8s.io/kubernetes/test/e2e/dra/.*
 contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
 contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*
+contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
+contextual k8s.io/kubernetes/pkg/kubelet/token/.*
 # As long as contextual logging is alpha or beta, all WithName, WithValues,
 # NewContext calls have to go through klog. Once it is GA, we can lift
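The four hunks above extend the logcheck linter's "contextual" lists to the two migrated packages, so direct klog.InfoS/klog.ErrorS calls under pkg/kubelet/clustertrustbundle and pkg/kubelet/token are now flagged. A minimal sketch of the style the check expects; the package and helper names are illustrative, not from this commit:

package sketch

import (
	"context"

	"k8s.io/klog/v2"
)

// dropCache is a hypothetical helper showing the contextual style: the
// logger travels in the context instead of being the global klog sink.
func dropCache(ctx context.Context, signerName string) {
	logger := klog.FromContext(ctx)
	logger.Info("Dropping cache", "signerName", signerName)
	// The pre-migration equivalent would have been:
	// klog.InfoS("Dropping cache", "signerName", signerName)
}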


@@ -19,6 +19,7 @@ limitations under the License.
 package clustertrustbundle
 import (
+	"context"
 	"encoding/pem"
 	"fmt"
 	"math/rand"
@@ -58,7 +59,7 @@ type InformerManager struct {
 var _ Manager = (*InformerManager)(nil)
 // NewInformerManager returns an initialized InformerManager.
-func NewInformerManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer, cacheSize int, cacheTTL time.Duration) (*InformerManager, error) {
+func NewInformerManager(ctx context.Context, bundles certinformersv1alpha1.ClusterTrustBundleInformer, cacheSize int, cacheTTL time.Duration) (*InformerManager, error) {
 	// We need to call Informer() before calling start on the shared informer
 	// factory, or the informer won't be registered to be started.
 	m := &InformerManager{
@@ -68,6 +69,7 @@ func NewInformerManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer
 		cacheTTL: cacheTTL,
 	}
+	logger := klog.FromContext(ctx)
 	// Have the informer bust cache entries when it sees updates that could
 	// apply to them.
 	_, err := m.ctbInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -76,7 +78,7 @@ func NewInformerManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer
 			if !ok {
 				return
 			}
-			klog.InfoS("Dropping all cache entries for signer", "signerName", ctb.Spec.SignerName)
+			logger.Info("Dropping all cache entries for signer", "signerName", ctb.Spec.SignerName)
 			m.dropCacheFor(ctb)
 		},
 		UpdateFunc: func(old, new any) {
@@ -84,7 +86,7 @@ func NewInformerManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer
 			if !ok {
 				return
 			}
-			klog.InfoS("Dropping cache for ClusterTrustBundle", "signerName", ctb.Spec.SignerName)
+			logger.Info("Dropping cache for ClusterTrustBundle", "signerName", ctb.Spec.SignerName)
 			m.dropCacheFor(new.(*certificatesv1alpha1.ClusterTrustBundle))
 		},
 		DeleteFunc: func(obj any) {
@@ -99,7 +101,7 @@ func NewInformerManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer
 					return
 				}
 			}
-			klog.InfoS("Dropping cache for ClusterTrustBundle", "signerName", ctb.Spec.SignerName)
+			logger.Info("Dropping cache for ClusterTrustBundle", "signerName", ctb.Spec.SignerName)
 			m.dropCacheFor(ctb)
 		},
 	})
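With the constructor now taking a ctx, the handlers above log through whatever logger the caller attached to that context, falling back to the global one otherwise. A rough sketch of a caller that wires in a named logger; the helper name, import alias, and logger name are assumptions for illustration:

package sketch

import (
	"context"
	"time"

	certinformersv1alpha1 "k8s.io/client-go/informers/certificates/v1alpha1"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/kubelet/clustertrustbundle"
)

// newCTBManager attaches a named logger to the context so that the
// "Dropping cache ..." messages emitted by the informer handlers carry it.
func newCTBManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer) (clustertrustbundle.Manager, error) {
	logger := klog.LoggerWithName(klog.Background(), "clustertrustbundle")
	ctx := klog.NewContext(context.Background(), logger)
	// klog.FromContext inside NewInformerManager recovers this logger.
	return clustertrustbundle.NewInformerManager(ctx, bundles, 256, 5*time.Minute)
}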


@@ -37,15 +37,17 @@ import (
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	"k8s.io/client-go/tools/cache"
+	"k8s.io/kubernetes/test/utils/ktesting"
 )
 func TestBeforeSynced(t *testing.T) {
+	tCtx := ktesting.Init(t)
 	kc := fake.NewSimpleClientset()
 	informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
 	ctbInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
-	ctbManager, _ := NewInformerManager(ctbInformer, 256, 5*time.Minute)
+	ctbManager, _ := NewInformerManager(tCtx, ctbInformer, 256, 5*time.Minute)
 	_, err := ctbManager.GetTrustAnchorsByName("foo", false)
 	if err == nil {
@@ -55,6 +57,7 @@ func TestBeforeSynced(t *testing.T) {
 func TestGetTrustAnchorsByName(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	tCtx := ktesting.Init(t)
 	defer cancel()
 	ctb1 := &certificatesv1alpha1.ClusterTrustBundle{
@@ -80,7 +83,7 @@ func TestGetTrustAnchorsByName(t *testing.T) {
 	informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
 	ctbInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
-	ctbManager, _ := NewInformerManager(ctbInformer, 256, 5*time.Minute)
+	ctbManager, _ := NewInformerManager(tCtx, ctbInformer, 256, 5*time.Minute)
 	informerFactory.Start(ctx.Done())
 	if !cache.WaitForCacheSync(ctx.Done(), ctbInformer.Informer().HasSynced) {
@@ -117,7 +120,8 @@ func TestGetTrustAnchorsByName(t *testing.T) {
 }
 func TestGetTrustAnchorsByNameCaching(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+	tCtx := ktesting.Init(t)
+	ctx, cancel := context.WithTimeout(tCtx, 20*time.Second)
 	defer cancel()
 	ctb1 := &certificatesv1alpha1.ClusterTrustBundle{
@@ -143,7 +147,7 @@ func TestGetTrustAnchorsByNameCaching(t *testing.T) {
 	informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
 	ctbInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
-	ctbManager, _ := NewInformerManager(ctbInformer, 256, 5*time.Minute)
+	ctbManager, _ := NewInformerManager(tCtx, ctbInformer, 256, 5*time.Minute)
 	informerFactory.Start(ctx.Done())
 	if !cache.WaitForCacheSync(ctx.Done(), ctbInformer.Informer().HasSynced) {
@@ -204,6 +208,7 @@ func TestGetTrustAnchorsByNameCaching(t *testing.T) {
 func TestGetTrustAnchorsBySignerName(t *testing.T) {
 	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	tCtx := ktesting.Init(t)
 	defer cancel()
 	ctb1 := mustMakeCTB("signer-a-label-a-1", "foo.bar/a", map[string]string{"label": "a"}, mustMakeRoot(t, "0"))
@@ -217,7 +222,7 @@ func TestGetTrustAnchorsBySignerName(t *testing.T) {
 	informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
 	ctbInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
-	ctbManager, _ := NewInformerManager(ctbInformer, 256, 5*time.Minute)
+	ctbManager, _ := NewInformerManager(tCtx, ctbInformer, 256, 5*time.Minute)
 	informerFactory.Start(ctx.Done())
 	if !cache.WaitForCacheSync(ctx.Done(), ctbInformer.Informer().HasSynced) {
@@ -319,7 +324,8 @@ func TestGetTrustAnchorsBySignerName(t *testing.T) {
 }
 func TestGetTrustAnchorsBySignerNameCaching(t *testing.T) {
-	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+	tCtx := ktesting.Init(t)
+	ctx, cancel := context.WithTimeout(tCtx, 20*time.Second)
 	defer cancel()
 	ctb1 := mustMakeCTB("signer-a-label-a-1", "foo.bar/a", map[string]string{"label": "a"}, mustMakeRoot(t, "0"))
@@ -330,7 +336,7 @@ func TestGetTrustAnchorsBySignerNameCaching(t *testing.T) {
 	informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
 	ctbInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
-	ctbManager, _ := NewInformerManager(ctbInformer, 256, 5*time.Minute)
+	ctbManager, _ := NewInformerManager(tCtx, ctbInformer, 256, 5*time.Minute)
 	informerFactory.Start(ctx.Done())
 	if !cache.WaitForCacheSync(ctx.Done(), ctbInformer.Informer().HasSynced) {
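In these tests, ktesting.Init(t) produces a context that carries a per-test logger and also satisfies context.Context, so it can be handed straight to NewInformerManager or wrapped with a timeout as above. A small self-contained sketch of that pattern; the test name is made up:

package sketch

import (
	"context"
	"testing"
	"time"

	"k8s.io/kubernetes/test/utils/ktesting"
)

func TestContextualSketch(t *testing.T) {
	// tCtx is a context.Context whose logger writes through the test, so
	// klog.FromContext picks it up in any code the test calls.
	tCtx := ktesting.Init(t)
	ctx, cancel := context.WithTimeout(tCtx, 20*time.Second)
	defer cancel()
	_ = ctx // pass ctx (or tCtx directly) into the code under test
}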


@@ -827,7 +827,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	var clusterTrustBundleManager clustertrustbundle.Manager
 	if kubeDeps.KubeClient != nil && utilfeature.DefaultFeatureGate.Enabled(features.ClusterTrustBundleProjection) {
 		kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0)
-		clusterTrustBundleManager, err = clustertrustbundle.NewInformerManager(kubeInformers.Certificates().V1alpha1().ClusterTrustBundles(), 2*int(kubeCfg.MaxPods), 5*time.Minute)
+		clusterTrustBundleManager, err = clustertrustbundle.NewInformerManager(ctx, kubeInformers.Certificates().V1alpha1().ClusterTrustBundles(), 2*int(kubeCfg.MaxPods), 5*time.Minute)
 		if err != nil {
 			return nil, fmt.Errorf("while starting informer-based ClusterTrustBundle manager: %w", err)
 		}


@@ -102,11 +102,13 @@ type Manager struct {
 // * If refresh fails and the old token is still valid, log an error and return the old token.
 // * If refresh fails and the old token is no longer valid, return an error
 func (m *Manager) GetServiceAccountToken(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
+	// TODO: pass ctx to GetServiceAccountToken after switching pkg/volume to contextual logging
+	ctx := context.TODO()
 	key := keyFunc(name, namespace, tr)
 	ctr, ok := m.get(key)
-	if ok && !m.requiresRefresh(ctr) {
+	if ok && !m.requiresRefresh(ctx, ctr) {
 		return ctr, nil
 	}
@@ -118,7 +120,8 @@ func (m *Manager) GetServiceAccountToken(namespace, name string, tr *authenticat
 		case m.expired(ctr):
 			return nil, fmt.Errorf("token %s expired and refresh failed: %v", key, err)
 		default:
-			klog.ErrorS(err, "Couldn't update token", "cacheKey", key)
+			logger := klog.FromContext(ctx)
+			logger.Error(err, "Couldn't update token", "cacheKey", key)
 			return ctr, nil
 		}
 	}
@@ -168,11 +171,12 @@ func (m *Manager) expired(t *authenticationv1.TokenRequest) bool {
 // requiresRefresh returns true if the token is older than 80% of its total
 // ttl, or if the token is older than 24 hours.
-func (m *Manager) requiresRefresh(tr *authenticationv1.TokenRequest) bool {
+func (m *Manager) requiresRefresh(ctx context.Context, tr *authenticationv1.TokenRequest) bool {
 	if tr.Spec.ExpirationSeconds == nil {
 		cpy := tr.DeepCopy()
 		cpy.Status.Token = ""
-		klog.ErrorS(nil, "Expiration seconds was nil for token request", "tokenRequest", cpy)
+		logger := klog.FromContext(ctx)
+		logger.Error(nil, "Expiration seconds was nil for token request", "tokenRequest", cpy)
 		return false
 	}
 	now := m.clock.Now()
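Because GetServiceAccountToken has no context parameter yet, the hunk bridges with context.TODO(); klog.FromContext falls back to the global klog logger when the context carries none, so these errors still reach the same place until pkg/volume is migrated and a real context can be threaded through. A short sketch of that fallback behaviour; the function and key names are illustrative:

package sketch

import (
	"context"
	"errors"

	"k8s.io/klog/v2"
)

// logRefreshFailure logs through the context's logger if one is attached,
// otherwise through the global klog logger, which is what the
// context.TODO() bridge above relies on.
func logRefreshFailure(ctx context.Context, err error, key string) {
	logger := klog.FromContext(ctx)
	logger.Error(err, "Couldn't update token", "cacheKey", key)
}

func example() {
	logRefreshFailure(context.TODO(), errors.New("refresh failed"), "ns/sa")
}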


@@ -24,6 +24,7 @@ import (
 	authenticationv1 "k8s.io/api/authentication/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/kubernetes/test/utils/ktesting"
 	testingclock "k8s.io/utils/clock/testing"
 )
@@ -126,6 +127,7 @@ func TestTokenCachingAndExpiration(t *testing.T) {
 }
 func TestRequiresRefresh(t *testing.T) {
+	tCtx := ktesting.Init(t)
 	start := time.Now()
 	cases := []struct {
 		now, exp time.Time
@@ -183,7 +185,7 @@ func TestRequiresRefresh(t *testing.T) {
 			mgr := NewManager(nil)
 			mgr.clock = clock
-			rr := mgr.requiresRefresh(tr)
+			rr := mgr.requiresRefresh(tCtx, tr)
 			if rr != c.expectRefresh {
 				t.Fatalf("unexpected requiresRefresh result, got: %v, want: %v", rr, c.expectRefresh)
 			}