kubelet: migrate clustertrustbundle, token to contextual logging

Signed-off-by: Oksana Baranova <oksana.baranova@intel.com>
This commit is contained in:
Oksana Baranova 2024-10-30 14:35:31 +02:00
parent 5f594f4215
commit 49b88f1d8a
9 changed files with 39 additions and 17 deletions

View File

@@ -165,6 +165,8 @@ linters-settings: # please keep this alphabetized
contextual k8s.io/kubernetes/test/e2e/dra/.*
contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*
contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
contextual k8s.io/kubernetes/pkg/kubelet/token/.*
# As long as contextual logging is alpha or beta, all WithName, WithValues,
# NewContext calls have to go through klog. Once it is GA, we can lift

View File

@@ -211,6 +211,8 @@ linters-settings: # please keep this alphabetized
contextual k8s.io/kubernetes/test/e2e/dra/.*
contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*
contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
contextual k8s.io/kubernetes/pkg/kubelet/token/.*
# As long as contextual logging is alpha or beta, all WithName, WithValues,
# NewContext calls have to go through klog. Once it is GA, we can lift

View File

@@ -213,6 +213,8 @@ linters-settings: # please keep this alphabetized
contextual k8s.io/kubernetes/test/e2e/dra/.*
contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*
contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
contextual k8s.io/kubernetes/pkg/kubelet/token/.*
# As long as contextual logging is alpha or beta, all WithName, WithValues,
# NewContext calls have to go through klog. Once it is GA, we can lift

View File

@@ -49,6 +49,8 @@ contextual k8s.io/kubernetes/pkg/scheduler/.*
contextual k8s.io/kubernetes/test/e2e/dra/.*
contextual k8s.io/kubernetes/pkg/kubelet/cm/dra/.*
contextual k8s.io/kubernetes/pkg/kubelet/pleg/.*
contextual k8s.io/kubernetes/pkg/kubelet/clustertrustbundle/.*
contextual k8s.io/kubernetes/pkg/kubelet/token/.*
# As long as contextual logging is alpha or beta, all WithName, WithValues,
# NewContext calls have to go through klog. Once it is GA, we can lift

View File

@@ -19,6 +19,7 @@ limitations under the License.
package clustertrustbundle
import (
"context"
"encoding/pem"
"fmt"
"math/rand"
@@ -58,7 +59,7 @@ type InformerManager struct {
var _ Manager = (*InformerManager)(nil)
// NewInformerManager returns an initialized InformerManager.
func NewInformerManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer, cacheSize int, cacheTTL time.Duration) (*InformerManager, error) {
func NewInformerManager(ctx context.Context, bundles certinformersv1alpha1.ClusterTrustBundleInformer, cacheSize int, cacheTTL time.Duration) (*InformerManager, error) {
// We need to call Informer() before calling start on the shared informer
// factory, or the informer won't be registered to be started.
m := &InformerManager{
@@ -68,6 +69,7 @@ func NewInformerManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer
cacheTTL: cacheTTL,
}
logger := klog.FromContext(ctx)
// Have the informer bust cache entries when it sees updates that could
// apply to them.
_, err := m.ctbInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -76,7 +78,7 @@ func NewInformerManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer
if !ok {
return
}
klog.InfoS("Dropping all cache entries for signer", "signerName", ctb.Spec.SignerName)
logger.Info("Dropping all cache entries for signer", "signerName", ctb.Spec.SignerName)
m.dropCacheFor(ctb)
},
UpdateFunc: func(old, new any) {
@@ -84,7 +86,7 @@ func NewInformerManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer
if !ok {
return
}
klog.InfoS("Dropping cache for ClusterTrustBundle", "signerName", ctb.Spec.SignerName)
logger.Info("Dropping cache for ClusterTrustBundle", "signerName", ctb.Spec.SignerName)
m.dropCacheFor(new.(*certificatesv1alpha1.ClusterTrustBundle))
},
DeleteFunc: func(obj any) {
@@ -99,7 +101,7 @@ func NewInformerManager(bundles certinformersv1alpha1.ClusterTrustBundleInformer
return
}
}
klog.InfoS("Dropping cache for ClusterTrustBundle", "signerName", ctb.Spec.SignerName)
logger.Info("Dropping cache for ClusterTrustBundle", "signerName", ctb.Spec.SignerName)
m.dropCacheFor(ctb)
},
})

View File

@@ -37,15 +37,17 @@ import (
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/test/utils/ktesting"
)
func TestBeforeSynced(t *testing.T) {
tCtx := ktesting.Init(t)
kc := fake.NewSimpleClientset()
informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
ctbInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
ctbManager, _ := NewInformerManager(ctbInformer, 256, 5*time.Minute)
ctbManager, _ := NewInformerManager(tCtx, ctbInformer, 256, 5*time.Minute)
_, err := ctbManager.GetTrustAnchorsByName("foo", false)
if err == nil {
@@ -55,6 +57,7 @@ func TestBeforeSynced(t *testing.T) {
func TestGetTrustAnchorsByName(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
tCtx := ktesting.Init(t)
defer cancel()
ctb1 := &certificatesv1alpha1.ClusterTrustBundle{
@@ -80,7 +83,7 @@ func TestGetTrustAnchorsByName(t *testing.T) {
informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
ctbInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
ctbManager, _ := NewInformerManager(ctbInformer, 256, 5*time.Minute)
ctbManager, _ := NewInformerManager(tCtx, ctbInformer, 256, 5*time.Minute)
informerFactory.Start(ctx.Done())
if !cache.WaitForCacheSync(ctx.Done(), ctbInformer.Informer().HasSynced) {
@@ -117,7 +120,8 @@ func TestGetTrustAnchorsByName(t *testing.T) {
}
func TestGetTrustAnchorsByNameCaching(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
tCtx := ktesting.Init(t)
ctx, cancel := context.WithTimeout(tCtx, 20*time.Second)
defer cancel()
ctb1 := &certificatesv1alpha1.ClusterTrustBundle{
@@ -143,7 +147,7 @@ func TestGetTrustAnchorsByNameCaching(t *testing.T) {
informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
ctbInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
ctbManager, _ := NewInformerManager(ctbInformer, 256, 5*time.Minute)
ctbManager, _ := NewInformerManager(tCtx, ctbInformer, 256, 5*time.Minute)
informerFactory.Start(ctx.Done())
if !cache.WaitForCacheSync(ctx.Done(), ctbInformer.Informer().HasSynced) {
@@ -204,6 +208,7 @@ func TestGetTrustAnchorsByNameCaching(t *testing.T) {
func TestGetTrustAnchorsBySignerName(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
tCtx := ktesting.Init(t)
defer cancel()
ctb1 := mustMakeCTB("signer-a-label-a-1", "foo.bar/a", map[string]string{"label": "a"}, mustMakeRoot(t, "0"))
@@ -217,7 +222,7 @@ func TestGetTrustAnchorsBySignerName(t *testing.T) {
informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
ctbInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
ctbManager, _ := NewInformerManager(ctbInformer, 256, 5*time.Minute)
ctbManager, _ := NewInformerManager(tCtx, ctbInformer, 256, 5*time.Minute)
informerFactory.Start(ctx.Done())
if !cache.WaitForCacheSync(ctx.Done(), ctbInformer.Informer().HasSynced) {
@@ -319,7 +324,8 @@ func TestGetTrustAnchorsBySignerName(t *testing.T) {
}
func TestGetTrustAnchorsBySignerNameCaching(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
tCtx := ktesting.Init(t)
ctx, cancel := context.WithTimeout(tCtx, 20*time.Second)
defer cancel()
ctb1 := mustMakeCTB("signer-a-label-a-1", "foo.bar/a", map[string]string{"label": "a"}, mustMakeRoot(t, "0"))
@@ -330,7 +336,7 @@ func TestGetTrustAnchorsBySignerNameCaching(t *testing.T) {
informerFactory := informers.NewSharedInformerFactoryWithOptions(kc, 0)
ctbInformer := informerFactory.Certificates().V1alpha1().ClusterTrustBundles()
ctbManager, _ := NewInformerManager(ctbInformer, 256, 5*time.Minute)
ctbManager, _ := NewInformerManager(tCtx, ctbInformer, 256, 5*time.Minute)
informerFactory.Start(ctx.Done())
if !cache.WaitForCacheSync(ctx.Done(), ctbInformer.Informer().HasSynced) {

View File

@@ -827,7 +827,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
var clusterTrustBundleManager clustertrustbundle.Manager
if kubeDeps.KubeClient != nil && utilfeature.DefaultFeatureGate.Enabled(features.ClusterTrustBundleProjection) {
kubeInformers := informers.NewSharedInformerFactoryWithOptions(kubeDeps.KubeClient, 0)
clusterTrustBundleManager, err = clustertrustbundle.NewInformerManager(kubeInformers.Certificates().V1alpha1().ClusterTrustBundles(), 2*int(kubeCfg.MaxPods), 5*time.Minute)
clusterTrustBundleManager, err = clustertrustbundle.NewInformerManager(ctx, kubeInformers.Certificates().V1alpha1().ClusterTrustBundles(), 2*int(kubeCfg.MaxPods), 5*time.Minute)
if err != nil {
return nil, fmt.Errorf("while starting informer-based ClusterTrustBundle manager: %w", err)
}

View File

@@ -102,11 +102,13 @@ type Manager struct {
// * If refresh fails and the old token is still valid, log an error and return the old token.
// * If refresh fails and the old token is no longer valid, return an error
func (m *Manager) GetServiceAccountToken(namespace, name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
// TODO: pass ctx to GetServiceAccountToken after switching pkg/volume to contextual logging
ctx := context.TODO()
key := keyFunc(name, namespace, tr)
ctr, ok := m.get(key)
if ok && !m.requiresRefresh(ctr) {
if ok && !m.requiresRefresh(ctx, ctr) {
return ctr, nil
}
@@ -118,7 +120,8 @@ func (m *Manager) GetServiceAccountToken(namespace, name string, tr *authenticat
case m.expired(ctr):
return nil, fmt.Errorf("token %s expired and refresh failed: %v", key, err)
default:
klog.ErrorS(err, "Couldn't update token", "cacheKey", key)
logger := klog.FromContext(ctx)
logger.Error(err, "Couldn't update token", "cacheKey", key)
return ctr, nil
}
}
@@ -168,11 +171,12 @@ func (m *Manager) expired(t *authenticationv1.TokenRequest) bool {
// requiresRefresh returns true if the token is older than 80% of its total
// ttl, or if the token is older than 24 hours.
func (m *Manager) requiresRefresh(tr *authenticationv1.TokenRequest) bool {
func (m *Manager) requiresRefresh(ctx context.Context, tr *authenticationv1.TokenRequest) bool {
if tr.Spec.ExpirationSeconds == nil {
cpy := tr.DeepCopy()
cpy.Status.Token = ""
klog.ErrorS(nil, "Expiration seconds was nil for token request", "tokenRequest", cpy)
logger := klog.FromContext(ctx)
logger.Error(nil, "Expiration seconds was nil for token request", "tokenRequest", cpy)
return false
}
now := m.clock.Now()

View File

@@ -24,6 +24,7 @@ import (
authenticationv1 "k8s.io/api/authentication/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/test/utils/ktesting"
testingclock "k8s.io/utils/clock/testing"
)
@@ -126,6 +127,7 @@ func TestTokenCachingAndExpiration(t *testing.T) {
}
func TestRequiresRefresh(t *testing.T) {
tCtx := ktesting.Init(t)
start := time.Now()
cases := []struct {
now, exp time.Time
@@ -183,7 +185,7 @@ func TestRequiresRefresh(t *testing.T) {
mgr := NewManager(nil)
mgr.clock = clock
rr := mgr.requiresRefresh(tr)
rr := mgr.requiresRefresh(tCtx, tr)
if rr != c.expectRefresh {
t.Fatalf("unexpected requiresRefresh result, got: %v, want: %v", rr, c.expectRefresh)
}