Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-20 10:20:51 +00:00
Migrating pkg/controller/serviceaccount to contextual logging (#114918)
* migrating pkg/controller/serviceaccount to contextual logging
* small nit
* capitalising first letter of error
* addressed review comments
* small nit to add key

Signed-off-by: Naman <namanlakhwani@gmail.com>
parent 8f45b64c93
commit b6f9a65558
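The diff below applies the contextual-logging pattern: instead of calling the global klog.Infof with a format string, the code obtains a logger from the context via klog.FromContext(ctx) and emits structured key/value pairs. A minimal, self-contained sketch of that pattern (the syncNamespace log line mirrors one hunk below; the package wiring around it is illustrative and not part of this commit):

package main

import (
	"context"
	"time"

	"k8s.io/klog/v2"
)

// syncNamespace shows the before/after logging style used in this commit.
func syncNamespace(ctx context.Context, key string) {
	startTime := time.Now()
	// Before: global, format-string logging.
	//   klog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
	// After: the logger travels in the context and logs structured key/value pairs.
	logger := klog.FromContext(ctx)
	logger.V(4).Info("Finished syncing namespace", "namespace", key, "duration", time.Since(startTime))
}

func main() {
	// Callers attach a logger to the context once; downstream code retrieves it.
	ctx := klog.NewContext(context.Background(), klog.Background())
	syncNamespace(ctx, "kube-system")
}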
@@ -674,7 +674,7 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController
 	if err != nil {
 		return nil, true, fmt.Errorf("error creating Tokens controller: %v", err)
 	}
-	go controller.Run(int(controllerContext.ComponentConfig.SAController.ConcurrentSATokenSyncs), ctx.Done())
+	go controller.Run(ctx, int(controllerContext.ComponentConfig.SAController.ConcurrentSATokenSyncs))
 
 	// start the first set of informers now so that other controllers can start
 	controllerContext.InformerFactory.Start(ctx.Done())
@@ -21,7 +21,7 @@ import (
 	"fmt"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -108,8 +108,8 @@ func (c *ServiceAccountsController) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 	defer c.queue.ShutDown()
 
-	klog.Infof("Starting service account controller")
-	defer klog.Infof("Shutting down service account controller")
+	klog.FromContext(ctx).Info("Starting service account controller")
+	defer klog.FromContext(ctx).Info("Shutting down service account controller")
 
 	if !cache.WaitForNamedCacheSync("service account", ctx.Done(), c.saListerSynced, c.nsListerSynced) {
 		return
@@ -179,7 +179,7 @@ func (c *ServiceAccountsController) processNextWorkItem(ctx context.Context) bool {
 func (c *ServiceAccountsController) syncNamespace(ctx context.Context, key string) error {
 	startTime := time.Now()
 	defer func() {
-		klog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
+		klog.FromContext(ctx).V(4).Info("Finished syncing namespace", "namespace", key, "duration", time.Since(startTime))
 	}()
 
 	ns, err := c.nsLister.Get(key)
@@ -156,23 +156,23 @@ type TokensController struct {
 }
 
 // Run runs controller blocks until stopCh is closed
-func (e *TokensController) Run(workers int, stopCh <-chan struct{}) {
+func (e *TokensController) Run(ctx context.Context, workers int) {
 	// Shut down queues
 	defer utilruntime.HandleCrash()
 	defer e.syncServiceAccountQueue.ShutDown()
 	defer e.syncSecretQueue.ShutDown()
 
-	if !cache.WaitForNamedCacheSync("tokens", stopCh, e.serviceAccountSynced, e.secretSynced) {
+	if !cache.WaitForNamedCacheSync("tokens", ctx.Done(), e.serviceAccountSynced, e.secretSynced) {
 		return
 	}
 
-	klog.V(5).Infof("Starting workers")
+	klog.FromContext(ctx).V(5).Info("Starting workers")
 	for i := 0; i < workers; i++ {
-		go wait.Until(e.syncServiceAccount, 0, stopCh)
-		go wait.Until(e.syncSecret, 0, stopCh)
+		go wait.Until(e.syncServiceAccount, 0, ctx.Done())
+		go wait.Until(e.syncSecret, 0, ctx.Done())
 	}
-	<-stopCh
-	klog.V(1).Infof("Shutting down")
+	<-ctx.Done()
+	klog.FromContext(ctx).V(1).Info("Shutting down")
 }
 
 func (e *TokensController) queueServiceAccountSync(obj interface{}) {
@@ -217,6 +217,7 @@ func (e *TokensController) queueSecretUpdateSync(oldObj interface{}, newObj interface{}) {
 }
 
 func (e *TokensController) syncServiceAccount() {
+	logger := klog.FromContext(context.TODO())
 	key, quit := e.syncServiceAccountQueue.Get()
 	if quit {
 		return
@@ -230,22 +231,22 @@ func (e *TokensController) syncServiceAccount() {
 
 	saInfo, err := parseServiceAccountKey(key)
 	if err != nil {
-		klog.Error(err)
+		logger.Error(err, "Parsing service account key")
 		return
 	}
 
 	sa, err := e.getServiceAccount(saInfo.namespace, saInfo.name, saInfo.uid, false)
 	switch {
 	case err != nil:
-		klog.Error(err)
+		logger.Error(err, "Getting service account")
 		retry = true
 	case sa == nil:
 		// service account no longer exists, so delete related tokens
-		klog.V(4).Infof("syncServiceAccount(%s/%s), service account deleted, removing tokens", saInfo.namespace, saInfo.name)
+		logger.V(4).Info("Service account deleted, removing tokens", "namespace", saInfo.namespace, "serviceaccount", saInfo.name)
 		sa = &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: saInfo.namespace, Name: saInfo.name, UID: saInfo.uid}}
 		retry, err = e.deleteTokens(sa)
 		if err != nil {
-			klog.Errorf("error deleting serviceaccount tokens for %s/%s: %v", saInfo.namespace, saInfo.name, err)
+			logger.Error(err, "Error deleting serviceaccount tokens", "namespace", saInfo.namespace, "serviceaccount", saInfo.name)
 		}
 	}
 }
@@ -263,16 +264,17 @@ func (e *TokensController) syncSecret() {
 		e.retryOrForget(e.syncSecretQueue, key, retry)
 	}()
 
+	logger := klog.FromContext(context.TODO())
 	secretInfo, err := parseSecretQueueKey(key)
 	if err != nil {
-		klog.Error(err)
+		logger.Error(err, "Parsing secret queue key")
 		return
 	}
 
 	secret, err := e.getSecret(secretInfo.namespace, secretInfo.name, secretInfo.uid, false)
 	switch {
 	case err != nil:
-		klog.Error(err)
+		logger.Error(err, "Getting secret")
 		retry = true
 	case secret == nil:
 		// If the service account exists
@@ -281,7 +283,7 @@ func (e *TokensController) syncSecret() {
 			if err := clientretry.RetryOnConflict(RemoveTokenBackoff, func() error {
 				return e.removeSecretReference(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, secretInfo.name)
 			}); err != nil {
-				klog.Error(err)
+				logger.Error(err, "Removing secret reference")
 			}
 		}
 	default:
@@ -289,19 +291,19 @@ func (e *TokensController) syncSecret() {
 		sa, saErr := e.getServiceAccount(secretInfo.namespace, secretInfo.saName, secretInfo.saUID, true)
 		switch {
 		case saErr != nil:
-			klog.Error(saErr)
+			logger.Error(saErr, "Getting service account")
 			retry = true
 		case sa == nil:
 			// Delete token
-			klog.V(4).Infof("syncSecret(%s/%s), service account does not exist, deleting token", secretInfo.namespace, secretInfo.name)
+			logger.V(4).Info("Service account does not exist, deleting token", "secret", klog.KRef(secretInfo.namespace, secretInfo.name))
 			if retriable, err := e.deleteToken(secretInfo.namespace, secretInfo.name, secretInfo.uid); err != nil {
-				klog.Errorf("error deleting serviceaccount token %s/%s for service account %s: %v", secretInfo.namespace, secretInfo.name, secretInfo.saName, err)
+				logger.Error(err, "Deleting serviceaccount token", "secret", klog.KRef(secretInfo.namespace, secretInfo.name), "serviceAccount", klog.KRef(secretInfo.namespace, secretInfo.saName))
 				retry = retriable
 			}
 		default:
 			// Update token if needed
-			if retriable, err := e.generateTokenIfNeeded(sa, secret); err != nil {
-				klog.Errorf("error populating serviceaccount token %s/%s for service account %s: %v", secretInfo.namespace, secretInfo.name, secretInfo.saName, err)
+			if retriable, err := e.generateTokenIfNeeded(logger, sa, secret); err != nil {
+				logger.Error(err, "Populating serviceaccount token", "secret", klog.KRef(secretInfo.namespace, secretInfo.name), "serviceAccount", klog.KRef(secretInfo.namespace, secretInfo.saName))
 				retry = retriable
 			}
 		}
@@ -356,7 +358,7 @@ func (e *TokensController) secretUpdateNeeded(secret *v1.Secret) (bool, bool, bool) {
 }
 
 // generateTokenIfNeeded populates the token data for the given Secret if not already set
-func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccount, cachedSecret *v1.Secret) ( /* retry */ bool, error) {
+func (e *TokensController) generateTokenIfNeeded(logger klog.Logger, serviceAccount *v1.ServiceAccount, cachedSecret *v1.Secret) ( /* retry */ bool, error) {
 	// Check the cached secret to see if changes are needed
 	if needsCA, needsNamespace, needsToken := e.secretUpdateNeeded(cachedSecret); !needsCA && !needsToken && !needsNamespace {
 		return false, nil
@@ -373,7 +375,7 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccount, cachedSecret *v1.Secret) ( /* retry */ bool, error) {
 	if liveSecret.ResourceVersion != cachedSecret.ResourceVersion {
 		// our view of the secret is not up to date
 		// we'll get notified of an update event later and get to try again
-		klog.V(2).Infof("secret %s/%s is not up to date, skipping token population", liveSecret.Namespace, liveSecret.Name)
+		logger.V(2).Info("Secret is not up to date, skipping token population", "secret", klog.KRef(liveSecret.Namespace, liveSecret.Name))
 		return false, nil
 	}
 
@@ -403,7 +403,7 @@ func startServiceAccountTestServerAndWaitForCaches(t *testing.T) (clientset.Interface
 	if err != nil {
 		return rootClientset, clientConfig, stop, err
 	}
-	go tokenController.Run(1, ctx.Done())
+	go tokenController.Run(ctx, 1)
 
 	serviceAccountController, err := serviceaccountcontroller.NewServiceAccountsController(
 		informers.Core().V1().ServiceAccounts(),
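For callers, the migration replaces the (workers, stopCh) pair with a single context, as the test change above shows; cancelling the context now plays the role of closing the stop channel. A rough sketch of the calling convention under that assumption (the controller type below is a stand-in, not the TokensController from this commit):

package main

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/klog/v2"
)

// fakeController stands in for any controller with the context-first Run signature.
type fakeController struct{}

func (c *fakeController) Run(ctx context.Context, workers int) {
	logger := klog.FromContext(ctx)
	logger.Info("Starting workers", "count", workers)
	for i := 0; i < workers; i++ {
		go wait.Until(func() { /* process one work item */ }, time.Second, ctx.Done())
	}
	<-ctx.Done() // cancelling the context replaces closing the old stop channel
	logger.Info("Shutting down")
}

func main() {
	ctx, cancel := context.WithCancel(klog.NewContext(context.Background(), klog.Background()))
	go (&fakeController{}).Run(ctx, 1)
	time.Sleep(100 * time.Millisecond)
	cancel() // equivalent to close(stopCh) before this change
	time.Sleep(10 * time.Millisecond) // give the worker a moment to log shutdown
}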