Replace HandleError with HandleErrorWithContext

Mayank Agrawal
2025-09-16 23:44:42 -07:00
parent 58f2d96901
commit d12eeb98d0
3 changed files with 13 additions and 14 deletions
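The change follows one pattern throughout: instead of folding the error and its variable parts into a single string with fmt.Errorf, the error, a constant message, and key/value pairs are passed to the contextual handler, which takes its logger from the context. A rough before/after sketch, assuming k8s.io/apimachinery's utilruntime package; syncResource and its key are hypothetical placeholders, not code from this commit:

```go
package main

import (
	"context"
	"fmt"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

func syncResource(ctx context.Context, key string) {
	err := fmt.Errorf("simulated failure")

	// Before: variable parts are folded into one string; the key exists only
	// inside the message text.
	utilruntime.HandleError(fmt.Errorf("syncing %q failed: %w", key, err))

	// After: the logger comes from ctx, the message stays constant, and the
	// variable parts travel as structured key/value pairs.
	utilruntime.HandleErrorWithContext(ctx, err, "Syncing resource failed", "key", key)
}

func main() {
	syncResource(context.Background(), "default/example")
}
```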

View File

@@ -161,7 +161,7 @@ func (d *namespacedResourcesDeleter) initOpCache(ctx context.Context) {
// TODO(sttts): get rid of opCache and http 405 logic around it and trust discovery info
resources, err := d.discoverResourcesFn()
if err != nil {
-utilruntime.HandleError(fmt.Errorf("unable to get all supported resources from server: %v", err))
+utilruntime.HandleErrorWithContext(ctx, err, "Unable to get all supported resources from server")
}
logger := klog.FromContext(ctx)
if len(resources) == 0 {
@@ -555,7 +555,7 @@ func (d *namespacedResourcesDeleter) deleteAllContent(ctx context.Context, ns *v
logger.V(5).Info("Namespace controller - pods still remain, delaying deletion of other resources", "namespace", namespace)
if hasChanged := conditionUpdater.Update(ns); hasChanged {
if _, err = d.nsClient.UpdateStatus(ctx, ns, metav1.UpdateOptions{}); err != nil {
-utilruntime.HandleError(fmt.Errorf("couldn't update status condition for namespace %q: %w", namespace, err))
+utilruntime.HandleErrorWithContext(ctx, err, "Couldn't update status condition for namespace", "namespace", namespace)
}
}
return estimate, utilerrors.NewAggregate(errs)
@@ -595,7 +595,7 @@ func (d *namespacedResourcesDeleter) deleteAllContent(ctx context.Context, ns *v
// NOT remove the resource instance.
if hasChanged := conditionUpdater.Update(ns); hasChanged {
if _, err = d.nsClient.UpdateStatus(ctx, ns, metav1.UpdateOptions{}); err != nil {
-utilruntime.HandleError(fmt.Errorf("couldn't update status condition for namespace %q: %v", namespace, err))
+utilruntime.HandleErrorWithContext(ctx, err, "Couldn't update status condition for namespace", "namespace", namespace)
}
}

View File

@@ -18,7 +18,6 @@ package namespace
import (
"context"
"fmt"
"time"
"golang.org/x/time/rate"
@@ -88,11 +87,11 @@ func NewNamespaceController(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
namespace := obj.(*v1.Namespace)
-namespaceController.enqueueNamespace(namespace)
+namespaceController.enqueueNamespace(ctx, namespace)
},
UpdateFunc: func(oldObj, newObj interface{}) {
namespace := newObj.(*v1.Namespace)
-namespaceController.enqueueNamespace(namespace)
+namespaceController.enqueueNamespace(ctx, namespace)
},
},
resyncPeriod,
@@ -117,10 +116,10 @@ func nsControllerRateLimiter() workqueue.TypedRateLimiter[string] {
// enqueueNamespace adds an object to the controller work queue
// obj could be an *v1.Namespace, or a DeletionFinalStateUnknown item.
-func (nm *NamespaceController) enqueueNamespace(obj interface{}) {
+func (nm *NamespaceController) enqueueNamespace(ctx context.Context, obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
-utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
+utilruntime.HandleErrorWithContext(ctx, err, "Couldn't get key for object", "object", obj)
return
}
@@ -161,7 +160,7 @@ func (nm *NamespaceController) worker(ctx context.Context) {
} else {
// rather than wait for a full resync, re-add the namespace to the queue to be processed
nm.queue.AddRateLimited(key)
-utilruntime.HandleError(fmt.Errorf("deletion of namespace %v failed: %v", key, err))
+utilruntime.HandleErrorWithContext(ctx, err, "Deletion of namespace failed", "namespace", key)
}
return false
}
@@ -188,7 +187,7 @@ func (nm *NamespaceController) syncNamespaceFromKey(ctx context.Context, key str
return nil
}
if err != nil {
-utilruntime.HandleError(fmt.Errorf("Unable to retrieve namespace %v from store: %v", key, err))
+utilruntime.HandleErrorWithContext(ctx, err, "Unable to retrieve namespace from store", "namespace", key)
return err
}
return nm.namespacedResourcesDeleter.Delete(ctx, namespace.Name)
@@ -196,7 +195,7 @@ func (nm *NamespaceController) syncNamespaceFromKey(ctx context.Context, key str
// Run starts observing the system with the specified number of workers.
func (nm *NamespaceController) Run(ctx context.Context, workers int) {
-defer utilruntime.HandleCrash()
+defer utilruntime.HandleCrashWithContext(ctx)
defer nm.queue.ShutDown()
logger := klog.FromContext(ctx)
logger.Info("Starting namespace controller")

View File

@@ -445,7 +445,7 @@ func NewNodeLifecycleController(
// Run starts an asynchronous loop that monitors the status of cluster nodes.
func (nc *Controller) Run(ctx context.Context) {
-defer utilruntime.HandleCrash()
+defer utilruntime.HandleCrashWithContext(ctx)
// Start events processing pipeline.
nc.broadcaster.StartStructuredLogging(3)
@@ -725,7 +725,7 @@ func (nc *Controller) monitorNodeHealth(ctx context.Context) error {
if currentReadyCondition != nil {
pods, err := nc.getPodsAssignedToNode(node.Name)
if err != nil {
-utilruntime.HandleError(fmt.Errorf("unable to list pods of node %v: %v", node.Name, err))
+utilruntime.HandleErrorWithContext(ctx, err, "Unable to list pods of node", "node", node.Name)
if currentReadyCondition.Status != v1.ConditionTrue && observedReadyCondition.Status == v1.ConditionTrue {
// If error happened during node status transition (Ready -> NotReady)
// we need to mark node for retry to force MarkPodsNotReady execution
@@ -744,7 +744,7 @@ func (nc *Controller) monitorNodeHealth(ctx context.Context) error {
fallthrough
case needsRetry && observedReadyCondition.Status != v1.ConditionTrue:
if err = controllerutil.MarkPodsNotReady(ctx, nc.kubeClient, nc.recorder, pods, node.Name); err != nil {
-utilruntime.HandleError(fmt.Errorf("unable to mark all pods NotReady on node %v: %v; queuing for retry", node.Name, err))
+utilruntime.HandleErrorWithContext(ctx, err, "Unable to mark all pods NotReady on node; queuing for retry", "node", node.Name)
nc.nodesToRetry.Store(node.Name, struct{}{})
return
}
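Both Run methods also swap HandleCrash for HandleCrashWithContext, so a recovered panic is logged through the logger carried in the context instead of the global one. A rough sketch, assuming utilruntime and klog/v2; run is a hypothetical worker, and the default crash handler re-raises the panic after logging:

```go
package main

import (
	"context"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/klog/v2"
)

func run(ctx context.Context) {
	// Logs any recovered panic via the logger stored in ctx; by default the
	// handler then re-panics, so the process still crashes.
	defer utilruntime.HandleCrashWithContext(ctx)

	klog.FromContext(ctx).Info("worker started")
	panic("simulated crash")
}

func main() {
	// Attach a logger to the context so the crash handler can find it.
	ctx := klog.NewContext(context.Background(), klog.Background())
	run(ctx)
}
```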