From 78318c7a26928bdabe2491cf022cf9e61be32188 Mon Sep 17 00:00:00 2001
From: tahsinrahman
Date: Fri, 29 May 2020 20:27:22 +0800
Subject: [PATCH] Migrate log calls to klog.InfoS and klog.ErrorS for pkg/controller

---
 pkg/controller/controller_utils.go                |  2 +-
 .../deployment/deployment_controller.go           |  7 ++++++-
 pkg/controller/endpoint/endpoints_controller.go   |  7 ++++++-
 .../garbagecollector/garbagecollector.go          |  7 +++++--
 .../nodeipam/ipam/cloud_cidr_allocator.go         |  2 +-
 pkg/controller/replicaset/replica_set.go          | 16 ++++++----------
 pkg/controller/ttl/ttl_controller.go              |  4 ++--
 7 files changed, 27 insertions(+), 18 deletions(-)

diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go
index e2efe3bb2b5..e5c66bfacd3 100644
--- a/pkg/controller/controller_utils.go
+++ b/pkg/controller/controller_utils.go
@@ -601,7 +601,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
 	if err != nil {
 		return fmt.Errorf("object does not have ObjectMeta, %v", err)
 	}
-	klog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID)
+	klog.V(2).InfoS("Deleting pod", "controller", accessor.GetName(), "pod", klog.KRef(namespace, podID))
 	if err := r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), podID, metav1.DeleteOptions{}); err != nil {
 		if apierrors.IsNotFound(err) {
 			klog.V(4).Infof("pod %v/%v has already been deleted.", namespace, podID)
diff --git a/pkg/controller/deployment/deployment_controller.go b/pkg/controller/deployment/deployment_controller.go
index 5e3d808a242..f942635e7a3 100644
--- a/pkg/controller/deployment/deployment_controller.go
+++ b/pkg/controller/deployment/deployment_controller.go
@@ -481,8 +481,13 @@ func (dc *DeploymentController) handleErr(err error, key interface{}) {
 		return
 	}
 
+	ns, name, keyErr := cache.SplitMetaNamespaceKey(key.(string))
+	if keyErr != nil {
+		klog.ErrorS(err, "Failed to split meta namespace cache key", "key", key)
+	}
+
 	if dc.queue.NumRequeues(key) < maxRetries {
-		klog.V(2).Infof("Error syncing deployment %v: %v", key, err)
+		klog.V(2).InfoS("Error syncing deployment", "deployment", klog.KRef(ns, name), "err", err)
 		dc.queue.AddRateLimited(key)
 		return
 	}
diff --git a/pkg/controller/endpoint/endpoints_controller.go b/pkg/controller/endpoint/endpoints_controller.go
index 6762ff5acaa..d60a40a3712 100644
--- a/pkg/controller/endpoint/endpoints_controller.go
+++ b/pkg/controller/endpoint/endpoints_controller.go
@@ -337,8 +337,13 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
 		return
 	}
 
+	ns, name, keyErr := cache.SplitMetaNamespaceKey(key.(string))
+	if keyErr != nil {
+		klog.ErrorS(err, "Failed to split meta namespace cache key", "key", key)
+	}
+
 	if e.queue.NumRequeues(key) < maxRetries {
-		klog.V(2).Infof("Error syncing endpoints for service %q, retrying. Error: %v", key, err)
+		klog.V(2).InfoS("Error syncing endpoints, retrying", "service", klog.KRef(ns, name), "err", err)
 		e.queue.AddRateLimited(key)
 		return
 	}
diff --git a/pkg/controller/garbagecollector/garbagecollector.go b/pkg/controller/garbagecollector/garbagecollector.go
index f75fe6f4c87..606dae39c4c 100644
--- a/pkg/controller/garbagecollector/garbagecollector.go
+++ b/pkg/controller/garbagecollector/garbagecollector.go
@@ -406,7 +406,9 @@ func ownerRefsToUIDs(refs []metav1.OwnerReference) []types.UID {
 }
 
 func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
-	klog.V(2).Infof("processing item %s", item.identity)
+	klog.V(2).InfoS("Processing object", "object", klog.KRef(item.identity.Namespace, item.identity.Name),
+		"objectUID", item.identity.UID, "kind", item.identity.Kind)
+
 	// "being deleted" is an one-way trip to the final deletion. We'll just wait for the final deletion, and then process the object's dependents.
 	if item.isBeingDeleted() && !item.isDeletingDependents() {
 		klog.V(5).Infof("processing item %s returned at once, because its DeletionTimestamp is non-nil", item.identity)
@@ -519,7 +521,8 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
 			// otherwise, default to background.
 			policy = metav1.DeletePropagationBackground
 		}
-		klog.V(2).Infof("delete object %s with propagation policy %s", item.identity, policy)
+		klog.V(2).InfoS("Deleting object", "object", klog.KRef(item.identity.Namespace, item.identity.Name),
+			"objectUID", item.identity.UID, "kind", item.identity.Kind, "propagationPolicy", policy)
 		return gc.deleteObject(item.identity, &policy)
 	}
 }
diff --git a/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go
index 7a34e7d4193..cc6b08d9bd5 100644
--- a/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go
+++ b/pkg/controller/nodeipam/ipam/cloud_cidr_allocator.go
@@ -230,7 +230,7 @@ func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
 		return nil
 	}
 	if !ca.insertNodeToProcessing(node.Name) {
-		klog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name)
+		klog.V(2).InfoS("Node is already in a process of CIDR assignment", "node", klog.KObj(node))
 		return nil
 	}
 
diff --git a/pkg/controller/replicaset/replica_set.go b/pkg/controller/replicaset/replica_set.go
index 5d4ba3b399f..3ffa6cd6be0 100644
--- a/pkg/controller/replicaset/replica_set.go
+++ b/pkg/controller/replicaset/replica_set.go
@@ -216,15 +216,11 @@ func (rsc *ReplicaSetController) getReplicaSetsWithSameController(rs *apps.Repli
 	}
 
 	if klog.V(2).Enabled() {
-		var related string
-		if len(relatedRSs) > 0 {
-			var relatedNames []string
-			for _, r := range relatedRSs {
-				relatedNames = append(relatedNames, r.Name)
-			}
-			related = ": " + strings.Join(relatedNames, ", ")
+		var relatedNames []string
+		for _, r := range relatedRSs {
+			relatedNames = append(relatedNames, r.Name)
 		}
-		klog.Infof("Found %d related %vs for %v %s/%s%s", len(relatedRSs), rsc.Kind, rsc.Kind, rs.Namespace, rs.Name, related)
+		klog.InfoS("Found related ReplicaSets", "replicaSet", klog.KObj(rs), "relatedReplicaSets", relatedNames)
 	}
 
 	return relatedRSs
@@ -560,7 +556,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
 		// into a performance bottleneck. We should generate a UID for the pod
 		// beforehand and store it via ExpectCreations.
 		rsc.expectations.ExpectCreations(rsKey, diff)
-		klog.V(2).Infof("Too few replicas for %v %s/%s, need %d, creating %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
+		klog.V(2).InfoS("Too few replicas", "replicaSet", klog.KObj(rs), "need", *(rs.Spec.Replicas), "creating", diff)
 		// Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize
 		// and double with each successful iteration in a kind of "slow start".
 		// This handles attempts to start large numbers of pods that would
@@ -596,7 +592,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
 		if diff > rsc.burstReplicas {
 			diff = rsc.burstReplicas
 		}
-		klog.V(2).Infof("Too many replicas for %v %s/%s, need %d, deleting %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
+		klog.V(2).InfoS("Too many replicas", "replicaSet", klog.KObj(rs), "need", *(rs.Spec.Replicas), "deleting", diff)
 
 		relatedPods, err := rsc.getIndirectlyRelatedPods(rs)
 		utilruntime.HandleError(err)
diff --git a/pkg/controller/ttl/ttl_controller.go b/pkg/controller/ttl/ttl_controller.go
index 8c89dad6963..5d77536cec9 100644
--- a/pkg/controller/ttl/ttl_controller.go
+++ b/pkg/controller/ttl/ttl_controller.go
@@ -267,10 +267,10 @@ func (ttlc *TTLController) patchNodeWithAnnotation(node *v1.Node, annotationKey
 	}
 	_, err = ttlc.kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
 	if err != nil {
-		klog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, err)
+		klog.V(2).InfoS("Failed to change ttl annotation for node", "node", klog.KObj(node), "err", err)
 		return err
 	}
-	klog.V(2).Infof("Changed ttl annotation for node %s to %d seconds", node.Name, value)
+	klog.V(2).InfoS("Changed ttl annotation", "node", klog.KObj(node), "new_ttl", time.Duration(value)*time.Second)
 	return nil
 }
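
Note (illustration only, not part of the patch): a minimal, self-contained sketch of the klog/v2 structured-logging style this migration adopts. It assumes k8s.io/klog/v2 and k8s.io/apimachinery are available on the module path; the object, messages, and error below are made up for the example and are not taken from the controllers touched above.

package main

import (
	"errors"
	"flag"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	// Register klog flags and raise verbosity so the V(2) lines below are emitted.
	klog.InitFlags(nil)
	_ = flag.Set("v", "2")
	flag.Parse()
	defer klog.Flush()

	errExample := errors.New("example failure")
	// *metav1.ObjectMeta satisfies the KMetadata interface that klog.KObj expects.
	node := &metav1.ObjectMeta{Name: "node-1"}

	// Before: printf-style message with positional arguments.
	klog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, errExample)

	// After: a constant message plus structured key/value pairs.
	// klog.KObj derives namespace/name from an object; klog.KRef builds the same
	// reference from plain strings, as the migrated call sites do.
	klog.V(2).InfoS("Failed to change ttl annotation for node", "node", klog.KObj(node), "err", errExample)
	klog.V(2).InfoS("Error syncing deployment", "deployment", klog.KRef("default", "nginx"), "err", errExample)
	klog.ErrorS(errExample, "Failed to split meta namespace cache key", "key", "default/nginx")
}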