Merge pull request #91576 from tahsinrahman/migrate-klog

Migrate log calls to klog.InfoS and klog.ErrorS for pkg/controller
This commit is contained in:
Kubernetes Prow Robot 2020-06-09 14:48:47 -07:00 committed by GitHub
commit a1c351cd28
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
7 changed files with 27 additions and 18 deletions

View File

@ -601,7 +601,7 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
if err != nil { if err != nil {
return fmt.Errorf("object does not have ObjectMeta, %v", err) return fmt.Errorf("object does not have ObjectMeta, %v", err)
} }
klog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID) klog.V(2).InfoS("Deleting pod", "controller", accessor.GetName(), "pod", klog.KRef(namespace, podID))
if err := r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), podID, metav1.DeleteOptions{}); err != nil { if err := r.KubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), podID, metav1.DeleteOptions{}); err != nil {
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
klog.V(4).Infof("pod %v/%v has already been deleted.", namespace, podID) klog.V(4).Infof("pod %v/%v has already been deleted.", namespace, podID)

View File

@ -481,8 +481,13 @@ func (dc *DeploymentController) handleErr(err error, key interface{}) {
return return
} }
ns, name, keyErr := cache.SplitMetaNamespaceKey(key.(string))
if keyErr != nil {
klog.ErrorS(err, "Failed to split meta namespace cache key", "key", key)
}
if dc.queue.NumRequeues(key) < maxRetries { if dc.queue.NumRequeues(key) < maxRetries {
klog.V(2).Infof("Error syncing deployment %v: %v", key, err) klog.V(2).InfoS("Error syncing deployment", "deployment", klog.KRef(ns, name), "err", err)
dc.queue.AddRateLimited(key) dc.queue.AddRateLimited(key)
return return
} }

View File

@ -337,8 +337,13 @@ func (e *EndpointController) handleErr(err error, key interface{}) {
return return
} }
ns, name, keyErr := cache.SplitMetaNamespaceKey(key.(string))
if keyErr != nil {
klog.ErrorS(err, "Failed to split meta namespace cache key", "key", key)
}
if e.queue.NumRequeues(key) < maxRetries { if e.queue.NumRequeues(key) < maxRetries {
klog.V(2).Infof("Error syncing endpoints for service %q, retrying. Error: %v", key, err) klog.V(2).InfoS("Error syncing endpoints, retrying", "service", klog.KRef(ns, name), "err", err)
e.queue.AddRateLimited(key) e.queue.AddRateLimited(key)
return return
} }

View File

@ -406,7 +406,9 @@ func ownerRefsToUIDs(refs []metav1.OwnerReference) []types.UID {
} }
func (gc *GarbageCollector) attemptToDeleteItem(item *node) error { func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
klog.V(2).Infof("processing item %s", item.identity) klog.V(2).InfoS("Processing object", "object", klog.KRef(item.identity.Namespace, item.identity.Name),
"objectUID", item.identity.UID, "kind", item.identity.Kind)
// "being deleted" is an one-way trip to the final deletion. We'll just wait for the final deletion, and then process the object's dependents. // "being deleted" is an one-way trip to the final deletion. We'll just wait for the final deletion, and then process the object's dependents.
if item.isBeingDeleted() && !item.isDeletingDependents() { if item.isBeingDeleted() && !item.isDeletingDependents() {
klog.V(5).Infof("processing item %s returned at once, because its DeletionTimestamp is non-nil", item.identity) klog.V(5).Infof("processing item %s returned at once, because its DeletionTimestamp is non-nil", item.identity)
@ -519,7 +521,8 @@ func (gc *GarbageCollector) attemptToDeleteItem(item *node) error {
// otherwise, default to background. // otherwise, default to background.
policy = metav1.DeletePropagationBackground policy = metav1.DeletePropagationBackground
} }
klog.V(2).Infof("delete object %s with propagation policy %s", item.identity, policy) klog.V(2).InfoS("Deleting object", "object", klog.KRef(item.identity.Namespace, item.identity.Name),
"objectUID", item.identity.UID, "kind", item.identity.Kind, "propagationPolicy", policy)
return gc.deleteObject(item.identity, &policy) return gc.deleteObject(item.identity, &policy)
} }
} }

View File

@ -230,7 +230,7 @@ func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
return nil return nil
} }
if !ca.insertNodeToProcessing(node.Name) { if !ca.insertNodeToProcessing(node.Name) {
klog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name) klog.V(2).InfoS("Node is already in a process of CIDR assignment", "node", klog.KObj(node))
return nil return nil
} }

View File

@ -216,15 +216,11 @@ func (rsc *ReplicaSetController) getReplicaSetsWithSameController(rs *apps.Repli
} }
if klog.V(2).Enabled() { if klog.V(2).Enabled() {
var related string var relatedNames []string
if len(relatedRSs) > 0 { for _, r := range relatedRSs {
var relatedNames []string relatedNames = append(relatedNames, r.Name)
for _, r := range relatedRSs {
relatedNames = append(relatedNames, r.Name)
}
related = ": " + strings.Join(relatedNames, ", ")
} }
klog.Infof("Found %d related %vs for %v %s/%s%s", len(relatedRSs), rsc.Kind, rsc.Kind, rs.Namespace, rs.Name, related) klog.InfoS("Found related ReplicaSets", "replicaSet", klog.KObj(rs), "relatedReplicaSets", relatedNames)
} }
return relatedRSs return relatedRSs
@ -560,7 +556,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
// into a performance bottleneck. We should generate a UID for the pod // into a performance bottleneck. We should generate a UID for the pod
// beforehand and store it via ExpectCreations. // beforehand and store it via ExpectCreations.
rsc.expectations.ExpectCreations(rsKey, diff) rsc.expectations.ExpectCreations(rsKey, diff)
klog.V(2).Infof("Too few replicas for %v %s/%s, need %d, creating %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff) klog.V(2).InfoS("Too few replicas", "replicaSet", klog.KObj(rs), "need", *(rs.Spec.Replicas), "creating", diff)
// Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize // Batch the pod creates. Batch sizes start at SlowStartInitialBatchSize
// and double with each successful iteration in a kind of "slow start". // and double with each successful iteration in a kind of "slow start".
// This handles attempts to start large numbers of pods that would // This handles attempts to start large numbers of pods that would
@ -596,7 +592,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps
if diff > rsc.burstReplicas { if diff > rsc.burstReplicas {
diff = rsc.burstReplicas diff = rsc.burstReplicas
} }
klog.V(2).Infof("Too many replicas for %v %s/%s, need %d, deleting %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff) klog.V(2).InfoS("Too many replicas", "replicaSet", klog.KObj(rs), "need", *(rs.Spec.Replicas), "deleting", diff)
relatedPods, err := rsc.getIndirectlyRelatedPods(rs) relatedPods, err := rsc.getIndirectlyRelatedPods(rs)
utilruntime.HandleError(err) utilruntime.HandleError(err)

View File

@ -267,10 +267,10 @@ func (ttlc *TTLController) patchNodeWithAnnotation(node *v1.Node, annotationKey
} }
_, err = ttlc.kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) _, err = ttlc.kubeClient.CoreV1().Nodes().Patch(context.TODO(), node.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
if err != nil { if err != nil {
klog.V(2).Infof("Failed to change ttl annotation for node %s: %v", node.Name, err) klog.V(2).InfoS("Failed to change ttl annotation for node", "node", klog.KObj(node), "err", err)
return err return err
} }
klog.V(2).Infof("Changed ttl annotation for node %s to %d seconds", node.Name, value) klog.V(2).InfoS("Changed ttl annotation", "node", klog.KObj(node), "new_ttl", time.Duration(value)*time.Second)
return nil return nil
} }