Structured logging migration: migrate controller-manager logs to structured logging

Signed-off-by: wangyx1992 <wang.yixiang@zte.com.cn>
This commit is contained in:
wangyx1992 2021-03-30 21:29:52 +08:00
parent 6572fe4d90
commit 7175d82a27
6 changed files with 33 additions and 33 deletions

View File

@@ -98,14 +98,14 @@ func startNodeIpamController(ccmConfig *cloudcontrollerconfig.CompletedConfig, n
if len(strings.TrimSpace(nodeIPAMConfig.ServiceCIDR)) != 0 {
_, serviceCIDR, err = net.ParseCIDR(nodeIPAMConfig.ServiceCIDR)
if err != nil {
klog.Warningf("Unsuccessful parsing of service CIDR %v: %v", nodeIPAMConfig.ServiceCIDR, err)
klog.ErrorS(err, "Unsuccessful parsing of service CIDR", "CIDR", nodeIPAMConfig.ServiceCIDR)
}
}
if len(strings.TrimSpace(nodeIPAMConfig.SecondaryServiceCIDR)) != 0 {
_, secondaryServiceCIDR, err = net.ParseCIDR(nodeIPAMConfig.SecondaryServiceCIDR)
if err != nil {
klog.Warningf("Unsuccessful parsing of service CIDR %v: %v", nodeIPAMConfig.SecondaryServiceCIDR, err)
klog.ErrorS(err, "Unsuccessful parsing of service CIDR", "CIDR", nodeIPAMConfig.SecondaryServiceCIDR)
}
}

View File

@@ -88,7 +88,7 @@ func (t *TopologyCache) AddHints(si *SliceInfo) ([]*discovery.EndpointSlice, []*
allocations := t.getAllocations(totalEndpoints)
if allocations == nil {
klog.V(2).Infof("Insufficient endpoints, removing hints from %s Service", si.ServiceKey)
klog.V(2).InfoS("Insufficient endpoints, removing hints from service", "serviceKey", si.ServiceKey)
t.RemoveHints(si.ServiceKey, si.AddressType)
return RemoveHintsFromSlices(si)
}
@@ -104,7 +104,7 @@ func (t *TopologyCache) AddHints(si *SliceInfo) ([]*discovery.EndpointSlice, []*
for _, slice := range allocatableSlices {
for i, endpoint := range slice.Endpoints {
if endpoint.Zone == nil || *endpoint.Zone == "" {
klog.Warningf("Endpoint found without zone specified, removing hints from %s Service", si.ServiceKey)
klog.InfoS("Endpoint found without zone specified, removing hints from service", "serviceKey", si.ServiceKey)
t.RemoveHints(si.ServiceKey, si.AddressType)
return RemoveHintsFromSlices(si)
}

View File

@@ -111,7 +111,7 @@ func NewController(
// registers the informers for node changes. This will start synchronization
// of the node and cloud CIDR range allocations.
func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
klog.V(0).Infof("Starting IPAM controller (config=%+v)", c.config)
klog.V(0).InfoS("Starting IPAM controller", "config", c.config)
nodes, err := listNodes(c.adapter.k8s)
if err != nil {
@@ -122,9 +122,9 @@ func (c *Controller) Start(nodeInformer informers.NodeInformer) error {
_, cidrRange, err := net.ParseCIDR(node.Spec.PodCIDR)
if err == nil {
c.set.Occupy(cidrRange)
klog.V(3).Infof("Occupying CIDR for node %q (%v)", node.Name, node.Spec.PodCIDR)
klog.V(3).InfoS("Occupying CIDR for node", "CIDR", node.Spec.PodCIDR, "node", node.Name)
} else {
klog.Errorf("Node %q has an invalid CIDR (%q): %v", node.Name, node.Spec.PodCIDR, err)
klog.ErrorS(err, "Node has an invalid CIDR", "node", node.Name, "CIDR", node.Spec.PodCIDR)
}
}
@@ -192,7 +192,7 @@ func (c *Controller) onAdd(node *v1.Node) error {
c.syncers[node.Name] = syncer
go syncer.Loop(nil)
} else {
klog.Warningf("Add for node %q that already exists", node.Name)
klog.InfoS("Add for node that already exists", "node", node.Name)
}
syncer.Update(node)
@@ -206,7 +206,7 @@ func (c *Controller) onUpdate(_, node *v1.Node) error {
if sync, ok := c.syncers[node.Name]; ok {
sync.Update(node)
} else {
klog.Errorf("Received update for non-existent node %q", node.Name)
klog.ErrorS(nil, "Received update for non-existent node", "node", node.Name)
return fmt.Errorf("unknown node %q", node.Name)
}
@@ -221,7 +221,7 @@ func (c *Controller) onDelete(node *v1.Node) error {
syncer.Delete(node)
delete(c.syncers, node.Name)
} else {
klog.Warningf("Node %q was already deleted", node.Name)
klog.InfoS("Node was already deleted", "node", node.Name)
}
return nil

View File

@@ -75,7 +75,7 @@ func NewPodGC(kubeClient clientset.Interface, podInformer coreinformers.PodInfor
nodeListerSynced: nodeInformer.Informer().HasSynced,
nodeQueue: workqueue.NewNamedDelayingQueue("orphaned_pods_nodes"),
deletePod: func(namespace, name string) error {
klog.Infof("PodGC is force deleting Pod: %v/%v", namespace, name)
klog.InfoS("PodGC is force deleting Pod", "pod", klog.KRef(namespace, name))
return kubeClient.CoreV1().Pods(namespace).Delete(context.TODO(), name, *metav1.NewDeleteOptions(0))
},
}
@@ -139,7 +139,7 @@ func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) {
return
}
klog.Infof("garbage collecting %v pods", deleteCount)
klog.InfoS("Garbage collecting pods", "numPods", deleteCount)
// sort only when necessary
sort.Sort(byCreationTimestamp(terminatedPods))
var wait sync.WaitGroup
@@ -179,11 +179,11 @@ func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod, nodes []*v1.Node) {
if !deletedNodesNames.Has(pod.Spec.NodeName) {
continue
}
klog.V(2).Infof("Found orphaned Pod %v/%v assigned to the Node %v. Deleting.", pod.Namespace, pod.Name, pod.Spec.NodeName)
klog.V(2).InfoS("Found orphaned Pod assigned to the Node, deleting.", "pod", klog.KObj(pod), "node", pod.Spec.NodeName)
if err := gcc.deletePod(pod.Namespace, pod.Name); err != nil {
utilruntime.HandleError(err)
} else {
klog.V(0).Infof("Forced deletion of orphaned Pod %v/%v succeeded", pod.Namespace, pod.Name)
klog.V(0).InfoS("Forced deletion of orphaned Pod succeeded", "pod", klog.KObj(pod))
}
}
}
@@ -200,7 +200,7 @@ func (gcc *PodGCController) discoverDeletedNodes(existingNodeNames sets.String)
exists, err := gcc.checkIfNodeExists(nodeName)
switch {
case err != nil:
klog.Errorf("Error while getting node %q: %v", nodeName, err)
klog.ErrorS(err, "Error while getting node", "node", nodeName)
// Node will be added back to the queue in the subsequent loop if still needed
case !exists:
deletedNodesNames.Insert(nodeName)
@@ -228,11 +228,11 @@ func (gcc *PodGCController) gcUnscheduledTerminating(pods []*v1.Pod) {
continue
}
klog.V(2).Infof("Found unscheduled terminating Pod %v/%v not assigned to any Node. Deleting.", pod.Namespace, pod.Name)
klog.V(2).InfoS("Found unscheduled terminating Pod not assigned to any Node, deleting.", "pod", klog.KObj(pod))
if err := gcc.deletePod(pod.Namespace, pod.Name); err != nil {
utilruntime.HandleError(err)
} else {
klog.V(0).Infof("Forced deletion of unscheduled terminating Pod %v/%v succeeded", pod.Namespace, pod.Name)
klog.V(0).InfoS("Forced deletion of unscheduled terminating Pod succeeded", "pod", klog.KObj(pod))
}
}
}

View File

@@ -78,7 +78,7 @@ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record.
continue
}
klog.V(2).Infof("Starting deletion of pod %v/%v", pod.Namespace, pod.Name)
klog.V(2).InfoS("Starting deletion of pod", "pod", klog.KObj(pod))
recorder.Eventf(pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
if err := kubeClient.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{}); err != nil {
if apierrors.IsNotFound(err) {
@@ -119,7 +119,7 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa
// MarkPodsNotReady updates ready status of given pods running on
// given node from master return true if success
func MarkPodsNotReady(kubeClient clientset.Interface, recorder record.EventRecorder, pods []*v1.Pod, nodeName string) error {
klog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
klog.V(2).InfoS("Update ready status of pods on node", "node", nodeName)
errMsg := []string{}
for i := range pods {
@@ -137,7 +137,7 @@ func MarkPodsNotReady(kubeClient clientset.Interface, recorder record.EventRecor
break
}
klog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
klog.V(2).InfoS("Updating ready status of pod to false", "pod", pod.Name)
_, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
@@ -145,7 +145,7 @@ func MarkPodsNotReady(kubeClient clientset.Interface, recorder record.EventRecor
// There is nothing left to do with this pod.
continue
}
klog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err)
klog.InfoS("Failed to update status for pod", "pod", klog.KObj(pod), "err", err)
errMsg = append(errMsg, fmt.Sprintf("%v", err))
}
// record NodeNotReady event after updateStatus to make sure pod still exists
@@ -169,7 +169,7 @@ func RecordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype
UID: types.UID(nodeUID),
Namespace: "",
}
klog.V(2).Infof("Recording %s event message for node %s", event, nodeName)
klog.V(2).InfoS("Recording event message for node", "event", event, "node", nodeName)
recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event)
}
@@ -182,7 +182,7 @@ func RecordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, newSta
UID: node.UID,
Namespace: "",
}
klog.V(2).Infof("Recording status change %s event message for node %s", newStatus, node.Name)
klog.V(2).InfoS("Recording status change event message for node", "status", newStatus, "node", node.Name)
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
recorder.Eventf(ref, v1.EventTypeNormal, newStatus, "Node %s status is now: %s", node.Name, newStatus)
@@ -206,7 +206,7 @@ func SwapNodeControllerTaint(kubeClient clientset.Interface, taintsToAdd, taints
err))
return false
}
klog.V(4).Infof("Added %+v Taint to Node %v", taintsToAdd, node.Name)
klog.V(4).InfoS("Added taint to node", "taint", taintsToAdd, "node", node.Name)
err = controller.RemoveTaintOffNode(kubeClient, node.Name, node, taintsToRemove...)
if err != nil {
@@ -218,7 +218,7 @@ func SwapNodeControllerTaint(kubeClient clientset.Interface, taintsToAdd, taints
err))
return false
}
klog.V(4).Infof("Made sure that Node %+v has no %v Taint", node.Name, taintsToRemove)
klog.V(4).InfoS("Made sure that node has no taint", "node", node.Name, "taint", taintsToRemove)
return true
}
@@ -236,7 +236,7 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, labelsToUpdate map[
err))
return false
}
klog.V(4).Infof("Updated labels %+v to Node %v", labelsToUpdate, node.Name)
klog.V(4).InfoS("Updated labels to node", "label", labelsToUpdate, "node", node.Name)
return true
}
@@ -271,12 +271,12 @@ func CreateDeleteNodeHandler(f func(node *v1.Node) error) func(obj interface{})
if !isNode {
deletedState, ok := originalObj.(cache.DeletedFinalStateUnknown)
if !ok {
klog.Errorf("Received unexpected object: %v", originalObj)
klog.ErrorS(nil, "Received unexpected object", "object", originalObj)
return
}
originalNode, ok = deletedState.Obj.(*v1.Node)
if !ok {
klog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
klog.ErrorS(nil, "DeletedFinalStateUnknown contained non-Node object", "object", deletedState.Obj)
return
}
}

View File

@@ -93,7 +93,7 @@ func (c *Controller) Run(stopCh <-chan struct{}) {
func (c *Controller) gc() {
leases, err := c.leaseLister.Leases(c.leaseNamespace).List(labels.Everything())
if err != nil {
klog.Errorf("Error while listing apiserver leases: %v", err)
klog.ErrorS(err, "Error while listing apiserver leases")
return
}
for _, lease := range leases {
@@ -104,14 +104,14 @@
// double check latest lease from apiserver before deleting
lease, err := c.kubeclientset.CoordinationV1().Leases(c.leaseNamespace).Get(context.TODO(), lease.Name, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
klog.Errorf("Error getting lease: %v", err)
klog.ErrorS(err, "Error getting lease")
continue
}
if errors.IsNotFound(err) || lease == nil {
// In an HA cluster, this can happen if the lease was deleted
// by the same GC controller in another apiserver, which is legit.
// We don't expect other components to delete the lease.
klog.V(4).Infof("cannot find apiserver lease: %v", err)
klog.V(4).InfoS("Cannot find apiserver lease", "err", err)
continue
}
// evaluate lease from apiserver
@@ -124,9 +124,9 @@
// In an HA cluster, this can happen if the lease was deleted
// by the same GC controller in another apiserver, which is legit.
// We don't expect other components to delete the lease.
klog.V(4).Infof("apiserver lease is gone already: %v", err)
klog.V(4).InfoS("Apiserver lease is gone already", "err", err)
} else {
klog.Errorf("Error deleting lease: %v", err)
klog.ErrorS(err, "Error deleting lease")
}
}
}