Merge pull request #99292 from yangjunmyfm192085/run-test23
replace all occurrences of "node", nodeName with "node", klog.KRef("", nodeName)
Commit: f979b4094e
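The change swaps a bare string value for klog.KRef, so structured logging treats the node name as an object reference rather than plain text. Below is a minimal, self-contained sketch of the before/after call, assuming only the public k8s.io/klog/v2 API; the node name "worker-1" is invented for illustration and is not part of the commit.

package main

import (
	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()

	nodeName := "worker-1" // hypothetical node name, for illustration only

	// Before: the node name is passed as a plain string value.
	klog.InfoS("Update ready status of pods on node", "node", nodeName)

	// After: klog.KRef("", nodeName) wraps the name in an ObjectRef; the
	// namespace argument is empty because Node objects are cluster-scoped.
	klog.InfoS("Update ready status of pods on node", "node", klog.KRef("", nodeName))
}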
@@ -350,7 +350,7 @@ func (tc *NoExecuteTaintManager) processPodOnNode(
 	}
 	allTolerated, usedTolerations := v1helper.GetMatchingTolerations(taints, tolerations)
 	if !allTolerated {
-		klog.V(2).InfoS("Not all taints are tolerated after update for pod on node", "pod", podNamespacedName.String(), "node", nodeName)
+		klog.V(2).InfoS("Not all taints are tolerated after update for pod on node", "pod", podNamespacedName.String(), "node", klog.KRef("", nodeName))
 		// We're canceling scheduled work (if any), as we're going to delete the Pod right away.
 		tc.cancelWorkWithEvent(podNamespacedName)
 		tc.taintEvictionQueue.AddWork(ctx, NewWorkArgs(podNamespacedName.Name, podNamespacedName.Namespace), time.Now(), time.Now())
@@ -200,7 +200,7 @@ func (gcc *PodGCController) discoverDeletedNodes(ctx context.Context, existingNo
 		exists, err := gcc.checkIfNodeExists(ctx, nodeName)
 		switch {
 		case err != nil:
-			klog.ErrorS(err, "Error while getting node", "node", nodeName)
+			klog.ErrorS(err, "Error while getting node", "node", klog.KRef("", nodeName))
 			// Node will be added back to the queue in the subsequent loop if still needed
 		case !exists:
 			deletedNodesNames.Insert(nodeName)
@@ -117,7 +117,7 @@ func SetPodTerminationReason(ctx context.Context, kubeClient clientset.Interface
 // MarkPodsNotReady updates ready status of given pods running on
 // given node from master return true if success
 func MarkPodsNotReady(ctx context.Context, kubeClient clientset.Interface, recorder record.EventRecorder, pods []*v1.Pod, nodeName string) error {
-	klog.V(2).InfoS("Update ready status of pods on node", "node", nodeName)
+	klog.V(2).InfoS("Update ready status of pods on node", "node", klog.KRef("", nodeName))
 
 	errs := []error{}
 	for i := range pods {
@@ -165,7 +165,7 @@ func RecordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype
 		UID:       types.UID(nodeUID),
 		Namespace: "",
 	}
-	klog.V(2).InfoS("Recording event message for node", "event", event, "node", nodeName)
+	klog.V(2).InfoS("Recording event message for node", "event", event, "node", klog.KRef("", nodeName))
 	recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event)
 }
@@ -49,7 +49,7 @@ func (b DefaultBinder) Name() string {
 
 // Bind binds pods to nodes using the k8s client.
 func (b DefaultBinder) Bind(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) *framework.Status {
-	klog.V(3).InfoS("Attempting to bind pod to node", "pod", klog.KObj(p), "node", nodeName)
+	klog.V(3).InfoS("Attempting to bind pod to node", "pod", klog.KObj(p), "node", klog.KRef("", nodeName))
 	binding := &v1.Binding{
 		ObjectMeta: metav1.ObjectMeta{Namespace: p.Namespace, Name: p.Name, UID: p.UID},
 		Target:     v1.ObjectReference{Kind: "Node", Name: nodeName},
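Every hunk above passes an empty namespace to klog.KRef because Node objects are cluster-scoped. A short sketch (not from the commit) of how such a reference renders as text, assuming ObjectRef's Stringer behavior in k8s.io/klog/v2; the object names are made up:

package main

import (
	"fmt"

	"k8s.io/klog/v2"
)

func main() {
	// With an empty namespace the reference prints as just the name,
	// so the human-readable text output for nodes is unchanged.
	fmt.Println(klog.KRef("", "worker-1")) // worker-1

	// Namespaced objects print as namespace/name.
	fmt.Println(klog.KRef("kube-system", "coredns-x")) // kube-system/coredns-x
}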