Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #87743 from u2takey/master

log pod event when node not ready

Commit: 85ee5fdd90
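The change below threads the controller's event recorder (nc.recorder) into nodeutil.MarkPodsNotReady, so every pod whose Ready condition is forced to false also gets a Warning event attached to it. For background, here is a minimal sketch of how such a recorder is usually wired up with client-go's record package; the helper name newRecorder and the component string are illustrative assumptions, not part of this commit:

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newRecorder (hypothetical helper) shows the usual wiring behind a field like
// nc.recorder: a broadcaster ships recorded events to the events API, and the
// recorder stamps each event with the originating component.
func newRecorder(client kubernetes.Interface, component string) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: client.CoreV1().Events(""),
	})
	return broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: component})
}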
@@ -847,7 +847,7 @@ func (nc *Controller) monitorNodeHealth() error {
 				nodeutil.RecordNodeStatusChange(nc.recorder, node, "NodeNotReady")
 				fallthrough
 			case needsRetry && observedReadyCondition.Status != v1.ConditionTrue:
-				if err = nodeutil.MarkPodsNotReady(nc.kubeClient, pods, node.Name); err != nil {
+				if err = nodeutil.MarkPodsNotReady(nc.kubeClient, nc.recorder, pods, node.Name); err != nil {
 					utilruntime.HandleError(fmt.Errorf("unable to mark all pods NotReady on node %v: %v; queuing for retry", node.Name, err))
 					nc.nodesToRetry.Store(node.Name, struct{}{})
 					continue
@@ -1330,7 +1330,7 @@ func (nc *Controller) processPod(podItem podUpdateItem) {
 	}

 	if currentReadyCondition.Status != v1.ConditionTrue {
-		if err := nodeutil.MarkPodsNotReady(nc.kubeClient, pods, nodeName); err != nil {
+		if err := nodeutil.MarkPodsNotReady(nc.kubeClient, nc.recorder, pods, nodeName); err != nil {
 			klog.Warningf("Unable to mark pod %+v NotReady on node %v: %v.", podItem, nodeName, err)
 			nc.podUpdateQueue.AddRateLimited(podItem)
 		}
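With the recorder passed through at both call sites above, a pod on a not-ready node carries a Warning event in addition to its Ready=False condition. A minimal sketch of inspecting those events with the same clientset; the helper name, namespace, and pod name are assumptions for illustration:

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listNodeNotReadyEvents (hypothetical helper) lists events whose involved
// object is the given pod and whose reason is NodeNotReady.
func listNodeNotReadyEvents(client kubernetes.Interface, namespace, podName string) error {
	events, err := client.CoreV1().Events(namespace).List(context.TODO(), metav1.ListOptions{
		FieldSelector: fmt.Sprintf("involvedObject.name=%s,reason=NodeNotReady", podName),
	})
	if err != nil {
		return err
	}
	for _, e := range events.Items {
		fmt.Printf("%s %s %s\n", e.Type, e.Reason, e.Message)
	}
	return nil
}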
@@ -118,7 +118,7 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa

 // MarkPodsNotReady updates ready status of given pods running on
 // given node from master return true if success
-func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName string) error {
+func MarkPodsNotReady(kubeClient clientset.Interface, recorder record.EventRecorder, pods []*v1.Pod, nodeName string) error {
 	klog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)

 	errMsg := []string{}
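For reference, the new parameter is client-go's record.EventRecorder, and the only method this change relies on is Event. A paraphrase of the relevant part of the interface from k8s.io/client-go/tools/record (the real interface also has Eventf and AnnotatedEventf, and PastEventf in some releases):

import "k8s.io/apimachinery/pkg/runtime"

// Paraphrased sketch of record.EventRecorder, not the verbatim source.
type EventRecorder interface {
	// Event attaches an event of the given type (Normal or Warning), reason,
	// and human-readable message to the referenced object.
	Event(object runtime.Object, eventtype, reason, message string)
	// Eventf and AnnotatedEventf omitted.
}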
@@ -136,6 +136,7 @@ func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName s
 				if !utilpod.UpdatePodCondition(&pod.Status, &cond) {
 					break
 				}
+
 				klog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
 				_, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(context.TODO(), pod, metav1.UpdateOptions{})
 				if err != nil {
@@ -147,6 +148,8 @@ func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName s
 					klog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err)
 					errMsg = append(errMsg, fmt.Sprintf("%v", err))
 				}
+				// record NodeNotReady event after updateStatus to make sure pod still exists
+				recorder.Event(pod, v1.EventTypeWarning, "NodeNotReady", "Node is not ready")
 				break
 			}
 		}
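Finally, a minimal usage sketch of the new MarkPodsNotReady signature, not taken from the repository's tests: it uses the fake clientset and record.NewFakeRecorder, assumes illustrative pod and node names, and assumes MarkPodsNotReady is in scope (it lives in the node util package patched above).

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/record"
)

func exampleMarkPodsNotReady() error {
	// A Ready pod scheduled onto the node we are about to mark unhealthy.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "web-0", Namespace: "default"},
		Spec:       v1.PodSpec{NodeName: "node-1"},
		Status: v1.PodStatus{
			Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}},
		},
	}
	client := fake.NewSimpleClientset(pod)
	recorder := record.NewFakeRecorder(10) // buffers formatted event strings on recorder.Events

	// New call shape: the recorder rides along with the clientset.
	if err := MarkPodsNotReady(client, recorder, []*v1.Pod{pod}, "node-1"); err != nil {
		return err
	}

	// A "Warning NodeNotReady Node is not ready" string should now be waiting
	// on recorder.Events, and the pod's Ready condition is False.
	return nil
}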