mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-08-04 01:40:07 +00:00
nodecontroller: improve node status event recording
This patch replaces the misleading reason "unknown" in the event recording. For symmetry with kubelet's "online" message, the conditions Unknown and False are reported as "offline". Signed-off-by: Federico Simoncelli <fsimonce@redhat.com>
This commit is contained in:
parent
21c57a5633
commit
2f503c57a5
@ -424,7 +424,7 @@ func (nc *NodeController) recordNodeEvent(node *api.Node, event string) {
|
|||||||
glog.V(2).Infof("Recording %s event message for node %s", event, node.Name)
|
glog.V(2).Infof("Recording %s event message for node %s", event, node.Name)
|
||||||
// TODO: This requires a transaction, either both node status is updated
|
// TODO: This requires a transaction, either both node status is updated
|
||||||
// and event is recorded or neither should happen, see issue #6055.
|
// and event is recorded or neither should happen, see issue #6055.
|
||||||
nc.recorder.Eventf(ref, event, "Node %s is now %s", node.Name, event)
|
nc.recorder.Eventf(ref, event, "Node %s status is now: %s", node.Name, event)
|
||||||
}
|
}
|
||||||
|
|
||||||
// For a given node checks its conditions and tries to update it. Returns grace period to which given node
|
// For a given node checks its conditions and tries to update it. Returns grace period to which given node
|
||||||
@ -611,12 +611,9 @@ func (nc *NodeController) monitorNodeStatus() error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Report node events.
|
// Report node event.
|
||||||
if readyCondition.Status == api.ConditionFalse && lastReadyCondition.Status != api.ConditionFalse {
|
if readyCondition.Status != api.ConditionTrue && lastReadyCondition.Status == api.ConditionTrue {
|
||||||
nc.recordNodeEvent(node, "not_ready")
|
nc.recordNodeEvent(node, "NodeNotReady")
|
||||||
}
|
|
||||||
if readyCondition.Status == api.ConditionUnknown && lastReadyCondition.Status != api.ConditionUnknown {
|
|
||||||
nc.recordNodeEvent(node, "unknown")
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1717,22 +1717,11 @@ func (kl *Kubelet) updateNodeStatus() error {
|
|||||||
return fmt.Errorf("Update node status exceeds retry count")
|
return fmt.Errorf("Update node status exceeds retry count")
|
||||||
}
|
}
|
||||||
|
|
||||||
func (kl *Kubelet) recordNodeOnlineEvent() {
|
func (kl *Kubelet) recordNodeStatusEvent(event string) {
|
||||||
|
glog.V(2).Infof("Recording %s event message for node %s", event, kl.hostname)
|
||||||
// TODO: This requires a transaction, either both node status is updated
|
// TODO: This requires a transaction, either both node status is updated
|
||||||
// and event is recorded or neither should happen, see issue #6055.
|
// and event is recorded or neither should happen, see issue #6055.
|
||||||
kl.recorder.Eventf(kl.nodeRef, "online", "Node %s is now online", kl.hostname)
|
kl.recorder.Eventf(kl.nodeRef, event, "Node %s status is now: %s", kl.hostname, event)
|
||||||
}
|
|
||||||
|
|
||||||
func (kl *Kubelet) recordNodeSchedulableEvent() {
|
|
||||||
// TODO: This requires a transaction, either both node status is updated
|
|
||||||
// and event is recorded or neither should happen, see issue #6055.
|
|
||||||
kl.recorder.Eventf(kl.nodeRef, "schedulable", "Node %s is now schedulable", kl.hostname)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (kl *Kubelet) recordNodeUnschedulableEvent() {
|
|
||||||
// TODO: This requires a transaction, either both node status is updated
|
|
||||||
// and event is recorded or neither should happen, see issue #6055.
|
|
||||||
kl.recorder.Eventf(kl.nodeRef, "unschedulable", "Node %s is now unschedulable", kl.hostname)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
|
// Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
|
||||||
@ -1828,7 +1817,7 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
|
|||||||
if node.Status.Conditions[i].Type == api.NodeReady {
|
if node.Status.Conditions[i].Type == api.NodeReady {
|
||||||
newCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
|
newCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
|
||||||
if node.Status.Conditions[i].Status != api.ConditionTrue {
|
if node.Status.Conditions[i].Status != api.ConditionTrue {
|
||||||
kl.recordNodeOnlineEvent()
|
kl.recordNodeStatusEvent("NodeReady")
|
||||||
}
|
}
|
||||||
node.Status.Conditions[i] = newCondition
|
node.Status.Conditions[i] = newCondition
|
||||||
updated = true
|
updated = true
|
||||||
@ -1837,14 +1826,14 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
|
|||||||
if !updated {
|
if !updated {
|
||||||
newCondition.LastTransitionTime = currentTime
|
newCondition.LastTransitionTime = currentTime
|
||||||
node.Status.Conditions = append(node.Status.Conditions, newCondition)
|
node.Status.Conditions = append(node.Status.Conditions, newCondition)
|
||||||
kl.recordNodeOnlineEvent()
|
kl.recordNodeStatusEvent("NodeReady")
|
||||||
}
|
}
|
||||||
|
|
||||||
if oldNodeUnschedulable != node.Spec.Unschedulable {
|
if oldNodeUnschedulable != node.Spec.Unschedulable {
|
||||||
if node.Spec.Unschedulable {
|
if node.Spec.Unschedulable {
|
||||||
kl.recordNodeUnschedulableEvent()
|
kl.recordNodeStatusEvent("NodeNotSchedulable")
|
||||||
} else {
|
} else {
|
||||||
kl.recordNodeSchedulableEvent()
|
kl.recordNodeStatusEvent("NodeSchedulable")
|
||||||
}
|
}
|
||||||
oldNodeUnschedulable = node.Spec.Unschedulable
|
oldNodeUnschedulable = node.Spec.Unschedulable
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user