Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-30 15:05:27 +00:00)

Merge pull request #7170 from simon3z/node-events

Node events recording fixes

Commit: 6b428ef1a8
@@ -424,7 +424,7 @@ func (nc *NodeController) recordNodeEvent(node *api.Node, event string) {
 	glog.V(2).Infof("Recording %s event message for node %s", event, node.Name)
 	// TODO: This requires a transaction, either both node status is updated
 	// and event is recorded or neither should happen, see issue #6055.
-	nc.recorder.Eventf(ref, event, "Node %s is now %s", node.Name, event)
+	nc.recorder.Eventf(ref, event, "Node %s status is now: %s", node.Name, event)
 }
 
 // For a given node checks its conditions and tries to update it. Returns grace period to which given node
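For context, the same reason string is passed both as the event reason and into the message format, so the recorded message now reads, for example, "Node worker-1 status is now: NodeNotReady". A minimal standalone sketch of that formatting (plain Go; fmt stands in for the event recorder, the node name is made up):

package main

import "fmt"

// recordNodeEvent mimics the controller helper: the event reason doubles as
// the status value shown in the recorded message.
func recordNodeEvent(nodeName, event string) {
	// Stand-in for nc.recorder.Eventf(ref, event, ...).
	msg := fmt.Sprintf("Node %s status is now: %s", nodeName, event)
	fmt.Printf("reason=%s message=%q\n", event, msg)
}

func main() {
	recordNodeEvent("worker-1", "NodeNotReady")
	// prints: reason=NodeNotReady message="Node worker-1 status is now: NodeNotReady"
}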
@@ -611,15 +611,9 @@ func (nc *NodeController) monitorNodeStatus() error {
 				}
 			}
 
-			// Report node events.
-			if readyCondition.Status == api.ConditionTrue && lastReadyCondition.Status != api.ConditionTrue {
-				nc.recordNodeEvent(node, "ready")
-			}
-			if readyCondition.Status == api.ConditionFalse && lastReadyCondition.Status != api.ConditionFalse {
-				nc.recordNodeEvent(node, "not_ready")
-			}
-			if readyCondition.Status == api.ConditionUnknown && lastReadyCondition.Status != api.ConditionUnknown {
-				nc.recordNodeEvent(node, "unknown")
+			// Report node event.
+			if readyCondition.Status != api.ConditionTrue && lastReadyCondition.Status == api.ConditionTrue {
+				nc.recordNodeEvent(node, "NodeNotReady")
 			}
 		}
 	}
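The controller previously emitted a separate event for every observed state ("ready", "not_ready", "unknown"); after this hunk it records a single "NodeNotReady" event only when the node drops out of the Ready=True state. A self-contained sketch of that transition check (the status type and the print-based recorder are simplified stand-ins for the api package and the event recorder):

package main

import "fmt"

type ConditionStatus string

const (
	ConditionTrue    ConditionStatus = "True"
	ConditionFalse   ConditionStatus = "False"
	ConditionUnknown ConditionStatus = "Unknown"
)

// reportNodeEvent applies the new rule: record NodeNotReady only when the
// node leaves Ready=True; other status changes produce no controller event.
func reportNodeEvent(node string, ready, lastReady ConditionStatus) {
	if ready != ConditionTrue && lastReady == ConditionTrue {
		fmt.Printf("event for %s: NodeNotReady\n", node)
	}
}

func main() {
	reportNodeEvent("worker-1", ConditionFalse, ConditionTrue)    // records NodeNotReady
	reportNodeEvent("worker-1", ConditionUnknown, ConditionTrue)  // records NodeNotReady
	reportNodeEvent("worker-1", ConditionFalse, ConditionUnknown) // no event
	reportNodeEvent("worker-1", ConditionTrue, ConditionFalse)    // no event (the kubelet records NodeReady, see below)
}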
@@ -1716,22 +1716,11 @@ func (kl *Kubelet) updateNodeStatus() error {
 	return fmt.Errorf("Update node status exceeds retry count")
 }
 
-func (kl *Kubelet) recordNodeOnlineEvent() {
+func (kl *Kubelet) recordNodeStatusEvent(event string) {
+	glog.V(2).Infof("Recording %s event message for node %s", event, kl.hostname)
 	// TODO: This requires a transaction, either both node status is updated
 	// and event is recorded or neither should happen, see issue #6055.
-	kl.recorder.Eventf(kl.nodeRef, "online", "Node %s is now online", kl.hostname)
-}
-
-func (kl *Kubelet) recordNodeSchedulableEvent() {
-	// TODO: This requires a transaction, either both node status is updated
-	// and event is recorded or neither should happen, see issue #6055.
-	kl.recorder.Eventf(kl.nodeRef, "schedulable", "Node %s is now schedulable", kl.hostname)
-}
-
-func (kl *Kubelet) recordNodeUnschedulableEvent() {
-	// TODO: This requires a transaction, either both node status is updated
-	// and event is recorded or neither should happen, see issue #6055.
-	kl.recorder.Eventf(kl.nodeRef, "unschedulable", "Node %s is now unschedulable", kl.hostname)
+	kl.recorder.Eventf(kl.nodeRef, event, "Node %s status is now: %s", kl.hostname, event)
 }
 
 // Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
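The three per-state helpers (recordNodeOnlineEvent, recordNodeSchedulableEvent, recordNodeUnschedulableEvent) collapse into one parameterized recordNodeStatusEvent(event string), where the status name is both the event reason and the value in the message. A minimal sketch of that shape outside the real Kubelet type (the hostname and the print/log recorder are placeholders, not the actual kubelet fields):

package main

import (
	"fmt"
	"log"
)

type kubelet struct {
	hostname string
}

// recordNodeStatusEvent is the consolidated helper: one function for all
// node status events instead of one helper per status.
func (kl *kubelet) recordNodeStatusEvent(event string) {
	log.Printf("Recording %s event message for node %s", event, kl.hostname)
	// Stand-in for kl.recorder.Eventf(kl.nodeRef, event, ...).
	fmt.Printf("Node %s status is now: %s\n", kl.hostname, event)
}

func main() {
	kl := &kubelet{hostname: "worker-1"}
	for _, e := range []string{"NodeReady", "NodeNotReady", "NodeSchedulable", "NodeNotSchedulable"} {
		kl.recordNodeStatusEvent(e)
	}
}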
@@ -1798,9 +1787,10 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
 	}()
 
 	currentTime := util.Now()
-	var newCondition api.NodeCondition
+	var newNodeReadyCondition api.NodeCondition
+	var oldNodeReadyConditionStatus api.ConditionStatus
 	if containerRuntimeUp && networkConfigured {
-		newCondition = api.NodeCondition{
+		newNodeReadyCondition = api.NodeCondition{
 			Type:   api.NodeReady,
 			Status: api.ConditionTrue,
 			Reason: "kubelet is posting ready status",
@@ -1814,7 +1804,7 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
 		if !networkConfigured {
 			reasons = append(reasons, "network not configured correctly")
 		}
-		newCondition = api.NodeCondition{
+		newNodeReadyCondition = api.NodeCondition{
 			Type:   api.NodeReady,
 			Status: api.ConditionFalse,
 			Reason: strings.Join(reasons, ","),
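These two hunks mostly rename newCondition to newNodeReadyCondition and declare oldNodeReadyConditionStatus so the previous status can be compared below. A standalone sketch of how the ready condition is built from the runtime/network checks (the struct is a trimmed stand-in for api.NodeCondition; the container-runtime reason text is a placeholder, since that branch falls outside the shown hunks):

package main

import (
	"fmt"
	"strings"
	"time"
)

type NodeCondition struct {
	Type               string
	Status             string
	Reason             string
	LastTransitionTime time.Time
}

// newReadyCondition mirrors the branch structure in tryUpdateNodeStatus:
// ConditionTrue when both checks pass, otherwise ConditionFalse with the
// joined failure reasons.
func newReadyCondition(containerRuntimeUp, networkConfigured bool) NodeCondition {
	if containerRuntimeUp && networkConfigured {
		return NodeCondition{Type: "Ready", Status: "True", Reason: "kubelet is posting ready status"}
	}
	var reasons []string
	if !containerRuntimeUp {
		reasons = append(reasons, "container runtime is down") // placeholder reason; not shown in the hunk
	}
	if !networkConfigured {
		reasons = append(reasons, "network not configured correctly")
	}
	return NodeCondition{Type: "Ready", Status: "False", Reason: strings.Join(reasons, ",")}
}

func main() {
	fmt.Printf("%+v\n", newReadyCondition(true, true))
	fmt.Printf("%+v\n", newReadyCondition(true, false))
}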
@@ -1825,25 +1815,32 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
 	updated := false
 	for i := range node.Status.Conditions {
 		if node.Status.Conditions[i].Type == api.NodeReady {
-			newCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
-			if node.Status.Conditions[i].Status != api.ConditionTrue {
-				kl.recordNodeOnlineEvent()
+			oldNodeReadyConditionStatus = node.Status.Conditions[i].Status
+			if oldNodeReadyConditionStatus == newNodeReadyCondition.Status {
+				newNodeReadyCondition.LastTransitionTime = node.Status.Conditions[i].LastTransitionTime
+			} else {
+				newNodeReadyCondition.LastTransitionTime = currentTime
 			}
-			node.Status.Conditions[i] = newCondition
+			node.Status.Conditions[i] = newNodeReadyCondition
 			updated = true
 		}
 	}
 	if !updated {
-		newCondition.LastTransitionTime = currentTime
-		node.Status.Conditions = append(node.Status.Conditions, newCondition)
-		kl.recordNodeOnlineEvent()
+		newNodeReadyCondition.LastTransitionTime = currentTime
+		node.Status.Conditions = append(node.Status.Conditions, newNodeReadyCondition)
+	}
+	if !updated || oldNodeReadyConditionStatus != newNodeReadyCondition.Status {
+		if newNodeReadyCondition.Status == api.ConditionTrue {
+			kl.recordNodeStatusEvent("NodeReady")
+		} else {
+			kl.recordNodeStatusEvent("NodeNotReady")
+		}
 	}
 
 	if oldNodeUnschedulable != node.Spec.Unschedulable {
 		if node.Spec.Unschedulable {
-			kl.recordNodeUnschedulableEvent()
+			kl.recordNodeStatusEvent("NodeNotSchedulable")
 		} else {
-			kl.recordNodeSchedulableEvent()
+			kl.recordNodeStatusEvent("NodeSchedulable")
 		}
 		oldNodeUnschedulable = node.Spec.Unschedulable
 	}
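The net effect of the last hunk: LastTransitionTime is carried over when the Ready status is unchanged and reset to the current time when it flips, and a NodeReady/NodeNotReady event is recorded only when the condition is newly added or its status changed. A self-contained sketch of that update rule (types and the boolean "record an event" result are simplified stand-ins, not the Kubelet's own code):

package main

import (
	"fmt"
	"time"
)

type NodeCondition struct {
	Type               string
	Status             string
	LastTransitionTime time.Time
}

// updateReadyCondition inserts or replaces the Ready condition, keeping the
// old transition time when the status is unchanged, and reports whether a
// NodeReady/NodeNotReady event should be recorded.
func updateReadyCondition(conditions []NodeCondition, newCond NodeCondition, now time.Time) ([]NodeCondition, bool) {
	updated := false
	var oldStatus string
	for i := range conditions {
		if conditions[i].Type == "Ready" {
			oldStatus = conditions[i].Status
			if oldStatus == newCond.Status {
				newCond.LastTransitionTime = conditions[i].LastTransitionTime
			} else {
				newCond.LastTransitionTime = now
			}
			conditions[i] = newCond
			updated = true
		}
	}
	if !updated {
		newCond.LastTransitionTime = now
		conditions = append(conditions, newCond)
	}
	recordEvent := !updated || oldStatus != newCond.Status
	return conditions, recordEvent
}

func main() {
	now := time.Now()
	conds, record := updateReadyCondition(nil, NodeCondition{Type: "Ready", Status: "True"}, now)
	fmt.Println(record) // true: condition was just added
	_, record = updateReadyCondition(conds, NodeCondition{Type: "Ready", Status: "True"}, now.Add(time.Minute))
	fmt.Println(record) // false: status unchanged, transition time kept
	_, record = updateReadyCondition(conds, NodeCondition{Type: "Ready", Status: "False"}, now.Add(2*time.Minute))
	fmt.Println(record) // true: Ready flipped to False
}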