Move pkg/api/v1/node to pkg/util/node (#73656)
* merge pkg/api/v1/node with pkg/util/node
* update test case for utilnode
* remove package pkg/api/v1/node
* move isNodeReady to internal func
* Split GetNodeCondition into e2e and controller pkg
* fix import errors
Committed by: Kubernetes Prow Robot
Parent: e9a8d27bd4
Commit: d648ba856b
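For reference, the behavior of the relocated helper can be read off its call sites in the diff below: it returns the index of the matching condition and a pointer to it, or -1 and nil when absent. A minimal sketch of GetNodeCondition as it exists in the controller util package after this commit (reconstructed from the call sites; the upstream source is authoritative):

// Sketch of the relocated helper, reconstructed from the call sites below.
// After this commit the controller-facing copy lives in
// k8s.io/kubernetes/pkg/controller/util/node.
package node

import v1 "k8s.io/api/core/v1"

// GetNodeCondition extracts the provided condition from the given status.
// It returns the index of the condition and a pointer to it, or -1 and nil
// if the condition is not present.
func GetNodeCondition(status *v1.NodeStatus, conditionType v1.NodeConditionType) (int, *v1.NodeCondition) {
	if status == nil {
		return -1, nil
	}
	for i := range status.Conditions {
		if status.Conditions[i].Type == conditionType {
			return i, &status.Conditions[i]
		}
	}
	return -1, nil
}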
pkg/controller/nodelifecycle/node_lifecycle_controller.go

@@ -52,7 +52,6 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/flowcontrol"
 	"k8s.io/client-go/util/workqueue"
-	v1node "k8s.io/kubernetes/pkg/api/v1/node"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/nodelifecycle/scheduler"
 	nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
@@ -525,7 +524,7 @@ func (nc *Controller) doNoExecuteTaintingPass() {
 				// retry in 50 millisecond
 				return false, 50 * time.Millisecond
 			}
-			_, condition := v1node.GetNodeCondition(&node.Status, v1.NodeReady)
+			_, condition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
 			// Because we want to mimic NodeStatus.Condition["Ready"] we make "unreachable" and "not ready" taints mutually exclusive.
 			taintToAdd := v1.Taint{}
 			oppositeTaint := v1.Taint{}
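The "mutually exclusive" comment refers to selection logic just past this hunk boundary. A hedged sketch of that selection, assuming the NotReadyTaintTemplate and UnreachableTaintTemplate package variables the controller defines elsewhere:

// Pick the taint matching the observed Ready condition and remember its
// counterpart for removal; "not ready" (ConditionFalse) and "unreachable"
// (ConditionUnknown) are never applied together. Sketch only.
switch condition.Status {
case v1.ConditionFalse:
	taintToAdd = *NotReadyTaintTemplate
	oppositeTaint = *UnreachableTaintTemplate
case v1.ConditionUnknown:
	taintToAdd = *UnreachableTaintTemplate
	oppositeTaint = *NotReadyTaintTemplate
default:
	// The node is Ready again; nothing to taint.
	return true, 0
}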
@@ -742,7 +741,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
 	var err error
 	var gracePeriod time.Duration
 	var observedReadyCondition v1.NodeCondition
-	_, currentReadyCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady)
+	_, currentReadyCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
 	if currentReadyCondition == nil {
 		// If ready condition is nil, then kubelet (or nodecontroller) never posted node status.
 		// A fake ready condition is created, where LastHeartbeatTime and LastTransitionTime is set
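The comment in this hunk describes synthesizing a placeholder condition when the kubelet has never reported status. A sketch of what that fake condition plausibly looks like (field values assumed from the comment; the body is outside the hunk):

// No status was ever posted, so fabricate an Unknown Ready condition
// anchored at the node's creation time. Sketch only.
observedReadyCondition = v1.NodeCondition{
	Type:               v1.NodeReady,
	Status:             v1.ConditionUnknown,
	LastHeartbeatTime:  node.CreationTimestamp,
	LastTransitionTime: node.CreationTimestamp,
}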
@@ -787,10 +786,10 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
 	var savedCondition *v1.NodeCondition
 	var savedLease *coordv1beta1.Lease
 	if found {
-		_, savedCondition = v1node.GetNodeCondition(savedNodeHealth.status, v1.NodeReady)
+		_, savedCondition = nodeutil.GetNodeCondition(savedNodeHealth.status, v1.NodeReady)
 		savedLease = savedNodeHealth.lease
 	}
-	_, observedCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady)
+	_, observedCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
 	if !found {
 		klog.Warningf("Missing timestamp for Node %s. Assuming now as a timestamp.", node.Name)
 		savedNodeHealth = &nodeHealthData{
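This hunk reads several fields off savedNodeHealth; a sketch of the bookkeeping struct those accesses imply. Only status and lease are visible in this diff, so the timestamp fields are assumptions about the surrounding grace-period logic:

// Assumed shape of the per-node health cache entry; status and lease are
// used directly in this hunk, the timestamp fields are assumed.
type nodeHealthData struct {
	probeTimestamp           metav1.Time
	readyTransitionTimestamp metav1.Time
	status                   *v1.NodeStatus
	lease                    *coordv1beta1.Lease
}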
@@ -885,7 +884,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
 
 	nowTimestamp := nc.now()
 	for _, nodeConditionType := range remainingNodeConditionTypes {
-		_, currentCondition := v1node.GetNodeCondition(&node.Status, nodeConditionType)
+		_, currentCondition := nodeutil.GetNodeCondition(&node.Status, nodeConditionType)
 		if currentCondition == nil {
 			klog.V(2).Infof("Condition %v of node %v was never updated by kubelet", nodeConditionType, node.Name)
 			node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
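The append in this hunk is cut off by the hunk boundary. A hedged guess at how the literal completes, based on the klog message and the nowTimestamp captured above (the Reason and Message strings are assumptions):

// Plausible completion of the truncated literal: a synthetic condition
// marking the type Unknown because the kubelet never reported it.
node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
	Type:               nodeConditionType,
	Status:             v1.ConditionUnknown,
	Reason:             "NodeStatusNeverUpdated",
	Message:            "Kubelet never posted node status.",
	LastHeartbeatTime:  node.CreationTimestamp,
	LastTransitionTime: nowTimestamp,
})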
@@ -908,7 +907,7 @@ func (nc *Controller) tryUpdateNodeHealth(node *v1.Node) (time.Duration, v1.Node
 		}
 	}
 
-	_, currentCondition := v1node.GetNodeCondition(&node.Status, v1.NodeReady)
+	_, currentCondition := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
 	if !apiequality.Semantic.DeepEqual(currentCondition, &observedReadyCondition) {
 		if _, err = nc.kubeClient.CoreV1().Nodes().UpdateStatus(node); err != nil {
 			klog.Errorf("Error updating node %s: %v", node.Name, err)
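The commit message also mentions moving isNodeReady to an internal func. A hedged illustration of what such a helper looks like on top of the relocated GetNodeCondition (a sketch, not the commit's exact code):

// isNodeReady reports whether the node's Ready condition is True.
// Sketch only; the commit keeps an equivalent helper internal to its package.
// Assumes the same imports as the diff above: v1 "k8s.io/api/core/v1" and
// nodeutil "k8s.io/kubernetes/pkg/controller/util/node".
func isNodeReady(node *v1.Node) bool {
	_, c := nodeutil.GetNodeCondition(&node.Status, v1.NodeReady)
	return c != nil && c.Status == v1.ConditionTrue
}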