Merge pull request #111683 from lucming/code-cleanup5
reorganize some logic of controller_utils.go
commit 4557c694ef
@@ -1039,12 +1039,12 @@ func AddOrUpdateTaintOnNode(ctx context.Context, c clientset.Interface, nodeName
         var oldNode *v1.Node
         // First we try getting node from the API server cache, as it's cheaper. If it fails
         // we get it from etcd to be sure to have fresh data.
+        option := metav1.GetOptions{}
         if firstTry {
-            oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
+            option.ResourceVersion = "0"
             firstTry = false
-        } else {
-            oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
         }
+        oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, option)
         if err != nil {
             return err
         }
@@ -1096,12 +1096,12 @@ func RemoveTaintOffNode(ctx context.Context, c clientset.Interface, nodeName str
         var oldNode *v1.Node
         // First we try getting node from the API server cache, as it's cheaper. If it fails
         // we get it from etcd to be sure to have fresh data.
+        option := metav1.GetOptions{}
         if firstTry {
-            oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{ResourceVersion: "0"})
+            option.ResourceVersion = "0"
             firstTry = false
-        } else {
-            oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
         }
+        oldNode, err = c.CoreV1().Nodes().Get(ctx, nodeName, option)
         if err != nil {
             return err
         }
@@ -1178,12 +1178,12 @@ func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, nodeName string, la
         var node *v1.Node
         // First we try getting node from the API server cache, as it's cheaper. If it fails
         // we get it from etcd to be sure to have fresh data.
+        option := metav1.GetOptions{}
         if firstTry {
-            node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{ResourceVersion: "0"})
+            option.ResourceVersion = "0"
             firstTry = false
-        } else {
-            node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
         }
+        node, err = kubeClient.CoreV1().Nodes().Get(context.TODO(), nodeName, option)
         if err != nil {
             return err
         }
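The three hunks above apply the same consolidation: build a single metav1.GetOptions up front, set ResourceVersion to "0" only on the first attempt (so the Get may be served from the API server cache), and issue one Get call instead of duplicating it in both branches. Below is a minimal standalone sketch of that pattern; the function name, the fake clientset, and the node object are illustrative and not part of this PR.

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// getNode builds the GetOptions once: ResourceVersion "0" on the first try
// (cheaper, may be served from the apiserver cache), a default read otherwise.
// A single Get call then covers both cases.
func getNode(ctx context.Context, c kubernetes.Interface, name string, firstTry bool) (*v1.Node, error) {
	option := metav1.GetOptions{}
	if firstTry {
		option.ResourceVersion = "0"
	}
	return c.CoreV1().Nodes().Get(ctx, name, option)
}

func main() {
	// The fake clientset stands in for a real cluster; it ignores
	// ResourceVersion, so this only demonstrates the call shape.
	client := fake.NewSimpleClientset(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}})
	node, err := getNode(context.TODO(), client, "node-a", true)
	fmt.Println(node.GetName(), err)
}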
@@ -129,27 +129,28 @@ func MarkPodsNotReady(ctx context.Context, kubeClient clientset.Interface, recor
         // Pod will be modified, so making copy is required.
         pod := pods[i].DeepCopy()
         for _, cond := range pod.Status.Conditions {
-            if cond.Type == v1.PodReady {
-                cond.Status = v1.ConditionFalse
-                if !utilpod.UpdatePodCondition(&pod.Status, &cond) {
-                    break
-                }
+            if cond.Type != v1.PodReady {
+                continue
+            }
 
-                klog.V(2).InfoS("Updating ready status of pod to false", "pod", pod.Name)
-                _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(ctx, pod, metav1.UpdateOptions{})
-                if err != nil {
-                    if apierrors.IsNotFound(err) {
-                        // NotFound error means that pod was already deleted.
-                        // There is nothing left to do with this pod.
-                        continue
-                    }
-                    klog.InfoS("Failed to update status for pod", "pod", klog.KObj(pod), "err", err)
-                    errs = append(errs, err)
-                }
-                // record NodeNotReady event after updateStatus to make sure pod still exists
-                recorder.Event(pod, v1.EventTypeWarning, "NodeNotReady", "Node is not ready")
-                break
-            }
+            cond.Status = v1.ConditionFalse
+            if !utilpod.UpdatePodCondition(&pod.Status, &cond) {
+                break
+            }
+
+            klog.V(2).InfoS("Updating ready status of pod to false", "pod", pod.Name)
+            if _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(ctx, pod, metav1.UpdateOptions{}); err != nil {
+                if apierrors.IsNotFound(err) {
+                    // NotFound error means that pod was already deleted.
+                    // There is nothing left to do with this pod.
+                    continue
+                }
+                klog.InfoS("Failed to update status for pod", "pod", klog.KObj(pod), "err", err)
+                errs = append(errs, err)
+            }
+            // record NodeNotReady event after updateStatus to make sure pod still exists
+            recorder.Event(pod, v1.EventTypeWarning, "NodeNotReady", "Node is not ready")
+            break
         }
     }
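The MarkPodsNotReady hunk is a guard-clause rewrite: inverting the condition and continuing early removes one level of nesting without changing behavior. A tiny illustrative sketch of the same move, using made-up types that are not from the Kubernetes code:

package main

import "fmt"

// condition is a stand-in for v1.PodCondition, reduced to what the sketch needs.
type condition struct {
	Type   string
	Status string
}

// markNotReady flips the Ready condition to "False". The inverted check plus
// continue replaces the original "if Type == Ready { ... }" nesting.
func markNotReady(conds []condition) {
	for i := range conds {
		if conds[i].Type != "Ready" {
			continue // guard clause: skip everything that is not the Ready condition
		}
		conds[i].Status = "False"
		fmt.Printf("updated %s to %s\n", conds[i].Type, conds[i].Status)
		break
	}
}

func main() {
	markNotReady([]condition{
		{Type: "Initialized", Status: "True"},
		{Type: "Ready", Status: "True"},
	})
}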
@@ -222,8 +223,7 @@ func SwapNodeControllerTaint(ctx context.Context, kubeClient clientset.Interface
 // AddOrUpdateLabelsOnNode updates the labels on the node and returns true on
 // success and false on failure.
 func AddOrUpdateLabelsOnNode(kubeClient clientset.Interface, labelsToUpdate map[string]string, node *v1.Node) bool {
-    err := controller.AddOrUpdateLabelsOnNode(kubeClient, node.Name, labelsToUpdate)
-    if err != nil {
+    if err := controller.AddOrUpdateLabelsOnNode(kubeClient, node.Name, labelsToUpdate); err != nil {
         utilruntime.HandleError(
             fmt.Errorf(
                 "unable to update labels %+v for Node %q: %v",
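The last hunk folds the error variable into the if statement. Scoping err to the if is a common Go idiom that keeps the variable from leaking into the rest of the function; here is a minimal sketch of the call shape, with a hypothetical helper standing in for controller.AddOrUpdateLabelsOnNode:

package main

import (
	"errors"
	"fmt"
)

// updateLabels is a hypothetical stand-in for controller.AddOrUpdateLabelsOnNode.
func updateLabels(labels map[string]string) error {
	if len(labels) == 0 {
		return errors.New("no labels to update")
	}
	return nil
}

func main() {
	// err is scoped to the if statement, mirroring the refactored call site.
	if err := updateLabels(nil); err != nil {
		fmt.Printf("unable to update labels: %v\n", err)
	}
}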