Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-28 22:17:14 +00:00)
adding pods to MarkPodsNotReady parameters
This commit is contained in:
parent 8f48896709
commit a07a3a6878
@@ -790,7 +790,12 @@ func (nc *Controller) monitorNodeHealth() error {
             // Report node event.
             if currentReadyCondition.Status != v1.ConditionTrue && observedReadyCondition.Status == v1.ConditionTrue {
                 nodeutil.RecordNodeStatusChange(nc.recorder, node, "NodeNotReady")
-                if err = nodeutil.MarkAllPodsNotReady(nc.kubeClient, node); err != nil {
+                pods, err := listPodsFromNode(nc.kubeClient, node.Name)
+                if err != nil {
+                    utilruntime.HandleError(fmt.Errorf("Unable to list pods from node %v: %v", node.Name, err))
+                    continue
+                }
+                if err = nodeutil.MarkPodsNotReady(nc.kubeClient, pods, node.Name); err != nil {
                     utilruntime.HandleError(fmt.Errorf("Unable to mark all pods NotReady on node %v: %v", node.Name, err))
                 }
             }
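
Note that listPodsFromNode, which the controller now calls before MarkPodsNotReady, is not shown in this excerpt. Below is a minimal sketch of what such a helper could look like, assuming it simply wraps a field-selector List against the API server and returns the slice expected by the call site above; the "spec.nodeName" selector and the []v1.Pod return type are assumptions based on that call site, not taken from the commit.

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    clientset "k8s.io/client-go/kubernetes"
)

// Sketch only: listPodsFromNode is referenced by the hunk above but its body
// is not part of this excerpt. This assumes it lists every pod scheduled to
// the given node via a field selector, mirroring what MarkAllPodsNotReady
// used to do internally before this change.
func listPodsFromNode(kubeClient clientset.Interface, nodeName string) ([]v1.Pod, error) {
    // "spec.nodeName" is the same field the old code selected on.
    selector := fields.OneTermEqualSelector("spec.nodeName", nodeName).String()
    pods, err := kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{FieldSelector: selector})
    if err != nil {
        return nil, err
    }
    return pods.Items, nil
}
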
@@ -7,14 +7,12 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/api/v1/pod:go_default_library",
-        "//pkg/apis/core:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/kubelet/util/format:go_default_library",
         "//pkg/util/node:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
@@ -22,7 +22,6 @@ import (
 
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/types"
     utilerrors "k8s.io/apimachinery/pkg/util/errors"
     utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -33,7 +32,6 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     appsv1listers "k8s.io/client-go/listers/apps/v1"
     utilpod "k8s.io/kubernetes/pkg/api/v1/pod"
-    api "k8s.io/kubernetes/pkg/apis/core"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/kubelet/util/format"
     nodepkg "k8s.io/kubernetes/pkg/util/node"
@@ -111,19 +109,13 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa
     return updatedPod, nil
 }
 
-// MarkAllPodsNotReady updates ready status of all pods running on
+// MarkPodsNotReady updates ready status of given pods running on
 // given node from master return true if success
-func MarkAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
-    nodeName := node.Name
+func MarkPodsNotReady(kubeClient clientset.Interface, pods []v1.Pod, nodeName string) error {
     klog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
-    opts := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()}
-    pods, err := kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(opts)
-    if err != nil {
-        return err
-    }
-
     errMsg := []string{}
-    for _, pod := range pods.Items {
+    for _, pod := range pods {
         // Defensive check, also needed for tests.
         if pod.Spec.NodeName != nodeName {
             continue
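
With the new signature the caller supplies the pods, so MarkPodsNotReady no longer lists them from the API server itself. A minimal usage sketch under that contract follows; the fake clientset, the pod fixture, and the nodeutil import path are illustrative assumptions, not part of the commit.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"

    // Import path assumed; MarkPodsNotReady lives in the controller's node util package.
    nodeutil "k8s.io/kubernetes/pkg/controller/util/node"
)

func main() {
    // A pod that reports Ready=True and is scheduled to node-1.
    pod := v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "web-0", Namespace: "default"},
        Spec:       v1.PodSpec{NodeName: "node-1"},
        Status: v1.PodStatus{
            Conditions: []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}},
        },
    }
    client := fake.NewSimpleClientset(&pod)

    // The caller decides which pods to mark; previously MarkAllPodsNotReady
    // listed every pod on the node by itself.
    if err := nodeutil.MarkPodsNotReady(client, []v1.Pod{pod}, "node-1"); err != nil {
        fmt.Printf("failed to mark pods not ready: %v\n", err)
    }
}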