using pod pointers in node lifecycle controller

Krzysztof Siedlecki 2019-10-11 15:41:34 +02:00
parent 833e8dc10b
commit b1dfa83be6
5 changed files with 37 additions and 23 deletions
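The commit switches the node lifecycle controller and its NoExecute taint manager from pod values ([]v1.Pod) to pod pointers ([]*v1.Pod), so Pod objects coming from the informer index or from API list calls are no longer copied at every hand-off. The conversion repeated throughout the diff boils down to the pattern below; the helper name is hypothetical and only illustrates the idiom, it is not part of the commit.

package example

import v1 "k8s.io/api/core/v1"

// podListToPointers indexes into the PodList and takes the address of each
// element instead of copying every Pod value, mirroring the rPods loops below.
func podListToPointers(list *v1.PodList) []*v1.Pod {
	pods := make([]*v1.Pod, len(list.Items))
	for i := range list.Items {
		pods[i] = &list.Items[i]
	}
	return pods
}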

View File

@@ -246,7 +246,7 @@ type Controller struct {
nodeLister corelisters.NodeLister
nodeInformerSynced cache.InformerSynced
- getPodsAssignedToNode func(nodeName string) ([]v1.Pod, error)
+ getPodsAssignedToNode func(nodeName string) ([]*v1.Pod, error)
recorder record.EventRecorder
@@ -419,18 +419,18 @@ func NewNodeLifecycleController(
})
podIndexer := podInformer.Informer().GetIndexer()
- nc.getPodsAssignedToNode = func(nodeName string) ([]v1.Pod, error) {
+ nc.getPodsAssignedToNode = func(nodeName string) ([]*v1.Pod, error) {
objs, err := podIndexer.ByIndex(nodeNameKeyIndex, nodeName)
if err != nil {
return nil, err
}
- pods := make([]v1.Pod, 0, len(objs))
+ pods := make([]*v1.Pod, 0, len(objs))
for _, obj := range objs {
pod, ok := obj.(*v1.Pod)
if !ok {
continue
}
- pods = append(pods, *pod)
+ pods = append(pods, pod)
}
return pods, nil
}
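The rewritten getPodsAssignedToNode closure above serves pods straight from the informer's indexer via ByIndex, which presumes an index keyed by node name has been registered on the pod informer. A sketch of how such an index is typically added is shown below; it reuses the v1 import from the earlier sketch plus "k8s.io/client-go/tools/cache", and is not the exact registration code from this file.

// addNodeNameIndexer registers an index that maps a node name to the pods
// scheduled onto it, which is what podIndexer.ByIndex(nodeNameKeyIndex, ...)
// relies on above. Unscheduled pods are simply left out of the index.
func addNodeNameIndexer(podInformer cache.SharedIndexInformer, nodeNameKeyIndex string) error {
	return podInformer.AddIndexers(cache.Indexers{
		nodeNameKeyIndex: func(obj interface{}) ([]string, error) {
			pod, ok := obj.(*v1.Pod)
			if !ok || pod.Spec.NodeName == "" {
				return []string{}, nil
			}
			return []string{pod.Spec.NodeName}, nil
		},
	})
}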
@@ -700,14 +700,18 @@ func (nc *Controller) doEvictionPass() {
}
}
- func listPodsFromNode(kubeClient clientset.Interface, nodeName string) ([]v1.Pod, error) {
+ func listPodsFromNode(kubeClient clientset.Interface, nodeName string) ([]*v1.Pod, error) {
selector := fields.OneTermEqualSelector(apicore.PodHostField, nodeName).String()
options := metav1.ListOptions{FieldSelector: selector}
pods, err := kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(options)
if err != nil {
return nil, err
}
- return pods.Items, nil
+ rPods := make([]*v1.Pod, len(pods.Items))
+ for i := range pods.Items {
+ rPods[i] = &pods.Items[i]
+ }
+ return rPods, nil
}
// monitorNodeHealth verifies node health are constantly updated by kubelet, and
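listPodsFromNode builds the pointer slice with an index loop and &pods.Items[i] rather than by ranging over the values. The counter-example below (a sketch, not code from the commit; same v1 import as above) shows what that form avoids: each &p is a pointer to a copy of the pod rather than into pods.Items, and before Go 1.22 every entry would even alias the same reused loop variable.

// pointersViaRangeValue is the tempting but wrong variant: p is a fresh copy
// of each element, so the returned pointers never reference list.Items, and
// with pre-1.22 loop semantics they all end up pointing at one variable.
func pointersViaRangeValue(list *v1.PodList) []*v1.Pod {
	out := make([]*v1.Pod, 0, len(list.Items))
	for _, p := range list.Items {
		out = append(out, &p) // pointer to the loop variable's copy
	}
	return out
}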

View File

@@ -63,8 +63,8 @@ const (
func alwaysReady() bool { return true }
- func fakeGetPodsAssignedToNode(c *fake.Clientset) func(string) ([]v1.Pod, error) {
- return func(nodeName string) ([]v1.Pod, error) {
+ func fakeGetPodsAssignedToNode(c *fake.Clientset) func(string) ([]*v1.Pod, error) {
+ return func(nodeName string) ([]*v1.Pod, error) {
selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName})
pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
FieldSelector: selector.String(),
@@ -73,7 +73,11 @@ func fakeGetPodsAssignedToNode(c *fake.Clientset) func(string) ([]v1.Pod, error)
if err != nil {
return nil, fmt.Errorf("failed to get Pods assigned to node %v", nodeName)
}
- return pods.Items, nil
+ rPods := make([]*v1.Pod, len(pods.Items))
+ for i := range pods.Items {
+ rPods[i] = &pods.Items[i]
+ }
+ return rPods, nil
}
}

View File

@@ -75,7 +75,7 @@ type GetPodFunc func(name, namespace string) (*v1.Pod, error)
type GetNodeFunc func(name string) (*v1.Node, error)
// GetPodsByNodeNameFunc returns the list of pods assigned to the specified node.
- type GetPodsByNodeNameFunc func(nodeName string) ([]v1.Pod, error)
+ type GetPodsByNodeNameFunc func(nodeName string) ([]*v1.Pod, error)
// NoExecuteTaintManager listens to Taint/Toleration changes and is responsible for removing Pods
// from Nodes tainted with NoExecute Taints.
@@ -464,8 +464,7 @@ func (tc *NoExecuteTaintManager) handleNodeUpdate(nodeUpdate nodeUpdateItem) {
}
now := time.Now()
- for i := range pods {
- pod := &pods[i]
+ for _, pod := range pods {
podNamespacedName := types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}
tc.processPodOnNode(podNamespacedName, node.Name, pod.Spec.Tolerations, taints, now)
}

View File

@@ -42,16 +42,20 @@ func getPodFromClientset(clientset *fake.Clientset) GetPodFunc {
}
func getPodsAssignedToNode(c *fake.Clientset) GetPodsByNodeNameFunc {
- return func(nodeName string) ([]v1.Pod, error) {
+ return func(nodeName string) ([]*v1.Pod, error) {
selector := fields.SelectorFromSet(fields.Set{"spec.nodeName": nodeName})
pods, err := c.CoreV1().Pods(v1.NamespaceAll).List(metav1.ListOptions{
FieldSelector: selector.String(),
LabelSelector: labels.Everything().String(),
})
if err != nil {
- return []v1.Pod{}, fmt.Errorf("failed to get Pods assigned to node %v", nodeName)
+ return []*v1.Pod{}, fmt.Errorf("failed to get Pods assigned to node %v", nodeName)
}
- return pods.Items, nil
+ rPods := make([]*v1.Pod, len(pods.Items))
+ for i := range pods.Items {
+ rPods[i] = &pods.Items[i]
+ }
+ return rPods, nil
}
}

View File

@@ -42,7 +42,7 @@ import (
// DeletePods will delete all pods from master running on given node,
// and return true if any pods were deleted, or were found pending
// deletion.
- func DeletePods(kubeClient clientset.Interface, pods []v1.Pod, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore appsv1listers.DaemonSetLister) (bool, error) {
+ func DeletePods(kubeClient clientset.Interface, pods []*v1.Pod, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore appsv1listers.DaemonSetLister) (bool, error) {
remaining := false
var updateErrList []error
@@ -51,12 +51,13 @@ func DeletePods(kubeClient clientset.Interface, pods []v1.Pod, recorder record.E
}
for i := range pods {
- pod := &pods[i]
// Defensive check, also needed for tests.
- if pod.Spec.NodeName != nodeName {
+ if pods[i].Spec.NodeName != nodeName {
continue
}
+ // Pod will be modified, so making a copy is required.
+ pod := pods[i].DeepCopy()
// Set reason and message in the pod object.
if _, err := SetPodTerminationReason(kubeClient, pod, nodeName); err != nil {
if apierrors.IsConflict(err) {
@@ -111,16 +112,18 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa
// MarkPodsNotReady updates ready status of given pods running on
// given node from master return true if success
- func MarkPodsNotReady(kubeClient clientset.Interface, pods []v1.Pod, nodeName string) error {
+ func MarkPodsNotReady(kubeClient clientset.Interface, pods []*v1.Pod, nodeName string) error {
klog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
errMsg := []string{}
- for _, pod := range pods {
+ for i := range pods {
// Defensive check, also needed for tests.
- if pod.Spec.NodeName != nodeName {
+ if pods[i].Spec.NodeName != nodeName {
continue
}
+ // Pod will be modified, so making a copy is required.
+ pod := pods[i].DeepCopy()
for _, cond := range pod.Status.Conditions {
if cond.Type == v1.PodReady {
cond.Status = v1.ConditionFalse
@@ -128,9 +131,9 @@ func MarkPodsNotReady(kubeClient clientset.Interface, pods []v1.Pod, nodeName st
break
}
klog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
- _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(&pod)
+ _, err := kubeClient.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
if err != nil {
- klog.Warningf("Failed to update status for pod %q: %v", format.Pod(&pod), err)
+ klog.Warningf("Failed to update status for pod %q: %v", format.Pod(pod), err)
errMsg = append(errMsg, fmt.Sprintf("%v", err))
}
break
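With the pointer signatures, the pods handed to DeletePods and MarkPodsNotReady may point straight into the shared informer cache, which is why the new code deep-copies a pod before modifying it and only then calls UpdateStatus. The sketch below restates that copy-before-mutate rule with a hypothetical helper; it assumes the same v1 import as above, clientset "k8s.io/client-go/kubernetes", and the pre-context UpdateStatus signature used in the diff.

// markPodNotReady flips the PodReady condition to false on a deep copy, never
// on the cached object itself, and pushes the change through UpdateStatus.
func markPodNotReady(client clientset.Interface, cached *v1.Pod) error {
	pod := cached.DeepCopy() // leave the informer-cache object untouched
	for i := range pod.Status.Conditions {
		if pod.Status.Conditions[i].Type == v1.PodReady {
			pod.Status.Conditions[i].Status = v1.ConditionFalse
			break
		}
	}
	_, err := client.CoreV1().Pods(pod.Namespace).UpdateStatus(pod)
	return err
}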