Mirror of https://github.com/k3s-io/kubernetes.git
Update controllers and client to handle terminating pods
commit 2aaf8bddc2
parent 02dbb95447
@@ -322,7 +322,8 @@ func FilterActivePods(pods []api.Pod) []*api.Pod {
 	var result []*api.Pod
 	for i := range pods {
 		if api.PodSucceeded != pods[i].Status.Phase &&
-			api.PodFailed != pods[i].Status.Phase {
+			api.PodFailed != pods[i].Status.Phase &&
+			pods[i].DeletionTimestamp == nil {
 			result = append(result, &pods[i])
 		}
 	}
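With this change, a pod that is still Running but already has a deletion timestamp (i.e. it is terminating gracefully) no longer counts as active. A minimal, self-contained sketch of the new predicate, using simplified stand-in types rather than the real api package:

package main

import (
	"fmt"
	"time"
)

// Simplified stand-ins for the api.Pod fields the filter consults (not the real types).
type PodPhase string

const (
	PodRunning   PodPhase = "Running"
	PodSucceeded PodPhase = "Succeeded"
	PodFailed    PodPhase = "Failed"
)

type Pod struct {
	Name              string
	Phase             PodPhase
	DeletionTimestamp *time.Time
}

// filterActivePods mirrors the updated condition: a pod is active only if it has
// not succeeded, not failed, and is not marked for deletion.
func filterActivePods(pods []Pod) []*Pod {
	var result []*Pod
	for i := range pods {
		if pods[i].Phase != PodSucceeded &&
			pods[i].Phase != PodFailed &&
			pods[i].DeletionTimestamp == nil {
			result = append(result, &pods[i])
		}
	}
	return result
}

func main() {
	now := time.Now()
	pods := []Pod{
		{Name: "running", Phase: PodRunning},
		{Name: "terminating", Phase: PodRunning, DeletionTimestamp: &now}, // excluded after this change
		{Name: "failed", Phase: PodFailed},
	}
	for _, p := range filterActivePods(pods) {
		fmt.Println(p.Name) // prints only "running"
	}
}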
@@ -310,7 +310,11 @@ func (e *EndpointController) syncService(key string) {
 			continue
 		}
 		if len(pod.Status.PodIP) == 0 {
-			glog.V(4).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
+			glog.V(5).Infof("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
+			continue
+		}
+		if pod.DeletionTimestamp != nil {
+			glog.V(5).Infof("Pod is being deleted %s/%s", pod.Namespace, pod.Name)
 			continue
 		}
 
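The endpoints controller now also skips pods that are being deleted, so traffic stops being routed to them as soon as graceful deletion begins. A rough, self-contained sketch of the two skip conditions, with simplified stand-in types and the glog calls omitted:

package main

import (
	"fmt"
	"time"
)

// Simplified stand-in for the pod fields the endpoints loop looks at.
type Pod struct {
	Namespace, Name   string
	PodIP             string
	DeletionTimestamp *time.Time
}

// endpointIPs mirrors the loop's skip conditions: no IP assigned yet, or terminating.
func endpointIPs(pods []Pod) []string {
	var ips []string
	for _, pod := range pods {
		if len(pod.PodIP) == 0 {
			// no IP yet; skip (logged at V(5) in the real controller)
			continue
		}
		if pod.DeletionTimestamp != nil {
			// pod is being deleted gracefully; stop routing traffic to it
			continue
		}
		ips = append(ips, pod.PodIP)
	}
	return ips
}

func main() {
	now := time.Now()
	pods := []Pod{
		{Namespace: "default", Name: "a", PodIP: "10.0.0.1"},
		{Namespace: "default", Name: "b"},                                             // no IP yet
		{Namespace: "default", Name: "c", PodIP: "10.0.0.3", DeletionTimestamp: &now}, // terminating
	}
	fmt.Println(endpointIPs(pods)) // [10.0.0.1]
}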
@@ -213,6 +213,12 @@ func (rm *ReplicationManager) getPodController(pod *api.Pod) *api.ReplicationCon
 // When a pod is created, enqueue the controller that manages it and update it's expectations.
 func (rm *ReplicationManager) addPod(obj interface{}) {
 	pod := obj.(*api.Pod)
+	if pod.DeletionTimestamp != nil {
+		// on a restart of the controller manager, it's possible a new pod shows up in a state that
+		// is already pending deletion. Prevent the pod from being a creation observation.
+		rm.deletePod(pod)
+		return
+	}
 	if rc := rm.getPodController(pod); rc != nil {
 		rcKey, err := controller.KeyFunc(rc)
 		if err != nil {
@@ -234,6 +240,15 @@ func (rm *ReplicationManager) updatePod(old, cur interface{}) {
 	}
 	// TODO: Write a unittest for this case
 	curPod := cur.(*api.Pod)
+	if curPod.DeletionTimestamp != nil {
+		// when a pod is deleted gracefully it's deletion timestamp is first modified to reflect a grace period,
+		// and after such time has passed, the kubelet actually deletes it from the store. We receive an update
+		// for modification of the deletion timestamp and expect an rc to create more replicas asap, not wait
+		// until the kubelet actually deletes the pod. This is different from the Phase of a pod changing, because
+		// an rc never initiates a phase change, and so is never asleep waiting for the same.
+		rm.deletePod(curPod)
+		return
+	}
 	if rc := rm.getPodController(curPod); rc != nil {
 		rm.enqueueController(rc)
 	}
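The addPod and updatePod hunks apply the same rule: a pod carrying a deletion timestamp is treated as already deleted, so the replication controller can start a replacement immediately instead of waiting out the grace period. A hypothetical, self-contained sketch of that dispatch; the types and the deletePod/enqueueController bodies below are simplified stand-ins, not the real manager:

package main

import (
	"fmt"
	"time"
)

// Simplified stand-ins; not the real api or controller types.
type Pod struct {
	Name              string
	DeletionTimestamp *time.Time
}

type replicationManager struct{}

func (rm *replicationManager) deletePod(pod *Pod) {
	// In the real manager this records a deletion observation and enqueues the
	// owning controller, so a replacement replica is created right away.
	fmt.Printf("observed deletion of %s, enqueue controller for replacement\n", pod.Name)
}

func (rm *replicationManager) enqueueController(pod *Pod) {
	fmt.Printf("ordinary update for %s, enqueue controller\n", pod.Name)
}

// updatePod mirrors the new branch: an update that sets the deletion timestamp
// is handled as a deletion rather than an ordinary modification.
func (rm *replicationManager) updatePod(cur *Pod) {
	if cur.DeletionTimestamp != nil {
		rm.deletePod(cur)
		return
	}
	rm.enqueueController(cur)
}

func main() {
	rm := &replicationManager{}
	now := time.Now()
	rm.updatePod(&Pod{Name: "web-1"})                          // ordinary update
	rm.updatePod(&Pod{Name: "web-2", DeletionTimestamp: &now}) // graceful deletion started
}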
@@ -417,7 +417,12 @@ func describePod(pod *api.Pod, rcs []api.ReplicationController, events *api.Even
 	fmt.Fprintf(out, "Image(s):\t%s\n", makeImageList(&pod.Spec))
 	fmt.Fprintf(out, "Node:\t%s\n", pod.Spec.NodeName+"/"+pod.Status.HostIP)
 	fmt.Fprintf(out, "Labels:\t%s\n", formatLabels(pod.Labels))
-	fmt.Fprintf(out, "Status:\t%s\n", string(pod.Status.Phase))
+	if pod.DeletionTimestamp != nil {
+		fmt.Fprintf(out, "Status:\tTerminating (expires %s)\n", pod.DeletionTimestamp.Time.Format(time.RFC1123Z))
+		fmt.Fprintf(out, "Termination Grace Period:\t%ds\n", pod.DeletionGracePeriodSeconds)
+	} else {
+		fmt.Fprintf(out, "Status:\t%s\n", string(pod.Status.Phase))
+	}
 	fmt.Fprintf(out, "Reason:\t%s\n", pod.Status.Reason)
 	fmt.Fprintf(out, "Message:\t%s\n", pod.Status.Message)
 	fmt.Fprintf(out, "IP:\t%s\n", pod.Status.PodIP)
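kubectl describe now distinguishes a terminating pod by printing the deletion expiry (RFC 1123Z) and the grace period instead of the phase. A small stand-alone sketch of that branch, with a simplified pod struct in place of api.Pod:

package main

import (
	"fmt"
	"io"
	"os"
	"time"
)

// Simplified stand-in for the fields this part of describePod prints.
type Pod struct {
	Phase                      string
	DeletionTimestamp          *time.Time
	DeletionGracePeriodSeconds int64
}

// printStatus mirrors the new branch: terminating pods show the expiry time and
// grace period, everything else shows the phase.
func printStatus(out io.Writer, pod *Pod) {
	if pod.DeletionTimestamp != nil {
		fmt.Fprintf(out, "Status:\tTerminating (expires %s)\n", pod.DeletionTimestamp.Format(time.RFC1123Z))
		fmt.Fprintf(out, "Termination Grace Period:\t%ds\n", pod.DeletionGracePeriodSeconds)
		return
	}
	fmt.Fprintf(out, "Status:\t%s\n", pod.Phase)
}

func main() {
	expiry := time.Now().Add(30 * time.Second)
	printStatus(os.Stdout, &Pod{Phase: "Running"})
	printStatus(os.Stdout, &Pod{Phase: "Running", DeletionTimestamp: &expiry, DeletionGracePeriodSeconds: 30})
}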
@@ -425,6 +425,9 @@ func printPod(pod *api.Pod, w io.Writer, withNamespace bool, wide bool, showAll
 			readyContainers++
 		}
 	}
+	if pod.DeletionTimestamp != nil {
+		reason = "Terminating"
+	}
 
 	if withNamespace {
 		if _, err := fmt.Fprintf(w, "%s\t", namespace); err != nil {
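In the column printer the deletion timestamp overrides the displayed reason, so a terminating pod shows as Terminating regardless of its phase. A condensed, hypothetical sketch of that precedence; only the DeletionTimestamp override comes from this hunk, the surrounding phase/reason handling is an assumption:

package main

import (
	"fmt"
	"time"
)

// Simplified stand-in for the status fields printPod consults.
type Pod struct {
	Phase             string
	Reason            string
	DeletionTimestamp *time.Time
}

// displayReason sketches the assumed precedence: start from the phase, prefer an
// explicit status reason, and let a pending deletion override both.
func displayReason(pod *Pod) string {
	reason := pod.Phase
	if pod.Reason != "" {
		reason = pod.Reason
	}
	if pod.DeletionTimestamp != nil {
		reason = "Terminating"
	}
	return reason
}

func main() {
	now := time.Now()
	fmt.Println(displayReason(&Pod{Phase: "Running"}))                          // Running
	fmt.Println(displayReason(&Pod{Phase: "Running", DeletionTimestamp: &now})) // Terminating
}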