From 6b08ef575fe52f49acac53583d6b954ad27a8806 Mon Sep 17 00:00:00 2001
From: Minhan Xia
Date: Fri, 25 May 2018 15:07:29 -0700
Subject: [PATCH 1/3] add ContainersReady condition

---
 pkg/apis/core/types.go                  | 2 ++
 pkg/kubelet/types/pod_status.go         | 1 +
 staging/src/k8s.io/api/core/v1/types.go | 2 ++
 3 files changed, 5 insertions(+)

diff --git a/pkg/apis/core/types.go b/pkg/apis/core/types.go
index d2593cac0cc..4d0d9f9abcb 100644
--- a/pkg/apis/core/types.go
+++ b/pkg/apis/core/types.go
@@ -2094,6 +2094,8 @@ const (
 	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
 	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
 	PodReasonUnschedulable = "Unschedulable"
+	// ContainersReady indicates whether all containers in the pod are ready.
+	ContainersReady PodConditionType = "ContainersReady"
 )
 
 type PodCondition struct {
diff --git a/pkg/kubelet/types/pod_status.go b/pkg/kubelet/types/pod_status.go
index a7756382b3c..8c46ba39eb6 100644
--- a/pkg/kubelet/types/pod_status.go
+++ b/pkg/kubelet/types/pod_status.go
@@ -26,6 +26,7 @@ var PodConditionsByKubelet = []v1.PodConditionType{
 	v1.PodReady,
 	v1.PodInitialized,
 	v1.PodReasonUnschedulable,
+	v1.ContainersReady,
 }
 
 // PodConditionByKubelet returns if the pod condition type is owned by kubelet
diff --git a/staging/src/k8s.io/api/core/v1/types.go b/staging/src/k8s.io/api/core/v1/types.go
index c8ad4371c79..e850dd5b12f 100644
--- a/staging/src/k8s.io/api/core/v1/types.go
+++ b/staging/src/k8s.io/api/core/v1/types.go
@@ -2312,6 +2312,8 @@ const (
 	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
 	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
 	PodReasonUnschedulable = "Unschedulable"
+	// ContainersReady indicates whether all containers in the pod are ready.
+	ContainersReady PodConditionType = "ContainersReady"
 )
 
 // PodCondition contains details for the current condition of this pod.
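Note (illustrative, not part of the patch series): once this constant is in k8s.io/api/core/v1, consumers can look the new condition up on a pod's status next to PodReady. The getCondition helper below is hypothetical (the in-tree code uses lookups along the lines of podutil.GetPodConditionFromList, which appears later in this series); the sketch only shows how the new type is meant to be read.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// getCondition is a hypothetical helper that returns the condition of the given
// type from a PodStatus, or nil if the pod does not report that condition.
func getCondition(status *v1.PodStatus, condType v1.PodConditionType) *v1.PodCondition {
	for i := range status.Conditions {
		if status.Conditions[i].Type == condType {
			return &status.Conditions[i]
		}
	}
	return nil
}

func main() {
	// A made-up status: all containers are ready, but a readiness gate still holds Ready back.
	status := v1.PodStatus{
		Conditions: []v1.PodCondition{
			{Type: v1.ContainersReady, Status: v1.ConditionTrue},
			{Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "ReadinessGatesNotReady"},
		},
	}
	if c := getCondition(&status, v1.ContainersReady); c != nil {
		fmt.Printf("ContainersReady=%s PodReady=%s\n", c.Status, getCondition(&status, v1.PodReady).Status)
	}
}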
From 176f34ea0728183a85b7d4687368f1f45c41b131 Mon Sep 17 00:00:00 2001
From: Minhan Xia
Date: Fri, 1 Jun 2018 18:32:07 -0700
Subject: [PATCH 2/3] Generate ContainersReady condition

---
 pkg/kubelet/status/generate.go      | 36 ++++++++---
 pkg/kubelet/status/generate_test.go | 92 +++++++++++++++++++++++++++++
 2 files changed, 120 insertions(+), 8 deletions(-)

diff --git a/pkg/kubelet/status/generate.go b/pkg/kubelet/status/generate.go
index 30ec57b92a4..8d9415d2247 100644
--- a/pkg/kubelet/status/generate.go
+++ b/pkg/kubelet/status/generate.go
@@ -32,14 +32,13 @@ const (
 	ReadinessGatesNotReady = "ReadinessGatesNotReady"
 )
 
-// GeneratePodReadyCondition returns "Ready" condition of a pod.
-// The status of "Ready" condition is "True", if all containers in a pod are ready
-// AND all matching conditions specified in the ReadinessGates have status equal to "True".
-func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
+// GenerateContainersReadyCondition returns the status of "ContainersReady" condition.
+// The status of "ContainersReady" condition is true when all containers are ready.
+func GenerateContainersReadyCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
 	// Find if all containers are ready or not.
 	if containerStatuses == nil {
 		return v1.PodCondition{
-			Type:   v1.PodReady,
+			Type:   v1.ContainersReady,
 			Status: v1.ConditionFalse,
 			Reason: UnknownContainerStatuses,
 		}
@@ -59,7 +58,7 @@ func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, c
 	// If all containers are known and succeeded, just return PodCompleted.
 	if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 {
 		return v1.PodCondition{
-			Type:   v1.PodReady,
+			Type:   v1.ContainersReady,
 			Status: v1.ConditionFalse,
 			Reason: PodCompleted,
 		}
@@ -76,16 +75,37 @@ func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, c
 	unreadyMessage := strings.Join(unreadyMessages, ", ")
 	if unreadyMessage != "" {
 		return v1.PodCondition{
-			Type:    v1.PodReady,
+			Type:    v1.ContainersReady,
 			Status:  v1.ConditionFalse,
 			Reason:  ContainersNotReady,
 			Message: unreadyMessage,
 		}
 	}
 
+	return v1.PodCondition{
+		Type:   v1.ContainersReady,
+		Status: v1.ConditionTrue,
+	}
+}
+
+// GeneratePodReadyCondition returns "Ready" condition of a pod.
+// The status of "Ready" condition is "True", if all containers in a pod are ready
+// AND all matching conditions specified in the ReadinessGates have status equal to "True".
+func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
+	containersReady := GenerateContainersReadyCondition(spec, containerStatuses, podPhase)
+	// If the status of ContainersReady is not True, return the same status, reason and message as ContainersReady.
+	if containersReady.Status != v1.ConditionTrue {
+		return v1.PodCondition{
+			Type:    v1.PodReady,
+			Status:  containersReady.Status,
+			Reason:  containersReady.Reason,
+			Message: containersReady.Message,
+		}
+	}
+
 	// Evaluate corresponding conditions specified in readiness gate
 	// Generate message if any readiness gate is not satisfied.
-	unreadyMessages = []string{}
+	unreadyMessages := []string{}
 	for _, rg := range spec.ReadinessGates {
 		_, c := podutil.GetPodConditionFromList(conditions, rg.ConditionType)
 		if c == nil {
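Note (illustrative, not part of the patch): the split above means GeneratePodReadyCondition first mirrors ContainersReady and only then consults the readiness gates. A rough sketch of the intended behaviour follows, written as a test in the same package; it assumes the readiness-gate branch that is cut off above sets Reason to the ReadinessGatesNotReady constant declared in the const block, and the gate name example.com/feature-ready is made up.

package status

import (
	"testing"

	"k8s.io/api/core/v1"
)

// Illustrative only: containers can all be ready while the pod-level Ready condition
// stays False until the readiness gate's condition is reported as True.
func TestReadinessGateHoldsPodReadyExample(t *testing.T) {
	spec := &v1.PodSpec{
		Containers:     []v1.Container{{Name: "app"}},
		ReadinessGates: []v1.PodReadinessGate{{ConditionType: "example.com/feature-ready"}},
	}
	statuses := []v1.ContainerStatus{{Name: "app", Ready: true}}

	if c := GenerateContainersReadyCondition(spec, statuses, v1.PodRunning); c.Status != v1.ConditionTrue {
		t.Fatalf("expected ContainersReady to be True, got %+v", c)
	}
	// No "example.com/feature-ready" condition exists in the pod's conditions yet,
	// so Ready should stay False even though every container is ready.
	if c := GeneratePodReadyCondition(spec, nil, statuses, v1.PodRunning); c.Status != v1.ConditionFalse || c.Reason != ReadinessGatesNotReady {
		t.Fatalf("expected Ready=False with reason %s, got %+v", ReadinessGatesNotReady, c)
	}
}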
diff --git a/pkg/kubelet/status/generate_test.go b/pkg/kubelet/status/generate_test.go
index 46ea7df97fd..a8a191d1bc0 100644
--- a/pkg/kubelet/status/generate_test.go
+++ b/pkg/kubelet/status/generate_test.go
@@ -24,6 +24,98 @@ import (
 	"k8s.io/api/core/v1"
 )
 
+func TestGenerateContainersReadyCondition(t *testing.T) {
+	tests := []struct {
+		spec              *v1.PodSpec
+		containerStatuses []v1.ContainerStatus
+		podPhase          v1.PodPhase
+		expectReady       v1.PodCondition
+	}{
+		{
+			spec:              nil,
+			containerStatuses: nil,
+			podPhase:          v1.PodRunning,
+			expectReady:       getPodCondition(v1.ContainersReady, v1.ConditionFalse, UnknownContainerStatuses, ""),
+		},
+		{
+			spec:              &v1.PodSpec{},
+			containerStatuses: []v1.ContainerStatus{},
+			podPhase:          v1.PodRunning,
+			expectReady:       getPodCondition(v1.ContainersReady, v1.ConditionTrue, "", ""),
+		},
+		{
+			spec: &v1.PodSpec{
+				Containers: []v1.Container{
+					{Name: "1234"},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{},
+			podPhase:          v1.PodRunning,
+			expectReady:       getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [1234]"),
+		},
+		{
+			spec: &v1.PodSpec{
+				Containers: []v1.Container{
+					{Name: "1234"},
+					{Name: "5678"},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				getReadyStatus("1234"),
+				getReadyStatus("5678"),
+			},
+			podPhase:    v1.PodRunning,
+			expectReady: getPodCondition(v1.ContainersReady, v1.ConditionTrue, "", ""),
+		},
+		{
+			spec: &v1.PodSpec{
+				Containers: []v1.Container{
+					{Name: "1234"},
+					{Name: "5678"},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				getReadyStatus("1234"),
+			},
+			podPhase:    v1.PodRunning,
+			expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [5678]"),
+		},
+		{
+			spec: &v1.PodSpec{
+				Containers: []v1.Container{
+					{Name: "1234"},
+					{Name: "5678"},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				getReadyStatus("1234"),
+				getNotReadyStatus("5678"),
+			},
+			podPhase:    v1.PodRunning,
+			expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unready status: [5678]"),
+		},
+		{
+			spec: &v1.PodSpec{
+				Containers: []v1.Container{
+					{Name: "1234"},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				getNotReadyStatus("1234"),
+			},
+			podPhase:    v1.PodSucceeded,
+			expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, PodCompleted, ""),
+		},
+	}
+
+	for i, test := range tests {
+		ready := GenerateContainersReadyCondition(test.spec, test.containerStatuses, test.podPhase)
+		if !reflect.DeepEqual(ready, test.expectReady) {
+			t.Errorf("On test case %v, expectReady:\n%+v\ngot\n%+v\n", i, test.expectReady, ready)
+		}
+	}
+}
+
 func TestGeneratePodReadyCondition(t *testing.T) {
 	tests := []struct {
 		spec              *v1.PodSpec
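Note (illustrative, not part of the patch series): the table above pins down the ContainersReady reasons and messages. The sketch below, under the same assumptions, spells out the delegation the new GeneratePodReadyCondition performs when containers are not ready: Ready mirrors ContainersReady's status, reason and message, differing only in Type.

package status

import (
	"reflect"
	"testing"

	"k8s.io/api/core/v1"
)

// Illustrative only: with an unready container, the pod-level Ready condition is a
// copy of ContainersReady apart from its Type.
func TestReadyMirrorsContainersReadyExample(t *testing.T) {
	spec := &v1.PodSpec{Containers: []v1.Container{{Name: "app"}}}
	statuses := []v1.ContainerStatus{{Name: "app", Ready: false}}

	containersReady := GenerateContainersReadyCondition(spec, statuses, v1.PodRunning)
	podReady := GeneratePodReadyCondition(spec, nil, statuses, v1.PodRunning)

	containersReady.Type = v1.PodReady // align the Type so the remaining fields can be compared
	if !reflect.DeepEqual(containersReady, podReady) {
		t.Fatalf("expected Ready to mirror ContainersReady, got %+v vs %+v", podReady, containersReady)
	}
}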
From 370268f123c793bca12615967737106aa381cbae Mon Sep 17 00:00:00 2001
From: Minhan Xia
Date: Fri, 1 Jun 2018 18:32:27 -0700
Subject: [PATCH 3/3] Inject ContainersReady

---
 pkg/kubelet/kubelet_pods.go          |  1 +
 pkg/kubelet/status/status_manager.go | 29 +++++++++++++++------------
 2 files changed, 17 insertions(+), 13 deletions(-)

diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go
index 4c89639f4fe..0e9ea8682b7 100644
--- a/pkg/kubelet/kubelet_pods.go
+++ b/pkg/kubelet/kubelet_pods.go
@@ -1354,6 +1354,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
 	kl.probeManager.UpdatePodStatus(pod.UID, s)
 	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
 	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.Conditions, s.ContainerStatuses, s.Phase))
+	s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(spec, s.ContainerStatuses, s.Phase))
 	// Status manager will take care of the LastTransitionTimestamp, either preserve
 	// the timestamp from apiserver, or set a new one. When kubelet sees the pod,
 	// `PodScheduled` condition must be true.
diff --git a/pkg/kubelet/status/status_manager.go b/pkg/kubelet/status/status_manager.go
index 74f458716e8..1dbfc6aa5e7 100644
--- a/pkg/kubelet/status/status_manager.go
+++ b/pkg/kubelet/status/status_manager.go
@@ -226,21 +226,24 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai
 	containerStatus, _, _ = findContainerStatus(&status, containerID.String())
 	containerStatus.Ready = ready
 
-	// Update pod condition.
-	podReadyConditionIndex := -1
-	for i, condition := range status.Conditions {
-		if condition.Type == v1.PodReady {
-			podReadyConditionIndex = i
-			break
+	// updateConditionFunc updates the corresponding type of condition
+	updateConditionFunc := func(conditionType v1.PodConditionType, condition v1.PodCondition) {
+		conditionIndex := -1
+		for i, condition := range status.Conditions {
+			if condition.Type == conditionType {
+				conditionIndex = i
+				break
+			}
+		}
+		if conditionIndex != -1 {
+			status.Conditions[conditionIndex] = condition
+		} else {
+			glog.Warningf("PodStatus missing %s type condition: %+v", conditionType, status)
+			status.Conditions = append(status.Conditions, condition)
 		}
 	}
-	podReady := GeneratePodReadyCondition(&pod.Spec, status.Conditions, status.ContainerStatuses, status.Phase)
-	if podReadyConditionIndex != -1 {
-		status.Conditions[podReadyConditionIndex] = podReady
-	} else {
-		glog.Warningf("PodStatus missing PodReady condition: %+v", status)
-		status.Conditions = append(status.Conditions, podReady)
-	}
+	updateConditionFunc(v1.PodReady, GeneratePodReadyCondition(&pod.Spec, status.Conditions, status.ContainerStatuses, status.Phase))
+	updateConditionFunc(v1.ContainersReady, GenerateContainersReadyCondition(&pod.Spec, status.ContainerStatuses, status.Phase))
 	m.updateStatusInternal(pod, status, false)
 }
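Note (illustrative, not part of the patch series): the status_manager change generalizes the old "replace PodReady or append it" block into a closure that works for any condition type, so a readiness flip now refreshes both Ready and ContainersReady. The sketch below re-implements that update-or-append behaviour as a standalone helper; setCondition is a made-up name, and the real closure additionally logs a warning when the condition is missing.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// setCondition mirrors the updateConditionFunc closure added in SetContainerReadiness:
// replace the condition of the given type in place, or append it when it is missing.
func setCondition(conditions []v1.PodCondition, condition v1.PodCondition) []v1.PodCondition {
	for i := range conditions {
		if conditions[i].Type == condition.Type {
			conditions[i] = condition
			return conditions
		}
	}
	return append(conditions, condition)
}

func main() {
	conditions := []v1.PodCondition{
		{Type: v1.PodScheduled, Status: v1.ConditionTrue},
		{Type: v1.PodReady, Status: v1.ConditionFalse},
	}
	// A container flipped to ready: refresh Ready in place and append the (still missing) ContainersReady.
	conditions = setCondition(conditions, v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue})
	conditions = setCondition(conditions, v1.PodCondition{Type: v1.ContainersReady, Status: v1.ConditionTrue})
	for _, c := range conditions {
		fmt.Printf("%-16s %s\n", c.Type, c.Status)
	}
}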