Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 03:41:45 +00:00
Merge pull request #64646 from freehan/pod-ready-plus2-new
Automatic merge from submit-queue (batch tested with PRs 63717, 64646, 64792, 64784, 64800). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add ContainersReady condition into Pod Status

**Last 3 commits are new**

Follow-up PR of https://github.com/kubernetes/kubernetes/pull/64057 and https://github.com/kubernetes/kubernetes/pull/64344.

Have a single PR for adding ContainersReady per https://github.com/kubernetes/kubernetes/pull/64344#issuecomment-394038384

```release-note
Introduce ContainersReady condition in Pod Status
```

/assign yujuhong for review
/assign thockin for the tiny API change
Commit: 0b8394a1f4
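For readers of this merge: the new condition lets API clients distinguish "all containers are ready" from overall pod readiness, which additionally gates on ReadinessGates. Below is a minimal sketch of reading the new condition from a pod fetched from the API server; the `containersReady` helper is illustrative, not part of this PR:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// containersReady reports whether the pod's ContainersReady condition is True.
// Illustrative helper; assumes a v1.Pod obtained from the API server.
func containersReady(pod *v1.Pod) bool {
	for _, c := range pod.Status.Conditions {
		if c.Type == v1.ContainersReady {
			return c.Status == v1.ConditionTrue
		}
	}
	return false
}

func main() {
	pod := &v1.Pod{Status: v1.PodStatus{Conditions: []v1.PodCondition{
		{Type: v1.ContainersReady, Status: v1.ConditionTrue},
	}}}
	fmt.Println(containersReady(pod)) // true
}
```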
```diff
@@ -2094,6 +2094,8 @@ const (
     // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
     // can't schedule the pod right now, for example due to insufficient resources in the cluster.
     PodReasonUnschedulable = "Unschedulable"
+    // ContainersReady indicates whether all containers in the pod are ready.
+    ContainersReady PodConditionType = "ContainersReady"
 )
 
 type PodCondition struct {
```
```diff
@@ -1354,6 +1354,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
     kl.probeManager.UpdatePodStatus(pod.UID, s)
     s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
     s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.Conditions, s.ContainerStatuses, s.Phase))
+    s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(spec, s.ContainerStatuses, s.Phase))
     // Status manager will take care of the LastTransitionTimestamp, either preserve
     // the timestamp from apiserver, or set a new one. When kubelet sees the pod,
     // `PodScheduled` condition must be true.
```
```diff
@@ -32,14 +32,13 @@ const (
     ReadinessGatesNotReady = "ReadinessGatesNotReady"
 )
 
-// GeneratePodReadyCondition returns "Ready" condition of a pod.
-// The status of "Ready" condition is "True", if all containers in a pod are ready
-// AND all matching conditions specified in the ReadinessGates have status equal to "True".
-func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
+// GenerateContainersReadyCondition returns the status of "ContainersReady" condition.
+// The status of "ContainersReady" condition is true when all containers are ready.
+func GenerateContainersReadyCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
     // Find if all containers are ready or not.
     if containerStatuses == nil {
         return v1.PodCondition{
-            Type:   v1.PodReady,
+            Type:   v1.ContainersReady,
             Status: v1.ConditionFalse,
             Reason: UnknownContainerStatuses,
         }
@@ -59,7 +58,7 @@ func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, c
     // If all containers are known and succeeded, just return PodCompleted.
     if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 {
         return v1.PodCondition{
-            Type:   v1.PodReady,
+            Type:   v1.ContainersReady,
             Status: v1.ConditionFalse,
             Reason: PodCompleted,
         }
@@ -76,16 +75,37 @@ func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, c
     unreadyMessage := strings.Join(unreadyMessages, ", ")
     if unreadyMessage != "" {
         return v1.PodCondition{
-            Type:    v1.PodReady,
+            Type:    v1.ContainersReady,
             Status:  v1.ConditionFalse,
             Reason:  ContainersNotReady,
             Message: unreadyMessage,
         }
     }
 
+    return v1.PodCondition{
+        Type:   v1.ContainersReady,
+        Status: v1.ConditionTrue,
+    }
+}
+
+// GeneratePodReadyCondition returns "Ready" condition of a pod.
+// The status of "Ready" condition is "True", if all containers in a pod are ready
+// AND all matching conditions specified in the ReadinessGates have status equal to "True".
+func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
+    containersReady := GenerateContainersReadyCondition(spec, containerStatuses, podPhase)
+    // If the status of ContainersReady is not True, return the same status, reason and message as ContainersReady.
+    if containersReady.Status != v1.ConditionTrue {
+        return v1.PodCondition{
+            Type:    v1.PodReady,
+            Status:  containersReady.Status,
+            Reason:  containersReady.Reason,
+            Message: containersReady.Message,
+        }
+    }
+
     // Evaluate corresponding conditions specified in readiness gate
     // Generate message if any readiness gate is not satisfied.
-    unreadyMessages = []string{}
+    unreadyMessages := []string{}
     for _, rg := range spec.ReadinessGates {
         _, c := podutil.GetPodConditionFromList(conditions, rg.ConditionType)
         if c == nil {
```
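To see what this split means in practice before the PR's own test table below: with a single unready container, GenerateContainersReadyCondition returns False with reason ContainersNotReady, and GeneratePodReadyCondition now simply mirrors that status, reason and message. A test-style sketch, assuming it sits in pkg/kubelet/status alongside the functions above (illustrative, not part of this PR):

```go
package status

import (
	"testing"

	"k8s.io/api/core/v1"
)

// With one unready container, both ContainersReady and PodReady should be
// False, and PodReady should carry the same reason as ContainersReady.
func TestReadyFollowsContainersReady(t *testing.T) {
	spec := &v1.PodSpec{Containers: []v1.Container{{Name: "app"}}}
	statuses := []v1.ContainerStatus{{Name: "app", Ready: false}}

	containersReady := GenerateContainersReadyCondition(spec, statuses, v1.PodRunning)
	podReady := GeneratePodReadyCondition(spec, nil, statuses, v1.PodRunning)

	if containersReady.Status != v1.ConditionFalse || podReady.Status != v1.ConditionFalse {
		t.Fatalf("expected both conditions False, got %+v and %+v", containersReady, podReady)
	}
	if podReady.Reason != containersReady.Reason {
		t.Fatalf("PodReady should mirror ContainersReady reason, got %q vs %q", podReady.Reason, containersReady.Reason)
	}
}
```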
```diff
@@ -24,6 +24,98 @@ import (
     "k8s.io/api/core/v1"
 )
 
+func TestGenerateContainersReadyCondition(t *testing.T) {
+    tests := []struct {
+        spec              *v1.PodSpec
+        containerStatuses []v1.ContainerStatus
+        podPhase          v1.PodPhase
+        expectReady       v1.PodCondition
+    }{
+        {
+            spec:              nil,
+            containerStatuses: nil,
+            podPhase:          v1.PodRunning,
+            expectReady:       getPodCondition(v1.ContainersReady, v1.ConditionFalse, UnknownContainerStatuses, ""),
+        },
+        {
+            spec:              &v1.PodSpec{},
+            containerStatuses: []v1.ContainerStatus{},
+            podPhase:          v1.PodRunning,
+            expectReady:       getPodCondition(v1.ContainersReady, v1.ConditionTrue, "", ""),
+        },
+        {
+            spec: &v1.PodSpec{
+                Containers: []v1.Container{
+                    {Name: "1234"},
+                },
+            },
+            containerStatuses: []v1.ContainerStatus{},
+            podPhase:          v1.PodRunning,
+            expectReady:       getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [1234]"),
+        },
+        {
+            spec: &v1.PodSpec{
+                Containers: []v1.Container{
+                    {Name: "1234"},
+                    {Name: "5678"},
+                },
+            },
+            containerStatuses: []v1.ContainerStatus{
+                getReadyStatus("1234"),
+                getReadyStatus("5678"),
+            },
+            podPhase:    v1.PodRunning,
+            expectReady: getPodCondition(v1.ContainersReady, v1.ConditionTrue, "", ""),
+        },
+        {
+            spec: &v1.PodSpec{
+                Containers: []v1.Container{
+                    {Name: "1234"},
+                    {Name: "5678"},
+                },
+            },
+            containerStatuses: []v1.ContainerStatus{
+                getReadyStatus("1234"),
+            },
+            podPhase:    v1.PodRunning,
+            expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [5678]"),
+        },
+        {
+            spec: &v1.PodSpec{
+                Containers: []v1.Container{
+                    {Name: "1234"},
+                    {Name: "5678"},
+                },
+            },
+            containerStatuses: []v1.ContainerStatus{
+                getReadyStatus("1234"),
+                getNotReadyStatus("5678"),
+            },
+            podPhase:    v1.PodRunning,
+            expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unready status: [5678]"),
+        },
+        {
+            spec: &v1.PodSpec{
+                Containers: []v1.Container{
+                    {Name: "1234"},
+                },
+            },
+            containerStatuses: []v1.ContainerStatus{
+                getNotReadyStatus("1234"),
+            },
+            podPhase:    v1.PodSucceeded,
+            expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, PodCompleted, ""),
+        },
+    }
+
+    for i, test := range tests {
+        ready := GenerateContainersReadyCondition(test.spec, test.containerStatuses, test.podPhase)
+        if !reflect.DeepEqual(ready, test.expectReady) {
+            t.Errorf("On test case %v, expectReady:\n%+v\ngot\n%+v\n", i, test.expectReady, ready)
+        }
+    }
+}
+
 func TestGeneratePodReadyCondition(t *testing.T) {
     tests := []struct {
         spec              *v1.PodSpec
```
```diff
@@ -226,21 +226,24 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai
     containerStatus, _, _ = findContainerStatus(&status, containerID.String())
     containerStatus.Ready = ready
 
-    // Update pod condition.
-    podReadyConditionIndex := -1
-    for i, condition := range status.Conditions {
-        if condition.Type == v1.PodReady {
-            podReadyConditionIndex = i
-            break
+    // updateConditionFunc updates the corresponding type of condition
+    updateConditionFunc := func(conditionType v1.PodConditionType, condition v1.PodCondition) {
+        conditionIndex := -1
+        for i, condition := range status.Conditions {
+            if condition.Type == conditionType {
+                conditionIndex = i
+                break
+            }
+        }
+        if conditionIndex != -1 {
+            status.Conditions[conditionIndex] = condition
+        } else {
+            glog.Warningf("PodStatus missing %s type condition: %+v", conditionType, status)
+            status.Conditions = append(status.Conditions, condition)
         }
     }
-    podReady := GeneratePodReadyCondition(&pod.Spec, status.Conditions, status.ContainerStatuses, status.Phase)
-    if podReadyConditionIndex != -1 {
-        status.Conditions[podReadyConditionIndex] = podReady
-    } else {
-        glog.Warningf("PodStatus missing PodReady condition: %+v", status)
-        status.Conditions = append(status.Conditions, podReady)
-    }
+    updateConditionFunc(v1.PodReady, GeneratePodReadyCondition(&pod.Spec, status.Conditions, status.ContainerStatuses, status.Phase))
+    updateConditionFunc(v1.ContainersReady, GenerateContainersReadyCondition(&pod.Spec, status.ContainerStatuses, status.Phase))
     m.updateStatusInternal(pod, status, false)
 }
```
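The closure above replaces the PodReady-only bookkeeping with a generic find-or-update-or-append over status.Conditions, so the same code path now maintains both PodReady and ContainersReady. The same pattern as a standalone helper, for illustration only (`updatePodCondition` is not part of this PR):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// updatePodCondition replaces the condition of the same type in place,
// or appends it when no condition of that type exists yet.
func updatePodCondition(status *v1.PodStatus, condition v1.PodCondition) {
	for i := range status.Conditions {
		if status.Conditions[i].Type == condition.Type {
			status.Conditions[i] = condition
			return
		}
	}
	status.Conditions = append(status.Conditions, condition)
}

func main() {
	status := &v1.PodStatus{}
	updatePodCondition(status, v1.PodCondition{Type: v1.ContainersReady, Status: v1.ConditionFalse})
	updatePodCondition(status, v1.PodCondition{Type: v1.ContainersReady, Status: v1.ConditionTrue})
	fmt.Println(len(status.Conditions), status.Conditions[0].Status) // 1 True
}
```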
```diff
@@ -26,6 +26,7 @@ var PodConditionsByKubelet = []v1.PodConditionType{
     v1.PodReady,
     v1.PodInitialized,
     v1.PodReasonUnschedulable,
+    v1.ContainersReady,
 }
 
 // PodConditionByKubelet returns if the pod condition type is owned by kubelet
```
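The body of PodConditionByKubelet is truncated in this view; an implementation consistent with its comment is a simple membership check over the list above (a sketch, not necessarily the exact upstream code):

```go
// PodConditionByKubelet returns if the pod condition type is owned by kubelet
func PodConditionByKubelet(conditionType v1.PodConditionType) bool {
	for _, c := range PodConditionsByKubelet {
		if c == conditionType {
			return true
		}
	}
	return false
}
```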
```diff
@@ -2312,6 +2312,8 @@ const (
     // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
     // can't schedule the pod right now, for example due to insufficient resources in the cluster.
     PodReasonUnschedulable = "Unschedulable"
+    // ContainersReady indicates whether all containers in the pod are ready.
+    ContainersReady PodConditionType = "ContainersReady"
 )
 
 // PodCondition contains details for the current condition of this pod.
```