Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-28 05:57:25 +00:00
Merge pull request #64646 from freehan/pod-ready-plus2-new
Automatic merge from submit-queue (batch tested with PRs 63717, 64646, 64792, 64784, 64800). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add ContainersReady condition into Pod Status

**Last 3 commits are new**

Follow up PR of: https://github.com/kubernetes/kubernetes/pull/64057 and https://github.com/kubernetes/kubernetes/pull/64344

Have a single PR for adding ContainersReady per https://github.com/kubernetes/kubernetes/pull/64344#issuecomment-394038384

```release-note
Introduce ContainersReady condition in Pod Status
```

/assign yujuhong for review
/assign thockin for the tiny API change
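For orientation, here is a minimal consumer-side sketch (not part of this PR) of how the new condition can be read next to `Ready`, using only the `k8s.io/api/core/v1` types touched here; the pod literal and the `conditionStatus` helper are illustrative:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// conditionStatus returns the status of the given condition type on the pod,
// or ConditionUnknown if the condition has not been reported yet.
func conditionStatus(pod *v1.Pod, t v1.PodConditionType) v1.ConditionStatus {
	for _, c := range pod.Status.Conditions {
		if c.Type == t {
			return c.Status
		}
	}
	return v1.ConditionUnknown
}

func main() {
	// Illustrative pod: all containers are ready, but a readiness gate is
	// still pending, so ContainersReady is True while Ready is False.
	pod := &v1.Pod{
		Status: v1.PodStatus{
			Conditions: []v1.PodCondition{
				{Type: v1.ContainersReady, Status: v1.ConditionTrue},
				{Type: v1.PodReady, Status: v1.ConditionFalse, Reason: "ReadinessGatesNotReady"},
			},
		},
	}
	fmt.Println("ContainersReady:", conditionStatus(pod, v1.ContainersReady))
	fmt.Println("Ready:          ", conditionStatus(pod, v1.PodReady))
}
```

The point of the split is that `ContainersReady` can be `True` while `Ready` is still `False` because a readiness gate has not been satisfied yet.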
This commit is contained in commit 0b8394a1f4.
```diff
@@ -2094,6 +2094,8 @@ const (
 	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
 	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
 	PodReasonUnschedulable = "Unschedulable"
+	// ContainersReady indicates whether all containers in the pod are ready.
+	ContainersReady PodConditionType = "ContainersReady"
 )
 
 type PodCondition struct {
```
```diff
@@ -1354,6 +1354,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
 	kl.probeManager.UpdatePodStatus(pod.UID, s)
 	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
 	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.Conditions, s.ContainerStatuses, s.Phase))
+	s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(spec, s.ContainerStatuses, s.Phase))
 	// Status manager will take care of the LastTransitionTimestamp, either preserve
 	// the timestamp from apiserver, or set a new one. When kubelet sees the pod,
 	// `PodScheduled` condition must be true.
```
```diff
@@ -32,14 +32,13 @@ const (
 	ReadinessGatesNotReady = "ReadinessGatesNotReady"
 )
 
-// GeneratePodReadyCondition returns "Ready" condition of a pod.
-// The status of "Ready" condition is "True", if all containers in a pod are ready
-// AND all matching conditions specified in the ReadinessGates have status equal to "True".
-func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
+// GenerateContainersReadyCondition returns the status of "ContainersReady" condition.
+// The status of "ContainersReady" condition is true when all containers are ready.
+func GenerateContainersReadyCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
 	// Find if all containers are ready or not.
 	if containerStatuses == nil {
 		return v1.PodCondition{
-			Type:   v1.PodReady,
+			Type:   v1.ContainersReady,
 			Status: v1.ConditionFalse,
 			Reason: UnknownContainerStatuses,
 		}
@@ -59,7 +58,7 @@ func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, c
 	// If all containers are known and succeeded, just return PodCompleted.
 	if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 {
 		return v1.PodCondition{
-			Type:   v1.PodReady,
+			Type:   v1.ContainersReady,
 			Status: v1.ConditionFalse,
 			Reason: PodCompleted,
 		}
@@ -76,16 +75,37 @@ func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, c
 	unreadyMessage := strings.Join(unreadyMessages, ", ")
 	if unreadyMessage != "" {
 		return v1.PodCondition{
-			Type:    v1.PodReady,
+			Type:    v1.ContainersReady,
 			Status:  v1.ConditionFalse,
 			Reason:  ContainersNotReady,
 			Message: unreadyMessage,
 		}
 	}
 
+	return v1.PodCondition{
+		Type:   v1.ContainersReady,
+		Status: v1.ConditionTrue,
+	}
+}
+
+// GeneratePodReadyCondition returns "Ready" condition of a pod.
+// The status of "Ready" condition is "True", if all containers in a pod are ready
+// AND all matching conditions specified in the ReadinessGates have status equal to "True".
+func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
+	containersReady := GenerateContainersReadyCondition(spec, containerStatuses, podPhase)
+	// If the status of ContainersReady is not True, return the same status, reason and message as ContainersReady.
+	if containersReady.Status != v1.ConditionTrue {
+		return v1.PodCondition{
+			Type:    v1.PodReady,
+			Status:  containersReady.Status,
+			Reason:  containersReady.Reason,
+			Message: containersReady.Message,
+		}
+	}
+
 	// Evaluate corresponding conditions specified in readiness gate
 	// Generate message if any readiness gate is not satisfied.
-	unreadyMessages = []string{}
+	unreadyMessages := []string{}
 	for _, rg := range spec.ReadinessGates {
 		_, c := podutil.GetPodConditionFromList(conditions, rg.ConditionType)
 		if c == nil {
```
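Put together, the hunks above change `GeneratePodReadyCondition` so that it first delegates the container check to `GenerateContainersReadyCondition` and only evaluates readiness gates once that condition is `True`. Below is a simplified, self-contained sketch of that relationship, not the kubelet code itself: `findCondition` stands in for `podutil.GetPodConditionFromList`, and the readiness-gate message handling is condensed to a bare reason.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// findCondition is a stand-in for podutil.GetPodConditionFromList.
func findCondition(conds []v1.PodCondition, t v1.PodConditionType) *v1.PodCondition {
	for i := range conds {
		if conds[i].Type == t {
			return &conds[i]
		}
	}
	return nil
}

// podReadyFrom mirrors the shape of the new GeneratePodReadyCondition:
// Ready is True only if ContainersReady is True AND every readiness gate
// listed in the spec has a True condition in the pod's conditions.
func podReadyFrom(containersReady v1.PodCondition, spec *v1.PodSpec, conds []v1.PodCondition) v1.PodCondition {
	if containersReady.Status != v1.ConditionTrue {
		// Propagate status, reason and message from ContainersReady.
		return v1.PodCondition{
			Type:    v1.PodReady,
			Status:  containersReady.Status,
			Reason:  containersReady.Reason,
			Message: containersReady.Message,
		}
	}
	for _, rg := range spec.ReadinessGates {
		c := findCondition(conds, rg.ConditionType)
		if c == nil || c.Status != v1.ConditionTrue {
			return v1.PodCondition{
				Type:   v1.PodReady,
				Status: v1.ConditionFalse,
				Reason: "ReadinessGatesNotReady",
			}
		}
	}
	return v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
}

func main() {
	spec := &v1.PodSpec{
		ReadinessGates: []v1.PodReadinessGate{{ConditionType: "example.com/feature-ready"}},
	}
	containersReady := v1.PodCondition{Type: v1.ContainersReady, Status: v1.ConditionTrue}
	// The gate condition is absent, so Ready comes back False even though
	// all containers are ready.
	fmt.Printf("%+v\n", podReadyFrom(containersReady, spec, nil))
}
```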
```diff
@@ -24,6 +24,98 @@ import (
 	"k8s.io/api/core/v1"
 )
 
+func TestGenerateContainersReadyCondition(t *testing.T) {
+	tests := []struct {
+		spec              *v1.PodSpec
+		containerStatuses []v1.ContainerStatus
+		podPhase          v1.PodPhase
+		expectReady       v1.PodCondition
+	}{
+		{
+			spec:              nil,
+			containerStatuses: nil,
+			podPhase:          v1.PodRunning,
+			expectReady:       getPodCondition(v1.ContainersReady, v1.ConditionFalse, UnknownContainerStatuses, ""),
+		},
+		{
+			spec:              &v1.PodSpec{},
+			containerStatuses: []v1.ContainerStatus{},
+			podPhase:          v1.PodRunning,
+			expectReady:       getPodCondition(v1.ContainersReady, v1.ConditionTrue, "", ""),
+		},
+		{
+			spec: &v1.PodSpec{
+				Containers: []v1.Container{
+					{Name: "1234"},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{},
+			podPhase:          v1.PodRunning,
+			expectReady:       getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [1234]"),
+		},
+		{
+			spec: &v1.PodSpec{
+				Containers: []v1.Container{
+					{Name: "1234"},
+					{Name: "5678"},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				getReadyStatus("1234"),
+				getReadyStatus("5678"),
+			},
+			podPhase:    v1.PodRunning,
+			expectReady: getPodCondition(v1.ContainersReady, v1.ConditionTrue, "", ""),
+		},
+		{
+			spec: &v1.PodSpec{
+				Containers: []v1.Container{
+					{Name: "1234"},
+					{Name: "5678"},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				getReadyStatus("1234"),
+			},
+			podPhase:    v1.PodRunning,
+			expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [5678]"),
+		},
+		{
+			spec: &v1.PodSpec{
+				Containers: []v1.Container{
+					{Name: "1234"},
+					{Name: "5678"},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				getReadyStatus("1234"),
+				getNotReadyStatus("5678"),
+			},
+			podPhase:    v1.PodRunning,
+			expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unready status: [5678]"),
+		},
+		{
+			spec: &v1.PodSpec{
+				Containers: []v1.Container{
+					{Name: "1234"},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				getNotReadyStatus("1234"),
+			},
+			podPhase:    v1.PodSucceeded,
+			expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, PodCompleted, ""),
+		},
+	}
+
+	for i, test := range tests {
+		ready := GenerateContainersReadyCondition(test.spec, test.containerStatuses, test.podPhase)
+		if !reflect.DeepEqual(ready, test.expectReady) {
+			t.Errorf("On test case %v, expectReady:\n%+v\ngot\n%+v\n", i, test.expectReady, ready)
+		}
+	}
+}
+
 func TestGeneratePodReadyCondition(t *testing.T) {
 	tests := []struct {
 		spec              *v1.PodSpec
```
```diff
@@ -226,21 +226,24 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai
 	containerStatus, _, _ = findContainerStatus(&status, containerID.String())
 	containerStatus.Ready = ready
 
-	// Update pod condition.
-	podReadyConditionIndex := -1
-	for i, condition := range status.Conditions {
-		if condition.Type == v1.PodReady {
-			podReadyConditionIndex = i
-			break
+	// updateConditionFunc updates the corresponding type of condition
+	updateConditionFunc := func(conditionType v1.PodConditionType, condition v1.PodCondition) {
+		conditionIndex := -1
+		for i, condition := range status.Conditions {
+			if condition.Type == conditionType {
+				conditionIndex = i
+				break
+			}
+		}
+		if conditionIndex != -1 {
+			status.Conditions[conditionIndex] = condition
+		} else {
+			glog.Warningf("PodStatus missing %s type condition: %+v", conditionType, status)
+			status.Conditions = append(status.Conditions, condition)
 		}
 	}
-	podReady := GeneratePodReadyCondition(&pod.Spec, status.Conditions, status.ContainerStatuses, status.Phase)
-	if podReadyConditionIndex != -1 {
-		status.Conditions[podReadyConditionIndex] = podReady
-	} else {
-		glog.Warningf("PodStatus missing PodReady condition: %+v", status)
-		status.Conditions = append(status.Conditions, podReady)
-	}
+	updateConditionFunc(v1.PodReady, GeneratePodReadyCondition(&pod.Spec, status.Conditions, status.ContainerStatuses, status.Phase))
+	updateConditionFunc(v1.ContainersReady, GenerateContainersReadyCondition(&pod.Spec, status.ContainerStatuses, status.Phase))
 	m.updateStatusInternal(pod, status, false)
 }
 
```
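The status-manager refactor above replaces the `PodReady`-specific bookkeeping with an `updateConditionFunc` closure that can upsert any condition type, so the same path now maintains both `PodReady` and `ContainersReady`. A rough standalone illustration of that find-or-append pattern (the `upsertCondition` name is assumed, and `fmt` stands in for the kubelet's `glog` warning):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// upsertCondition replaces an existing condition of the same type, or appends
// the condition if none is present — the same find-or-append behaviour the
// updateConditionFunc closure implements.
func upsertCondition(conds []v1.PodCondition, cond v1.PodCondition) []v1.PodCondition {
	for i := range conds {
		if conds[i].Type == cond.Type {
			conds[i] = cond
			return conds
		}
	}
	// Condition not present yet; in the kubelet this case is logged as a warning.
	fmt.Printf("missing %s condition, appending\n", cond.Type)
	return append(conds, cond)
}

func main() {
	conds := []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionFalse}}
	conds = upsertCondition(conds, v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue})
	conds = upsertCondition(conds, v1.PodCondition{Type: v1.ContainersReady, Status: v1.ConditionTrue})
	fmt.Printf("%+v\n", conds)
}
```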
```diff
@@ -26,6 +26,7 @@ var PodConditionsByKubelet = []v1.PodConditionType{
 	v1.PodReady,
 	v1.PodInitialized,
 	v1.PodReasonUnschedulable,
+	v1.ContainersReady,
 }
 
 // PodConditionByKubelet returns if the pod condition type is owned by kubelet
```
```diff
@@ -2312,6 +2312,8 @@ const (
 	// PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler
 	// can't schedule the pod right now, for example due to insufficient resources in the cluster.
 	PodReasonUnschedulable = "Unschedulable"
+	// ContainersReady indicates whether all containers in the pod are ready.
+	ContainersReady PodConditionType = "ContainersReady"
 )
 
 // PodCondition contains details for the current condition of this pod.
```