kubelet sets observedGeneration in conditions

Natasha Sarkar 2025-03-07 16:55:56 +00:00
parent ded2956c83
commit 4c2be4bdde
14 changed files with 216 additions and 121 deletions

View File

@@ -432,16 +432,16 @@ func GetPodObservedGenerationIfEnabled(pod *v1.Pod) int64 {
// We will emit condition.observedGeneration if the feature is enabled OR if condition.observedGeneration is already set.
// This protects against an infinite loop of kubelet trying to clear the value after the FG is turned off, and
// the API server preserving existing values when an incoming update tries to clear it.
-func GetPodObservedGenerationIfEnabledOnCondition(pod *v1.Pod, conditionType v1.PodConditionType) int64 {
-if pod == nil {
+func GetPodObservedGenerationIfEnabledOnCondition(podStatus *v1.PodStatus, generation int64, conditionType v1.PodConditionType) int64 {
+if podStatus == nil {
return 0
}
if utilfeature.DefaultFeatureGate.Enabled(features.PodObservedGenerationTracking) {
-return pod.Generation
+return generation
}
-for _, condition := range pod.Status.Conditions {
+for _, condition := range podStatus.Conditions {
if condition.Type == conditionType && condition.ObservedGeneration != 0 {
-return pod.Generation
+return generation
}
}
return 0

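The refactored helper takes the PodStatus and generation separately, so callers that only hold a copied or partially built status can still resolve the value. A minimal, self-contained sketch of the decision logic, with the feature gate reduced to a plain bool (the real code consults utilfeature.DefaultFeatureGate and the k8s.io/api types):

package main

import "fmt"

type PodCondition struct {
	Type               string
	ObservedGeneration int64
}

type PodStatus struct {
	Conditions []PodCondition
}

// observedGenerationForCondition mirrors the helper above: emit the generation when
// the gate is on, or when the condition already carries a non-zero observedGeneration,
// so a disabled gate never fights the API server over clearing an existing value.
func observedGenerationForCondition(status *PodStatus, generation int64, conditionType string, gateEnabled bool) int64 {
	if status == nil {
		return 0
	}
	if gateEnabled {
		return generation
	}
	for _, c := range status.Conditions {
		if c.Type == conditionType && c.ObservedGeneration != 0 {
			return generation
		}
	}
	return 0
}

func main() {
	status := &PodStatus{Conditions: []PodCondition{{Type: "Ready", ObservedGeneration: 3}}}
	fmt.Println(observedGenerationForCondition(status, 4, "Ready", false))        // 4: value already set, keep emitting
	fmt.Println(observedGenerationForCondition(status, 4, "PodScheduled", false)) // 0: gate off and never set
}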
View File

@@ -790,7 +790,7 @@ func (dc *DisruptionController) syncStalePodDisruption(ctx context.Context, key
newPod := pod.DeepCopy()
updated := apipod.UpdatePodCondition(&newPod.Status, &v1.PodCondition{
Type: v1.DisruptionTarget,
-ObservedGeneration: apipod.GetPodObservedGenerationIfEnabledOnCondition(newPod, v1.DisruptionTarget),
+ObservedGeneration: apipod.GetPodObservedGenerationIfEnabledOnCondition(&newPod.Status, newPod.Generation, v1.DisruptionTarget),
Status: v1.ConditionFalse,
})
if !updated {

View File

@@ -247,7 +247,7 @@ func (gcc *PodGCController) gcOrphaned(ctx context.Context, pods []*v1.Pod, node
logger.V(2).Info("Found orphaned Pod assigned to the Node, deleting", "pod", klog.KObj(pod), "node", klog.KRef("", pod.Spec.NodeName))
condition := &v1.PodCondition{
Type: v1.DisruptionTarget,
-ObservedGeneration: apipod.GetPodObservedGenerationIfEnabledOnCondition(pod, v1.DisruptionTarget),
+ObservedGeneration: apipod.GetPodObservedGenerationIfEnabledOnCondition(&pod.Status, pod.Generation, v1.DisruptionTarget),
Status: v1.ConditionTrue,
Reason: "DeletionByPodGC",
Message: "PodGC: node no longer exists",

View File

@@ -134,7 +134,7 @@ func addConditionAndDeletePod(ctx context.Context, c clientset.Interface, name,
newStatus := pod.Status.DeepCopy()
updated := apipod.UpdatePodCondition(newStatus, &v1.PodCondition{
Type: v1.DisruptionTarget,
-ObservedGeneration: apipod.GetPodObservedGenerationIfEnabledOnCondition(pod, v1.DisruptionTarget),
+ObservedGeneration: apipod.GetPodObservedGenerationIfEnabledOnCondition(&pod.Status, pod.Generation, v1.DisruptionTarget),
Status: v1.ConditionTrue,
Reason: "DeletionByTaintManager",
Message: "Taint manager: deleting due to NoExecute taint",

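The three controller hunks above (disruption controller, PodGC, taint eviction) apply the same two-step pattern: resolve the value with the gated helper, then stamp it on the DisruptionTarget condition before calling UpdatePodCondition. A sketch of that shared shape as a hypothetical wrapper, assuming the v1 and apipod imports already used at these call sites (Reason and Message vary per caller):

// stampDisruptionTarget condenses the call-site pattern this commit repeats.
func stampDisruptionTarget(pod *v1.Pod, reason, message string) bool {
	return apipod.UpdatePodCondition(&pod.Status, &v1.PodCondition{
		Type:               v1.DisruptionTarget,
		ObservedGeneration: apipod.GetPodObservedGenerationIfEnabledOnCondition(&pod.Status, pod.Generation, v1.DisruptionTarget),
		Status:             v1.ConditionTrue,
		Reason:             reason,
		Message:            message,
	})
}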
View File

@@ -425,10 +425,11 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc Act
message, annotations := evictionMessage(resourceToReclaim, pod, statsFunc, thresholds, observations)
condition := &v1.PodCondition{
-Type: v1.DisruptionTarget,
-Status: v1.ConditionTrue,
-Reason: v1.PodReasonTerminationByKubelet,
-Message: message,
+Type: v1.DisruptionTarget,
+ObservedGeneration: pod.Generation,
+Status: v1.ConditionTrue,
+Reason: v1.PodReasonTerminationByKubelet,
+Message: message,
}
if m.evictPod(pod, gracePeriodOverride, message, annotations, condition) {
metrics.Evictions.WithLabelValues(string(thresholdToReclaim.Signal)).Inc()
@@ -618,6 +619,7 @@ func (m *managerImpl) evictPod(pod *v1.Pod, gracePeriodOverride int64, evictMsg
status.Reason = Reason
status.Message = evictMsg
if condition != nil {
+condition.ObservedGeneration = podutil.GetPodObservedGenerationIfEnabledOnCondition(status, pod.Generation, v1.DisruptionTarget)
podutil.UpdatePodCondition(status, condition)
}
})

View File

@@ -1828,6 +1828,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
if utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {
resizeStatus := kl.determinePodResizeStatus(pod, podIsTerminal)
for _, c := range resizeStatus {
+c.ObservedGeneration = podutil.GetPodObservedGenerationIfEnabledOnCondition(&oldPodStatus, pod.Generation, c.Type)
s.Conditions = append(s.Conditions, *c)
}
}
@@ -1843,15 +1844,16 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
// set all Kubelet-owned conditions
if utilfeature.DefaultFeatureGate.Enabled(features.PodReadyToStartContainersCondition) {
-s.Conditions = append(s.Conditions, status.GeneratePodReadyToStartContainersCondition(pod, podStatus))
+s.Conditions = append(s.Conditions, status.GeneratePodReadyToStartContainersCondition(pod, &oldPodStatus, podStatus))
}
allContainerStatuses := append(s.InitContainerStatuses, s.ContainerStatuses...)
-s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(&pod.Spec, allContainerStatuses, s.Phase))
-s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(&pod.Spec, s.Conditions, allContainerStatuses, s.Phase))
-s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(&pod.Spec, allContainerStatuses, s.Phase))
+s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(pod, &oldPodStatus, allContainerStatuses, s.Phase))
+s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(pod, &oldPodStatus, s.Conditions, allContainerStatuses, s.Phase))
+s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(pod, &oldPodStatus, allContainerStatuses, s.Phase))
s.Conditions = append(s.Conditions, v1.PodCondition{
-Type: v1.PodScheduled,
-Status: v1.ConditionTrue,
+Type: v1.PodScheduled,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(&oldPodStatus, pod.Generation, v1.PodScheduled),
+Status: v1.ConditionTrue,
})
// set HostIP/HostIPs and initialize PodIP/PodIPs for host network pods
if kl.kubeClient != nil {

View File

@@ -160,10 +160,11 @@ func (m *podManager) killPods(activePods []*v1.Pod) error {
status.Message = nodeShutdownMessage
status.Reason = nodeShutdownReason
podutil.UpdatePodCondition(status, &v1.PodCondition{
-Type: v1.DisruptionTarget,
-Status: v1.ConditionTrue,
-Reason: v1.PodReasonTerminationByKubelet,
-Message: nodeShutdownMessage,
+Type: v1.DisruptionTarget,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(status, pod.Generation, v1.DisruptionTarget),
+Status: v1.ConditionTrue,
+Reason: v1.PodReasonTerminationByKubelet,
+Message: nodeShutdownMessage,
})
}); err != nil {
m.logger.V(1).Info("Shutdown manager failed killing pod", "pod", klog.KObj(pod), "err", err)

View File

@@ -105,10 +105,11 @@ func (c *CriticalPodAdmissionHandler) evictPodsToFreeRequests(admitPod *v1.Pod,
status.Reason = events.PreemptContainer
status.Message = message
podutil.UpdatePodCondition(status, &v1.PodCondition{
-Type: v1.DisruptionTarget,
-Status: v1.ConditionTrue,
-Reason: v1.PodReasonTerminationByKubelet,
-Message: "Pod was preempted by Kubelet to accommodate a critical pod.",
+Type: v1.DisruptionTarget,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(status, pod.Generation, v1.DisruptionTarget),
+Status: v1.ConditionTrue,
+Reason: v1.PodReasonTerminationByKubelet,
+Message: "Pod was preempted by Kubelet to accommodate a critical pod.",
})
})
if err != nil {

View File

@@ -43,19 +43,20 @@ const (
// GenerateContainersReadyCondition returns the status of "ContainersReady" condition.
// The status of "ContainersReady" condition is true when all containers are ready.
-func GenerateContainersReadyCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
+func GenerateContainersReadyCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
// Find if all containers are ready or not.
if containerStatuses == nil {
return v1.PodCondition{
-Type: v1.ContainersReady,
-Status: v1.ConditionFalse,
-Reason: UnknownContainerStatuses,
+Type: v1.ContainersReady,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.ContainersReady),
+Status: v1.ConditionFalse,
+Reason: UnknownContainerStatuses,
}
}
unknownContainers := []string{}
unreadyContainers := []string{}
-for _, container := range spec.InitContainers {
+for _, container := range pod.Spec.InitContainers {
if !podutil.IsRestartableInitContainer(&container) {
continue
}
@@ -69,7 +70,7 @@ func GenerateContainersReadyCondition(spec *v1.PodSpec, containerStatuses []v1.C
}
}
-for _, container := range spec.Containers {
+for _, container := range pod.Spec.Containers {
if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
if !containerStatus.Ready {
unreadyContainers = append(unreadyContainers, container.Name)
@@ -81,12 +82,12 @@ func GenerateContainersReadyCondition(spec *v1.PodSpec, containerStatuses []v1.C
// If all containers are known and succeeded, just return PodCompleted.
if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 {
-return generateContainersReadyConditionForTerminalPhase(podPhase)
+return generateContainersReadyConditionForTerminalPhase(pod, oldPodStatus, podPhase)
}
// If the pod phase is failed, explicitly set the ready condition to false for containers since they may be in progress of terminating.
if podPhase == v1.PodFailed {
-return generateContainersReadyConditionForTerminalPhase(podPhase)
+return generateContainersReadyConditionForTerminalPhase(pod, oldPodStatus, podPhase)
}
// Generate message for containers in unknown condition.
@@ -100,38 +101,41 @@ func GenerateContainersReadyCondition(spec *v1.PodSpec, containerStatuses []v1.C
unreadyMessage := strings.Join(unreadyMessages, ", ")
if unreadyMessage != "" {
return v1.PodCondition{
-Type: v1.ContainersReady,
-Status: v1.ConditionFalse,
-Reason: ContainersNotReady,
-Message: unreadyMessage,
+Type: v1.ContainersReady,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.ContainersReady),
+Status: v1.ConditionFalse,
+Reason: ContainersNotReady,
+Message: unreadyMessage,
}
}
return v1.PodCondition{
-Type: v1.ContainersReady,
-Status: v1.ConditionTrue,
+Type: v1.ContainersReady,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.ContainersReady),
+Status: v1.ConditionTrue,
}
}
// GeneratePodReadyCondition returns "Ready" condition of a pod.
// The status of "Ready" condition is "True", if all containers in a pod are ready
// AND all matching conditions specified in the ReadinessGates have status equal to "True".
-func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
-containersReady := GenerateContainersReadyCondition(spec, containerStatuses, podPhase)
+func GeneratePodReadyCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, conditions []v1.PodCondition, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
+containersReady := GenerateContainersReadyCondition(pod, oldPodStatus, containerStatuses, podPhase)
// If the status of ContainersReady is not True, return the same status, reason and message as ContainersReady.
if containersReady.Status != v1.ConditionTrue {
return v1.PodCondition{
-Type: v1.PodReady,
-Status: containersReady.Status,
-Reason: containersReady.Reason,
-Message: containersReady.Message,
+Type: v1.PodReady,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReady),
+Status: containersReady.Status,
+Reason: containersReady.Reason,
+Message: containersReady.Message,
}
}
// Evaluate corresponding conditions specified in readiness gate
// Generate message if any readiness gate is not satisfied.
unreadyMessages := []string{}
-for _, rg := range spec.ReadinessGates {
+for _, rg := range pod.Spec.ReadinessGates {
_, c := podutil.GetPodConditionFromList(conditions, rg.ConditionType)
if c == nil {
unreadyMessages = append(unreadyMessages, fmt.Sprintf("corresponding condition of pod readiness gate %q does not exist.", string(rg.ConditionType)))
@@ -144,16 +148,18 @@ func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, c
if len(unreadyMessages) != 0 {
unreadyMessage := strings.Join(unreadyMessages, ", ")
return v1.PodCondition{
-Type: v1.PodReady,
-Status: v1.ConditionFalse,
-Reason: ReadinessGatesNotReady,
-Message: unreadyMessage,
+Type: v1.PodReady,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReady),
+Status: v1.ConditionFalse,
+Reason: ReadinessGatesNotReady,
+Message: unreadyMessage,
}
}
return v1.PodCondition{
-Type: v1.PodReady,
-Status: v1.ConditionTrue,
+Type: v1.PodReady,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReady),
+Status: v1.ConditionTrue,
}
}
@@ -172,19 +178,20 @@ func isInitContainerInitialized(initContainer *v1.Container, containerStatus *v1
// GeneratePodInitializedCondition returns initialized condition if all init containers in a pod are ready, else it
// returns an uninitialized condition.
-func GeneratePodInitializedCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
+func GeneratePodInitializedCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
// Find if all containers are ready or not.
-if containerStatuses == nil && len(spec.InitContainers) > 0 {
+if containerStatuses == nil && len(pod.Spec.InitContainers) > 0 {
return v1.PodCondition{
-Type: v1.PodInitialized,
-Status: v1.ConditionFalse,
-Reason: UnknownContainerStatuses,
+Type: v1.PodInitialized,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodInitialized),
+Status: v1.ConditionFalse,
+Reason: UnknownContainerStatuses,
}
}
unknownContainers := []string{}
incompleteContainers := []string{}
-for _, container := range spec.InitContainers {
+for _, container := range pod.Spec.InitContainers {
containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name)
if !ok {
unknownContainers = append(unknownContainers, container.Name)
@@ -198,9 +205,10 @@ func GeneratePodInitializedCondition(spec *v1.PodSpec, containerStatuses []v1.Co
// If all init containers are known and succeeded, just return PodCompleted.
if podPhase == v1.PodSucceeded && len(unknownContainers) == 0 {
return v1.PodCondition{
-Type: v1.PodInitialized,
-Status: v1.ConditionTrue,
-Reason: PodCompleted,
+Type: v1.PodInitialized,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodInitialized),
+Status: v1.ConditionTrue,
+Reason: PodCompleted,
}
}
@@ -208,10 +216,11 @@ func GeneratePodInitializedCondition(spec *v1.PodSpec, containerStatuses []v1.Co
// been initialized before.
// This is needed to handle the case where the pod has been initialized but
// the restartable init containers are restarting.
-if kubecontainer.HasAnyRegularContainerStarted(spec, containerStatuses) {
+if kubecontainer.HasAnyRegularContainerStarted(&pod.Spec, containerStatuses) {
return v1.PodCondition{
-Type: v1.PodInitialized,
-Status: v1.ConditionTrue,
+Type: v1.PodInitialized,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodInitialized),
+Status: v1.ConditionTrue,
}
}
@@ -225,20 +234,22 @@ func GeneratePodInitializedCondition(spec *v1.PodSpec, containerStatuses []v1.Co
unreadyMessage := strings.Join(unreadyMessages, ", ")
if unreadyMessage != "" {
return v1.PodCondition{
-Type: v1.PodInitialized,
-Status: v1.ConditionFalse,
-Reason: ContainersNotInitialized,
-Message: unreadyMessage,
+Type: v1.PodInitialized,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodInitialized),
+Status: v1.ConditionFalse,
+Reason: ContainersNotInitialized,
+Message: unreadyMessage,
}
}
return v1.PodCondition{
-Type: v1.PodInitialized,
-Status: v1.ConditionTrue,
+Type: v1.PodInitialized,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodInitialized),
+Status: v1.ConditionTrue,
}
}
-func GeneratePodReadyToStartContainersCondition(pod *v1.Pod, podStatus *kubecontainer.PodStatus) v1.PodCondition {
+func GeneratePodReadyToStartContainersCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, podStatus *kubecontainer.PodStatus) v1.PodCondition {
newSandboxNeeded, _, _ := runtimeutil.PodSandboxChanged(pod, podStatus)
// if a new sandbox does not need to be created for a pod, it indicates that
// a sandbox for the pod with networking configured already exists.
@@ -246,20 +257,23 @@ func GeneratePodReadyToStartContainersCondition(pod *v1.Pod, podStatus *kubecont
// fresh sandbox and configure networking for the sandbox.
if !newSandboxNeeded {
return v1.PodCondition{
-Type: v1.PodReadyToStartContainers,
-Status: v1.ConditionTrue,
+Type: v1.PodReadyToStartContainers,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReadyToStartContainers),
+Status: v1.ConditionTrue,
}
}
return v1.PodCondition{
-Type: v1.PodReadyToStartContainers,
-Status: v1.ConditionFalse,
+Type: v1.PodReadyToStartContainers,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReadyToStartContainers),
+Status: v1.ConditionFalse,
}
}
-func generateContainersReadyConditionForTerminalPhase(podPhase v1.PodPhase) v1.PodCondition {
+func generateContainersReadyConditionForTerminalPhase(pod *v1.Pod, oldPodStatus *v1.PodStatus, podPhase v1.PodPhase) v1.PodCondition {
condition := v1.PodCondition{
-Type: v1.ContainersReady,
-Status: v1.ConditionFalse,
+Type: v1.ContainersReady,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.ContainersReady),
+Status: v1.ConditionFalse,
}
if podPhase == v1.PodFailed {
@@ -271,10 +285,11 @@ func generateContainersReadyConditionForTerminalPhase(podPhase v1.PodPhase) v1.P
return condition
}
-func generatePodReadyConditionForTerminalPhase(podPhase v1.PodPhase) v1.PodCondition {
+func generatePodReadyConditionForTerminalPhase(pod *v1.Pod, oldPodStatus *v1.PodStatus, podPhase v1.PodPhase) v1.PodCondition {
condition := v1.PodCondition{
-Type: v1.PodReady,
-Status: v1.ConditionFalse,
+Type: v1.PodReady,
+ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReady),
+Status: v1.ConditionFalse,
}
if podPhase == v1.PodFailed {

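Every generator in this file now follows the same refactor: accept the whole *v1.Pod plus the previously reported *v1.PodStatus, and stamp ObservedGeneration through the gated helper. The old status (what the API server last saw) is what decides continuity, since the new status is rebuilt from scratch on each sync. A stripped-down sketch of the shape, with a hypothetical generator name (real generators also compute Reason and Message), assuming this file's v1 and podutil imports:

func generateSomeCondition(pod *v1.Pod, oldPodStatus *v1.PodStatus, ready bool) v1.PodCondition {
	cond := v1.PodCondition{
		Type: v1.PodReady,
		// Continuity is judged against the status last reported to the
		// API server, not the status being rebuilt on this sync.
		ObservedGeneration: podutil.GetPodObservedGenerationIfEnabledOnCondition(oldPodStatus, pod.Generation, v1.PodReady),
		Status:             v1.ConditionFalse,
	}
	if ready {
		cond.Status = v1.ConditionTrue
	}
	return cond
}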
View File

@@ -35,25 +35,25 @@ var (
func TestGenerateContainersReadyCondition(t *testing.T) {
tests := []struct {
-spec *v1.PodSpec
+spec v1.PodSpec
containerStatuses []v1.ContainerStatus
podPhase v1.PodPhase
expectReady v1.PodCondition
}{
{
-spec: nil,
+spec: v1.PodSpec{},
containerStatuses: nil,
podPhase: v1.PodRunning,
expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, UnknownContainerStatuses, ""),
},
{
-spec: &v1.PodSpec{},
+spec: v1.PodSpec{},
containerStatuses: []v1.ContainerStatus{},
podPhase: v1.PodRunning,
expectReady: getPodCondition(v1.ContainersReady, v1.ConditionTrue, "", ""),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "1234"},
},
@@ -63,7 +63,7 @@ func TestGenerateContainersReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [1234]"),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "1234"},
{Name: "5678"},
@@ -77,7 +77,7 @@ func TestGenerateContainersReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.ContainersReady, v1.ConditionTrue, "", ""),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "1234"},
{Name: "5678"},
@@ -90,7 +90,7 @@ func TestGenerateContainersReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [5678]"),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "1234"},
{Name: "5678"},
@@ -104,7 +104,7 @@ func TestGenerateContainersReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unready status: [5678]"),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "1234"},
},
@@ -116,7 +116,7 @@ func TestGenerateContainersReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, PodCompleted, ""),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
InitContainers: []v1.Container{
{Name: "restartable-init-1", RestartPolicy: &containerRestartPolicyAlways},
},
@@ -131,7 +131,7 @@ func TestGenerateContainersReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [restartable-init-1]"),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
InitContainers: []v1.Container{
{Name: "restartable-init-1", RestartPolicy: &containerRestartPolicyAlways},
{Name: "restartable-init-2", RestartPolicy: &containerRestartPolicyAlways},
@@ -149,7 +149,7 @@ func TestGenerateContainersReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.ContainersReady, v1.ConditionTrue, "", ""),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
InitContainers: []v1.Container{
{Name: "restartable-init-1", RestartPolicy: &containerRestartPolicyAlways},
{Name: "restartable-init-2", RestartPolicy: &containerRestartPolicyAlways},
@@ -166,7 +166,7 @@ func TestGenerateContainersReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [restartable-init-2]"),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
InitContainers: []v1.Container{
{Name: "restartable-init-1", RestartPolicy: &containerRestartPolicyAlways},
{Name: "restartable-init-2", RestartPolicy: &containerRestartPolicyAlways},
@@ -186,7 +186,8 @@ func TestGenerateContainersReadyCondition(t *testing.T) {
}
for i, test := range tests {
-ready := GenerateContainersReadyCondition(test.spec, test.containerStatuses, test.podPhase)
+pod := &v1.Pod{Spec: test.spec}
+ready := GenerateContainersReadyCondition(pod, &v1.PodStatus{}, test.containerStatuses, test.podPhase)
if !reflect.DeepEqual(ready, test.expectReady) {
t.Errorf("On test case %v, expectReady:\n%+v\ngot\n%+v\n", i, test.expectReady, ready)
}
@@ -195,28 +196,28 @@ func TestGenerateContainersReadyCondition(t *testing.T) {
func TestGeneratePodReadyCondition(t *testing.T) {
tests := []struct {
-spec *v1.PodSpec
+spec v1.PodSpec
conditions []v1.PodCondition
containerStatuses []v1.ContainerStatus
podPhase v1.PodPhase
expectReady v1.PodCondition
}{
{
-spec: nil,
+spec: v1.PodSpec{},
conditions: nil,
containerStatuses: nil,
podPhase: v1.PodRunning,
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, UnknownContainerStatuses, ""),
},
{
-spec: &v1.PodSpec{},
+spec: v1.PodSpec{},
conditions: nil,
containerStatuses: []v1.ContainerStatus{},
podPhase: v1.PodRunning,
expectReady: getPodCondition(v1.PodReady, v1.ConditionTrue, "", ""),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "1234"},
},
@@ -227,7 +228,7 @@ func TestGeneratePodReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [1234]"),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "1234"},
{Name: "5678"},
@@ -242,7 +243,7 @@ func TestGeneratePodReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.PodReady, v1.ConditionTrue, "", ""),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "1234"},
{Name: "5678"},
@@ -256,7 +257,7 @@ func TestGeneratePodReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [5678]"),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "1234"},
{Name: "5678"},
@@ -271,7 +272,7 @@ func TestGeneratePodReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ContainersNotReady, "containers with unready status: [5678]"),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "1234"},
},
@@ -284,7 +285,7 @@ func TestGeneratePodReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, PodCompleted, ""),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType("gate1")},
},
@@ -295,7 +296,7 @@ func TestGeneratePodReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ReadinessGatesNotReady, `corresponding condition of pod readiness gate "gate1" does not exist.`),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType("gate1")},
},
@@ -308,7 +309,7 @@ func TestGeneratePodReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ReadinessGatesNotReady, `the status of pod readiness gate "gate1" is not "True", but False`),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType("gate1")},
},
@@ -321,7 +322,7 @@ func TestGeneratePodReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.PodReady, v1.ConditionTrue, "", ""),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType("gate1")},
{ConditionType: v1.PodConditionType("gate2")},
@@ -335,7 +336,7 @@ func TestGeneratePodReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ReadinessGatesNotReady, `corresponding condition of pod readiness gate "gate2" does not exist.`),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType("gate1")},
{ConditionType: v1.PodConditionType("gate2")},
@@ -350,7 +351,7 @@ func TestGeneratePodReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ReadinessGatesNotReady, `the status of pod readiness gate "gate2" is not "True", but False`),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType("gate1")},
{ConditionType: v1.PodConditionType("gate2")},
@@ -365,7 +366,7 @@ func TestGeneratePodReadyCondition(t *testing.T) {
expectReady: getPodCondition(v1.PodReady, v1.ConditionTrue, "", ""),
},
{
-spec: &v1.PodSpec{
+spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "1234"},
},
@@ -383,7 +384,8 @@ func TestGeneratePodReadyCondition(t *testing.T) {
}
for i, test := range tests {
-ready := GeneratePodReadyCondition(test.spec, test.conditions, test.containerStatuses, test.podPhase)
+pod := &v1.Pod{Spec: test.spec}
+ready := GeneratePodReadyCondition(pod, &v1.PodStatus{}, test.conditions, test.containerStatuses, test.podPhase)
if !reflect.DeepEqual(ready, test.expectReady) {
t.Errorf("On test case %v, expectReady:\n%+v\ngot\n%+v\n", i, test.expectReady, ready)
}
@@ -543,7 +545,8 @@ func TestGeneratePodInitializedCondition(t *testing.T) {
for _, test := range tests {
test.expected.Type = v1.PodInitialized
-condition := GeneratePodInitializedCondition(test.spec, test.containerStatuses, test.podPhase)
+pod := &v1.Pod{Spec: *test.spec}
+condition := GeneratePodInitializedCondition(pod, &v1.PodStatus{}, test.containerStatuses, test.podPhase)
assert.Equal(t, test.expected.Type, condition.Type)
assert.Equal(t, test.expected.Status, condition.Status)
assert.Equal(t, test.expected.Reason, condition.Reason)
@@ -615,7 +618,7 @@ func TestGeneratePodReadyToStartContainersCondition(t *testing.T) {
} {
t.Run(desc, func(t *testing.T) {
test.expected.Type = v1.PodReadyToStartContainers
-condition := GeneratePodReadyToStartContainersCondition(test.pod, test.status)
+condition := GeneratePodReadyToStartContainersCondition(test.pod, &v1.PodStatus{}, test.status)
require.Equal(t, test.expected.Type, condition.Type)
require.Equal(t, test.expected.Status, condition.Status)
})

View File

@@ -375,9 +375,10 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai
status.Conditions = append(status.Conditions, condition)
}
}
allContainerStatuses := append(status.InitContainerStatuses, status.ContainerStatuses...)
-updateConditionFunc(v1.PodReady, GeneratePodReadyCondition(&pod.Spec, status.Conditions, allContainerStatuses, status.Phase))
-updateConditionFunc(v1.ContainersReady, GenerateContainersReadyCondition(&pod.Spec, allContainerStatuses, status.Phase))
+updateConditionFunc(v1.PodReady, GeneratePodReadyCondition(pod, &oldStatus.status, status.Conditions, allContainerStatuses, status.Phase))
+updateConditionFunc(v1.ContainersReady, GenerateContainersReadyCondition(pod, &oldStatus.status, allContainerStatuses, status.Phase))
m.updateStatusInternal(pod, status, false, false)
}
@@ -897,7 +898,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
return
}
-mergedStatus := mergePodStatus(pod.Status, status.status, m.podDeletionSafety.PodCouldHaveRunningContainers(pod))
+mergedStatus := mergePodStatus(pod, pod.Status, status.status, m.podDeletionSafety.PodCouldHaveRunningContainers(pod))
newPod, patchBytes, unchanged, err := statusutil.PatchPodStatus(context.TODO(), m.kubeClient, pod.Namespace, pod.Name, pod.UID, pod.Status, mergedStatus)
klog.V(3).InfoS("Patch status for pod", "pod", klog.KObj(pod), "podUID", uid, "patch", string(patchBytes))
@@ -1075,7 +1076,7 @@ func normalizeStatus(pod *v1.Pod, status *v1.PodStatus) *v1.PodStatus {
// mergePodStatus merges oldPodStatus and newPodStatus, preserving pod conditions
// not owned by the kubelet and ensuring a terminal phase transition only happens after all
// running containers have terminated. This method does not modify the old status.
-func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningContainers bool) v1.PodStatus {
+func mergePodStatus(pod *v1.Pod, oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningContainers bool) v1.PodStatus {
podConditions := make([]v1.PodCondition, 0, len(oldPodStatus.Conditions)+len(newPodStatus.Conditions))
for _, c := range oldPodStatus.Conditions {
@@ -1137,10 +1138,10 @@ func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus, couldHaveRunningCon
// See https://issues.k8s.io/108594 for more details.
if podutil.IsPodPhaseTerminal(newPodStatus.Phase) {
if podutil.IsPodReadyConditionTrue(newPodStatus) || podutil.IsContainersReadyConditionTrue(newPodStatus) {
-containersReadyCondition := generateContainersReadyConditionForTerminalPhase(newPodStatus.Phase)
+containersReadyCondition := generateContainersReadyConditionForTerminalPhase(pod, &oldPodStatus, newPodStatus.Phase)
podutil.UpdatePodCondition(&newPodStatus, &containersReadyCondition)
-podReadyCondition := generatePodReadyConditionForTerminalPhase(newPodStatus.Phase)
+podReadyCondition := generatePodReadyConditionForTerminalPhase(pod, &oldPodStatus, newPodStatus.Phase)
podutil.UpdatePodCondition(&newPodStatus, &podReadyCondition)
}
}
@@ -1153,7 +1154,7 @@ func NeedToReconcilePodReadiness(pod *v1.Pod) bool {
if len(pod.Spec.ReadinessGates) == 0 {
return false
}
-podReadyCondition := GeneratePodReadyCondition(&pod.Spec, pod.Status.Conditions, pod.Status.ContainerStatuses, pod.Status.Phase)
+podReadyCondition := GeneratePodReadyCondition(pod, &pod.Status, pod.Status.Conditions, pod.Status.ContainerStatuses, pod.Status.Phase)
i, curCondition := podutil.GetPodConditionFromList(pod.Status.Conditions, v1.PodReady)
// Only reconcile if "Ready" condition is present and Status or Message is not expected
if i >= 0 && (curCondition.Status != podReadyCondition.Status || curCondition.Message != podReadyCondition.Message) {

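A compact illustration of what the new pod and oldPodStatus arguments buy in the terminal-phase clamp, with made-up statuses (simplified; mirrors the TestMergePodStatus cases in the next file, and assumes this package's v1 and metav1 imports):

old := v1.PodStatus{Phase: v1.PodRunning, Conditions: []v1.PodCondition{
	{Type: v1.PodReady, Status: v1.ConditionTrue, ObservedGeneration: 2},
}}
updated := v1.PodStatus{Phase: v1.PodFailed, Conditions: []v1.PodCondition{
	{Type: v1.PodReady, Status: v1.ConditionTrue, ObservedGeneration: 2},
}}
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Generation: 3}, Status: old}
merged := mergePodStatus(pod, old, updated, false /* couldHaveRunningContainers */)
// merged rewrites PodReady to False (the phase is terminal) and, because the old
// condition already carried an observedGeneration, the regenerated condition keeps
// reporting the pod's current generation (3) even if the gate has since been disabled.
_ = merged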
View File

@@ -2020,7 +2020,9 @@ func TestMergePodStatus(t *testing.T) {
for _, tc := range useCases {
t.Run(tc.desc, func(t *testing.T) {
-output := mergePodStatus(tc.oldPodStatus(getPodStatus()), tc.newPodStatus(getPodStatus()), tc.hasRunningContainers)
+oldPodStatus := tc.oldPodStatus(getPodStatus())
+pod := &v1.Pod{Status: oldPodStatus}
+output := mergePodStatus(pod, oldPodStatus, tc.newPodStatus(getPodStatus()), tc.hasRunningContainers)
if !conditionsEqual(output.Conditions, tc.expectPodStatus.Conditions) || !statusEqual(output, tc.expectPodStatus) {
t.Fatalf("unexpected output: %s", cmp.Diff(tc.expectPodStatus, output))
}

View File

@@ -341,6 +341,30 @@ func WaitForPodObservedGeneration(ctx context.Context, c clientset.Interface, ns
}))
}
+// WaitForPodConditionObservedGeneration waits for a pod condition to have the given observed generation.
+func WaitForPodConditionObservedGeneration(ctx context.Context, c clientset.Interface, ns, podName string, conditionType v1.PodConditionType, expectedGeneration int64, timeout time.Duration) error {
+return framework.Gomega().
+Eventually(ctx, framework.RetryNotFound(framework.GetObject(c.CoreV1().Pods(ns).Get, podName, metav1.GetOptions{}))).
+WithTimeout(timeout).
+Should(framework.MakeMatcher(func(pod *v1.Pod) (func() string, error) {
+for _, condition := range pod.Status.Conditions {
+if condition.Type == conditionType {
+if condition.ObservedGeneration == expectedGeneration {
+return nil, nil
+} else {
+return func() string {
+return fmt.Sprintf("expected condition %s generation to be %d, got %d instead:\n", conditionType, expectedGeneration, condition.ObservedGeneration)
+}, nil
+}
+}
+}
+return func() string {
+return fmt.Sprintf("could not find condition %s:\n", conditionType)
+}, nil
+}))
+}
// Range determines how many items must exist and how many must match a certain
// condition. Values <= 0 are ignored.
// TODO (?): move to test/e2e/framework/range

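For reference, a typical call from a test (this mirrors the e2e test added below; pod.Generation starts at 1 on create and increments on each spec update, with f and pod assumed from the surrounding test context):

// Wait up to 30s for the PodScheduled condition to report observedGeneration 1.
framework.ExpectNoError(e2epod.WaitForPodConditionObservedGeneration(
	ctx, f.ClientSet, f.Namespace.Name, pod.Name, v1.PodScheduled, 1, 30*time.Second))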
View File

@@ -655,6 +655,50 @@ var _ = SIGDescribe("Pods Extended (pod generation)", feature.PodObservedGenerat
gomega.Expect(gotPod.Generation).To(gomega.BeEquivalentTo(1))
gomega.Expect(gotPod.Status.ObservedGeneration).To(gomega.BeEquivalentTo(1))
})
+ginkgo.It("pod observedGeneration field set in pod conditions", func(ctx context.Context) {
+ginkgo.By("creating the pod")
+name := "pod-generation-" + string(uuid.NewUUID())
+pod := e2epod.NewAgnhostPod(f.Namespace.Name, name, nil, nil, nil)
+// Set the pod image to something that doesn't exist to induce a pull error
+// to start with.
+agnImage := pod.Spec.Containers[0].Image
+pod.Spec.Containers[0].Image = "some-image-that-doesnt-exist"
+ginkgo.By("submitting the pod to kubernetes")
+pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, pod, metav1.CreateOptions{})
+framework.ExpectNoError(err)
+ginkgo.DeferCleanup(func(ctx context.Context) error {
+ginkgo.By("deleting the pod")
+return podClient.Delete(ctx, pod.Name, metav1.DeleteOptions{})
+})
+expectedPodConditions := []v1.PodConditionType{
+v1.PodReadyToStartContainers,
+v1.PodInitialized,
+v1.PodReady,
+v1.ContainersReady,
+v1.PodScheduled,
+}
+ginkgo.By("verifying the pod conditions have observedGeneration values")
+expectedObservedGeneration := int64(1)
+for _, condition := range expectedPodConditions {
+framework.ExpectNoError(e2epod.WaitForPodConditionObservedGeneration(ctx, f.ClientSet, f.Namespace.Name, pod.Name, condition, expectedObservedGeneration, 30*time.Second))
+}
+ginkgo.By("updating pod to have a valid image")
+podClient.Update(ctx, pod.Name, func(pod *v1.Pod) {
+pod.Spec.Containers[0].Image = agnImage
+})
+expectedObservedGeneration++
+ginkgo.By("verifying the pod conditions have updated observedGeneration values")
+for _, condition := range expectedPodConditions {
+framework.ExpectNoError(e2epod.WaitForPodConditionObservedGeneration(ctx, f.ClientSet, f.Namespace.Name, pod.Name, condition, expectedObservedGeneration, 30*time.Second))
+}
+})
})
})