Merge pull request #119168 from gjkim42/sidecar-allow-probes-and-lifecycle-hooks

Allow all probes and lifecycle for restartable init containers

Commit: d17f3ba2cf
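In short: init containers declared with restartPolicy: Always (restartable init containers, i.e. sidecars) may now carry liveness, readiness, and startup probes as well as lifecycle hooks; ordinary init containers still may not, and the validation message now says so explicitly. For illustration only — names and images below are placeholders, not taken from this diff — a pod spec that exercises the newly allowed fields:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	always := v1.ContainerRestartPolicyAlways
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "sidecar-demo"}, // placeholder name
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{{
				Name:          "log-shipper",                    // placeholder sidecar
				Image:         "example.com/log-shipper:latest", // placeholder image
				RestartPolicy: &always,                          // marks this init container as restartable
				// Previously rejected on init containers; accepted after this
				// change when restartPolicy is Always.
				LivenessProbe: &v1.Probe{
					ProbeHandler:     v1.ProbeHandler{TCPSocket: &v1.TCPSocketAction{Port: intstr.FromInt32(8080)}},
					SuccessThreshold: 1, // must be 1, same rule as for regular containers
				},
				ReadinessProbe: &v1.Probe{
					ProbeHandler: v1.ProbeHandler{TCPSocket: &v1.TCPSocketAction{Port: intstr.FromInt32(8080)}},
				},
				Lifecycle: &v1.Lifecycle{
					PreStop: &v1.LifecycleHandler{
						Exec: &v1.ExecAction{Command: []string{"sh", "-c", "echo flushing"}},
					},
				},
			}},
			Containers: []v1.Container{{Name: "app", Image: "example.com/app:latest"}}, // placeholder
		},
	}
	fmt.Println(pod.Name)
}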
@@ -2859,6 +2859,45 @@ func validatePodResourceClaimSource(claimSource core.ClaimSource, fldPath *field
 	return allErrs
 }
 
+func validateLivenessProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if probe == nil {
+		return allErrs
+	}
+	allErrs = append(allErrs, validateProbe(probe, fldPath)...)
+	if probe.SuccessThreshold != 1 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("successThreshold"), probe.SuccessThreshold, "must be 1"))
+	}
+	return allErrs
+}
+
+func validateReadinessProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if probe == nil {
+		return allErrs
+	}
+	allErrs = append(allErrs, validateProbe(probe, fldPath)...)
+	if probe.TerminationGracePeriodSeconds != nil {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("terminationGracePeriodSeconds"), probe.TerminationGracePeriodSeconds, "must not be set for readinessProbes"))
+	}
+	return allErrs
+}
+
+func validateStartupProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	if probe == nil {
+		return allErrs
+	}
+	allErrs = append(allErrs, validateProbe(probe, fldPath)...)
+	if probe.SuccessThreshold != 1 {
+		allErrs = append(allErrs, field.Invalid(fldPath.Child("successThreshold"), probe.SuccessThreshold, "must be 1"))
+	}
+	return allErrs
+}
+
 func validateProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList {
 	allErrs := field.ErrorList{}
 
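All three wrappers share one shape: nil short-circuits, the common validateProbe checks run, then one probe-type-specific rule applies (liveness and startup require successThreshold == 1; readiness forbids terminationGracePeriodSeconds). A minimal standalone sketch of that pattern, using simplified stand-in types rather than the real core and field packages:

package main

import "fmt"

// Stand-ins for core.Probe and field.ErrorList, for illustration only.
type probe struct {
	successThreshold              int32
	terminationGracePeriodSeconds *int64
}

func validateCommon(p *probe) []string { return nil } // stands in for validateProbe

// validateLiveness mirrors the liveness wrapper; the startup wrapper applies
// the same successThreshold rule.
func validateLiveness(p *probe) []string {
	if p == nil {
		return nil
	}
	errs := validateCommon(p)
	if p.successThreshold != 1 {
		errs = append(errs, "successThreshold: must be 1")
	}
	return errs
}

func validateReadiness(p *probe) []string {
	if p == nil {
		return nil
	}
	errs := validateCommon(p)
	if p.terminationGracePeriodSeconds != nil {
		errs = append(errs, "terminationGracePeriodSeconds: must not be set for readinessProbes")
	}
	return errs
}

func main() {
	fmt.Println(validateLiveness(&probe{successThreshold: 2})) // rejected
	grace := int64(10)
	fmt.Println(validateReadiness(&probe{successThreshold: 1, terminationGracePeriodSeconds: &grace})) // rejected
}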
@@ -3245,36 +3284,26 @@ func validateInitContainers(containers []core.Container, regularContainers []cor
 
 		switch {
 		case restartAlways:
-			// TODO: Allow restartable init containers to have a lifecycle hook.
 			if ctr.Lifecycle != nil {
-				allErrs = append(allErrs, field.Forbidden(idxPath.Child("lifecycle"), "may not be set for init containers"))
-			}
-			// TODO: Allow restartable init containers to have a liveness probe.
-			if ctr.LivenessProbe != nil {
-				allErrs = append(allErrs, field.Forbidden(idxPath.Child("livenessProbe"), "may not be set for init containers"))
-			}
-			// TODO: Allow restartable init containers to have a readiness probe.
-			if ctr.ReadinessProbe != nil {
-				allErrs = append(allErrs, field.Forbidden(idxPath.Child("readinessProbe"), "may not be set for init containers"))
-			}
-			allErrs = append(allErrs, validateProbe(ctr.StartupProbe, idxPath.Child("startupProbe"))...)
-			if ctr.StartupProbe != nil && ctr.StartupProbe.SuccessThreshold != 1 {
-				allErrs = append(allErrs, field.Invalid(idxPath.Child("startupProbe", "successThreshold"), ctr.StartupProbe.SuccessThreshold, "must be 1"))
+				allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, idxPath.Child("lifecycle"))...)
 			}
+			allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, idxPath.Child("livenessProbe"))...)
+			allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, idxPath.Child("readinessProbe"))...)
+			allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, idxPath.Child("startupProbe"))...)
 
 		default:
 			// These fields are disallowed for init containers.
 			if ctr.Lifecycle != nil {
-				allErrs = append(allErrs, field.Forbidden(idxPath.Child("lifecycle"), "may not be set for init containers"))
+				allErrs = append(allErrs, field.Forbidden(idxPath.Child("lifecycle"), "may not be set for init containers without restartPolicy=Always"))
 			}
 			if ctr.LivenessProbe != nil {
-				allErrs = append(allErrs, field.Forbidden(idxPath.Child("livenessProbe"), "may not be set for init containers"))
+				allErrs = append(allErrs, field.Forbidden(idxPath.Child("livenessProbe"), "may not be set for init containers without restartPolicy=Always"))
 			}
 			if ctr.ReadinessProbe != nil {
-				allErrs = append(allErrs, field.Forbidden(idxPath.Child("readinessProbe"), "may not be set for init containers"))
+				allErrs = append(allErrs, field.Forbidden(idxPath.Child("readinessProbe"), "may not be set for init containers without restartPolicy=Always"))
 			}
 			if ctr.StartupProbe != nil {
-				allErrs = append(allErrs, field.Forbidden(idxPath.Child("startupProbe"), "may not be set for init containers"))
+				allErrs = append(allErrs, field.Forbidden(idxPath.Child("startupProbe"), "may not be set for init containers without restartPolicy=Always"))
 			}
 		}
 
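The user-visible effect of this switch: with restartPolicy: Always the fields flow through the same validators as regular containers, while plain init containers keep getting field.Forbidden, now with a message that points at the missing restartPolicy. A toy reduction of that decision, for illustration only (not the real validator):

package main

import "fmt"

// checkInitProbe reduces the switch above to one field: a livenessProbe set
// on an init container, with and without restartPolicy: Always.
func checkInitProbe(restartAlways, probeSet bool, successThreshold int32) string {
	switch {
	case restartAlways:
		if probeSet && successThreshold != 1 {
			return "livenessProbe.successThreshold: Invalid value: must be 1"
		}
		return "ok"
	default:
		if probeSet {
			return "livenessProbe: Forbidden: may not be set for init containers without restartPolicy=Always"
		}
		return "ok"
	}
}

func main() {
	fmt.Println(checkInitProbe(true, true, 1))  // ok: sidecars may define probes
	fmt.Println(checkInitProbe(true, true, 2))  // invalid threshold, same rule as regular containers
	fmt.Println(checkInitProbe(false, true, 1)) // still forbidden for plain init containers
}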
@@ -3383,23 +3412,16 @@ func validateContainers(containers []core.Container, volumes map[string]core.Vol
 		allNames.Insert(ctr.Name)
 	}
 
-	// These fields are only allowed for regular containers, so only check supported values here.
-	// Init and ephemeral container validation will return field.Forbidden() for these paths.
+	// These fields are allowed for regular containers and restartable init
+	// containers.
+	// Regular init container and ephemeral container validation will return
+	// field.Forbidden() for these paths.
 	if ctr.Lifecycle != nil {
 		allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, path.Child("lifecycle"))...)
 	}
-	allErrs = append(allErrs, validateProbe(ctr.LivenessProbe, path.Child("livenessProbe"))...)
-	if ctr.LivenessProbe != nil && ctr.LivenessProbe.SuccessThreshold != 1 {
-		allErrs = append(allErrs, field.Invalid(path.Child("livenessProbe", "successThreshold"), ctr.LivenessProbe.SuccessThreshold, "must be 1"))
-	}
-	allErrs = append(allErrs, validateProbe(ctr.ReadinessProbe, path.Child("readinessProbe"))...)
-	if ctr.ReadinessProbe != nil && ctr.ReadinessProbe.TerminationGracePeriodSeconds != nil {
-		allErrs = append(allErrs, field.Invalid(path.Child("readinessProbe", "terminationGracePeriodSeconds"), ctr.ReadinessProbe.TerminationGracePeriodSeconds, "must not be set for readinessProbes"))
-	}
-	allErrs = append(allErrs, validateProbe(ctr.StartupProbe, path.Child("startupProbe"))...)
-	if ctr.StartupProbe != nil && ctr.StartupProbe.SuccessThreshold != 1 {
-		allErrs = append(allErrs, field.Invalid(path.Child("startupProbe", "successThreshold"), ctr.StartupProbe.SuccessThreshold, "must be 1"))
-	}
+	allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, path.Child("livenessProbe"))...)
+	allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, path.Child("readinessProbe"))...)
+	allErrs = append(allErrs, validateStartupProbe(ctr.StartupProbe, path.Child("startupProbe"))...)
 
 	// These fields are disallowed for regular containers
 	if ctr.RestartPolicy != nil {
@@ -8162,14 +8162,43 @@ func TestValidateInitContainers(t *testing.T) {
 		ImagePullPolicy:          "IfNotPresent",
 		TerminationMessagePolicy: "File",
 	}, {
-		Name:                     "container-3-restart-always-with-startup-probe",
+		Name:                     "container-3-restart-always-with-lifecycle-hook-and-probes",
 		Image:                    "image",
 		ImagePullPolicy:          "IfNotPresent",
 		TerminationMessagePolicy: "File",
 		RestartPolicy:            &containerRestartPolicyAlways,
+		Lifecycle: &core.Lifecycle{
+			PostStart: &core.LifecycleHandler{
+				Exec: &core.ExecAction{
+					Command: []string{"echo", "post start"},
+				},
+			},
+			PreStop: &core.LifecycleHandler{
+				Exec: &core.ExecAction{
+					Command: []string{"echo", "pre stop"},
+				},
+			},
+		},
+		LivenessProbe: &core.Probe{
+			ProbeHandler: core.ProbeHandler{
+				TCPSocket: &core.TCPSocketAction{
+					Port: intstr.FromInt32(80),
+				},
+			},
+			SuccessThreshold: 1,
+		},
+		ReadinessProbe: &core.Probe{
+			ProbeHandler: core.ProbeHandler{
+				TCPSocket: &core.TCPSocketAction{
+					Port: intstr.FromInt32(80),
+				},
+			},
+		},
 		StartupProbe: &core.Probe{
 			ProbeHandler: core.ProbeHandler{
-				TCPSocket: &core.TCPSocketAction{Port: intstr.FromInt(80)},
+				TCPSocket: &core.TCPSocketAction{
+					Port: intstr.FromInt(80),
+				},
 			},
 			SuccessThreshold: 1,
 		},
@@ -8390,6 +8419,165 @@ func TestValidateInitContainers(t *testing.T) {
 			},
 		}},
 		field.ErrorList{{Type: field.ErrorTypeInvalid, Field: "initContainers[0].startupProbe.successThreshold", BadValue: int32(2)}},
+	}, {
+		"invalid readiness probe, terminationGracePeriodSeconds set.",
+		line(),
+		[]core.Container{{
+			Name:                     "life-123",
+			Image:                    "image",
+			ImagePullPolicy:          "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy:            &containerRestartPolicyAlways,
+			ReadinessProbe: &core.Probe{
+				ProbeHandler: core.ProbeHandler{
+					TCPSocket: &core.TCPSocketAction{
+						Port: intstr.FromInt32(80),
+					},
+				},
+				TerminationGracePeriodSeconds: utilpointer.Int64(10),
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeInvalid, Field: "initContainers[0].readinessProbe.terminationGracePeriodSeconds", BadValue: utilpointer.Int64(10)}},
+	}, {
+		"invalid liveness probe, successThreshold != 1",
+		line(),
+		[]core.Container{{
+			Name:                     "live-123",
+			Image:                    "image",
+			ImagePullPolicy:          "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy:            &containerRestartPolicyAlways,
+			LivenessProbe: &core.Probe{
+				ProbeHandler: core.ProbeHandler{
+					TCPSocket: &core.TCPSocketAction{
+						Port: intstr.FromInt32(80),
+					},
+				},
+				SuccessThreshold: 2,
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeInvalid, Field: "initContainers[0].livenessProbe.successThreshold", BadValue: int32(2)}},
+	}, {
+		"invalid lifecycle, no exec command.",
+		line(),
+		[]core.Container{{
+			Name:                     "life-123",
+			Image:                    "image",
+			ImagePullPolicy:          "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy:            &containerRestartPolicyAlways,
+			Lifecycle: &core.Lifecycle{
+				PreStop: &core.LifecycleHandler{
+					Exec: &core.ExecAction{},
+				},
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeRequired, Field: "initContainers[0].lifecycle.preStop.exec.command", BadValue: ""}},
+	}, {
+		"invalid lifecycle, no http path.",
+		line(),
+		[]core.Container{{
+			Name:                     "life-123",
+			Image:                    "image",
+			ImagePullPolicy:          "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy:            &containerRestartPolicyAlways,
+			Lifecycle: &core.Lifecycle{
+				PreStop: &core.LifecycleHandler{
+					HTTPGet: &core.HTTPGetAction{
+						Port:   intstr.FromInt32(80),
+						Scheme: "HTTP",
+					},
+				},
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeRequired, Field: "initContainers[0].lifecycle.preStop.httpGet.path", BadValue: ""}},
+	}, {
+		"invalid lifecycle, no http port.",
+		line(),
+		[]core.Container{{
+			Name:                     "life-123",
+			Image:                    "image",
+			ImagePullPolicy:          "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy:            &containerRestartPolicyAlways,
+			Lifecycle: &core.Lifecycle{
+				PreStop: &core.LifecycleHandler{
+					HTTPGet: &core.HTTPGetAction{
+						Path:   "/",
+						Scheme: "HTTP",
+					},
+				},
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeInvalid, Field: "initContainers[0].lifecycle.preStop.httpGet.port", BadValue: 0}},
+	}, {
+		"invalid lifecycle, no http scheme.",
+		line(),
+		[]core.Container{{
+			Name:                     "life-123",
+			Image:                    "image",
+			ImagePullPolicy:          "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy:            &containerRestartPolicyAlways,
+			Lifecycle: &core.Lifecycle{
+				PreStop: &core.LifecycleHandler{
+					HTTPGet: &core.HTTPGetAction{
+						Path: "/",
+						Port: intstr.FromInt32(80),
+					},
+				},
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeNotSupported, Field: "initContainers[0].lifecycle.preStop.httpGet.scheme", BadValue: core.URIScheme("")}},
+	}, {
+		"invalid lifecycle, no tcp socket port.",
+		line(),
+		[]core.Container{{
+			Name:                     "life-123",
+			Image:                    "image",
+			ImagePullPolicy:          "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy:            &containerRestartPolicyAlways,
+			Lifecycle: &core.Lifecycle{
+				PreStop: &core.LifecycleHandler{
+					TCPSocket: &core.TCPSocketAction{},
+				},
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeInvalid, Field: "initContainers[0].lifecycle.preStop.tcpSocket.port", BadValue: 0}},
+	}, {
+		"invalid lifecycle, zero tcp socket port.",
+		line(),
+		[]core.Container{{
+			Name:                     "life-123",
+			Image:                    "image",
+			ImagePullPolicy:          "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy:            &containerRestartPolicyAlways,
+			Lifecycle: &core.Lifecycle{
+				PreStop: &core.LifecycleHandler{
+					TCPSocket: &core.TCPSocketAction{
+						Port: intstr.FromInt32(0),
+					},
+				},
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeInvalid, Field: "initContainers[0].lifecycle.preStop.tcpSocket.port", BadValue: 0}},
+	}, {
+		"invalid lifecycle, no action.",
+		line(),
+		[]core.Container{{
+			Name:                     "life-123",
+			Image:                    "image",
+			ImagePullPolicy:          "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy:            &containerRestartPolicyAlways,
+			Lifecycle: &core.Lifecycle{
+				PreStop: &core.LifecycleHandler{},
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeRequired, Field: "initContainers[0].lifecycle.preStop", BadValue: ""}},
 	},
 }
 for _, tc := range errorCases {
@@ -1738,9 +1738,10 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
 	if utilfeature.DefaultFeatureGate.Enabled(features.PodReadyToStartContainersCondition) {
 		s.Conditions = append(s.Conditions, status.GeneratePodReadyToStartContainersCondition(pod, podStatus))
 	}
-	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(&pod.Spec, append(s.InitContainerStatuses, s.ContainerStatuses...), s.Phase))
-	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(&pod.Spec, s.Conditions, s.ContainerStatuses, s.Phase))
-	s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(&pod.Spec, s.ContainerStatuses, s.Phase))
+	allContainerStatuses := append(s.InitContainerStatuses, s.ContainerStatuses...)
+	s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(&pod.Spec, allContainerStatuses, s.Phase))
+	s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(&pod.Spec, s.Conditions, allContainerStatuses, s.Phase))
+	s.Conditions = append(s.Conditions, status.GenerateContainersReadyCondition(&pod.Spec, allContainerStatuses, s.Phase))
 	s.Conditions = append(s.Conditions, v1.PodCondition{
 		Type:   v1.PodScheduled,
 		Status: v1.ConditionTrue,
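The ready-condition generators previously saw only s.ContainerStatuses, so an unready sidecar could not hold the pod's Ready condition back; this hunk (and the status-manager hunk further down) feeds them the merged list instead. A minimal sketch of the merge, assuming only the v1 types:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	initStatuses := []v1.ContainerStatus{{Name: "restartable-init-1", Ready: false}}
	regularStatuses := []v1.ContainerStatus{{Name: "regular-1", Ready: true}}

	// Mirrors allContainerStatuses above: init-container statuses first,
	// regular containers after, so the condition generators can also see
	// sidecar readiness.
	all := append(initStatuses, regularStatuses...)
	for _, s := range all {
		fmt.Println(s.Name, s.Ready) // restartable-init-1 false; regular-1 true
	}
}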
@@ -877,7 +877,7 @@ func hasAnyRegularContainerCreated(pod *v1.Pod, podStatus *kubecontainer.PodStat
 // The actions include:
 // - Start the first init container that has not been started.
 // - Restart all restartable init containers that have started but are not running.
-// - Kill the restartable init containers that have failed the startup probe.
+// - Kill the restartable init containers that are not alive or started.
 func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, podStatus *kubecontainer.PodStatus, changes *podActions) bool {
 	if len(pod.Spec.InitContainers) == 0 {
 		return true
@@ -960,6 +960,27 @@ func (m *kubeGenericRuntimeManager) computeInitContainerActions(pod *v1.Pod, pod
 				// this init container is initialized for the first time, start the next one
 				changes.InitContainersToStart = append(changes.InitContainersToStart, i+1)
 			}
+
+			// A restartable init container does not have to take into account its
+			// liveness probe when it determines to start the next init container.
+			if container.LivenessProbe != nil {
+				liveness, found := m.livenessManager.Get(status.ID)
+				if !found {
+					// If the liveness probe has not been run, wait for it.
+					break
+				}
+				if liveness == proberesults.Failure {
+					// If the restartable init container failed the liveness probe,
+					// restart it.
+					changes.ContainersToKill[status.ID] = containerToKillInfo{
+						name:      container.Name,
+						container: container,
+						message:   fmt.Sprintf("Init container %s failed liveness probe", container.Name),
+						reason:    reasonLivenessProbe,
+					}
+					changes.InitContainersToStart = append(changes.InitContainersToStart, i)
+				}
+			}
 		} else { // init container
 			// nothing do to but wait for it to finish
 			break
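The added branch distinguishes three liveness states for a started restartable init container: no result yet (wait), Failure (kill it and queue the same index so it is recreated), anything else (fall through). A compressed standalone sketch of that decision, with stand-in types instead of the kubelet's:

package main

import "fmt"

type result int

const (
	unknown result = iota
	success
	failure
)

// decide mirrors the shape of the added branch: i is the index of the
// restartable init container under inspection; ok reports whether a probe
// result exists yet. It returns which indices to (re)start and which
// containers to kill.
func decide(i int, res result, ok bool) (start []int, kill []int) {
	if !ok {
		// Probe has not run yet: wait, queue nothing new.
		return nil, nil
	}
	if res == failure {
		// Kill the sidecar and queue the same index so it is recreated.
		return []int{i}, []int{i}
	}
	// Alive: move on to the next init container.
	return []int{i + 1}, nil
}

func main() {
	fmt.Println(decide(1, unknown, false)) // [] [] -> wait
	fmt.Println(decide(1, failure, true))  // [1] [1] -> restart in place
	fmt.Println(decide(1, success, true))  // [2] [] -> proceed
}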
@@ -341,7 +341,7 @@ func TestToKubeContainerStatusWithResources(t *testing.T) {
 	}
 }
 
-func TestLifeCycleHook(t *testing.T) {
+func testLifeCycleHook(t *testing.T, testPod *v1.Pod, testContainer *v1.Container) {
 
 	// Setup
 	fakeRuntime, _, m, _ := createTestRuntimeManager()
@@ -352,23 +352,6 @@ func TestLifeCycleHook(t *testing.T) {
 		ID:   "foo",
 	}
 
-	testPod := &v1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      "bar",
-			Namespace: "default",
-		},
-		Spec: v1.PodSpec{
-			Containers: []v1.Container{
-				{
-					Name:            "foo",
-					Image:           "busybox",
-					ImagePullPolicy: v1.PullIfNotPresent,
-					Command:         []string{"testCommand"},
-					WorkingDir:      "testWorkingDir",
-				},
-			},
-		},
-	}
 	cmdPostStart := &v1.Lifecycle{
 		PostStart: &v1.LifecycleHandler{
 			Exec: &v1.ExecAction{
@@ -418,7 +401,7 @@ func TestLifeCycleHook(t *testing.T) {
 	// Configured and works as expected
 	t.Run("PreStop-CMDExec", func(t *testing.T) {
 		ctx := context.Background()
-		testPod.Spec.Containers[0].Lifecycle = cmdLifeCycle
+		testContainer.Lifecycle = cmdLifeCycle
 		m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod)
 		if fakeRunner.Cmd[0] != cmdLifeCycle.PreStop.Exec.Command[0] {
 			t.Errorf("CMD Prestop hook was not invoked")
@@ -432,7 +415,7 @@ func TestLifeCycleHook(t *testing.T) {
 		defer func() { fakeHTTP.req = nil }()
 		defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, false)()
 		httpLifeCycle.PreStop.HTTPGet.Port = intstr.IntOrString{}
-		testPod.Spec.Containers[0].Lifecycle = httpLifeCycle
+		testContainer.Lifecycle = httpLifeCycle
 		m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod)
 
 		if fakeHTTP.req == nil || !strings.Contains(fakeHTTP.req.URL.String(), httpLifeCycle.PreStop.HTTPGet.Host) {
@@ -443,7 +426,7 @@ func TestLifeCycleHook(t *testing.T) {
 		ctx := context.Background()
 		defer func() { fakeHTTP.req = nil }()
 		httpLifeCycle.PreStop.HTTPGet.Port = intstr.FromInt32(80)
-		testPod.Spec.Containers[0].Lifecycle = httpLifeCycle
+		testContainer.Lifecycle = httpLifeCycle
 		m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod)
 
 		if fakeHTTP.req == nil || !strings.Contains(fakeHTTP.req.URL.String(), httpLifeCycle.PreStop.HTTPGet.Host) {
@@ -473,8 +456,7 @@ func TestLifeCycleHook(t *testing.T) {
 		// Fake all the things you need before trying to create a container
 		fakeSandBox, _ := makeAndSetFakePod(t, m, fakeRuntime, testPod)
 		fakeSandBoxConfig, _ := m.generatePodSandboxConfig(testPod, 0)
-		testPod.Spec.Containers[0].Lifecycle = cmdPostStart
-		testContainer := &testPod.Spec.Containers[0]
+		testContainer.Lifecycle = cmdPostStart
 		fakePodStatus := &kubecontainer.PodStatus{
 			ContainerStatuses: []*kubecontainer.Status{
 				{
@@ -500,6 +482,51 @@ func TestLifeCycleHook(t *testing.T) {
 	})
 }
 
+func TestLifeCycleHook(t *testing.T) {
+	testPod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "bar",
+			Namespace: "default",
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name:            "foo",
+					Image:           "busybox",
+					ImagePullPolicy: v1.PullIfNotPresent,
+					Command:         []string{"testCommand"},
+					WorkingDir:      "testWorkingDir",
+				},
+			},
+		},
+	}
+
+	testLifeCycleHook(t, testPod, &testPod.Spec.Containers[0])
+}
+
+func TestLifeCycleHookForRestartableInitContainer(t *testing.T) {
+	testPod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "bar",
+			Namespace: "default",
+		},
+		Spec: v1.PodSpec{
+			InitContainers: []v1.Container{
+				{
+					Name:            "foo",
+					Image:           "busybox",
+					ImagePullPolicy: v1.PullIfNotPresent,
+					Command:         []string{"testCommand"},
+					WorkingDir:      "testWorkingDir",
+					RestartPolicy:   &containerRestartPolicyAlways,
+				},
+			},
+		},
+	}
+
+	testLifeCycleHook(t, testPod, &testPod.Spec.InitContainers[0])
+}
+
 func TestStartSpec(t *testing.T) {
 	podStatus := &kubecontainer.PodStatus{
 		ContainerStatuses: []*kubecontainer.Status{
@@ -1518,6 +1518,62 @@ func TestComputePodActionsWithRestartableInitContainers(t *testing.T) {
 				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{}),
 			},
 		},
+		"livenessProbe has not been run; start the nothing": {
+			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
+			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
+				m.livenessManager.Remove(status.ContainerStatuses[1].ID)
+				status.ContainerStatuses = status.ContainerStatuses[:2]
+			},
+			actions: podActions{
+				SandboxID:             baseStatus.SandboxStatuses[0].Id,
+				InitContainersToStart: []int{2},
+				ContainersToStart:     []int{},
+				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{}),
+			},
+		},
+		"livenessProbe in progress; start the next": {
+			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
+			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
+				m.livenessManager.Set(status.ContainerStatuses[1].ID, proberesults.Unknown, basePod)
+				status.ContainerStatuses = status.ContainerStatuses[:2]
+			},
+			actions: podActions{
+				SandboxID:             baseStatus.SandboxStatuses[0].Id,
+				InitContainersToStart: []int{2},
+				ContainersToStart:     []int{},
+				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{}),
+			},
+			resetStatusFn: func(status *kubecontainer.PodStatus) {
+				m.livenessManager.Remove(status.ContainerStatuses[1].ID)
+			},
+		},
+		"livenessProbe has completed; start the next": {
+			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
+			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
+				status.ContainerStatuses = status.ContainerStatuses[:2]
+			},
+			actions: podActions{
+				SandboxID:             baseStatus.SandboxStatuses[0].Id,
+				InitContainersToStart: []int{2},
+				ContainersToStart:     []int{},
+				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{}),
+			},
+		},
+		"kill and recreate the restartable init container if the liveness check has failed": {
+			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
+			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
+				m.livenessManager.Set(status.ContainerStatuses[2].ID, proberesults.Failure, basePod)
+			},
+			actions: podActions{
+				SandboxID:             baseStatus.SandboxStatuses[0].Id,
+				InitContainersToStart: []int{2},
+				ContainersToKill:      getKillMapWithInitContainers(basePod, baseStatus, []int{2}),
+				ContainersToStart:     []int{0, 1, 2},
+			},
+			resetStatusFn: func(status *kubecontainer.PodStatus) {
+				m.livenessManager.Remove(status.ContainerStatuses[2].ID)
+			},
+		},
 		"startupProbe has not been run; do nothing": {
 			mutatePodFn: func(pod *v1.Pod) { pod.Spec.RestartPolicy = v1.RestartPolicyAlways },
 			mutateStatusFn: func(pod *v1.Pod, status *kubecontainer.PodStatus) {
@@ -1740,7 +1796,9 @@ func TestComputePodActionsWithRestartableInitContainers(t *testing.T) {
 		},
 	} {
 		pod, status := makeBasePodAndStatusWithRestartableInitContainers()
+		m.livenessManager.Set(status.ContainerStatuses[1].ID, proberesults.Success, basePod)
 		m.startupManager.Set(status.ContainerStatuses[1].ID, proberesults.Success, basePod)
+		m.livenessManager.Set(status.ContainerStatuses[2].ID, proberesults.Success, basePod)
 		m.startupManager.Set(status.ContainerStatuses[2].ID, proberesults.Success, basePod)
 		if test.mutatePodFn != nil {
 			test.mutatePodFn(pod)
@@ -1769,12 +1827,14 @@ func makeBasePodAndStatusWithRestartableInitContainers() (*v1.Pod, *kubecontaine
 			Name:          "restartable-init-2",
 			Image:         "bar-image",
 			RestartPolicy: &containerRestartPolicyAlways,
+			LivenessProbe: &v1.Probe{},
 			StartupProbe:  &v1.Probe{},
 		},
 		{
 			Name:          "restartable-init-3",
 			Image:         "bar-image",
 			RestartPolicy: &containerRestartPolicyAlways,
+			LivenessProbe: &v1.Probe{},
 			StartupProbe:  &v1.Probe{},
 		},
 	}
@@ -152,8 +152,12 @@ func TestAddRemovePodsWithRestartableInitContainer(t *testing.T) {
 			enableSidecarContainers: false,
 		},
 		{
 			desc: "pod with sidecar (sidecar containers feature enabled)",
-			probePaths: []probeKey{{"restartable_init_container_pod", "restartable-init", readiness}},
+			probePaths: []probeKey{
+				{"restartable_init_container_pod", "restartable-init", liveness},
+				{"restartable_init_container_pod", "restartable-init", readiness},
+				{"restartable_init_container_pod", "restartable-init", startup},
+			},
 			enableSidecarContainers: true,
 		},
 	}
@@ -179,7 +183,9 @@ func TestAddRemovePodsWithRestartableInitContainer(t *testing.T) {
 				Name: "init",
 			}, {
 				Name:           "restartable-init",
+				LivenessProbe:  defaultProbe,
 				ReadinessProbe: defaultProbe,
+				StartupProbe:   defaultProbe,
 				RestartPolicy:  containerRestartPolicy(tc.enableSidecarContainers),
 			}},
 			Containers: []v1.Container{{
@@ -55,6 +55,21 @@ func GenerateContainersReadyCondition(spec *v1.PodSpec, containerStatuses []v1.C
 	}
 	unknownContainers := []string{}
 	unreadyContainers := []string{}
+
+	for _, container := range spec.InitContainers {
+		if !kubetypes.IsRestartableInitContainer(&container) {
+			continue
+		}
+
+		if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
+			if !containerStatus.Ready {
+				unreadyContainers = append(unreadyContainers, container.Name)
+			}
+		} else {
+			unknownContainers = append(unknownContainers, container.Name)
+		}
+	}
+
 	for _, container := range spec.Containers {
 		if containerStatus, ok := podutil.GetContainerStatus(containerStatuses, container.Name); ok {
 			if !containerStatus.Ready {
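With this loop, ContainersReady (and hence pod readiness) is gated on restartable init containers too, while plain init containers are skipped as before. A standalone sketch of the resulting rule, using stand-in types instead of v1.PodSpec and podutil:

package main

import "fmt"

// Stand-in container spec; the real code walks v1.PodSpec and looks statuses
// up via podutil.GetContainerStatus.
type ctr struct {
	name        string
	restartable bool // init container with restartPolicy: Always
}

// containersReady reports whether every container that gates readiness is
// ready; a missing entry in the ready map plays the role of an unknown status.
func containersReady(initCtrs, regularCtrs []ctr, ready map[string]bool) (bool, []string) {
	var notReady []string
	for _, c := range initCtrs {
		if !c.restartable {
			continue // plain init containers never gate ContainersReady
		}
		if !ready[c.name] {
			notReady = append(notReady, c.name)
		}
	}
	for _, c := range regularCtrs {
		if !ready[c.name] {
			notReady = append(notReady, c.name)
		}
	}
	return len(notReady) == 0, notReady
}

func main() {
	inits := []ctr{{"restartable-init-1", true}, {"plain-init", false}}
	regs := []ctr{{"regular-1", false}}
	ok, missing := containersReady(inits, regs, map[string]bool{"regular-1": true})
	fmt.Println(ok, missing) // false [restartable-init-1]: the sidecar now gates readiness
}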
@@ -30,6 +30,10 @@ import (
 	"k8s.io/utils/pointer"
 )
 
+var (
+	containerRestartPolicyAlways = v1.ContainerRestartPolicyAlways
+)
+
 func TestGenerateContainersReadyCondition(t *testing.T) {
 	tests := []struct {
 		spec              *v1.PodSpec
@@ -112,6 +116,74 @@ func TestGenerateContainersReadyCondition(t *testing.T) {
 			podPhase:    v1.PodSucceeded,
 			expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, PodCompleted, ""),
 		},
+		{
+			spec: &v1.PodSpec{
+				InitContainers: []v1.Container{
+					{Name: "restartable-init-1", RestartPolicy: &containerRestartPolicyAlways},
+				},
+				Containers: []v1.Container{
+					{Name: "regular-1"},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				getReadyStatus("regular-1"),
+			},
+			podPhase:    v1.PodRunning,
+			expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [restartable-init-1]"),
+		},
+		{
+			spec: &v1.PodSpec{
+				InitContainers: []v1.Container{
+					{Name: "restartable-init-1", RestartPolicy: &containerRestartPolicyAlways},
+					{Name: "restartable-init-2", RestartPolicy: &containerRestartPolicyAlways},
+				},
+				Containers: []v1.Container{
+					{Name: "regular-1"},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				getReadyStatus("restartable-init-1"),
+				getReadyStatus("restartable-init-2"),
+				getReadyStatus("regular-1"),
+			},
+			podPhase:    v1.PodRunning,
+			expectReady: getPodCondition(v1.ContainersReady, v1.ConditionTrue, "", ""),
+		},
+		{
+			spec: &v1.PodSpec{
+				InitContainers: []v1.Container{
+					{Name: "restartable-init-1", RestartPolicy: &containerRestartPolicyAlways},
+					{Name: "restartable-init-2", RestartPolicy: &containerRestartPolicyAlways},
+				},
+				Containers: []v1.Container{
+					{Name: "regular-1"},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				getReadyStatus("restartable-init-1"),
+				getReadyStatus("regular-1"),
+			},
+			podPhase:    v1.PodRunning,
+			expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [restartable-init-2]"),
+		},
+		{
+			spec: &v1.PodSpec{
+				InitContainers: []v1.Container{
+					{Name: "restartable-init-1", RestartPolicy: &containerRestartPolicyAlways},
+					{Name: "restartable-init-2", RestartPolicy: &containerRestartPolicyAlways},
+				},
+				Containers: []v1.Container{
+					{Name: "regular-1"},
+				},
+			},
+			containerStatuses: []v1.ContainerStatus{
+				getReadyStatus("restartable-init-1"),
+				getNotReadyStatus("restartable-init-2"),
+				getReadyStatus("regular-1"),
+			},
+			podPhase:    v1.PodRunning,
+			expectReady: getPodCondition(v1.ContainersReady, v1.ConditionFalse, ContainersNotReady, "containers with unready status: [restartable-init-2]"),
+		},
 	}
 
 	for i, test := range tests {
@@ -349,8 +349,9 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai
 			status.Conditions = append(status.Conditions, condition)
 		}
 	}
-	updateConditionFunc(v1.PodReady, GeneratePodReadyCondition(&pod.Spec, status.Conditions, status.ContainerStatuses, status.Phase))
-	updateConditionFunc(v1.ContainersReady, GenerateContainersReadyCondition(&pod.Spec, status.ContainerStatuses, status.Phase))
+	allContainerStatuses := append(status.InitContainerStatuses, status.ContainerStatuses...)
+	updateConditionFunc(v1.PodReady, GeneratePodReadyCondition(&pod.Spec, status.Conditions, allContainerStatuses, status.Phase))
+	updateConditionFunc(v1.ContainersReady, GenerateContainersReadyCondition(&pod.Spec, allContainerStatuses, status.Phase))
 	m.updateStatusInternal(pod, status, false, false)
 }
 
(File diff suppressed because it is too large.)
@@ -253,6 +253,254 @@ var _ = SIGDescribe("Container Lifecycle Hook", func() {
 		})
 	})
 })
+
+var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Feature:SidecarContainers] Restartable Init Container Lifecycle Hook", func() {
+	f := framework.NewDefaultFramework("restartable-init-container-lifecycle-hook")
+	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
+	var podClient *e2epod.PodClient
+	const (
+		podCheckInterval     = 1 * time.Second
+		postStartWaitTimeout = 2 * time.Minute
+		preStopWaitTimeout   = 30 * time.Second
+	)
+	ginkgo.Context("when create a pod with lifecycle hook", func() {
+		var (
+			targetIP, targetURL, targetNode string
+
+			httpPorts = []v1.ContainerPort{
+				{
+					ContainerPort: 8080,
+					Protocol:      v1.ProtocolTCP,
+				},
+			}
+			httpsPorts = []v1.ContainerPort{
+				{
+					ContainerPort: 9090,
+					Protocol:      v1.ProtocolTCP,
+				},
+			}
+			httpsArgs = []string{
+				"netexec",
+				"--http-port", "9090",
+				"--udp-port", "9091",
+				"--tls-cert-file", "/localhost.crt",
+				"--tls-private-key-file", "/localhost.key",
+			}
+		)
+
+		podHandleHookRequest := e2epod.NewAgnhostPodFromContainers(
+			"", "pod-handle-http-request", nil,
+			e2epod.NewAgnhostContainer("container-handle-http-request", nil, httpPorts, "netexec"),
+			e2epod.NewAgnhostContainer("container-handle-https-request", nil, httpsPorts, httpsArgs...),
+		)
+
+		ginkgo.BeforeEach(func(ctx context.Context) {
+			node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
+			framework.ExpectNoError(err)
+			targetNode = node.Name
+			nodeSelection := e2epod.NodeSelection{}
+			e2epod.SetAffinity(&nodeSelection, targetNode)
+			e2epod.SetNodeSelection(&podHandleHookRequest.Spec, nodeSelection)
+
+			podClient = e2epod.NewPodClient(f)
+			ginkgo.By("create the container to handle the HTTPGet hook request.")
+			newPod := podClient.CreateSync(ctx, podHandleHookRequest)
+			targetIP = newPod.Status.PodIP
+			targetURL = targetIP
+			if strings.Contains(targetIP, ":") {
+				targetURL = fmt.Sprintf("[%s]", targetIP)
+			}
+		})
+		testPodWithHook := func(ctx context.Context, podWithHook *v1.Pod) {
+			ginkgo.By("create the pod with lifecycle hook")
+			podClient.CreateSync(ctx, podWithHook)
+			const (
+				defaultHandler = iota
+				httpsHandler
+			)
+			handlerContainer := defaultHandler
+			if podWithHook.Spec.InitContainers[0].Lifecycle.PostStart != nil {
+				ginkgo.By("check poststart hook")
+				if podWithHook.Spec.InitContainers[0].Lifecycle.PostStart.HTTPGet != nil {
+					if v1.URISchemeHTTPS == podWithHook.Spec.InitContainers[0].Lifecycle.PostStart.HTTPGet.Scheme {
+						handlerContainer = httpsHandler
+					}
+				}
+				gomega.Eventually(ctx, func(ctx context.Context) error {
+					return podClient.MatchContainerOutput(ctx, podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[handlerContainer].Name,
+						`GET /echo\?msg=poststart`)
+				}, postStartWaitTimeout, podCheckInterval).Should(gomega.BeNil())
+			}
+			ginkgo.By("delete the pod with lifecycle hook")
+			podClient.DeleteSync(ctx, podWithHook.Name, *metav1.NewDeleteOptions(15), e2epod.DefaultPodDeletionTimeout)
+			if podWithHook.Spec.InitContainers[0].Lifecycle.PreStop != nil {
+				ginkgo.By("check prestop hook")
+				if podWithHook.Spec.InitContainers[0].Lifecycle.PreStop.HTTPGet != nil {
+					if v1.URISchemeHTTPS == podWithHook.Spec.InitContainers[0].Lifecycle.PreStop.HTTPGet.Scheme {
+						handlerContainer = httpsHandler
+					}
+				}
+				gomega.Eventually(ctx, func(ctx context.Context) error {
+					return podClient.MatchContainerOutput(ctx, podHandleHookRequest.Name, podHandleHookRequest.Spec.Containers[handlerContainer].Name,
+						`GET /echo\?msg=prestop`)
+				}, preStopWaitTimeout, podCheckInterval).Should(gomega.BeNil())
+			}
+		}
+		/*
+			Release: v1.28
+			Testname: Pod Lifecycle with restartable init container, post start exec hook
+			Description: When a post start handler is specified in the container
+			lifecycle using a 'Exec' action, then the handler MUST be invoked after
+			the start of the container. A server pod is created that will serve http
+			requests, create a second pod with a container lifecycle specifying a
+			post start that invokes the server pod using ExecAction to validate that
+			the post start is executed.
+		*/
+		ginkgo.It("should execute poststart exec hook properly", func(ctx context.Context) {
+			lifecycle := &v1.Lifecycle{
+				PostStart: &v1.LifecycleHandler{
+					Exec: &v1.ExecAction{
+						Command: []string{"sh", "-c", "curl http://" + targetURL + ":8080/echo?msg=poststart"},
+					},
+				},
+			}
+			podWithHook := getSidecarPodWithHook("pod-with-poststart-exec-hook", imageutils.GetE2EImage(imageutils.Agnhost), lifecycle)
+
+			testPodWithHook(ctx, podWithHook)
+		})
+		/*
+			Release: v1.28
+			Testname: Pod Lifecycle with restartable init container, prestop exec hook
+			Description: When a pre-stop handler is specified in the container
+			lifecycle using a 'Exec' action, then the handler MUST be invoked before
+			the container is terminated. A server pod is created that will serve http
+			requests, create a second pod with a container lifecycle specifying a
+			pre-stop that invokes the server pod using ExecAction to validate that
+			the pre-stop is executed.
+		*/
+		ginkgo.It("should execute prestop exec hook properly", func(ctx context.Context) {
+			lifecycle := &v1.Lifecycle{
+				PreStop: &v1.LifecycleHandler{
+					Exec: &v1.ExecAction{
+						Command: []string{"sh", "-c", "curl http://" + targetURL + ":8080/echo?msg=prestop"},
+					},
+				},
+			}
+			podWithHook := getSidecarPodWithHook("pod-with-prestop-exec-hook", imageutils.GetE2EImage(imageutils.Agnhost), lifecycle)
+			testPodWithHook(ctx, podWithHook)
+		})
+		/*
+			Release: v1.28
+			Testname: Pod Lifecycle with restartable init container, post start http hook
+			Description: When a post start handler is specified in the container
+			lifecycle using a HttpGet action, then the handler MUST be invoked after
+			the start of the container. A server pod is created that will serve http
+			requests, create a second pod on the same node with a container lifecycle
+			specifying a post start that invokes the server pod to validate that the
+			post start is executed.
+		*/
+		ginkgo.It("should execute poststart http hook properly", func(ctx context.Context) {
+			lifecycle := &v1.Lifecycle{
+				PostStart: &v1.LifecycleHandler{
+					HTTPGet: &v1.HTTPGetAction{
+						Path: "/echo?msg=poststart",
+						Host: targetIP,
+						Port: intstr.FromInt(8080),
+					},
+				},
+			}
+			podWithHook := getSidecarPodWithHook("pod-with-poststart-http-hook", imageutils.GetPauseImageName(), lifecycle)
+			// make sure we spawn the test pod on the same node as the webserver.
+			nodeSelection := e2epod.NodeSelection{}
+			e2epod.SetAffinity(&nodeSelection, targetNode)
+			e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection)
+			testPodWithHook(ctx, podWithHook)
+		})
+		/*
+			Release : v1.28
+			Testname: Pod Lifecycle with restartable init container, poststart https hook
+			Description: When a post-start handler is specified in the container
+			lifecycle using a 'HttpGet' action, then the handler MUST be invoked
+			before the container is terminated. A server pod is created that will
+			serve https requests, create a second pod on the same node with a
+			container lifecycle specifying a post-start that invokes the server pod
+			to validate that the post-start is executed.
+		*/
+		ginkgo.It("should execute poststart https hook properly [MinimumKubeletVersion:1.23]", func(ctx context.Context) {
+			lifecycle := &v1.Lifecycle{
+				PostStart: &v1.LifecycleHandler{
+					HTTPGet: &v1.HTTPGetAction{
+						Scheme: v1.URISchemeHTTPS,
+						Path:   "/echo?msg=poststart",
+						Host:   targetIP,
+						Port:   intstr.FromInt(9090),
+					},
+				},
+			}
+			podWithHook := getSidecarPodWithHook("pod-with-poststart-https-hook", imageutils.GetPauseImageName(), lifecycle)
+			// make sure we spawn the test pod on the same node as the webserver.
+			nodeSelection := e2epod.NodeSelection{}
+			e2epod.SetAffinity(&nodeSelection, targetNode)
+			e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection)
+			testPodWithHook(ctx, podWithHook)
+		})
+		/*
+			Release : v1.28
+			Testname: Pod Lifecycle with restartable init container, prestop http hook
+			Description: When a pre-stop handler is specified in the container
+			lifecycle using a 'HttpGet' action, then the handler MUST be invoked
+			before the container is terminated. A server pod is created that will
+			serve http requests, create a second pod on the same node with a
+			container lifecycle specifying a pre-stop that invokes the server pod to
+			validate that the pre-stop is executed.
+		*/
+		ginkgo.It("should execute prestop http hook properly", func(ctx context.Context) {
+			lifecycle := &v1.Lifecycle{
+				PreStop: &v1.LifecycleHandler{
+					HTTPGet: &v1.HTTPGetAction{
+						Path: "/echo?msg=prestop",
+						Host: targetIP,
+						Port: intstr.FromInt(8080),
+					},
+				},
+			}
+			podWithHook := getSidecarPodWithHook("pod-with-prestop-http-hook", imageutils.GetPauseImageName(), lifecycle)
+			// make sure we spawn the test pod on the same node as the webserver.
+			nodeSelection := e2epod.NodeSelection{}
+			e2epod.SetAffinity(&nodeSelection, targetNode)
+			e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection)
+			testPodWithHook(ctx, podWithHook)
+		})
+		/*
+			Release : v1.28
+			Testname: Pod Lifecycle with restartable init container, prestop https hook
+			Description: When a pre-stop handler is specified in the container
+			lifecycle using a 'HttpGet' action, then the handler MUST be invoked
+			before the container is terminated. A server pod is created that will
+			serve https requests, create a second pod on the same node with a
+			container lifecycle specifying a pre-stop that invokes the server pod to
+			validate that the pre-stop is executed.
+		*/
+		ginkgo.It("should execute prestop https hook properly [MinimumKubeletVersion:1.23]", func(ctx context.Context) {
+			lifecycle := &v1.Lifecycle{
+				PreStop: &v1.LifecycleHandler{
+					HTTPGet: &v1.HTTPGetAction{
+						Scheme: v1.URISchemeHTTPS,
+						Path:   "/echo?msg=prestop",
+						Host:   targetIP,
+						Port:   intstr.FromInt(9090),
+					},
+				},
+			}
+			podWithHook := getSidecarPodWithHook("pod-with-prestop-https-hook", imageutils.GetPauseImageName(), lifecycle)
+			// make sure we spawn the test pod on the same node as the webserver.
+			nodeSelection := e2epod.NodeSelection{}
+			e2epod.SetAffinity(&nodeSelection, targetNode)
+			e2epod.SetNodeSelection(&podWithHook.Spec, nodeSelection)
+			testPodWithHook(ctx, podWithHook)
+		})
+	})
+})
 
 func getPodWithHook(name string, image string, lifecycle *v1.Lifecycle) *v1.Pod {
 	return &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
@@ -269,3 +517,30 @@ func getPodWithHook(name string, image string, lifecycle *v1.Lifecycle) *v1.Pod
 		},
 	}
 }
+
+func getSidecarPodWithHook(name string, image string, lifecycle *v1.Lifecycle) *v1.Pod {
+	return &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+		Spec: v1.PodSpec{
+			InitContainers: []v1.Container{
+				{
+					Name:      name,
+					Image:     image,
+					Lifecycle: lifecycle,
+					RestartPolicy: func() *v1.ContainerRestartPolicy {
+						restartPolicy := v1.ContainerRestartPolicyAlways
+						return &restartPolicy
+					}(),
+				},
+			},
+			Containers: []v1.Container{
+				{
+					Name:  "main",
+					Image: imageutils.GetPauseImageName(),
+				},
+			},
+		},
+	}
+}
@@ -845,7 +845,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 		framework.ExpectNoError(results.StartsBefore(restartableInit2, init3))
 	})
 
-	ginkgo.It("should run both restartable init cotnainers and third init container together", func() {
+	ginkgo.It("should run both restartable init containers and third init container together", func() {
 		framework.ExpectNoError(results.RunTogether(restartableInit2, restartableInit1))
 		framework.ExpectNoError(results.RunTogether(restartableInit1, init3))
 		framework.ExpectNoError(results.RunTogether(restartableInit2, init3))
@@ -856,7 +856,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 		framework.ExpectNoError(results.ExitsBefore(init3, regular1))
 	})
 
-	ginkgo.It("should run both restartable init cotnainers and a regular container together", func() {
+	ginkgo.It("should run both restartable init containers and a regular container together", func() {
 		framework.ExpectNoError(results.RunTogether(restartableInit1, regular1))
 		framework.ExpectNoError(results.RunTogether(restartableInit2, regular1))
 	})
@@ -1249,7 +1249,6 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 		ginkgo.It("should be running restartable init container and a failed Init container in parallel", func() {
 			framework.ExpectNoError(results.RunTogether(restartableInit1, init1))
 		})
-		// TODO: check preStop hooks when they are enabled
 	})
 })
 
@@ -1661,7 +1660,6 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 		ginkgo.It("should be running restartable init container and a failed Init container in parallel", func() {
 			framework.ExpectNoError(results.RunTogether(restartableInit1, init1))
 		})
-		// TODO: check preStop hooks when they are enabled
 	})
 })
 
@ -2075,11 +2073,10 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
			ginkgo.It("should be running restartable init container and a failed Init container in parallel", func() {
				framework.ExpectNoError(results.RunTogether(restartableInit1, init1))
			})
-			// TODO: check preStop hooks when they are enabled
		})
	})

-	ginkgo.It("should launch restartable init cotnainers serially considering the startup probe", func() {
+	ginkgo.It("should launch restartable init containers serially considering the startup probe", func() {

		restartableInit1 := "restartable-init-1"
		restartableInit2 := "restartable-init-2"
@ -2158,7 +2155,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
		framework.ExpectNoError(results.StartsBefore(restartableInit2, regular1))
	})

-	ginkgo.It("should not launch next container if the restartable init cotnainer failed to complete startup probe", func() {
+	ginkgo.It("should call the container's preStop hook and not launch next container if the restartable init container's startup probe fails", func() {

		restartableInit1 := "restartable-init-1"
		regular1 := "regular-1"
@ -2174,16 +2171,30 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
					Name:    restartableInit1,
					Image:   busyboxImage,
					Command: ExecCommand(restartableInit1, execCommand{
-						StartDelay: 30,
-						Delay:      600,
+						Delay:              600,
+						TerminationSeconds: 15,
						ExitCode: 0,
					}),
					StartupProbe: &v1.Probe{
-						PeriodSeconds:    1,
+						InitialDelaySeconds: 5,
						FailureThreshold: 1,
						ProbeHandler: v1.ProbeHandler{
							Exec: &v1.ExecAction{
-								Command: []string{"test", "-f", "started"},
+								Command: []string{
+									"sh",
+									"-c",
+									"exit 1",
+								},
							},
						},
					},
+					Lifecycle: &v1.Lifecycle{
+						PreStop: &v1.LifecycleHandler{
+							Exec: &v1.ExecAction{
+								Command: ExecCommand(prefixedName(PreStopPrefix, restartableInit1), execCommand{
+									Delay:    1,
+									ExitCode: 0,
+								}),
+							},
+						},
+					},
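The rewritten spec makes the startup probe fail deterministically: sh -c 'exit 1' after a 5-second initial delay, with failureThreshold 1, so the kubelet runs the preStop hook, kills the restartable init container, and restarts it under its Always restart policy without ever starting the next container. A related constraint worth keeping in mind: startup probes must declare successThreshold 1 (or leave it defaulted), so a sketch like the following would be rejected at validation time. The helper name and command are illustrative.

// Illustrative only: Kubernetes validation requires successThreshold == 1
// for startup probes, so the API server rejects a pod carrying this probe
// before it ever reaches the kubelet.
func sketchRejectedStartupProbe() *v1.Probe {
	return &v1.Probe{
		SuccessThreshold: 2, // invalid for startupProbe: must be 1
		FailureThreshold: 1,
		ProbeHandler: v1.ProbeHandler{
			Exec: &v1.ExecAction{Command: []string{"true"}},
		},
	}
}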
@ -2208,7 +2219,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
		client := e2epod.NewPodClient(f)
		pod = client.Create(context.TODO(), pod)

-		ginkgo.By("Waiting for the restartable init cotnainer to restart")
+		ginkgo.By("Waiting for the restartable init container to restart")
		err := WaitForPodInitContainerRestartCount(context.TODO(), f.ClientSet, pod.Namespace, pod.Name, 0, 2, 2*time.Minute)
		framework.ExpectNoError(err)

@ -2222,6 +2233,92 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
		results := parseOutput(pod)

		ginkgo.By("Analyzing results")
+		framework.ExpectNoError(results.RunTogether(restartableInit1, prefixedName(PreStopPrefix, restartableInit1)))
+		framework.ExpectNoError(results.Starts(prefixedName(PreStopPrefix, restartableInit1)))
+		framework.ExpectNoError(results.Exits(restartableInit1))
		framework.ExpectNoError(results.DoesntStart(regular1))
	})

+	ginkgo.It("should call the container's preStop hook and start the next container if the restartable init container's liveness probe fails", func() {
+
+		restartableInit1 := "restartable-init-1"
+		regular1 := "regular-1"
+
+		pod := &v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "restartable-init-container-failed-startup",
+			},
+			Spec: v1.PodSpec{
+				RestartPolicy: v1.RestartPolicyAlways,
+				InitContainers: []v1.Container{
+					{
+						Name:  restartableInit1,
+						Image: busyboxImage,
+						Command: ExecCommand(restartableInit1, execCommand{
+							Delay:              600,
+							TerminationSeconds: 15,
+							ExitCode:           0,
+						}),
+						LivenessProbe: &v1.Probe{
+							InitialDelaySeconds: 5,
+							FailureThreshold:    1,
+							ProbeHandler: v1.ProbeHandler{
+								Exec: &v1.ExecAction{
+									Command: []string{
+										"sh",
+										"-c",
+										"exit 1",
+									},
+								},
+							},
+						},
+						Lifecycle: &v1.Lifecycle{
+							PreStop: &v1.LifecycleHandler{
+								Exec: &v1.ExecAction{
+									Command: ExecCommand(prefixedName(PreStopPrefix, restartableInit1), execCommand{
+										Delay:    1,
+										ExitCode: 0,
+									}),
+								},
+							},
+						},
+						RestartPolicy: &containerRestartPolicyAlways,
+					},
+				},
+				Containers: []v1.Container{
+					{
+						Name:  regular1,
+						Image: busyboxImage,
+						Command: ExecCommand(regular1, execCommand{
+							Delay:    1,
+							ExitCode: 0,
+						}),
+					},
+				},
+			},
+		}
+
+		preparePod(pod)
+
+		client := e2epod.NewPodClient(f)
+		pod = client.Create(context.TODO(), pod)
+
+		ginkgo.By("Waiting for the restartable init container to restart")
+		err := WaitForPodInitContainerRestartCount(context.TODO(), f.ClientSet, pod.Namespace, pod.Name, 0, 2, 2*time.Minute)
+		framework.ExpectNoError(err)
+
+		err = WaitForPodContainerRestartCount(context.TODO(), f.ClientSet, pod.Namespace, pod.Name, 0, 1, 2*time.Minute)
+		framework.ExpectNoError(err)
+
+		pod, err = client.Get(context.TODO(), pod.Name, metav1.GetOptions{})
+		framework.ExpectNoError(err)
+
+		results := parseOutput(pod)
+
+		ginkgo.By("Analyzing results")
+		framework.ExpectNoError(results.RunTogether(restartableInit1, prefixedName(PreStopPrefix, restartableInit1)))
+		framework.ExpectNoError(results.Starts(prefixedName(PreStopPrefix, restartableInit1)))
+		framework.ExpectNoError(results.Exits(restartableInit1))
+		framework.ExpectNoError(results.Starts(regular1))
+	})
})
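The contrast with the startup-probe test above is deliberate: a liveness failure restarts a restartable init container that has already started, so pod initialization is not re-gated and regular-1 still runs, whereas a startup failure keeps the next container from ever launching. For reference, a minimal sketch of a liveness probe that validation accepts on a restartable init container; the period, thresholds, and health-file command are illustrative, and successThreshold must remain 1 for liveness probes as well.

// Sketch of a liveness probe that passes validation; the exec handler is
// a placeholder health-file check, not anything from this commit.
func sketchValidLivenessProbe() *v1.Probe {
	return &v1.Probe{
		PeriodSeconds:    10,
		SuccessThreshold: 1, // required value for liveness probes
		FailureThreshold: 3,
		ProbeHandler: v1.ProbeHandler{
			Exec: &v1.ExecAction{
				Command: []string{"sh", "-c", "test -f /tmp/healthy"},
			},
		},
	}
}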