Allow restartable init containers to have lifecycle
commit 3bf282652f
parent 7ef2d674e2
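What this enables, as a minimal sketch (not part of this commit; names and values are illustrative): an init container with restartPolicy: Always, i.e. a restartable "sidecar" init container, may now carry lifecycle hooks, which validation previously rejected for every init container. The example below only builds the object with the public k8s.io/api types; it does not talk to an API server.

// Hypothetical example of a pod spec this change permits.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	restartAlways := v1.ContainerRestartPolicyAlways

	pod := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "sidecar-demo"},
		Spec: v1.PodSpec{
			InitContainers: []v1.Container{{
				Name:          "log-shipper",
				Image:         "busybox",
				RestartPolicy: &restartAlways, // restartable ("sidecar") init container
				Lifecycle: &v1.Lifecycle{
					// After this change, hooks on a restartable init container are
					// validated like a regular container's instead of being forbidden.
					PreStop: &v1.LifecycleHandler{
						Exec: &v1.ExecAction{Command: []string{"sh", "-c", "echo pre stop"}},
					},
				},
			}},
			Containers: []v1.Container{{Name: "app", Image: "busybox"}},
		},
	}
	fmt.Println(pod.Spec.InitContainers[0].Name)
}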
@@ -3241,9 +3241,8 @@ func validateInitContainers(containers []core.Container, regularContainers []cor
 
 		switch {
 		case restartAlways:
-			// TODO: Allow restartable init containers to have a lifecycle hook.
 			if ctr.Lifecycle != nil {
-				allErrs = append(allErrs, field.Forbidden(idxPath.Child("lifecycle"), "may not be set for init containers"))
+				allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, idxPath.Child("lifecycle"))...)
 			}
 			allErrs = append(allErrs, validateLivenessProbe(ctr.LivenessProbe, idxPath.Child("livenessProbe"))...)
 			allErrs = append(allErrs, validateReadinessProbe(ctr.ReadinessProbe, idxPath.Child("readinessProbe"))...)
@@ -3252,7 +3251,7 @@ func validateInitContainers(containers []core.Container, regularContainers []cor
 		default:
 			// These fields are disallowed for init containers.
 			if ctr.Lifecycle != nil {
-				allErrs = append(allErrs, field.Forbidden(idxPath.Child("lifecycle"), "may not be set for init containers"))
+				allErrs = append(allErrs, field.Forbidden(idxPath.Child("lifecycle"), "may not be set for init containers without restartPolicy=Always"))
			}
 			if ctr.LivenessProbe != nil {
 				allErrs = append(allErrs, field.Forbidden(idxPath.Child("livenessProbe"), "may not be set for init containers without restartPolicy=Always"))
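Read together, the two hunks above make validateInitContainers branch on the container's restart policy: a restartable init container's lifecycle block is passed to validateLifecycle, while any other init container still gets a Forbidden error, now with a message that points at restartPolicy=Always. Below is a small self-contained sketch of that rule using the public v1 types; the helper name and error text are illustrative, not the upstream implementation.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// initContainerLifecycleError mirrors the branch structure shown in the diff:
// lifecycle hooks on an init container are only accepted when its
// restartPolicy is Always; otherwise they are rejected outright.
func initContainerLifecycleError(ctr v1.Container) error {
	if ctr.Lifecycle == nil {
		return nil
	}
	restartAlways := ctr.RestartPolicy != nil && *ctr.RestartPolicy == v1.ContainerRestartPolicyAlways
	if restartAlways {
		return nil // upstream would go on to validate the hook contents (validateLifecycle)
	}
	return fmt.Errorf("lifecycle: may not be set for init containers without restartPolicy=Always")
}

func main() {
	always := v1.ContainerRestartPolicyAlways
	hook := &v1.Lifecycle{PreStop: &v1.LifecycleHandler{Exec: &v1.ExecAction{Command: []string{"sh", "-c", "echo bye"}}}}

	sidecar := v1.Container{Name: "sidecar", RestartPolicy: &always, Lifecycle: hook}
	plain := v1.Container{Name: "init", Lifecycle: hook}

	fmt.Println(initContainerLifecycleError(sidecar)) // <nil>
	fmt.Println(initContainerLifecycleError(plain))   // rejected
}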
@@ -3370,8 +3369,10 @@ func validateContainers(containers []core.Container, volumes map[string]core.Vol
 			allNames.Insert(ctr.Name)
 		}
 
-		// These fields are only allowed for regular containers, so only check supported values here.
-		// Init and ephemeral container validation will return field.Forbidden() for these paths.
+		// These fields are allowed for regular containers and restartable init
+		// containers.
+		// Regular init container and ephemeral container validation will return
+		// field.Forbidden() for these paths.
 		if ctr.Lifecycle != nil {
 			allErrs = append(allErrs, validateLifecycle(ctr.Lifecycle, path.Child("lifecycle"))...)
 		}
@@ -8164,11 +8164,23 @@ func TestValidateInitContainers(t *testing.T) {
 		ImagePullPolicy: "IfNotPresent",
 		TerminationMessagePolicy: "File",
 	}, {
-		Name: "container-3-restart-always-with-probes",
+		Name: "container-3-restart-always-with-lifecycle-hook-and-probes",
 		Image: "image",
 		ImagePullPolicy: "IfNotPresent",
 		TerminationMessagePolicy: "File",
 		RestartPolicy: &containerRestartPolicyAlways,
+		Lifecycle: &core.Lifecycle{
+			PostStart: &core.LifecycleHandler{
+				Exec: &core.ExecAction{
+					Command: []string{"echo", "post start"},
+				},
+			},
+			PreStop: &core.LifecycleHandler{
+				Exec: &core.ExecAction{
+					Command: []string{"echo", "pre stop"},
+				},
+			},
+		},
 		LivenessProbe: &core.Probe{
 			ProbeHandler: core.ProbeHandler{
 				TCPSocket: &core.TCPSocketAction{
@@ -8447,6 +8459,127 @@ func TestValidateInitContainers(t *testing.T) {
			},
 		}},
 		field.ErrorList{{Type: field.ErrorTypeInvalid, Field: "initContainers[0].livenessProbe.successThreshold", BadValue: int32(2)}},
+	}, {
+		"invalid lifecycle, no exec command.",
+		line(),
+		[]core.Container{{
+			Name: "life-123",
+			Image: "image",
+			ImagePullPolicy: "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy: &containerRestartPolicyAlways,
+			Lifecycle: &core.Lifecycle{
+				PreStop: &core.LifecycleHandler{
+					Exec: &core.ExecAction{},
+				},
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeRequired, Field: "initContainers[0].lifecycle.preStop.exec.command", BadValue: ""}},
+	}, {
+		"invalid lifecycle, no http path.",
+		line(),
+		[]core.Container{{
+			Name: "life-123",
+			Image: "image",
+			ImagePullPolicy: "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy: &containerRestartPolicyAlways,
+			Lifecycle: &core.Lifecycle{
+				PreStop: &core.LifecycleHandler{
+					HTTPGet: &core.HTTPGetAction{
+						Port: intstr.FromInt32(80),
+						Scheme: "HTTP",
+					},
+				},
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeRequired, Field: "initContainers[0].lifecycle.preStop.httpGet.path", BadValue: ""}},
+	}, {
+		"invalid lifecycle, no http port.",
+		line(),
+		[]core.Container{{
+			Name: "life-123",
+			Image: "image",
+			ImagePullPolicy: "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy: &containerRestartPolicyAlways,
+			Lifecycle: &core.Lifecycle{
+				PreStop: &core.LifecycleHandler{
+					HTTPGet: &core.HTTPGetAction{
+						Path: "/",
+						Scheme: "HTTP",
+					},
+				},
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeInvalid, Field: "initContainers[0].lifecycle.preStop.httpGet.port", BadValue: 0}},
+	}, {
+		"invalid lifecycle, no http scheme.",
+		line(),
+		[]core.Container{{
+			Name: "life-123",
+			Image: "image",
+			ImagePullPolicy: "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy: &containerRestartPolicyAlways,
+			Lifecycle: &core.Lifecycle{
+				PreStop: &core.LifecycleHandler{
+					HTTPGet: &core.HTTPGetAction{
+						Path: "/",
+						Port: intstr.FromInt32(80),
+					},
+				},
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeNotSupported, Field: "initContainers[0].lifecycle.preStop.httpGet.scheme", BadValue: core.URIScheme("")}},
+	}, {
+		"invalid lifecycle, no tcp socket port.",
+		line(),
+		[]core.Container{{
+			Name: "life-123",
+			Image: "image",
+			ImagePullPolicy: "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy: &containerRestartPolicyAlways,
+			Lifecycle: &core.Lifecycle{
+				PreStop: &core.LifecycleHandler{
+					TCPSocket: &core.TCPSocketAction{},
+				},
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeInvalid, Field: "initContainers[0].lifecycle.preStop.tcpSocket.port", BadValue: 0}},
+	}, {
+		"invalid lifecycle, zero tcp socket port.",
+		line(),
+		[]core.Container{{
+			Name: "life-123",
+			Image: "image",
+			ImagePullPolicy: "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy: &containerRestartPolicyAlways,
+			Lifecycle: &core.Lifecycle{
+				PreStop: &core.LifecycleHandler{
+					TCPSocket: &core.TCPSocketAction{
+						Port: intstr.FromInt32(0),
+					},
+				},
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeInvalid, Field: "initContainers[0].lifecycle.preStop.tcpSocket.port", BadValue: 0}},
+	}, {
+		"invalid lifecycle, no action.",
+		line(),
+		[]core.Container{{
+			Name: "life-123",
+			Image: "image",
+			ImagePullPolicy: "IfNotPresent",
+			TerminationMessagePolicy: "File",
+			RestartPolicy: &containerRestartPolicyAlways,
+			Lifecycle: &core.Lifecycle{
+				PreStop: &core.LifecycleHandler{},
+			},
+		}},
+		field.ErrorList{{Type: field.ErrorTypeRequired, Field: "initContainers[0].lifecycle.preStop", BadValue: ""}},
 	},
 	}
 	for _, tc := range errorCases {
@@ -341,7 +341,7 @@ func TestToKubeContainerStatusWithResources(t *testing.T) {
 	}
 }
 
-func TestLifeCycleHook(t *testing.T) {
+func testLifeCycleHook(t *testing.T, testPod *v1.Pod, testContainer *v1.Container) {
 
 	// Setup
 	fakeRuntime, _, m, _ := createTestRuntimeManager()
@@ -352,23 +352,6 @@ func TestLifeCycleHook(t *testing.T) {
 		ID: "foo",
 	}
 
-	testPod := &v1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: "bar",
-			Namespace: "default",
-		},
-		Spec: v1.PodSpec{
-			Containers: []v1.Container{
-				{
-					Name: "foo",
-					Image: "busybox",
-					ImagePullPolicy: v1.PullIfNotPresent,
-					Command: []string{"testCommand"},
-					WorkingDir: "testWorkingDir",
-				},
-			},
-		},
-	}
 	cmdPostStart := &v1.Lifecycle{
 		PostStart: &v1.LifecycleHandler{
 			Exec: &v1.ExecAction{
@@ -418,7 +401,7 @@ func TestLifeCycleHook(t *testing.T) {
 	// Configured and works as expected
 	t.Run("PreStop-CMDExec", func(t *testing.T) {
 		ctx := context.Background()
-		testPod.Spec.Containers[0].Lifecycle = cmdLifeCycle
+		testContainer.Lifecycle = cmdLifeCycle
 		m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod)
 		if fakeRunner.Cmd[0] != cmdLifeCycle.PreStop.Exec.Command[0] {
 			t.Errorf("CMD Prestop hook was not invoked")
@@ -432,7 +415,7 @@ func TestLifeCycleHook(t *testing.T) {
 		defer func() { fakeHTTP.req = nil }()
 		defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ConsistentHTTPGetHandlers, false)()
 		httpLifeCycle.PreStop.HTTPGet.Port = intstr.IntOrString{}
-		testPod.Spec.Containers[0].Lifecycle = httpLifeCycle
+		testContainer.Lifecycle = httpLifeCycle
 		m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod)
 
 		if fakeHTTP.req == nil || !strings.Contains(fakeHTTP.req.URL.String(), httpLifeCycle.PreStop.HTTPGet.Host) {
@@ -443,7 +426,7 @@ func TestLifeCycleHook(t *testing.T) {
 		ctx := context.Background()
 		defer func() { fakeHTTP.req = nil }()
 		httpLifeCycle.PreStop.HTTPGet.Port = intstr.FromInt32(80)
-		testPod.Spec.Containers[0].Lifecycle = httpLifeCycle
+		testContainer.Lifecycle = httpLifeCycle
 		m.killContainer(ctx, testPod, cID, "foo", "testKill", "", &gracePeriod)
 
 		if fakeHTTP.req == nil || !strings.Contains(fakeHTTP.req.URL.String(), httpLifeCycle.PreStop.HTTPGet.Host) {
@@ -473,8 +456,7 @@ func TestLifeCycleHook(t *testing.T) {
 		// Fake all the things you need before trying to create a container
 		fakeSandBox, _ := makeAndSetFakePod(t, m, fakeRuntime, testPod)
 		fakeSandBoxConfig, _ := m.generatePodSandboxConfig(testPod, 0)
-		testPod.Spec.Containers[0].Lifecycle = cmdPostStart
-		testContainer := &testPod.Spec.Containers[0]
+		testContainer.Lifecycle = cmdPostStart
 		fakePodStatus := &kubecontainer.PodStatus{
 			ContainerStatuses: []*kubecontainer.Status{
 				{
@@ -500,6 +482,51 @@ func TestLifeCycleHook(t *testing.T) {
 	})
 }
 
+func TestLifeCycleHook(t *testing.T) {
+	testPod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "bar",
+			Namespace: "default",
+		},
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
+				{
+					Name: "foo",
+					Image: "busybox",
+					ImagePullPolicy: v1.PullIfNotPresent,
+					Command: []string{"testCommand"},
+					WorkingDir: "testWorkingDir",
+				},
+			},
+		},
+	}
+
+	testLifeCycleHook(t, testPod, &testPod.Spec.Containers[0])
+}
+
+func TestLifeCycleHookForRestartableInitContainer(t *testing.T) {
+	testPod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "bar",
+			Namespace: "default",
+		},
+		Spec: v1.PodSpec{
+			InitContainers: []v1.Container{
+				{
+					Name: "foo",
+					Image: "busybox",
+					ImagePullPolicy: v1.PullIfNotPresent,
+					Command: []string{"testCommand"},
+					WorkingDir: "testWorkingDir",
+					RestartPolicy: &containerRestartPolicyAlways,
+				},
+			},
+		},
+	}
+
+	testLifeCycleHook(t, testPod, &testPod.Spec.InitContainers[0])
+}
+
 func TestStartSpec(t *testing.T) {
 	podStatus := &kubecontainer.PodStatus{
 		ContainerStatuses: []*kubecontainer.Status{
@@ -845,7 +845,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 		framework.ExpectNoError(results.StartsBefore(restartableInit2, init3))
 	})
 
-	ginkgo.It("should run both restartable init cotnainers and third init container together", func() {
+	ginkgo.It("should run both restartable init containers and third init container together", func() {
 		framework.ExpectNoError(results.RunTogether(restartableInit2, restartableInit1))
 		framework.ExpectNoError(results.RunTogether(restartableInit1, init3))
 		framework.ExpectNoError(results.RunTogether(restartableInit2, init3))
@@ -856,7 +856,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 		framework.ExpectNoError(results.ExitsBefore(init3, regular1))
 	})
 
-	ginkgo.It("should run both restartable init cotnainers and a regular container together", func() {
+	ginkgo.It("should run both restartable init containers and a regular container together", func() {
 		framework.ExpectNoError(results.RunTogether(restartableInit1, regular1))
 		framework.ExpectNoError(results.RunTogether(restartableInit2, regular1))
 	})
@@ -1249,7 +1249,6 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 			ginkgo.It("should be running restartable init container and a failed Init container in parallel", func() {
 				framework.ExpectNoError(results.RunTogether(restartableInit1, init1))
 			})
-			// TODO: check preStop hooks when they are enabled
 		})
 	})
 
@@ -1661,7 +1660,6 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 			ginkgo.It("should be running restartable init container and a failed Init container in parallel", func() {
 				framework.ExpectNoError(results.RunTogether(restartableInit1, init1))
 			})
-			// TODO: check preStop hooks when they are enabled
 		})
 	})
 
@@ -2075,11 +2073,10 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 			ginkgo.It("should be running restartable init container and a failed Init container in parallel", func() {
 				framework.ExpectNoError(results.RunTogether(restartableInit1, init1))
 			})
-			// TODO: check preStop hooks when they are enabled
 		})
 	})
 
-	ginkgo.It("should launch restartable init cotnainers serially considering the startup probe", func() {
+	ginkgo.It("should launch restartable init containers serially considering the startup probe", func() {
 
 		restartableInit1 := "restartable-init-1"
 		restartableInit2 := "restartable-init-2"
@@ -2158,7 +2155,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 		framework.ExpectNoError(results.StartsBefore(restartableInit2, regular1))
 	})
 
-	ginkgo.It("should not launch next container if the restartable init cotnainer failed to complete startup probe", func() {
+	ginkgo.It("should call the container's preStop hook and not launch next container if the restartable init container's startup probe fails", func() {
 
 		restartableInit1 := "restartable-init-1"
 		regular1 := "regular-1"
@@ -2174,16 +2171,30 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 					Name: restartableInit1,
 					Image: busyboxImage,
 					Command: ExecCommand(restartableInit1, execCommand{
-						StartDelay: 30,
-						Delay: 600,
+						Delay: 600,
+						TerminationSeconds: 15,
 						ExitCode: 0,
 					}),
 					StartupProbe: &v1.Probe{
-						PeriodSeconds: 1,
+						InitialDelaySeconds: 5,
 						FailureThreshold: 1,
 						ProbeHandler: v1.ProbeHandler{
 							Exec: &v1.ExecAction{
-								Command: []string{"test", "-f", "started"},
+								Command: []string{
+									"sh",
+									"-c",
+									"exit 1",
+								},
+							},
+						},
+					},
+					Lifecycle: &v1.Lifecycle{
+						PreStop: &v1.LifecycleHandler{
+							Exec: &v1.ExecAction{
+								Command: ExecCommand(prefixedName(PreStopPrefix, restartableInit1), execCommand{
+									Delay: 1,
+									ExitCode: 0,
+								}),
 							},
 						},
 					},
@@ -2208,7 +2219,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 		client := e2epod.NewPodClient(f)
 		pod = client.Create(context.TODO(), pod)
 
-		ginkgo.By("Waiting for the restartable init cotnainer to restart")
+		ginkgo.By("Waiting for the restartable init container to restart")
 		err := WaitForPodInitContainerRestartCount(context.TODO(), f.ClientSet, pod.Namespace, pod.Name, 0, 2, 2*time.Minute)
 		framework.ExpectNoError(err)
 
@@ -2222,6 +2233,92 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers] Containers Lifecycle "
 		results := parseOutput(pod)
 
 		ginkgo.By("Analyzing results")
+		framework.ExpectNoError(results.RunTogether(restartableInit1, prefixedName(PreStopPrefix, restartableInit1)))
+		framework.ExpectNoError(results.Starts(prefixedName(PreStopPrefix, restartableInit1)))
+		framework.ExpectNoError(results.Exits(restartableInit1))
 		framework.ExpectNoError(results.DoesntStart(regular1))
 	})
+
+	ginkgo.It("should call the container's preStop hook and start the next container if the restartable init container's liveness probe fails", func() {
+
+		restartableInit1 := "restartable-init-1"
+		regular1 := "regular-1"
+
+		pod := &v1.Pod{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "restartable-init-container-failed-startup",
+			},
+			Spec: v1.PodSpec{
+				RestartPolicy: v1.RestartPolicyAlways,
+				InitContainers: []v1.Container{
+					{
+						Name: restartableInit1,
+						Image: busyboxImage,
+						Command: ExecCommand(restartableInit1, execCommand{
+							Delay: 600,
+							TerminationSeconds: 15,
+							ExitCode: 0,
+						}),
+						LivenessProbe: &v1.Probe{
+							InitialDelaySeconds: 5,
+							FailureThreshold: 1,
+							ProbeHandler: v1.ProbeHandler{
+								Exec: &v1.ExecAction{
+									Command: []string{
+										"sh",
+										"-c",
+										"exit 1",
+									},
+								},
+							},
+						},
+						Lifecycle: &v1.Lifecycle{
+							PreStop: &v1.LifecycleHandler{
+								Exec: &v1.ExecAction{
+									Command: ExecCommand(prefixedName(PreStopPrefix, restartableInit1), execCommand{
+										Delay: 1,
+										ExitCode: 0,
+									}),
+								},
+							},
+						},
+						RestartPolicy: &containerRestartPolicyAlways,
+					},
+				},
+				Containers: []v1.Container{
+					{
+						Name: regular1,
+						Image: busyboxImage,
+						Command: ExecCommand(regular1, execCommand{
+							Delay: 1,
+							ExitCode: 0,
+						}),
+					},
+				},
+			},
+		}
+
+		preparePod(pod)
+
+		client := e2epod.NewPodClient(f)
+		pod = client.Create(context.TODO(), pod)
+
+		ginkgo.By("Waiting for the restartable init container to restart")
+		err := WaitForPodInitContainerRestartCount(context.TODO(), f.ClientSet, pod.Namespace, pod.Name, 0, 2, 2*time.Minute)
+		framework.ExpectNoError(err)
+
+		err = WaitForPodContainerRestartCount(context.TODO(), f.ClientSet, pod.Namespace, pod.Name, 0, 1, 2*time.Minute)
+		framework.ExpectNoError(err)
+
+		pod, err = client.Get(context.TODO(), pod.Name, metav1.GetOptions{})
+		framework.ExpectNoError(err)
+
+		results := parseOutput(pod)
+
+		ginkgo.By("Analyzing results")
+		framework.ExpectNoError(results.RunTogether(restartableInit1, prefixedName(PreStopPrefix, restartableInit1)))
+		framework.ExpectNoError(results.Starts(prefixedName(PreStopPrefix, restartableInit1)))
+		framework.ExpectNoError(results.Exits(restartableInit1))
+		framework.ExpectNoError(results.Starts(regular1))
+	})
 })