correct unsafe sysctls e2e test case & use status reason check instead of events watch

Signed-off-by: Paco Xu <paco.xu@daocloud.io>
pacoxu
2021-04-16 18:01:44 +08:00
committed by Paco Xu
parent 355bc3df22
commit ddaa3466eb
2 changed files with 24 additions and 57 deletions
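The substantive change is in the last hunk of this file: instead of watching for error events, which races with the kubelet deleting failed pods that never ran a container, the test now polls the pod status until it reports phase Failed with the expected status reason. Below is a minimal sketch of that technique against plain client-go; the helper name, poll interval, and error handling are illustrative assumptions, not the actual e2epod.WaitForPodFailedReason implementation called in the diff.

// Illustrative sketch: poll a pod's status until it reaches phase Failed with
// the expected status reason (e.g. "SysctlForbidden"), instead of watching events.
package e2esketch

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForPodFailedReason is a hypothetical stand-in for the framework helper
// used in the diff; it returns nil once the pod is Failed with the given reason.
func waitForPodFailedReason(c kubernetes.Interface, namespace, name, reason string, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		pod, err := c.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err // stop on API errors; a real helper might tolerate transient ones
		}
		switch pod.Status.Phase {
		case v1.PodSucceeded:
			return false, fmt.Errorf("pod %s/%s succeeded, expected it to fail", namespace, name)
		case v1.PodFailed:
			if pod.Status.Reason == reason {
				return true, nil
			}
			return false, fmt.Errorf("pod %s/%s failed with reason %q, expected %q", namespace, name, pod.Status.Reason, reason)
		default:
			// Pending or Running: keep polling until the timeout expires.
			return false, nil
		}
	})
}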


@@ -114,56 +114,10 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
/*
Release: v1.21
Testname: Sysctl, allow specified unsafe sysctls
Description: Pod is created with kernel.shm_rmid_forced. Should allow unsafe sysctls that are specified.
Testname: Sysctls, reject invalid sysctls
Description: Pod is created with one valid and two invalid sysctls. Pod should not apply invalid sysctls.
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support sysctls
*/
ginkgo.It("should support unsafe sysctls which are actually allowed [MinimumKubeletVersion:1.21]", func() {
pod := testPod()
pod.Spec.SecurityContext = &v1.PodSecurityContext{
Sysctls: []v1.Sysctl{
{
Name: "kernel.shm_rmid_forced",
Value: "1",
},
},
}
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}
ginkgo.By("Creating a pod with the kernel.shm_rmid_forced sysctl")
pod = podClient.Create(pod)
ginkgo.By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
framework.ExpectNoError(err)
gomega.Expect(ev).To(gomega.BeNil())
ginkgo.By("Waiting for pod completion")
err = e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
framework.ExpectNoError(err)
pod, err = podClient.Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
ginkgo.By("Checking that the pod succeeded")
framework.ExpectEqual(pod.Status.Phase, v1.PodSucceeded)
ginkgo.By("Getting logs from the pod")
log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
framework.ExpectNoError(err)
ginkgo.By("Checking that the sysctl is actually updated")
gomega.Expect(log).To(gomega.ContainSubstring("kernel.shm_rmid_forced = 1"))
})
/*
Release: v1.21
Testname: Sysctls, reject invalid sysctls
Description: Pod is created with one valid and two invalid sysctls. Pod should not apply invalid sysctls.
[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support sysctls
*/
framework.ConformanceIt("should reject invalid sysctls [MinimumKubeletVersion:1.21]", func() {
pod := testPod()
pod.Spec.SecurityContext = &v1.PodSecurityContext{
@@ -199,6 +153,7 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
gomega.Expect(err.Error()).NotTo(gomega.ContainSubstring("kernel.shmmax"))
})
// Pod is created with kernel.msgmax, an unsafe sysctl.
ginkgo.It("should not launch unsafe, but not explicitly enabled sysctls on the node [MinimumKubeletVersion:1.21]", func() {
pod := testPod()
pod.Spec.SecurityContext = &v1.PodSecurityContext{
@@ -213,15 +168,9 @@ var _ = SIGDescribe("Sysctls [LinuxOnly] [NodeConformance]", func() {
ginkgo.By("Creating a pod with an ignorelisted, but not allowlisted sysctl on the node")
pod = podClient.Create(pod)
ginkgo.By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := f.PodClient().WaitForErrorEventOrSuccess(pod)
ginkgo.By("Wait for pod failed reason")
// watch for pod failed reason instead of termination of pod
err := e2epod.WaitForPodFailedReason(f.ClientSet, pod, "SysctlForbidden", f.Timeouts.PodStart)
framework.ExpectNoError(err)
ginkgo.By("Checking that the pod was rejected")
gomega.Expect(ev).ToNot(gomega.BeNil())
framework.ExpectEqual(ev.Reason, "SysctlForbidden")
})
})
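For context, the rejection path exercised in the last hunk can be reproduced with a plain client-go client roughly as follows. This reuses the waitForPodFailedReason sketch above (same package and imports); the image, names, and sysctl value are placeholders rather than the suite's testPod() helper. kernel.msgmax is outside the kubelet's safe sysctl set, so a kubelet without a matching --allowed-unsafe-sysctls setting rejects the pod with status reason SysctlForbidden, which is exactly what the updated test waits for.

// Illustrative usage of the pattern adopted in the last hunk (placeholders, not
// the actual e2e test): create a pod requesting an unsafe, non-allowlisted
// sysctl and assert it is rejected with status reason "SysctlForbidden".
func checkUnsafeSysctlRejected(ctx context.Context, c kubernetes.Interface, namespace string) error {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "sysctl-"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			SecurityContext: &v1.PodSecurityContext{
				Sysctls: []v1.Sysctl{
					// kernel.msgmax is not in the kubelet's safe sysctl set, so the
					// kubelet rejects the pod unless --allowed-unsafe-sysctls permits it.
					{Name: "kernel.msgmax", Value: "10000000000"},
				},
			},
			Containers: []v1.Container{{
				Name:    "test-container",
				Image:   "busybox",
				Command: []string{"/bin/sysctl", "kernel.msgmax"},
			}},
		},
	}
	created, err := c.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	// Poll for the failure reason instead of watching for events, mirroring the diff.
	return waitForPodFailedReason(c, namespace, created.Name, "SysctlForbidden", 5*time.Minute)
}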