Add SELinux disruptive test

Jan Safranek 2022-11-03 17:40:17 +01:00
parent cf912a2512
commit 802979c295
3 changed files with 140 additions and 15 deletions


@@ -18,7 +18,6 @@ package testsuites
import (
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes"
@@ -90,7 +89,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
f := framework.NewFrameworkWithCustomTimeouts("disruptive", storageframework.GetDriverTimeouts(driver))
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
init := func() {
init := func(accessModes []v1.PersistentVolumeAccessMode) {
l = local{}
l.ns = f.Namespace
l.cs = f.ClientSet
@@ -99,7 +98,20 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
l.config = driver.PrepareTest(f)
testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
if accessModes == nil {
l.resource = storageframework.CreateVolumeResource(
driver,
l.config,
pattern,
testVolumeSizeRange)
} else {
l.resource = storageframework.CreateVolumeResourceWithAccessModes(
driver,
l.config,
pattern,
testVolumeSizeRange,
accessModes)
}
}
cleanup := func() {
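The access-mode parameter exists for the new SELinux cases below: the SELinuxMountReadWriteOncePod feature applies, at this stage, only to volumes used with the ReadWriteOncePod access mode, so those tests pin the mode instead of taking the driver's default. For orientation, a claim with that mode looks roughly like this sketch (field values are placeholders, not what CreateVolumeResourceWithAccessModes actually produces):

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePVC only illustrates the ReadWriteOncePod access mode that init()
// requests for the SELinux cases; the real claim is built by the storage
// framework and also carries a storage request and storage class.
var examplePVC = &v1.PersistentVolumeClaim{
	ObjectMeta: metav1.ObjectMeta{GenerateName: "disruptive-"},
	Spec: v1.PersistentVolumeClaimSpec{
		AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod},
	},
}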
@@ -120,13 +132,13 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
}
type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod)
type disruptiveTest struct {
type singlePodTestBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod)
type singlePodTest struct {
testItStmt string
runTestFile testBody
runTestBlock testBody
runTestFile singlePodTestBody
runTestBlock singlePodTestBody
}
disruptiveTestTable := []disruptiveTest{
singlePodTests := []singlePodTest{
{
testItStmt: "Should test that pv written before kubelet restart is readable after restart.",
runTestFile: storageutils.TestKubeletRestartsAndRestoresMount,
@@ -144,12 +156,12 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
},
}
for _, test := range disruptiveTestTable {
func(t disruptiveTest) {
for _, test := range singlePodTests {
func(t singlePodTest) {
if (pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil) ||
(pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil) {
ginkgo.It(t.testItStmt, func() {
init()
init(nil)
defer cleanup()
var err error
@@ -182,4 +194,80 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
}
}(test)
}
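Both test tables (this one and the multi-pod one added below) use the same registration idiom: each loop iteration immediately invokes an anonymous function with the current element, so the ginkgo.It body, which only runs later, captures its own copy of the test case; with pre-Go 1.22 loop-variable scoping, capturing the loop variable directly would make every registered body see the last entry. A stripped-down, self-contained illustration of the idiom (names here are made up):

package main

import "fmt"

func main() {
	tests := []string{"first", "second", "third"}
	var callbacks []func()
	for _, tc := range tests {
		func(t string) {
			// t is a per-iteration copy, so the callback below keeps referring
			// to "its" test case even though it runs after the loop finishes.
			callbacks = append(callbacks, func() { fmt.Println(t) })
		}(tc)
	}
	for _, cb := range callbacks {
		cb() // prints first, second, third
	}
}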
type multiplePodTestBody func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod)
type multiplePodTest struct {
testItStmt string
changeSELinuxContexts bool
runTestFile multiplePodTestBody
}
multiplePodTests := []multiplePodTest{
{
testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns.",
runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, false, false, pod2)
},
},
{
testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns.",
runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, true, false, pod2)
},
},
{
testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinuxMountReadWriteOncePod].",
changeSELinuxContexts: true,
runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, false, false, pod2)
},
},
{
testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinuxMountReadWriteOncePod].",
changeSELinuxContexts: true,
runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, true, false, pod2)
},
},
}
for _, test := range multiplePodTests {
func(t multiplePodTest) {
if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil {
ginkgo.It(t.testItStmt, func() {
init([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod})
defer cleanup()
var err error
var pvcs []*v1.PersistentVolumeClaim
var inlineSources []*v1.VolumeSource
if pattern.VolType == storageframework.InlineVolume {
inlineSources = append(inlineSources, l.resource.VolSource)
} else {
pvcs = append(pvcs, l.resource.Pvc)
}
ginkgo.By("Creating a pod with pvc")
podConfig := e2epod.Config{
NS: l.ns.Name,
PVCs: pvcs,
InlineVolumeSources: inlineSources,
SeLinuxLabel: e2epv.SELinuxLabel,
NodeSelection: l.config.ClientNodeSelection,
ImageID: e2epod.GetDefaultTestImageID(),
}
l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, &podConfig, f.Timeouts.PodStart)
framework.ExpectNoError(err, "While creating pods for kubelet restart test")
if t.changeSELinuxContexts {
// Different from e2epv.SELinuxLabel
podConfig.SeLinuxLabel = &v1.SELinuxOptions{Level: "s0:c98,c99"}
}
pod2, err := e2epod.MakeSecPod(&podConfig)
// Set NodeName directly to schedule the second pod on the same node as the first one, bypassing the scheduler.
pod2.Spec.NodeName = l.pod.Spec.NodeName
framework.ExpectNoError(err, "While creating second pod for kubelet restart test")
if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil {
t.runTestFile(l.cs, l.config.Framework, l.pod, pod2)
}
})
}
}(test)
}
}
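The two [Feature:SELinuxMountReadWriteOncePod] cases above hinge on the second pod running with a different SELinux context than the first; e2epod.MakeSecPod effectively places podConfig.SeLinuxLabel into the pod's security context, so overriding the label is all that is needed. A minimal, self-contained sketch of that relationship (the first level is a placeholder standing in for e2epv.SELinuxLabel; only the fact that the two levels differ matters):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// First pod: the framework's default label (e2epv.SELinuxLabel in the test).
	first := &v1.PodSecurityContext{
		SELinuxOptions: &v1.SELinuxOptions{Level: "s0:c0,c1"}, // placeholder level
	}
	// Second pod: the distinct MCS level hard-coded by the new test, forcing
	// the volume to be mounted with a different SELinux context.
	second := &v1.PodSecurityContext{
		SELinuxOptions: &v1.SELinuxOptions{Level: "s0:c98,c99"},
	}
	fmt.Println(first.SELinuxOptions.Level != second.SELinuxOptions.Level) // true
}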


@@ -1002,7 +1002,7 @@ func testSubpathReconstruction(f *framework.Framework, hostExec storageutils.Hos
}
framework.ExpectNotEqual(podNode, nil, "pod node should exist in schedulable nodes")
storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)
storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true, nil)
if podNode != nil {
mountPoints := globalMountPointsByNode[podNode.Name]


@@ -133,7 +133,8 @@ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Frame
// TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
// forceDelete indicates whether the pod is forcefully deleted.
// checkSubpath indicates whether the subpath should be checked.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
// If secondPod is set, it is started while the kubelet is down, to check that the volume is usable while the old pod is being deleted and the new pod is starting.
func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool, secondPod *v1.Pod) {
nodeIP, err := getHostAddress(c, clientPod)
framework.ExpectNoError(err)
nodeIP = nodeIP + ":22"
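Existing callers (the subpath test above and the single-pod wrappers at the bottom of this file) opt out of the new behaviour by passing nil; the new SELinux cases hand in a pod built with e2epod.MakeSecPod so the helper can create it while the kubelet is still down. A sketch of the two call styles, assuming the same identifiers and imports as the surrounding test code:

// Sketch only: c, f, clientPod, pod1 and pod2 are assumed to come from the
// enclosing test, as in the disruptive suite above.
func exampleSecondPodUsage(c clientset.Interface, f *framework.Framework, clientPod, pod1, pod2 *v1.Pod) {
	// Pre-existing callers keep the old single-pod behaviour.
	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false /* forceDelete */, false /* checkSubpath */, nil /* secondPod */)
	// The new SELinux cases let the helper start pod2 while the kubelet is down
	// and verify it can read the data written by pod1.
	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, true /* forceDelete */, false /* checkSubpath */, pod2)
}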
@@ -152,6 +153,12 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
}
ginkgo.By("Writing to the volume.")
path := "/mnt/volume1"
byteLen := 64
seed := time.Now().UTC().UnixNano()
CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
// Make sure the kubelet is started again after the test finishes, whether the test fails or not.
defer func() {
KubeletCommand(KStart, c, clientPod)
@@ -159,6 +166,12 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
ginkgo.By("Stopping the kubelet.")
KubeletCommand(KStop, c, clientPod)
if secondPod != nil {
ginkgo.By("Starting the second pod")
_, err = c.CoreV1().Pods(clientPod.Namespace).Create(context.TODO(), secondPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "when starting the second pod")
}
ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
if forceDelete {
err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
@@ -180,6 +193,29 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
time.Sleep(30 * time.Second)
}
if secondPod != nil {
ginkgo.By("Waiting for the second pod.")
err = e2epod.WaitForPodRunningInNamespace(c, secondPod)
framework.ExpectNoError(err, "while waiting for the second pod Running")
ginkgo.By("Getting the second pod uuid.")
secondPod, err := c.CoreV1().Pods(secondPod.Namespace).Get(context.TODO(), secondPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "getting the second UID")
ginkgo.By("Expecting the volume mount to be found in the second pod.")
result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
framework.ExpectNoError(err, "Encountered SSH error when checking the second pod.")
framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
ginkgo.By("Testing that written file is accessible in the second pod.")
CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
err = c.CoreV1().Pods(secondPod.Namespace).Delete(context.TODO(), secondPod.Name, metav1.DeleteOptions{})
framework.ExpectNoError(err, "when deleting the second pod")
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, secondPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
framework.ExpectNoError(err, "when waiting for the second pod to disappear")
}
ginkgo.By("Expecting the volume mount not to be found.")
result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
e2essh.LogResult(result)
@@ -195,16 +231,17 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
}
}
// TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false)
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false, nil)
}
// TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false)
TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false, nil)
}
// TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down.
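One detail worth noting about the data check added above: the write in the first pod and the later read in the second pod are tied together only by the (byteLen, seed) pair. CheckWriteToPath writes content derived from the seed and CheckReadFromPath regenerates the expected content from the same seed, so no state has to be carried between the two pods. A self-contained toy version of that idea (not the framework helpers themselves):

package main

import (
	"bytes"
	"fmt"
	"math/rand"
)

// genData mirrors the idea behind the e2e helpers: derive the payload purely
// from (byteLen, seed) so writer and reader can agree without sharing state.
func genData(byteLen int, seed int64) []byte {
	buf := make([]byte, byteLen)
	rand.New(rand.NewSource(seed)).Read(buf)
	return buf
}

func main() {
	byteLen, seed := 64, int64(1667491217) // e.g. time.Now().UTC().UnixNano()
	written := genData(byteLen, seed)      // what the first pod writes
	expected := genData(byteLen, seed)     // what the second pod expects to read
	fmt.Println(bytes.Equal(written, expected)) // true
}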