diff --git a/test/e2e/framework/pod/create.go b/test/e2e/framework/pod/create.go
index 2d3e9bf2e44..d711fcab4f0 100644
--- a/test/e2e/framework/pod/create.go
+++ b/test/e2e/framework/pod/create.go
@@ -33,6 +33,21 @@ var (
 	BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
 )
 
+// Config is a struct containing all arguments for creating a pod.
+// SELinux testing requires passing HostIPC and HostPID as boolean fields.
+type Config struct {
+	NS                  string
+	PVCs                []*v1.PersistentVolumeClaim
+	InlineVolumeSources []*v1.VolumeSource
+	IsPrivileged        bool
+	Command             string
+	HostIPC             bool
+	HostPID             bool
+	SeLinuxLabel        *v1.SELinuxOptions
+	FsGroup             *int64
+	NodeSelection       NodeSelection
+}
+
 // CreateUnschedulablePod with given claims based on node selector
 func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
 	pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
@@ -79,27 +94,29 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st
 }
 
 // CreateSecPod creates security pod with given claims
-func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
-	return CreateSecPodWithNodeSelection(client, namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, NodeSelection{}, timeout)
+func CreateSecPod(client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
+	return CreateSecPodWithNodeSelection(client, podConfig, timeout)
 }
 
 // CreateSecPodWithNodeSelection creates security pod with given claims
-func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) {
-	pod := MakeSecPod(namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
-	SetNodeSelection(&pod.Spec, node)
+func CreateSecPodWithNodeSelection(client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
+	pod, err := MakeSecPod(podConfig)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create pod: %v", err)
+	}
 
-	pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
+	pod, err = client.CoreV1().Pods(podConfig.NS).Create(context.TODO(), pod, metav1.CreateOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("pod Create API error: %v", err)
 	}
 	// Waiting for pod to be running
-	err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
+	err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, podConfig.NS, timeout)
 	if err != nil {
 		return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
 	}
 	// get fresh pod info
-	pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
+	pod, err = client.CoreV1().Pods(podConfig.NS).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 	if err != nil {
 		return pod, fmt.Errorf("pod Get API error: %v", err)
 	}
 	return pod, nil
@@ -153,14 +170,16 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
 
 // MakeSecPod returns a pod definition based on the namespace. The pod references the PVC's
 // name. A slice of BASH commands can be supplied as args to be run by the pod.
-// SELinux testing requires to pass HostIPC and HostPID as booleansi arguments.
-func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64) *v1.Pod {
-	if len(command) == 0 {
-		command = "trap exit TERM; while true; do sleep 1; done"
+func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
+	if podConfig.NS == "" {
+		return nil, fmt.Errorf("cannot create pod with empty namespace")
+	}
+	if len(podConfig.Command) == 0 {
+		podConfig.Command = "trap exit TERM; while true; do sleep 1; done"
 	}
 	podName := "security-context-" + string(uuid.NewUUID())
-	if fsGroup == nil {
-		fsGroup = func(i int64) *int64 {
+	if podConfig.FsGroup == nil {
+		podConfig.FsGroup = func(i int64) *int64 {
 			return &i
 		}(1000)
 	}
@@ -171,22 +190,22 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSou
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      podName,
-			Namespace: ns,
+			Namespace: podConfig.NS,
 		},
 		Spec: v1.PodSpec{
-			HostIPC: hostIPC,
-			HostPID: hostPID,
+			HostIPC: podConfig.HostIPC,
+			HostPID: podConfig.HostPID,
 			SecurityContext: &v1.PodSecurityContext{
-				FSGroup: fsGroup,
+				FSGroup: podConfig.FsGroup,
 			},
 			Containers: []v1.Container{
 				{
 					Name:    "write-pod",
 					Image:   imageutils.GetE2EImage(imageutils.BusyBox),
 					Command: []string{"/bin/sh"},
-					Args:    []string{"-c", command},
+					Args:    []string{"-c", podConfig.Command},
 					SecurityContext: &v1.SecurityContext{
-						Privileged: &isPrivileged,
+						Privileged: &podConfig.IsPrivileged,
 					},
 				},
 			},
@@ -195,9 +214,9 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSou
 	}
 	var volumeMounts = make([]v1.VolumeMount, 0)
 	var volumeDevices = make([]v1.VolumeDevice, 0)
-	var volumes = make([]v1.Volume, len(pvclaims)+len(inlineVolumeSources))
+	var volumes = make([]v1.Volume, len(podConfig.PVCs)+len(podConfig.InlineVolumeSources))
 	volumeIndex := 0
-	for _, pvclaim := range pvclaims {
+	for _, pvclaim := range podConfig.PVCs {
 		volumename := fmt.Sprintf("volume%v", volumeIndex+1)
 		if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock {
 			volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename})
@@ -208,7 +227,7 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSou
 		volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
 		volumeIndex++
 	}
-	for _, src := range inlineVolumeSources {
+	for _, src := range podConfig.InlineVolumeSources {
 		volumename := fmt.Sprintf("volume%v", volumeIndex+1)
 		// In-line volumes can be only filesystem, not block.
 		volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
@@ -219,6 +238,8 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSou
 	podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
 	podSpec.Spec.Containers[0].VolumeDevices = volumeDevices
 	podSpec.Spec.Volumes = volumes
-	podSpec.Spec.SecurityContext.SELinuxOptions = seLinuxLabel
-	return podSpec
+	podSpec.Spec.SecurityContext.SELinuxOptions = podConfig.SeLinuxLabel
+
+	SetNodeSelection(&podSpec.Spec, podConfig.NodeSelection)
+	return podSpec, nil
 }
diff --git a/test/e2e/storage/generic_persistent_volume-disruptive.go b/test/e2e/storage/generic_persistent_volume-disruptive.go
index dde4b384909..13b10c22d75 100644
--- a/test/e2e/storage/generic_persistent_volume-disruptive.go
+++ b/test/e2e/storage/generic_persistent_volume-disruptive.go
@@ -18,6 +18,7 @@ package storage
 
 import (
 	"context"
+
 	"github.com/onsi/ginkgo"
 
 	v1 "k8s.io/api/core/v1"
@@ -106,9 +107,12 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
 	framework.ExpectEqual(len(pvs), 1)
 
 	ginkgo.By("Creating a pod with dynamically provisioned volume")
-	pod, err := e2epod.CreateSecPod(c, ns, pvcClaims, nil,
-		false, "", false, false, e2epv.SELinuxLabel,
-		nil, framework.PodStartTimeout)
+	podConfig := e2epod.Config{
+		NS:           ns,
+		PVCs:         pvcClaims,
+		SeLinuxLabel: e2epv.SELinuxLabel,
+	}
+	pod, err := e2epod.CreateSecPod(c, &podConfig, framework.PodStartTimeout)
 	framework.ExpectNoError(err, "While creating pods for kubelet restart test")
 	return pod, pvc, pvs[0]
 }
diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go
index ca2d06c5b49..14773e8d4f8 100644
--- a/test/e2e/storage/persistent_volumes-local.go
+++ b/test/e2e/storage/persistent_volumes-local.go
@@ -550,9 +550,15 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 					framework.ExpectNoError(err)
 					pvcs = append(pvcs, pvc)
 				}
-
-				pod := e2epod.MakeSecPod(config.ns, pvcs, nil, false, "sleep 1", false, false, selinuxLabel, nil)
-				pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{})
+				podConfig := e2epod.Config{
+					NS:           config.ns,
+					PVCs:         pvcs,
+					Command:      "sleep 1",
+					SeLinuxLabel: selinuxLabel,
+				}
+				pod, err := e2epod.MakeSecPod(&podConfig)
+				framework.ExpectNoError(err)
+				pod, err = config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 				framework.ExpectNoError(err)
 				pods[pod.Name] = pod
 				numCreated++
@@ -644,9 +650,16 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 			pvc, err = e2epv.CreatePVC(config.client, config.ns, pvc)
 			framework.ExpectNoError(err)
 			ginkgo.By(fmt.Sprintf("Create %d pods to use this PVC", count))
+			podConfig := e2epod.Config{
+				NS:           config.ns,
+				PVCs:         []*v1.PersistentVolumeClaim{pvc},
+				SeLinuxLabel: selinuxLabel,
+			}
 			for i := 0; i < count; i++ {
-				pod := e2epod.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{pvc}, nil, false, "", false, false, selinuxLabel, nil)
-				pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{})
+
+				pod, err := e2epod.MakeSecPod(&podConfig)
+				framework.ExpectNoError(err)
+				pod, err = config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 				framework.ExpectNoError(err)
 				pods[pod.Name] = pod
 			}
@@ -946,10 +959,6 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod
 }
 
 func makeLocalPodWithNodeAffinity(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
-	pod = e2epod.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, nil)
-	if pod == nil {
-		return
-	}
 	affinity := &v1.Affinity{
 		NodeAffinity: &v1.NodeAffinity{
 			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
@@ -967,25 +976,45 @@ func makeLocalPodWithNodeAffinity(config *localTestConfig, volume *localTestVolu
 			},
 		},
 	}
+	podConfig := e2epod.Config{
+		NS:            config.ns,
+		PVCs:          []*v1.PersistentVolumeClaim{volume.pvc},
+		SeLinuxLabel:  selinuxLabel,
+		NodeSelection: e2epod.NodeSelection{Affinity: affinity},
+	}
+	pod, err := e2epod.MakeSecPod(&podConfig)
+	if pod == nil || err != nil {
+		return
+	}
 	pod.Spec.Affinity = affinity
 	return
 }
 
 func makeLocalPodWithNodeSelector(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
-	pod = e2epod.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, nil)
-	if pod == nil {
-		return
-	}
 	ns := map[string]string{
 		"kubernetes.io/hostname": nodeName,
 	}
-	pod.Spec.NodeSelector = ns
+	podConfig := e2epod.Config{
+		NS:            config.ns,
+		PVCs:          []*v1.PersistentVolumeClaim{volume.pvc},
+		SeLinuxLabel:  selinuxLabel,
+		NodeSelection: e2epod.NodeSelection{Selector: ns},
+	}
+	pod, err := e2epod.MakeSecPod(&podConfig)
+	if pod == nil || err != nil {
+		return
+	}
 	return
 }
 
 func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
-	pod = e2epod.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, nil)
-	if pod == nil {
+	podConfig := e2epod.Config{
+		NS:           config.ns,
+		PVCs:         []*v1.PersistentVolumeClaim{volume.pvc},
+		SeLinuxLabel: selinuxLabel,
+	}
+	pod, err := e2epod.MakeSecPod(&podConfig)
+	if pod == nil || err != nil {
 		return
 	}
 
@@ -995,7 +1024,13 @@ func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume,
 
 func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *int64) (*v1.Pod, error) {
 	ginkgo.By("Creating a pod")
-	return e2epod.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, fsGroup, framework.PodStartShortTimeout)
+	podConfig := e2epod.Config{
+		NS:           config.ns,
+		PVCs:         []*v1.PersistentVolumeClaim{volume.pvc},
+		SeLinuxLabel: selinuxLabel,
+		FsGroup:      fsGroup,
+	}
+	return e2epod.CreateSecPod(config.client, &podConfig, framework.PodStartShortTimeout)
 }
 
 func createWriteCmd(testDir string, testFile string, writeTestFileContent string, volumeType localVolumeType) string {
diff --git a/test/e2e/storage/testsuites/disruptive.go b/test/e2e/storage/testsuites/disruptive.go
index 7e564031bb7..bc259d2d93f 100644
--- a/test/e2e/storage/testsuites/disruptive.go
+++ b/test/e2e/storage/testsuites/disruptive.go
@@ -160,7 +160,14 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern
 			pvcs = append(pvcs, l.resource.Pvc)
 		}
 		ginkgo.By("Creating a pod with pvc")
-		l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, pvcs, inlineSources, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
+		podConfig := e2epod.Config{
+			NS:                  l.ns.Name,
+			PVCs:                pvcs,
+			InlineVolumeSources: inlineSources,
+			SeLinuxLabel:        e2epv.SELinuxLabel,
+			NodeSelection:       l.config.ClientNodeSelection,
+		}
+		l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, &podConfig, framework.PodStartTimeout)
 		framework.ExpectNoError(err, "While creating pods for kubelet restart test")
 
 		if pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil {
diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go
index 36dfb1ef265..d167c82e737 100644
--- a/test/e2e/storage/testsuites/multivolume.go
+++ b/test/e2e/storage/testsuites/multivolume.go
@@ -374,9 +374,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, ns string,
 	node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, readSeedBase int64, writeSeedBase int64) string {
 	ginkgo.By(fmt.Sprintf("Creating pod on %+v with multiple volumes", node))
-	pod, err := e2epod.CreateSecPodWithNodeSelection(cs, ns, pvcs, nil,
-		false, "", false, false, e2epv.SELinuxLabel,
-		nil, node, framework.PodStartTimeout)
+	podConfig := e2epod.Config{
+		NS:            ns,
+		PVCs:          pvcs,
+		SeLinuxLabel:  e2epv.SELinuxLabel,
+		NodeSelection: node,
+	}
+	pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
 	defer func() {
 		framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
 	}()
@@ -447,10 +451,13 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 	for i := 0; i < numPods; i++ {
 		index := i + 1
 		ginkgo.By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node))
-		pod, err := e2epod.CreateSecPodWithNodeSelection(cs, ns,
-			[]*v1.PersistentVolumeClaim{pvc}, nil,
-			false, "", false, false, e2epv.SELinuxLabel,
-			nil, node, framework.PodStartTimeout)
+		podConfig := e2epod.Config{
+			NS:            ns,
+			PVCs:          []*v1.PersistentVolumeClaim{pvc},
+			SeLinuxLabel:  e2epv.SELinuxLabel,
+			NodeSelection: node,
+		}
+		pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
 		defer func() {
 			framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
 		}()
diff --git a/test/e2e/storage/testsuites/topology.go b/test/e2e/storage/testsuites/topology.go
index 26dad5c1428..7195876dc5a 100644
--- a/test/e2e/storage/testsuites/topology.go
+++ b/test/e2e/storage/testsuites/topology.go
@@ -333,16 +333,14 @@ func (t *topologyTestSuite) createResources(cs clientset.Interface, l *topologyT
 	framework.ExpectNoError(err)
 
 	ginkgo.By("Creating pod")
-	l.pod = e2epod.MakeSecPod(l.config.Framework.Namespace.Name,
-		[]*v1.PersistentVolumeClaim{l.resource.Pvc},
-		nil,
-		false,
-		"",
-		false,
-		false,
-		e2epv.SELinuxLabel,
-		nil)
-	l.pod.Spec.Affinity = affinity
+	podConfig := e2epod.Config{
+		NS:            l.config.Framework.Namespace.Name,
+		PVCs:          []*v1.PersistentVolumeClaim{l.resource.Pvc},
+		SeLinuxLabel:  e2epv.SELinuxLabel,
+		NodeSelection: e2epod.NodeSelection{Affinity: affinity},
+	}
+	l.pod, err = e2epod.MakeSecPod(&podConfig)
+	framework.ExpectNoError(err)
 	l.pod, err = cs.CoreV1().Pods(l.pod.Namespace).Create(context.TODO(), l.pod, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 }
diff --git a/test/e2e/storage/testsuites/volume_expand.go b/test/e2e/storage/testsuites/volume_expand.go
index 7ce2511043e..2fb9a496bdc 100644
--- a/test/e2e/storage/testsuites/volume_expand.go
+++ b/test/e2e/storage/testsuites/volume_expand.go
@@ -166,7 +166,13 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
 
 		var err error
 		ginkgo.By("Creating a pod with dynamically provisioned volume")
-		l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
+		podConfig := e2epod.Config{
+			NS:            f.Namespace.Name,
+			PVCs:          []*v1.PersistentVolumeClaim{l.resource.Pvc},
+			SeLinuxLabel:  e2epv.SELinuxLabel,
+			NodeSelection: l.config.ClientNodeSelection,
+		}
+		l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, framework.PodStartTimeout)
 		defer func() {
 			err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
 			framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
@@ -203,7 +209,13 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
 		l.resource.Pvc = npvc
 
 		ginkgo.By("Creating a new pod with same volume")
-		l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
+		podConfig = e2epod.Config{
+			NS:            f.Namespace.Name,
+			PVCs:          []*v1.PersistentVolumeClaim{l.resource.Pvc},
+			SeLinuxLabel:  e2epv.SELinuxLabel,
+			NodeSelection: l.config.ClientNodeSelection,
+		}
+		l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, framework.PodStartTimeout)
 		defer func() {
 			err = e2epod.DeletePodWithWait(f.ClientSet, l.pod2)
 			framework.ExpectNoError(err, "while cleaning up pod before exiting resizing test")
@@ -224,7 +236,13 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
 
 		var err error
 		ginkgo.By("Creating a pod with dynamically provisioned volume")
-		l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
+		podConfig := e2epod.Config{
+			NS:            f.Namespace.Name,
+			PVCs:          []*v1.PersistentVolumeClaim{l.resource.Pvc},
+			SeLinuxLabel:  e2epv.SELinuxLabel,
+			NodeSelection: l.config.ClientNodeSelection,
+		}
+		l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, framework.PodStartTimeout)
 		defer func() {
 			err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
 			framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go
index 438ce54d592..c22739db919 100644
--- a/test/e2e/storage/testsuites/volumelimits.go
+++ b/test/e2e/storage/testsuites/volumelimits.go
@@ -170,11 +170,15 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
 		}
 
 		ginkgo.By("Creating pod to use all PVC(s)")
-		pod := e2epod.MakeSecPod(l.ns.Name, l.pvcs, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
-		// Use affinity to schedule everything on the right node
-		selection := e2epod.NodeSelection{}
-		e2epod.SetAffinity(&selection, nodeName)
-		pod.Spec.Affinity = selection.Affinity
+		selection := e2epod.NodeSelection{Name: nodeName}
+		podConfig := e2epod.Config{
+			NS:            l.ns.Name,
+			PVCs:          l.pvcs,
+			SeLinuxLabel:  e2epv.SELinuxLabel,
+			NodeSelection: selection,
+		}
+		pod, err := e2epod.MakeSecPod(&podConfig)
+		framework.ExpectNoError(err)
 		l.runningPod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 
@@ -187,10 +191,14 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Creating an extra pod with one volume to exceed the limit")
-		pod = e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
-		// Use affinity to schedule everything on the right node
-		e2epod.SetAffinity(&selection, nodeName)
-		pod.Spec.Affinity = selection.Affinity
+		podConfig = e2epod.Config{
+			NS:            l.ns.Name,
+			PVCs:          []*v1.PersistentVolumeClaim{l.resource.Pvc},
+			SeLinuxLabel:  e2epv.SELinuxLabel,
+			NodeSelection: selection,
+		}
+		pod, err = e2epod.MakeSecPod(&podConfig)
+		framework.ExpectNoError(err)
 		l.unschedulablePod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "Failed to create an extra pod with one volume to exceed the limit")
 
diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go
index eea12422f8b..c035878f7ef 100644
--- a/test/e2e/storage/testsuites/volumemode.go
+++ b/test/e2e/storage/testsuites/volumemode.go
@@ -213,9 +213,15 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
 			framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, l.ns.Name, l.Pv, l.Pvc), "Failed to bind pv and pvc")
 
 			ginkgo.By("Creating pod")
-			pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
-			// Setting node
-			e2epod.SetNodeSelection(&pod.Spec, l.config.ClientNodeSelection)
+			podConfig := e2epod.Config{
+				NS:            l.ns.Name,
+				PVCs:          []*v1.PersistentVolumeClaim{l.Pvc},
+				SeLinuxLabel:  e2epv.SELinuxLabel,
+				NodeSelection: l.config.ClientNodeSelection,
+			}
+			pod, err := e2epod.MakeSecPod(&podConfig)
+			framework.ExpectNoError(err, "Failed to create pod")
+
 			pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
 			framework.ExpectNoError(err, "Failed to create pod")
 			defer func() {
@@ -292,7 +298,14 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
 
 			ginkgo.By("Creating pod")
 			var err error
-			pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
+			podConfig := e2epod.Config{
+				NS:           l.ns.Name,
+				PVCs:         []*v1.PersistentVolumeClaim{l.Pvc},
+				SeLinuxLabel: e2epv.SELinuxLabel,
+			}
+			pod, err := e2epod.MakeSecPod(&podConfig)
+			framework.ExpectNoError(err)
+
 			// Change volumeMounts to volumeDevices and the other way around
 			pod = swapVolumeMode(pod)
 
@@ -341,7 +354,14 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
 
 			ginkgo.By("Creating pod")
 			var err error
-			pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
+			podConfig := e2epod.Config{
+				NS:           l.ns.Name,
+				PVCs:         []*v1.PersistentVolumeClaim{l.Pvc},
+				SeLinuxLabel: e2epv.SELinuxLabel,
+			}
+			pod, err := e2epod.MakeSecPod(&podConfig)
+			framework.ExpectNoError(err)
+
 			for i := range pod.Spec.Containers {
 				pod.Spec.Containers[i].VolumeDevices = nil
 				pod.Spec.Containers[i].VolumeMounts = nil
diff --git a/test/e2e/upgrades/storage/volume_mode.go b/test/e2e/upgrades/storage/volume_mode.go
index edf85e8e724..c961f03f20e 100644
--- a/test/e2e/upgrades/storage/volume_mode.go
+++ b/test/e2e/upgrades/storage/volume_mode.go
@@ -21,7 +21,7 @@ import (
 	"fmt"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/version"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -95,7 +95,12 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
 	framework.ExpectNoError(err)
 
 	ginkgo.By("Consuming the PVC before downgrade")
-	t.pod, err = e2epod.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, framework.PodStartTimeout)
+	podConfig := e2epod.Config{
+		NS:           ns,
+		PVCs:         []*v1.PersistentVolumeClaim{t.pvc},
+		SeLinuxLabel: e2epv.SELinuxLabel,
+	}
+	t.pod, err = e2epod.CreateSecPod(cs, &podConfig, framework.PodStartTimeout)
 	framework.ExpectNoError(err)
 
 	ginkgo.By("Checking if PV exists as expected volume mode")
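
Usage note (editorial, not part of the patch): a minimal sketch of the two call patterns the new API supports, assuming the e2e framework packages are imported as in the files above. The helper names createAndWait and buildThenCreate are hypothetical, introduced only for illustration, as is the node name in the selector. Unset Config fields keep their Go zero values (IsPrivileged false, HostIPC/HostPID false, no inline volumes), and MakeSecPod fills in defaults for Command and FsGroup.

package storage

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// createAndWait shows the one-shot path: CreateSecPod builds the pod from the
// Config, creates it, and waits until it is Running before returning it.
func createAndWait(cs clientset.Interface, ns string, pvcs []*v1.PersistentVolumeClaim) (*v1.Pod, error) {
	podConfig := e2epod.Config{
		NS:           ns,
		PVCs:         pvcs,
		SeLinuxLabel: e2epv.SELinuxLabel,
	}
	return e2epod.CreateSecPod(cs, &podConfig, framework.PodStartTimeout)
}

// buildThenCreate shows the two-step path used by several call sites above:
// MakeSecPod only returns the pod definition, so the test can still mutate
// the spec before submitting it to the API server.
func buildThenCreate(cs clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
	podConfig := e2epod.Config{
		NS:           ns,
		PVCs:         []*v1.PersistentVolumeClaim{pvc},
		SeLinuxLabel: e2epv.SELinuxLabel,
		Command:      "sleep 1",
	}
	pod, err := e2epod.MakeSecPod(&podConfig)
	if err != nil {
		return nil, err
	}
	// Example mutation before creation, mirroring the local-PV tests above;
	// "node-1" is a hypothetical node name.
	pod.Spec.NodeSelector = map[string]string{"kubernetes.io/hostname": "node-1"}
	return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
}

The practical effect, visible at every call site in the patch: a future knob added to Config no longer forces edits to callers that do not care about it, unlike the old positional-argument signatures.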