Refactors the MakeSecPod function

Somtochi Onyekwere 2020-02-17 00:27:40 +01:00
parent 562a420d86
commit ee41c6b1a4
10 changed files with 205 additions and 82 deletions

View File

@@ -33,6 +33,21 @@ var (
 	BusyBoxImage = imageutils.GetE2EImage(imageutils.BusyBox)
 )
+
+// Config is a struct containing all arguments for creating a pod.
+// SELinux testing requires to pass HostIPC and HostPID as boolean arguments.
+type Config struct {
+	NS                  string
+	PVCs                []*v1.PersistentVolumeClaim
+	InlineVolumeSources []*v1.VolumeSource
+	IsPrivileged        bool
+	Command             string
+	HostIPC             bool
+	HostPID             bool
+	SeLinuxLabel        *v1.SELinuxOptions
+	FsGroup             *int64
+	NodeSelection       NodeSelection
+}
 // CreateUnschedulablePod with given claims based on node selector
 func CreateUnschedulablePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
 	pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
@@ -79,27 +94,29 @@ func CreatePod(client clientset.Interface, namespace string, nodeSelector map[st
 }
 // CreateSecPod creates security pod with given claims
-func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, timeout time.Duration) (*v1.Pod, error) {
-	return CreateSecPodWithNodeSelection(client, namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup, NodeSelection{}, timeout)
+func CreateSecPod(client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
+	return CreateSecPodWithNodeSelection(client, podConfig, timeout)
 }
 // CreateSecPodWithNodeSelection creates security pod with given claims
-func CreateSecPodWithNodeSelection(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64, node NodeSelection, timeout time.Duration) (*v1.Pod, error) {
-	pod := MakeSecPod(namespace, pvclaims, inlineVolumeSources, isPrivileged, command, hostIPC, hostPID, seLinuxLabel, fsGroup)
-	SetNodeSelection(&pod.Spec, node)
-	pod, err := client.CoreV1().Pods(namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
+func CreateSecPodWithNodeSelection(client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
+	pod, err := MakeSecPod(podConfig)
+	if err != nil {
+		return nil, fmt.Errorf("Unable to create pod: %v", err)
+	}
+	pod, err = client.CoreV1().Pods(podConfig.NS).Create(context.TODO(), pod, metav1.CreateOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("pod Create API error: %v", err)
 	}
 	// Waiting for pod to be running
-	err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, namespace, timeout)
+	err = WaitTimeoutForPodRunningInNamespace(client, pod.Name, podConfig.NS, timeout)
 	if err != nil {
 		return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
 	}
 	// get fresh pod info
-	pod, err = client.CoreV1().Pods(namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
+	pod, err = client.CoreV1().Pods(podConfig.NS).Get(context.TODO(), pod.Name, metav1.GetOptions{})
 	if err != nil {
 		return pod, fmt.Errorf("pod Get API error: %v", err)
 	}
@@ -153,14 +170,16 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
 // MakeSecPod returns a pod definition based on the namespace. The pod references the PVC's
 // name. A slice of BASH commands can be supplied as args to be run by the pod.
-// SELinux testing requires to pass HostIPC and HostPID as booleansi arguments.
-func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSources []*v1.VolumeSource, isPrivileged bool, command string, hostIPC bool, hostPID bool, seLinuxLabel *v1.SELinuxOptions, fsGroup *int64) *v1.Pod {
-	if len(command) == 0 {
-		command = "trap exit TERM; while true; do sleep 1; done"
+func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
+	if podConfig.NS == "" {
+		return nil, fmt.Errorf("Cannot create pod with empty namespace")
+	}
+	if len(podConfig.Command) == 0 {
+		podConfig.Command = "trap exit TERM; while true; do sleep 1; done"
 	}
 	podName := "security-context-" + string(uuid.NewUUID())
-	if fsGroup == nil {
-		fsGroup = func(i int64) *int64 {
+	if podConfig.FsGroup == nil {
+		podConfig.FsGroup = func(i int64) *int64 {
 			return &i
 		}(1000)
 	}
@@ -171,22 +190,22 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeSou
 		},
 		ObjectMeta: metav1.ObjectMeta{
 			Name: podName,
-			Namespace: ns,
+			Namespace: podConfig.NS,
 		},
 		Spec: v1.PodSpec{
-			HostIPC: hostIPC,
-			HostPID: hostPID,
+			HostIPC: podConfig.HostIPC,
+			HostPID: podConfig.HostPID,
 			SecurityContext: &v1.PodSecurityContext{
-				FSGroup: fsGroup,
+				FSGroup: podConfig.FsGroup,
 			},
 			Containers: []v1.Container{
 				{
 					Name: "write-pod",
 					Image: imageutils.GetE2EImage(imageutils.BusyBox),
 					Command: []string{"/bin/sh"},
-					Args: []string{"-c", command},
+					Args: []string{"-c", podConfig.Command},
 					SecurityContext: &v1.SecurityContext{
-						Privileged: &isPrivileged,
+						Privileged: &podConfig.IsPrivileged,
 					},
 				},
 			},
@@ -195,9 +214,9 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, inlineVolumeSou
 	}
 	var volumeMounts = make([]v1.VolumeMount, 0)
 	var volumeDevices = make([]v1.VolumeDevice, 0)
-	var volumes = make([]v1.Volume, len(pvclaims)+len(inlineVolumeSources))
+	var volumes = make([]v1.Volume, len(podConfig.PVCs)+len(podConfig.InlineVolumeSources))
 	volumeIndex := 0
-	for _, pvclaim := range pvclaims {
+	for _, pvclaim := range podConfig.PVCs {
 		volumename := fmt.Sprintf("volume%v", volumeIndex+1)
 		if pvclaim.Spec.VolumeMode != nil && *pvclaim.Spec.VolumeMode == v1.PersistentVolumeBlock {
 			volumeDevices = append(volumeDevices, v1.VolumeDevice{Name: volumename, DevicePath: "/mnt/" + volumename})
@@ -208,7 +227,7 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeSou
 		volumes[volumeIndex] = v1.Volume{Name: volumename, VolumeSource: v1.VolumeSource{PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvclaim.Name, ReadOnly: false}}}
 		volumeIndex++
 	}
-	for _, src := range inlineVolumeSources {
+	for _, src := range podConfig.InlineVolumeSources {
 		volumename := fmt.Sprintf("volume%v", volumeIndex+1)
 		// In-line volumes can be only filesystem, not block.
 		volumeMounts = append(volumeMounts, v1.VolumeMount{Name: volumename, MountPath: "/mnt/" + volumename})
@@ -219,6 +238,8 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeSou
 	podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
 	podSpec.Spec.Containers[0].VolumeDevices = volumeDevices
 	podSpec.Spec.Volumes = volumes
-	podSpec.Spec.SecurityContext.SELinuxOptions = seLinuxLabel
-	return podSpec
+	podSpec.Spec.SecurityContext.SELinuxOptions = podConfig.SeLinuxLabel
+	SetNodeSelection(&podSpec.Spec, podConfig.NodeSelection)
+	return podSpec, nil
 }
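
Read together, the hunks above replace the long positional parameter lists with a single e2epod.Config value that MakeSecPod now validates and applies itself. Below is a minimal usage sketch of that call pattern; the package name, the createTestPod helper, and the import paths are illustrative assumptions rather than part of this commit:

package storageexample

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// createTestPod is a hypothetical helper showing the Config-based call pattern:
// a caller sets only the fields it needs and leaves the rest at their zero values.
func createTestPod(cs clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
	podConfig := e2epod.Config{
		NS:           ns,
		PVCs:         []*v1.PersistentVolumeClaim{pvc},
		SeLinuxLabel: e2epv.SELinuxLabel,
	}
	// CreateSecPod builds the pod via MakeSecPod (which checks the namespace and
	// applies NodeSelection) and then waits for it to reach Running.
	return e2epod.CreateSecPod(cs, &podConfig, framework.PodStartTimeout)
}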

View File

@@ -18,6 +18,7 @@ package storage
 import (
 	"context"
+
 	"github.com/onsi/ginkgo"
 	v1 "k8s.io/api/core/v1"
@@ -106,9 +107,12 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
 	framework.ExpectEqual(len(pvs), 1)
 	ginkgo.By("Creating a pod with dynamically provisioned volume")
-	pod, err := e2epod.CreateSecPod(c, ns, pvcClaims, nil,
-		false, "", false, false, e2epv.SELinuxLabel,
-		nil, framework.PodStartTimeout)
+	podConfig := e2epod.Config{
+		NS: ns,
+		PVCs: pvcClaims,
+		SeLinuxLabel: e2epv.SELinuxLabel,
+	}
+	pod, err := e2epod.CreateSecPod(c, &podConfig, framework.PodStartTimeout)
 	framework.ExpectNoError(err, "While creating pods for kubelet restart test")
 	return pod, pvc, pvs[0]
 }

View File

@@ -550,9 +550,15 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 				framework.ExpectNoError(err)
 				pvcs = append(pvcs, pvc)
 			}
-			pod := e2epod.MakeSecPod(config.ns, pvcs, nil, false, "sleep 1", false, false, selinuxLabel, nil)
-			pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{})
+			podConfig := e2epod.Config{
+				NS: config.ns,
+				PVCs: pvcs,
+				Command: "sleep 1",
+				SeLinuxLabel: selinuxLabel,
+			}
+			pod, err := e2epod.MakeSecPod(&podConfig)
+			framework.ExpectNoError(err)
+			pod, err = config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 			framework.ExpectNoError(err)
 			pods[pod.Name] = pod
 			numCreated++
@@ -644,9 +650,16 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 		pvc, err = e2epv.CreatePVC(config.client, config.ns, pvc)
 		framework.ExpectNoError(err)
 		ginkgo.By(fmt.Sprintf("Create %d pods to use this PVC", count))
+		podConfig := e2epod.Config{
+			NS: config.ns,
+			PVCs: []*v1.PersistentVolumeClaim{pvc},
+			SeLinuxLabel: selinuxLabel,
+		}
 		for i := 0; i < count; i++ {
-			pod := e2epod.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{pvc}, nil, false, "", false, false, selinuxLabel, nil)
-			pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{})
+			pod, err := e2epod.MakeSecPod(&podConfig)
+			framework.ExpectNoError(err)
+			pod, err = config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 			framework.ExpectNoError(err)
 			pods[pod.Name] = pod
 		}
@@ -946,10 +959,6 @@ func createLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume, mod
 }
 func makeLocalPodWithNodeAffinity(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
-	pod = e2epod.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, nil)
-	if pod == nil {
-		return
-	}
 	affinity := &v1.Affinity{
 		NodeAffinity: &v1.NodeAffinity{
 			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
@@ -967,25 +976,45 @@ func makeLocalPodWithNodeAffinity(config *localTestConfig, volume *localTestVolu
 			},
 		},
 	}
+	podConfig := e2epod.Config{
+		NS: config.ns,
+		PVCs: []*v1.PersistentVolumeClaim{volume.pvc},
+		SeLinuxLabel: selinuxLabel,
+		NodeSelection: e2epod.NodeSelection{Affinity: affinity},
+	}
+	pod, err := e2epod.MakeSecPod(&podConfig)
+	if pod == nil || err != nil {
+		return
+	}
 	pod.Spec.Affinity = affinity
 	return
 }
 func makeLocalPodWithNodeSelector(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
-	pod = e2epod.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, nil)
-	if pod == nil {
-		return
-	}
 	ns := map[string]string{
 		"kubernetes.io/hostname": nodeName,
 	}
-	pod.Spec.NodeSelector = ns
+	podConfig := e2epod.Config{
+		NS: config.ns,
+		PVCs: []*v1.PersistentVolumeClaim{volume.pvc},
+		SeLinuxLabel: selinuxLabel,
+		NodeSelection: e2epod.NodeSelection{Selector: ns},
+	}
+	pod, err := e2epod.MakeSecPod(&podConfig)
+	if pod == nil || err != nil {
+		return
+	}
 	return
 }
 func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume, nodeName string) (pod *v1.Pod) {
-	pod = e2epod.MakeSecPod(config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, nil)
-	if pod == nil {
+	podConfig := e2epod.Config{
+		NS: config.ns,
+		PVCs: []*v1.PersistentVolumeClaim{volume.pvc},
+		SeLinuxLabel: selinuxLabel,
+	}
+	pod, err := e2epod.MakeSecPod(&podConfig)
+	if pod == nil || err != nil {
 		return
 	}
@@ -995,7 +1024,13 @@ func makeLocalPodWithNodeName(config *localTestConfig, volume *localTestVolume,
 func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *int64) (*v1.Pod, error) {
 	ginkgo.By("Creating a pod")
-	return e2epod.CreateSecPod(config.client, config.ns, []*v1.PersistentVolumeClaim{volume.pvc}, nil, false, "", false, false, selinuxLabel, fsGroup, framework.PodStartShortTimeout)
+	podConfig := e2epod.Config{
+		NS: config.ns,
+		PVCs: []*v1.PersistentVolumeClaim{volume.pvc},
+		SeLinuxLabel: selinuxLabel,
+		FsGroup: fsGroup,
+	}
+	return e2epod.CreateSecPod(config.client, &podConfig, framework.PodStartShortTimeout)
 }
 func createWriteCmd(testDir string, testFile string, writeTestFileContent string, volumeType localVolumeType) string {

View File

@@ -160,7 +160,14 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern
 			pvcs = append(pvcs, l.resource.Pvc)
 		}
 		ginkgo.By("Creating a pod with pvc")
-		l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, l.ns.Name, pvcs, inlineSources, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
+		podConfig := e2epod.Config{
+			NS: l.ns.Name,
+			PVCs: pvcs,
+			InlineVolumeSources: inlineSources,
+			SeLinuxLabel: e2epv.SELinuxLabel,
+			NodeSelection: l.config.ClientNodeSelection,
+		}
+		l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, &podConfig, framework.PodStartTimeout)
 		framework.ExpectNoError(err, "While creating pods for kubelet restart test")
 		if pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil {

View File

@@ -374,9 +374,13 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
 func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, ns string,
 	node e2epod.NodeSelection, pvcs []*v1.PersistentVolumeClaim, readSeedBase int64, writeSeedBase int64) string {
 	ginkgo.By(fmt.Sprintf("Creating pod on %+v with multiple volumes", node))
-	pod, err := e2epod.CreateSecPodWithNodeSelection(cs, ns, pvcs, nil,
-		false, "", false, false, e2epv.SELinuxLabel,
-		nil, node, framework.PodStartTimeout)
+	podConfig := e2epod.Config{
+		NS: ns,
+		PVCs: pvcs,
+		SeLinuxLabel: e2epv.SELinuxLabel,
+		NodeSelection: node,
+	}
+	pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
 	defer func() {
 		framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
 	}()
@@ -447,10 +451,13 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 	for i := 0; i < numPods; i++ {
 		index := i + 1
 		ginkgo.By(fmt.Sprintf("Creating pod%d with a volume on %+v", index, node))
-		pod, err := e2epod.CreateSecPodWithNodeSelection(cs, ns,
-			[]*v1.PersistentVolumeClaim{pvc}, nil,
-			false, "", false, false, e2epv.SELinuxLabel,
-			nil, node, framework.PodStartTimeout)
+		podConfig := e2epod.Config{
+			NS: ns,
+			PVCs: []*v1.PersistentVolumeClaim{pvc},
+			SeLinuxLabel: e2epv.SELinuxLabel,
+			NodeSelection: node,
+		}
+		pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
 		defer func() {
 			framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
 		}()

View File

@@ -333,16 +333,14 @@ func (t *topologyTestSuite) createResources(cs clientset.Interface, l *topologyT
 	framework.ExpectNoError(err)
 	ginkgo.By("Creating pod")
-	l.pod = e2epod.MakeSecPod(l.config.Framework.Namespace.Name,
-		[]*v1.PersistentVolumeClaim{l.resource.Pvc},
-		nil,
-		false,
-		"",
-		false,
-		false,
-		e2epv.SELinuxLabel,
-		nil)
-	l.pod.Spec.Affinity = affinity
+	podConfig := e2epod.Config{
+		NS: l.config.Framework.Namespace.Name,
+		PVCs: []*v1.PersistentVolumeClaim{l.resource.Pvc},
+		SeLinuxLabel: e2epv.SELinuxLabel,
+		NodeSelection: e2epod.NodeSelection{Affinity: affinity},
+	}
+	l.pod, err = e2epod.MakeSecPod(&podConfig)
+	framework.ExpectNoError(err)
 	l.pod, err = cs.CoreV1().Pods(l.pod.Namespace).Create(context.TODO(), l.pod, metav1.CreateOptions{})
 	framework.ExpectNoError(err)
 }

View File

@@ -166,7 +166,13 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
 			var err error
 			ginkgo.By("Creating a pod with dynamically provisioned volume")
-			l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
+			podConfig := e2epod.Config{
+				NS: f.Namespace.Name,
+				PVCs: []*v1.PersistentVolumeClaim{l.resource.Pvc},
+				SeLinuxLabel: e2epv.SELinuxLabel,
+				NodeSelection: l.config.ClientNodeSelection,
+			}
+			l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, framework.PodStartTimeout)
 			defer func() {
 				err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
 				framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
@@ -203,7 +209,13 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
 			l.resource.Pvc = npvc
 			ginkgo.By("Creating a new pod with same volume")
-			l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
+			podConfig = e2epod.Config{
+				NS: f.Namespace.Name,
+				PVCs: []*v1.PersistentVolumeClaim{l.resource.Pvc},
+				SeLinuxLabel: e2epv.SELinuxLabel,
+				NodeSelection: l.config.ClientNodeSelection,
+			}
+			l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, framework.PodStartTimeout)
 			defer func() {
 				err = e2epod.DeletePodWithWait(f.ClientSet, l.pod2)
 				framework.ExpectNoError(err, "while cleaning up pod before exiting resizing test")
@@ -224,7 +236,13 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
 			var err error
 			ginkgo.By("Creating a pod with dynamically provisioned volume")
-			l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, f.Namespace.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, l.config.ClientNodeSelection, framework.PodStartTimeout)
+			podConfig := e2epod.Config{
+				NS: f.Namespace.Name,
+				PVCs: []*v1.PersistentVolumeClaim{l.resource.Pvc},
+				SeLinuxLabel: e2epv.SELinuxLabel,
+				NodeSelection: l.config.ClientNodeSelection,
+			}
+			l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, framework.PodStartTimeout)
 			defer func() {
 				err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
 				framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")

View File

@@ -170,11 +170,15 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
 		}
 		ginkgo.By("Creating pod to use all PVC(s)")
-		pod := e2epod.MakeSecPod(l.ns.Name, l.pvcs, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
-		// Use affinity to schedule everything on the right node
-		selection := e2epod.NodeSelection{}
-		e2epod.SetAffinity(&selection, nodeName)
-		pod.Spec.Affinity = selection.Affinity
+		selection := e2epod.NodeSelection{Name: nodeName}
+		podConfig := e2epod.Config{
+			NS: l.ns.Name,
+			PVCs: l.pvcs,
+			SeLinuxLabel: e2epv.SELinuxLabel,
+			NodeSelection: selection,
+		}
+		pod, err := e2epod.MakeSecPod(&podConfig)
+		framework.ExpectNoError(err)
 		l.runningPod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
@@ -187,10 +191,14 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
 		framework.ExpectNoError(err)
 		ginkgo.By("Creating an extra pod with one volume to exceed the limit")
-		pod = e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.resource.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
-		// Use affinity to schedule everything on the right node
-		e2epod.SetAffinity(&selection, nodeName)
-		pod.Spec.Affinity = selection.Affinity
+		podConfig = e2epod.Config{
+			NS: l.ns.Name,
+			PVCs: []*v1.PersistentVolumeClaim{l.resource.Pvc},
+			SeLinuxLabel: e2epv.SELinuxLabel,
+			NodeSelection: selection,
+		}
+		pod, err = e2epod.MakeSecPod(&podConfig)
+		framework.ExpectNoError(err)
 		l.unschedulablePod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "Failed to create an extra pod with one volume to exceed the limit")

View File

@@ -213,9 +213,15 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
 			framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, l.ns.Name, l.Pv, l.Pvc), "Failed to bind pv and pvc")
 			ginkgo.By("Creating pod")
-			pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
-			// Setting node
-			e2epod.SetNodeSelection(&pod.Spec, l.config.ClientNodeSelection)
+			podConfig := e2epod.Config{
+				NS: l.ns.Name,
+				PVCs: []*v1.PersistentVolumeClaim{l.Pvc},
+				SeLinuxLabel: e2epv.SELinuxLabel,
+				NodeSelection: l.config.ClientNodeSelection,
+			}
+			pod, err := e2epod.MakeSecPod(&podConfig)
+			framework.ExpectNoError(err, "Failed to create pod")
 			pod, err = l.cs.CoreV1().Pods(l.ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
 			framework.ExpectNoError(err, "Failed to create pod")
 			defer func() {
@@ -292,7 +298,14 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
 			ginkgo.By("Creating pod")
 			var err error
-			pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
+			podConfig := e2epod.Config{
+				NS: l.ns.Name,
+				PVCs: []*v1.PersistentVolumeClaim{l.Pvc},
+				SeLinuxLabel: e2epv.SELinuxLabel,
+			}
+			pod, err := e2epod.MakeSecPod(&podConfig)
+			framework.ExpectNoError(err)
 			// Change volumeMounts to volumeDevices and the other way around
 			pod = swapVolumeMode(pod)
@@ -341,7 +354,14 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
 			ginkgo.By("Creating pod")
 			var err error
-			pod := e2epod.MakeSecPod(l.ns.Name, []*v1.PersistentVolumeClaim{l.Pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil)
+			podConfig := e2epod.Config{
+				NS: l.ns.Name,
+				PVCs: []*v1.PersistentVolumeClaim{l.Pvc},
+				SeLinuxLabel: e2epv.SELinuxLabel,
+			}
+			pod, err := e2epod.MakeSecPod(&podConfig)
+			framework.ExpectNoError(err)
 			for i := range pod.Spec.Containers {
 				pod.Spec.Containers[i].VolumeDevices = nil
 				pod.Spec.Containers[i].VolumeMounts = nil

View File

@@ -21,7 +21,7 @@ import (
 	"fmt"
 	"time"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/version"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -95,7 +95,12 @@ func (t *VolumeModeDowngradeTest) Setup(f *framework.Framework) {
 	framework.ExpectNoError(err)
 	ginkgo.By("Consuming the PVC before downgrade")
-	t.pod, err = e2epod.CreateSecPod(cs, ns, []*v1.PersistentVolumeClaim{t.pvc}, nil, false, "", false, false, e2epv.SELinuxLabel, nil, framework.PodStartTimeout)
+	podConfig := e2epod.Config{
+		NS: ns,
+		PVCs: []*v1.PersistentVolumeClaim{t.pvc},
+		SeLinuxLabel: e2epv.SELinuxLabel,
+	}
+	t.pod, err = e2epod.CreateSecPod(cs, &podConfig, framework.PodStartTimeout)
 	framework.ExpectNoError(err)
 	ginkgo.By("Checking if PV exists as expected volume mode")