Update tests to use common pod functions from framework/pod/utils.go

parent e4e9c31218
commit 5d9053014e
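Note: every hunk below swaps hand-rolled BusyBox containers and security contexts for the shared helpers introduced in test/e2e/framework/pod/utils.go. As a rough sketch of what those helpers resolve to on each node OS (derived from the function bodies later in this diff, not part of the commit itself):

	// Linux nodes (--node-os-distro != windows):
	e2epod.GenerateScriptCmd("echo hi") // []string{"/bin/sh", "-c", "echo hi"}
	e2epod.GetDefaultTestImage()        // the BusyBox e2e image

	// Windows nodes (--node-os-distro=windows):
	e2epod.GenerateScriptCmd("echo hi") // []string{"powershell", "/c", "echo hi"}
	e2epod.GetDefaultTestImage()        // the Agnhost e2e image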
@@ -28,8 +28,8 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )

 // UpdateDeploymentWithRetries updates the specified deployment with retries.
@@ -199,13 +199,10 @@ func testDeployment(replicas int32, podLabels map[string]string, nodeSelector ma
 			TerminationGracePeriodSeconds: &zero,
 			Containers: []v1.Container{
 				{
-					Name:    "write-pod",
-					Image:   imageutils.GetE2EImage(imageutils.BusyBox),
-					Command: []string{"/bin/sh"},
-					Args:    []string{"-c", command},
-					SecurityContext: &v1.SecurityContext{
-						Privileged: &isPrivileged,
-					},
+					Name:            "write-pod",
+					Image:           e2epod.GetDefaultTestImage(),
+					Command:         e2epod.GenerateScriptCmd(command),
+					SecurityContext: e2epod.GenerateContainerSecurityContext(isPrivileged),
 				},
 			},
 			RestartPolicy: v1.RestartPolicyAlways,
@@ -145,13 +145,10 @@ func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.Persisten
 		Spec: v1.PodSpec{
 			Containers: []v1.Container{
 				{
-					Name:    "write-pod",
-					Image:   BusyBoxImage,
-					Command: []string{"/bin/sh"},
-					Args:    []string{"-c", command},
-					SecurityContext: &v1.SecurityContext{
-						Privileged: &isPrivileged,
-					},
+					Name:            "write-pod",
+					Image:           GetDefaultTestImage(),
+					Command:         GenerateScriptCmd(command),
+					SecurityContext: GenerateContainerSecurityContext(isPrivileged),
 				},
 			},
 			RestartPolicy: v1.RestartPolicyOnFailure,
@@ -187,10 +184,6 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
 			return &i
 		}(1000)
 	}
-	image := imageutils.BusyBox
-	if podConfig.ImageID != imageutils.None {
-		image = podConfig.ImageID
-	}
 	podSpec := &v1.Pod{
 		TypeMeta: metav1.TypeMeta{
 			Kind: "Pod",
@@ -200,28 +193,34 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
 			Name:      podName,
 			Namespace: podConfig.NS,
 		},
-		Spec: v1.PodSpec{
-			HostIPC: podConfig.HostIPC,
-			HostPID: podConfig.HostPID,
-			SecurityContext: &v1.PodSecurityContext{
-				FSGroup: podConfig.FsGroup,
-			},
-			Containers: []v1.Container{
-				{
-					Name:    "write-pod",
-					Image:   imageutils.GetE2EImage(image),
-					Command: []string{"/bin/sh"},
-					Args:    []string{"-c", podConfig.Command},
-					SecurityContext: &v1.SecurityContext{
-						Privileged: &podConfig.IsPrivileged,
-					},
-				},
-			},
-			RestartPolicy: v1.RestartPolicyOnFailure,
-		},
+		Spec: *MakePodSpec(podConfig),
 	}
+	return podSpec, nil
+}
+
+// MakePodSpec returns a PodSpec definition
+func MakePodSpec(podConfig *Config) *v1.PodSpec {
+	image := imageutils.BusyBox
+	if podConfig.ImageID != imageutils.None {
+		image = podConfig.ImageID
+	}
+	podSpec := &v1.PodSpec{
+		HostIPC:         podConfig.HostIPC,
+		HostPID:         podConfig.HostPID,
+		SecurityContext: GeneratePodSecurityContext(podConfig.FsGroup, podConfig.SeLinuxLabel),
+		Containers: []v1.Container{
+			{
+				Name:            "write-pod",
+				Image:           GetTestImage(image),
+				Command:         GenerateScriptCmd(podConfig.Command),
+				SecurityContext: GenerateContainerSecurityContext(podConfig.IsPrivileged),
+			},
+		},
+		RestartPolicy: v1.RestartPolicyOnFailure,
+	}
+
 	if podConfig.PodFSGroupChangePolicy != nil {
-		podSpec.Spec.SecurityContext.FSGroupChangePolicy = podConfig.PodFSGroupChangePolicy
+		podSpec.SecurityContext.FSGroupChangePolicy = podConfig.PodFSGroupChangePolicy
 	}

 	var volumeMounts = make([]v1.VolumeMount, 0)
@@ -247,13 +246,13 @@ func MakeSecPod(podConfig *Config) (*v1.Pod, error) {
 		volumeIndex++
 	}

-	podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
-	podSpec.Spec.Containers[0].VolumeDevices = volumeDevices
-	podSpec.Spec.Volumes = volumes
+	podSpec.Containers[0].VolumeMounts = volumeMounts
+	podSpec.Containers[0].VolumeDevices = volumeDevices
+	podSpec.Volumes = volumes
 	if runtime.GOOS != "windows" {
-		podSpec.Spec.SecurityContext.SELinuxOptions = podConfig.SeLinuxLabel
+		podSpec.SecurityContext.SELinuxOptions = podConfig.SeLinuxLabel
 	}

-	SetNodeSelection(&podSpec.Spec, podConfig.NodeSelection)
-	return podSpec, nil
+	SetNodeSelection(podSpec, podConfig.NodeSelection)
+	return podSpec
 }
test/e2e/framework/pod/utils.go (new file, +120 lines)
@@ -0,0 +1,120 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pod
+
+import (
+	"flag"
+
+	v1 "k8s.io/api/core/v1"
+	imageutils "k8s.io/kubernetes/test/utils/image"
+)
+
+// NodeOSDistroIs returns true if the distro is the same as `--node-os-distro`.
+// The framework/pod package can't import the framework package (see #81245),
+// yet it needs to know whether --node-os-distro=windows is set, and the framework
+// package is the one parsing the flags; as a workaround this method looks up the same flag again.
+// TODO: replace with `framework.NodeOSDistroIs` when #81245 is complete
+func NodeOSDistroIs(distro string) bool {
+	var nodeOsDistro *flag.Flag = flag.Lookup("node-os-distro")
+	if nodeOsDistro != nil && nodeOsDistro.Value.String() == distro {
+		return true
+	}
+	return false
+}
+
+// GenerateScriptCmd generates the corresponding command lines to execute a command.
+// Depending on whether the node OS is Windows or Linux, the command uses powershell or /bin/sh.
+func GenerateScriptCmd(command string) []string {
+	var commands []string
+	if !NodeOSDistroIs("windows") {
+		commands = []string{"/bin/sh", "-c", command}
+	} else {
+		commands = []string{"powershell", "/c", command}
+	}
+	return commands
+}
+
+// GetDefaultTestImage returns the default test image based on OS.
+// If the node OS is Windows, we currently return the Agnhost image,
+// due to https://github.com/kubernetes-sigs/windows-testing/pull/35.
+// If the node OS is Linux, we return the BusyBox image.
+func GetDefaultTestImage() string {
+	return imageutils.GetE2EImage(GetDefaultTestImageID())
+}
+
+// GetDefaultTestImageID returns the default test image id based on OS.
+// If the node OS is Windows, we currently return the Agnhost image id,
+// due to https://github.com/kubernetes-sigs/windows-testing/pull/35.
+// If the node OS is Linux, we return the BusyBox image id.
+func GetDefaultTestImageID() int {
+	return GetTestImageID(imageutils.BusyBox)
+}
+
+// GetTestImage returns the image name for the given image id.
+// If the node OS is Windows, we currently return the Agnhost image,
+// due to https://github.com/kubernetes-sigs/windows-testing/pull/35.
+func GetTestImage(id int) string {
+	if NodeOSDistroIs("windows") {
+		return imageutils.GetE2EImage(imageutils.Agnhost)
+	}
+	return imageutils.GetE2EImage(id)
+}
+
+// GetTestImageID returns the image id for the given input.
+// If the node OS is Windows, we currently return the Agnhost image id,
+// due to https://github.com/kubernetes-sigs/windows-testing/pull/35.
+func GetTestImageID(id int) int {
+	if NodeOSDistroIs("windows") {
+		return imageutils.Agnhost
+	}
+	return id
+}
+
+// GeneratePodSecurityContext generates the corresponding pod security context with the given inputs.
+// If the node OS is Windows, currently we will ignore the inputs and return nil.
+// TODO: Will modify it after windows has its own security context
+func GeneratePodSecurityContext(fsGroup *int64, seLinuxOptions *v1.SELinuxOptions) *v1.PodSecurityContext {
+	if NodeOSDistroIs("windows") {
+		return nil
+	}
+	return &v1.PodSecurityContext{
+		FSGroup:        fsGroup,
+		SELinuxOptions: seLinuxOptions,
+	}
+}
+
+// GenerateContainerSecurityContext generates the corresponding container security context with the given inputs.
+// If the node OS is Windows, currently we will ignore the inputs and return nil.
+// TODO: Will modify it after windows has its own security context
+func GenerateContainerSecurityContext(privileged bool) *v1.SecurityContext {
+	if NodeOSDistroIs("windows") {
+		return nil
+	}
+	return &v1.SecurityContext{
+		Privileged: &privileged,
+	}
+}
+
+// GetLinuxLabel returns the default SELinuxLabel based on OS.
+// If the node OS is Windows, it will return nil.
+func GetLinuxLabel() *v1.SELinuxOptions {
+	if NodeOSDistroIs("windows") {
+		return nil
+	}
+	return &v1.SELinuxOptions{
+		Level: "s0:c0,c1"}
+}
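A minimal sketch of how a test consumes these helpers to build an OS-portable container (the command string here is illustrative):

	container := v1.Container{
		Name:            "write-pod",
		Image:           e2epod.GetDefaultTestImage(),                   // BusyBox on Linux, Agnhost on Windows
		Command:         e2epod.GenerateScriptCmd("echo ok > /mnt/out"), // sh -c ... or powershell /c ...
		SecurityContext: e2epod.GenerateContainerSecurityContext(false), // nil on Windows
	}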
@@ -735,7 +735,7 @@ func WaitForPersistentVolumeClaimsPhase(phase v1.PersistentVolumeClaimPhase, c c
 	if len(pvcNames) == 0 {
 		return fmt.Errorf("Incorrect parameter: Need at least one PVC to track. Found 0")
 	}
-	framework.Logf("Waiting up to %v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
+	framework.Logf("Waiting up to timeout=%v for PersistentVolumeClaims %v to have phase %s", timeout, pvcNames, phase)
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
 		phaseFoundInAllClaims := true
 		for _, pvcName := range pvcNames {
@@ -865,3 +865,11 @@ func WaitForPVCFinalizer(ctx context.Context, cs clientset.Interface, name, name
 	}
 	return err
 }
+
+// GetDefaultFSType returns the default fsType
+func GetDefaultFSType() string {
+	if framework.NodeOSDistroIs("windows") {
+		return "ntfs"
+	}
+	return "ext4"
+}
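GetDefaultFSType gives volume fixtures a filesystem the node can actually format; the GCEPD hunk later in this diff uses it exactly like this sketch (diskName is assumed to be provisioned by the test):

	pvSource := v1.PersistentVolumeSource{
		GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
			PDName: diskName,
			FSType: e2epv.GetDefaultFSType(), // "ntfs" on Windows nodes, "ext4" otherwise
		},
	}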
@@ -389,17 +389,17 @@ func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutC
 			Containers: []v1.Container{
 				{
 					Name:       config.Prefix + "-" + podSuffix,
-					Image:      GetDefaultTestImage(),
+					Image:      e2epod.GetDefaultTestImage(),
 					WorkingDir: "/opt",
 					// An imperative and easily debuggable container which reads/writes vol contents for
 					// us to scan in the tests or by eye.
 					// We expect that /opt is empty in the minimal containers which we use in this test.
-					Command:      GenerateScriptCmd(command),
+					Command:      e2epod.GenerateScriptCmd(command),
 					VolumeMounts: []v1.VolumeMount{},
 				},
 			},
 			TerminationGracePeriodSeconds: &gracePeriod,
-			SecurityContext:               GeneratePodSecurityContext(fsGroup, seLinuxOptions),
+			SecurityContext:               e2epod.GeneratePodSecurityContext(fsGroup, seLinuxOptions),
 			Volumes:                       []v1.Volume{},
 		},
 	}
@@ -416,7 +416,7 @@ func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutC
 	if privileged && test.Mode == v1.PersistentVolumeBlock {
 		privileged = false
 	}
-	clientPod.Spec.Containers[0].SecurityContext = GenerateSecurityContext(privileged)
+	clientPod.Spec.Containers[0].SecurityContext = e2epod.GenerateContainerSecurityContext(privileged)

 	if test.Mode == v1.PersistentVolumeBlock {
 		clientPod.Spec.Containers[0].VolumeDevices = append(clientPod.Spec.Containers[0].VolumeDevices, v1.VolumeDevice{
@@ -569,18 +569,6 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
 	testVolumeContent(f, injectorPod, fsGroup, fsType, tests)
 }

-// GenerateScriptCmd generates the corresponding command lines to execute a command.
-// Depending on the Node OS is Windows or linux, the command will use powershell or /bin/sh
-func GenerateScriptCmd(command string) []string {
-	var commands []string
-	if !framework.NodeOSDistroIs("windows") {
-		commands = []string{"/bin/sh", "-c", command}
-	} else {
-		commands = []string{"powershell", "/c", command}
-	}
-	return commands
-}
-
 // generateWriteCmd is used by generateWriteBlockCmd and generateWriteFileCmd
 func generateWriteCmd(content, path string) []string {
 	var commands []string
@@ -629,77 +617,6 @@ func generateWriteFileCmd(content, fullPath string) []string {
 	return generateWriteCmd(content, fullPath)
 }

-// GenerateSecurityContext generates the corresponding container security context with the given inputs
-// If the Node OS is windows, currently we will ignore the inputs and return nil.
-// TODO: Will modify it after windows has its own security context
-func GenerateSecurityContext(privileged bool) *v1.SecurityContext {
-	if framework.NodeOSDistroIs("windows") {
-		return nil
-	}
-	return &v1.SecurityContext{
-		Privileged: &privileged,
-	}
-}
-
-// GeneratePodSecurityContext generates the corresponding pod security context with the given inputs
-// If the Node OS is windows, currently we will ignore the inputs and return nil.
-// TODO: Will modify it after windows has its own security context
-func GeneratePodSecurityContext(fsGroup *int64, seLinuxOptions *v1.SELinuxOptions) *v1.PodSecurityContext {
-	if framework.NodeOSDistroIs("windows") {
-		return nil
-	}
-	return &v1.PodSecurityContext{
-		SELinuxOptions: seLinuxOptions,
-		FSGroup:        fsGroup,
-	}
-}
-
-// GetTestImage returns the image name with the given input
-// If the Node OS is windows, currently we return Agnhost image for Windows node
-// due to the issue of #https://github.com/kubernetes-sigs/windows-testing/pull/35.
-func GetTestImage(id int) string {
-	if framework.NodeOSDistroIs("windows") {
-		return imageutils.GetE2EImage(imageutils.Agnhost)
-	}
-	return imageutils.GetE2EImage(id)
-}
-
-// GetTestImageID returns the image id with the given input
-// If the Node OS is windows, currently we return Agnhost image for Windows node
-// due to the issue of #https://github.com/kubernetes-sigs/windows-testing/pull/35.
-func GetTestImageID(id int) int {
-	if framework.NodeOSDistroIs("windows") {
-		return imageutils.Agnhost
-	}
-	return id
-}
-
-// GetDefaultTestImage returns the default test image based on OS.
-// If the node OS is windows, currently we return Agnhost image for Windows node
-// due to the issue of #https://github.com/kubernetes-sigs/windows-testing/pull/35.
-// If the node OS is linux, return busybox image
-func GetDefaultTestImage() string {
-	return imageutils.GetE2EImage(GetDefaultTestImageID())
-}
-
-// GetDefaultTestImageID returns the default test image id based on OS.
-// If the node OS is windows, currently we return Agnhost image for Windows node
-// due to the issue of #https://github.com/kubernetes-sigs/windows-testing/pull/35.
-// If the node OS is linux, return busybox image
-func GetDefaultTestImageID() int {
-	return GetTestImageID(imageutils.BusyBox)
-}
-
-// GetLinuxLabel returns the default SELinuxLabel based on OS.
-// If the node OS is windows, it will return nil
-func GetLinuxLabel() *v1.SELinuxOptions {
-	if framework.NodeOSDistroIs("windows") {
-		return nil
-	}
-	return &v1.SELinuxOptions{
-		Level: "s0:c0,c1"}
-}
-
 // CheckVolumeModeOfPath check mode of volume
 func CheckVolumeModeOfPath(f *framework.Framework, pod *v1.Pod, volMode v1.PersistentVolumeMode, path string) {
 	if volMode == v1.PersistentVolumeBlock {
@@ -725,7 +642,6 @@ func PodExec(f *framework.Framework, pod *v1.Pod, shExec string) (string, string
 		return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "powershell", "/c", shExec)
 	}
 	return f.ExecCommandInContainerWithFullOutput(pod.Name, pod.Spec.Containers[0].Name, "/bin/sh", "-c", shExec)
-
 }

 // VerifyExecInPodSucceed verifies shell cmd in target pod succeed
@@ -1876,7 +1876,7 @@ func startBusyBoxPodWithVolumeSource(cs clientset.Interface, volumeSource v1.Vol
 						MountPath: "/mnt/test",
 					},
 				},
-				Command: e2evolume.GenerateScriptCmd("while true ; do sleep 2; done"),
+				Command: e2epod.GenerateScriptCmd("while true ; do sleep 2; done"),
 			},
 		},
 		SecurityContext: &v1.PodSecurityContext{
@@ -28,6 +28,7 @@ import (
 	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
@@ -47,7 +48,8 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
 		c                 clientset.Interface
 		ns                string
 		pvc               *v1.PersistentVolumeClaim
-		resizableSc       *storagev1.StorageClass
+		sc                *storagev1.StorageClass
+		cleanStorageClass func()
 		nodeName          string
 		isNodeLabeled     bool
 		nodeKeyValueLabel map[string]string
@@ -82,14 +84,15 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
 			ClaimSize:            "2Gi",
 			AllowVolumeExpansion: true,
 			DelayBinding:         true,
+			Parameters:           make(map[string]string),
 		}
-		resizableSc, err = c.StorageV1().StorageClasses().Create(context.TODO(), newStorageClass(test, ns, "resizing"), metav1.CreateOptions{})
-		framework.ExpectNoError(err, "Error creating resizable storage class")
-		framework.ExpectEqual(*resizableSc.AllowVolumeExpansion, true)
+
+		sc, cleanStorageClass = testsuites.SetupStorageClass(c, newStorageClass(test, ns, "resizing"))
+		framework.ExpectEqual(*sc.AllowVolumeExpansion, true)

 		pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 			ClaimSize:        test.ClaimSize,
-			StorageClassName: &(resizableSc.Name),
+			StorageClassName: &(sc.Name),
 			VolumeMode:       &test.VolumeMode,
 		}, ns)
 		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
@@ -112,6 +115,8 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
 			pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
 			nodeKeyValueLabel = make(map[string]string)
 		}
+
+		cleanStorageClass()
 	})

 	ginkgo.It("Should verify mounted devices can be resized", func() {
@@ -86,13 +86,14 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
 		ginkgo.By("Initializing Test Spec")
 		diskName, err = e2epv.CreatePDWithRetry()
 		framework.ExpectNoError(err)
+
 		pvConfig = e2epv.PersistentVolumeConfig{
 			NamePrefix: "gce-",
 			Labels:     volLabel,
 			PVSource: v1.PersistentVolumeSource{
 				GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
 					PDName:   diskName,
-					FSType:   "ext3",
+					FSType:   e2epv.GetDefaultFSType(),
 					ReadOnly: false,
 				},
 			},
@@ -420,9 +420,8 @@ func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount, claims []v
 			Containers: []v1.Container{
 				{
 					Name:           "nginx",
-					Image:          imageutils.GetE2EImage(imageutils.Nginx),
-					Command:        []string{"/bin/sh"},
-					Args:           []string{"-c", cmd},
+					Image:          e2epod.GetTestImage(imageutils.Nginx),
+					Command:        e2epod.GenerateScriptCmd(cmd),
 					VolumeMounts:   mounts,
 					ReadinessProbe: readyProbe,
 				},
@@ -26,7 +26,6 @@ import (
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
-	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
 	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
@@ -170,7 +169,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
 			InlineVolumeSources: inlineSources,
 			SeLinuxLabel:        e2epv.SELinuxLabel,
 			NodeSelection:       l.config.ClientNodeSelection,
-			ImageID:             e2evolume.GetDefaultTestImageID(),
+			ImageID:             e2epod.GetDefaultTestImageID(),
 		}
 		l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, &podConfig, f.Timeouts.PodStart)
 		framework.ExpectNoError(err, "While creating pods for kubelet restart test")
@@ -347,8 +347,8 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
 			Containers: []v1.Container{
 				{
 					Name:    "csi-volume-tester",
-					Image:   e2evolume.GetDefaultTestImage(),
-					Command: e2evolume.GenerateScriptCmd(command),
+					Image:   e2epod.GetDefaultTestImage(),
+					Command: e2epod.GenerateScriptCmd(command),
 				},
 			},
 			RestartPolicy: v1.RestartPolicyNever,
@@ -408,9 +408,9 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n
 	podConfig := e2epod.Config{
 		NS:            ns,
 		PVCs:          pvcs,
-		SeLinuxLabel:  e2evolume.GetLinuxLabel(),
+		SeLinuxLabel:  e2epod.GetLinuxLabel(),
 		NodeSelection: node,
-		ImageID:       e2evolume.GetDefaultTestImageID(),
+		ImageID:       e2epod.GetDefaultTestImageID(),
 	}
 	pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, f.Timeouts.PodStart)
 	defer func() {
@@ -487,10 +487,10 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
 	podConfig := e2epod.Config{
 		NS:            ns,
 		PVCs:          []*v1.PersistentVolumeClaim{pvc},
-		SeLinuxLabel:  e2evolume.GetLinuxLabel(),
+		SeLinuxLabel:  e2epod.GetLinuxLabel(),
 		NodeSelection: node,
 		PVCsReadOnly:  readOnly,
-		ImageID:       e2evolume.GetTestImageID(imageutils.DebianIptables),
+		ImageID:       e2epod.GetTestImageID(imageutils.DebianIptables),
 	}
 	pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, f.Timeouts.PodStart)
 	defer func() {
@@ -653,9 +653,9 @@ func initializeVolume(cs clientset.Interface, t *framework.TimeoutContext, ns st
 	podConfig := e2epod.Config{
 		NS:            ns,
 		PVCs:          []*v1.PersistentVolumeClaim{pvc},
-		SeLinuxLabel:  e2evolume.GetLinuxLabel(),
+		SeLinuxLabel:  e2epod.GetLinuxLabel(),
 		NodeSelection: node,
-		ImageID:       e2evolume.GetDefaultTestImageID(),
+		ImageID:       e2epod.GetDefaultTestImageID(),
 	}
 	pod, err := e2epod.CreateSecPod(cs, &podConfig, t.PodStart)
 	defer func() {
@@ -330,8 +330,8 @@ func SetupStorageClass(
 		// skip storageclass creation if it already exists
 		ginkgo.By("Storage class " + computedStorageClass.Name + " is already created, skipping creation.")
 	} else {
-		ginkgo.By("Creating a StorageClass " + class.Name)
-		_, err = client.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
+		ginkgo.By("Creating a StorageClass")
+		class, err = client.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 		computedStorageClass, err = client.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
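Call sites in this diff now follow one pattern: SetupStorageClass returns the class as stored in the API server plus a cleanup closure, so tests stop creating and deleting classes by hand. A sketch (test and ns are the surrounding test's variables):

	sc, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, "resizing"))
	defer clearStorageClass() // releases whatever SetupStorageClass provisioned
	test.Class = sc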
@@ -698,8 +698,8 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
 			Containers: []v1.Container{
 				{
 					Name:    "volume-tester",
-					Image:   e2evolume.GetDefaultTestImage(),
-					Command: e2evolume.GenerateScriptCmd(command),
+					Image:   e2epod.GetDefaultTestImage(),
+					Command: e2epod.GenerateScriptCmd(command),
 					VolumeMounts: []v1.VolumeMount{
 						{
 							Name: "my-volume",
@@ -139,7 +139,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
 			sc = volumeResource.Sc
 			claimSize = pvc.Spec.Resources.Requests.Storage().String()

-			ginkgo.By("starting a pod to use the claim")
+			ginkgo.By("[init] starting a pod to use the claim")
 			originalMntTestData = fmt.Sprintf("hello from %s namespace", pvc.GetNamespace())
 			command := fmt.Sprintf("echo '%s' > %s", originalMntTestData, datapath)
@@ -147,13 +147,14 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,

 			err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision)
 			framework.ExpectNoError(err)
-			ginkgo.By("checking the claim")
+
 			// Get new copy of the claim
+			ginkgo.By("[init] checking the claim")
 			pvc, err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
 			framework.ExpectNoError(err)

 			// Get the bound PV
-			ginkgo.By("checking the PV")
+			ginkgo.By("[init] checking the PV")
 			_, err = cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
 			framework.ExpectNoError(err)
 		}
@@ -446,8 +446,8 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte
 		defer cleanup()

 		// Change volume container to busybox so we can exec later
-		l.pod.Spec.Containers[1].Image = e2evolume.GetDefaultTestImage()
-		l.pod.Spec.Containers[1].Command = e2evolume.GenerateScriptCmd("sleep 100000")
+		l.pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
+		l.pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd("sleep 100000")
 		l.pod.Spec.Containers[1].Args = nil

 		ginkgo.By(fmt.Sprintf("Creating pod %s", l.pod.Name))
@@ -521,19 +521,19 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
 	initSubpathContainer := e2epod.NewAgnhostContainer(
 		fmt.Sprintf("test-init-subpath-%s", suffix),
 		[]v1.VolumeMount{volumeSubpathMount, probeMount}, nil, "mounttest")
-	initSubpathContainer.SecurityContext = e2evolume.GenerateSecurityContext(privilegedSecurityContext)
+	initSubpathContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(privilegedSecurityContext)
 	initVolumeContainer := e2epod.NewAgnhostContainer(
 		fmt.Sprintf("test-init-volume-%s", suffix),
 		[]v1.VolumeMount{volumeMount, probeMount}, nil, "mounttest")
-	initVolumeContainer.SecurityContext = e2evolume.GenerateSecurityContext(privilegedSecurityContext)
+	initVolumeContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(privilegedSecurityContext)
 	subpathContainer := e2epod.NewAgnhostContainer(
 		fmt.Sprintf("test-container-subpath-%s", suffix),
 		[]v1.VolumeMount{volumeSubpathMount, probeMount}, nil, "mounttest")
-	subpathContainer.SecurityContext = e2evolume.GenerateSecurityContext(privilegedSecurityContext)
+	subpathContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(privilegedSecurityContext)
 	volumeContainer := e2epod.NewAgnhostContainer(
 		fmt.Sprintf("test-container-volume-%s", suffix),
 		[]v1.VolumeMount{volumeMount, probeMount}, nil, "mounttest")
-	volumeContainer.SecurityContext = e2evolume.GenerateSecurityContext(privilegedSecurityContext)
+	volumeContainer.SecurityContext = e2epod.GenerateContainerSecurityContext(privilegedSecurityContext)

 	return &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
@@ -544,9 +544,9 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
 			InitContainers: []v1.Container{
 				{
 					Name:            fmt.Sprintf("init-volume-%s", suffix),
-					Image:           e2evolume.GetDefaultTestImage(),
+					Image:           e2epod.GetDefaultTestImage(),
 					VolumeMounts:    []v1.VolumeMount{volumeMount, probeMount},
-					SecurityContext: e2evolume.GenerateSecurityContext(privilegedSecurityContext),
+					SecurityContext: e2epod.GenerateContainerSecurityContext(privilegedSecurityContext),
 				},
 				initSubpathContainer,
 				initVolumeContainer,
@@ -569,7 +569,7 @@ func SubpathTestPod(f *framework.Framework, subpath, volumeType string, source *
 					},
 				},
 			},
-			SecurityContext: e2evolume.GeneratePodSecurityContext(nil, seLinuxOptions),
+			SecurityContext: e2epod.GeneratePodSecurityContext(nil, seLinuxOptions),
 		},
 	}
 }
@@ -613,8 +613,8 @@ func volumeFormatPod(f *framework.Framework, volumeSource *v1.VolumeSource) *v1.
 			Containers: []v1.Container{
 				{
 					Name:    fmt.Sprintf("init-volume-%s", f.Namespace.Name),
-					Image:   e2evolume.GetDefaultTestImage(),
-					Command: e2evolume.GenerateScriptCmd("echo nothing"),
+					Image:   e2epod.GetDefaultTestImage(),
+					Command: e2epod.GenerateScriptCmd("echo nothing"),
 					VolumeMounts: []v1.VolumeMount{
 						{
 							Name: volumeName,
@@ -635,7 +635,7 @@ func volumeFormatPod(f *framework.Framework, volumeSource *v1.VolumeSource) *v1.
 }

 func setInitCommand(pod *v1.Pod, command string) {
-	pod.Spec.InitContainers[0].Command = e2evolume.GenerateScriptCmd(command)
+	pod.Spec.InitContainers[0].Command = e2epod.GenerateScriptCmd(command)
 }

 func setWriteCommand(file string, container *v1.Container) {
@@ -792,11 +792,11 @@ func (h *podContainerRestartHooks) FixLivenessProbe(pod *v1.Pod, probeFilePath s
 func testPodContainerRestartWithHooks(f *framework.Framework, pod *v1.Pod, hooks *podContainerRestartHooks) {
 	pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure

-	pod.Spec.Containers[0].Image = e2evolume.GetDefaultTestImage()
-	pod.Spec.Containers[0].Command = e2evolume.GenerateScriptCmd("sleep 100000")
+	pod.Spec.Containers[0].Image = e2epod.GetDefaultTestImage()
+	pod.Spec.Containers[0].Command = e2epod.GenerateScriptCmd("sleep 100000")
 	pod.Spec.Containers[0].Args = nil
-	pod.Spec.Containers[1].Image = e2evolume.GetDefaultTestImage()
-	pod.Spec.Containers[1].Command = e2evolume.GenerateScriptCmd("sleep 100000")
+	pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
+	pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd("sleep 100000")
 	pod.Spec.Containers[1].Args = nil
 	hooks.AddLivenessProbe(pod, probeFilePath)
@@ -923,7 +923,7 @@ func TestPodContainerRestartWithConfigmapModified(f *framework.Framework, origin
 		break
 	}
 	pod := SubpathTestPod(f, subpath, "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: original.Name}}}, false)
-	pod.Spec.InitContainers[0].Command = e2evolume.GenerateScriptCmd(fmt.Sprintf("touch %v", probeFilePath))
+	pod.Spec.InitContainers[0].Command = e2epod.GenerateScriptCmd(fmt.Sprintf("touch %v", probeFilePath))

 	modifiedValue := modified.Data[subpath]
 	testPodContainerRestartWithHooks(f, pod, &podContainerRestartHooks{
@@ -966,11 +966,11 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec,
 	}

 	// Change to busybox
-	pod.Spec.Containers[0].Image = e2evolume.GetDefaultTestImage()
-	pod.Spec.Containers[0].Command = e2evolume.GenerateScriptCmd("sleep 100000")
+	pod.Spec.Containers[0].Image = e2epod.GetDefaultTestImage()
+	pod.Spec.Containers[0].Command = e2epod.GenerateScriptCmd("sleep 100000")
 	pod.Spec.Containers[0].Args = nil
-	pod.Spec.Containers[1].Image = e2evolume.GetDefaultTestImage()
-	pod.Spec.Containers[1].Command = e2evolume.GenerateScriptCmd("sleep 100000")
+	pod.Spec.Containers[1].Image = e2epod.GetDefaultTestImage()
+	pod.Spec.Containers[1].Command = e2epod.GenerateScriptCmd("sleep 100000")
 	pod.Spec.Containers[1].Args = nil
 	// If grace period is too short, then there is not enough time for the volume
 	// manager to cleanup the volumes
@@ -34,7 +34,6 @@ import (
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
-	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
 	storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
 	storageutils "k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -338,8 +337,8 @@ func (t *topologyTestSuite) createResources(cs clientset.Interface, l *topologyT
 		NS:            l.config.Framework.Namespace.Name,
 		PVCs:          []*v1.PersistentVolumeClaim{l.resource.Pvc},
 		NodeSelection: e2epod.NodeSelection{Affinity: affinity},
-		SeLinuxLabel:  e2evolume.GetLinuxLabel(),
-		ImageID:       e2evolume.GetDefaultTestImageID(),
+		SeLinuxLabel:  e2epod.GetLinuxLabel(),
+		ImageID:       e2epod.GetDefaultTestImageID(),
 	}
 	l.pod, err = e2epod.MakeSecPod(&podConfig)
 	framework.ExpectNoError(err)
@@ -180,9 +180,9 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
 			podConfig := e2epod.Config{
 				NS:            f.Namespace.Name,
 				PVCs:          []*v1.PersistentVolumeClaim{l.resource.Pvc},
-				SeLinuxLabel:  e2evolume.GetLinuxLabel(),
+				SeLinuxLabel:  e2epod.GetLinuxLabel(),
 				NodeSelection: l.config.ClientNodeSelection,
-				ImageID:       e2evolume.GetDefaultTestImageID(),
+				ImageID:       e2epod.GetDefaultTestImageID(),
 			}
 			l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart)
 			defer func() {
@@ -224,9 +224,9 @@
 			podConfig = e2epod.Config{
 				NS:            f.Namespace.Name,
 				PVCs:          []*v1.PersistentVolumeClaim{l.resource.Pvc},
-				SeLinuxLabel:  e2evolume.GetLinuxLabel(),
+				SeLinuxLabel:  e2epod.GetLinuxLabel(),
 				NodeSelection: l.config.ClientNodeSelection,
-				ImageID:       e2evolume.GetDefaultTestImageID(),
+				ImageID:       e2epod.GetDefaultTestImageID(),
 			}
 			l.pod2, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, resizedPodStartupTimeout)
 			defer func() {
@@ -252,9 +252,9 @@
 			podConfig := e2epod.Config{
 				NS:            f.Namespace.Name,
 				PVCs:          []*v1.PersistentVolumeClaim{l.resource.Pvc},
-				SeLinuxLabel:  e2evolume.GetLinuxLabel(),
+				SeLinuxLabel:  e2epod.GetLinuxLabel(),
 				NodeSelection: l.config.ClientNodeSelection,
-				ImageID:       e2evolume.GetDefaultTestImageID(),
+				ImageID:       e2epod.GetDefaultTestImageID(),
 			}
 			l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart)
 			defer func() {
@@ -352,9 +352,9 @@ func WaitForResizingCondition(pvc *v1.PersistentVolumeClaim, c clientset.Interfa
 }

 // WaitForControllerVolumeResize waits for the controller resize to be finished
-func WaitForControllerVolumeResize(pvc *v1.PersistentVolumeClaim, c clientset.Interface, duration time.Duration) error {
+func WaitForControllerVolumeResize(pvc *v1.PersistentVolumeClaim, c clientset.Interface, timeout time.Duration) error {
 	pvName := pvc.Spec.VolumeName
-	waitErr := wait.PollImmediate(resizePollInterval, duration, func() (bool, error) {
+	waitErr := wait.PollImmediate(resizePollInterval, timeout, func() (bool, error) {
 		pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage]

 		pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
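The duration-to-timeout rename matches the semantics of wait.PollImmediate: the condition runs once immediately, then every resizePollInterval until it returns true, returns an error, or the timeout elapses. A minimal sketch of the same pattern (interval, timeout, and conditionMet are illustrative):

	waitErr := wait.PollImmediate(2*time.Second, 5*time.Minute, func() (bool, error) {
		// returning true stops polling successfully; a non-nil error aborts early
		return conditionMet(), nil
	})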
@@ -199,7 +199,7 @@ func makePodSpec(config e2evolume.TestConfig, initCmd string, volsrc v1.VolumeSo
 		InitContainers: []v1.Container{
 			{
 				Name:  config.Prefix + "-io-init",
-				Image: e2evolume.GetDefaultTestImage(),
+				Image: e2epod.GetDefaultTestImage(),
 				Command: []string{
 					"/bin/sh",
 					"-c",
@@ -216,7 +216,7 @@ func makePodSpec(config e2evolume.TestConfig, initCmd string, volsrc v1.VolumeSo
 		Containers: []v1.Container{
 			{
 				Name:  config.Prefix + "-io-client",
-				Image: e2evolume.GetDefaultTestImage(),
+				Image: e2epod.GetDefaultTestImage(),
 				Command: []string{
 					"/bin/sh",
 					"-c",
@@ -219,9 +219,9 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
 			podConfig := e2epod.Config{
 				NS:            l.ns.Name,
 				PVCs:          []*v1.PersistentVolumeClaim{l.Pvc},
-				SeLinuxLabel:  e2evolume.GetLinuxLabel(),
+				SeLinuxLabel:  e2epod.GetLinuxLabel(),
 				NodeSelection: l.config.ClientNodeSelection,
-				ImageID:       e2evolume.GetDefaultTestImageID(),
+				ImageID:       e2epod.GetDefaultTestImageID(),
 			}
 			pod, err := e2epod.MakeSecPod(&podConfig)
 			framework.ExpectNoError(err, "Failed to create pod")
@@ -305,8 +305,8 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
 			podConfig := e2epod.Config{
 				NS:           l.ns.Name,
 				PVCs:         []*v1.PersistentVolumeClaim{l.Pvc},
-				SeLinuxLabel: e2evolume.GetLinuxLabel(),
-				ImageID:      e2evolume.GetDefaultTestImageID(),
+				SeLinuxLabel: e2epod.GetLinuxLabel(),
+				ImageID:      e2epod.GetDefaultTestImageID(),
 			}
 			pod, err := e2epod.MakeSecPod(&podConfig)
 			framework.ExpectNoError(err)
@@ -362,8 +362,8 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa
 			podConfig := e2epod.Config{
 				NS:           l.ns.Name,
 				PVCs:         []*v1.PersistentVolumeClaim{l.Pvc},
-				SeLinuxLabel: e2evolume.GetLinuxLabel(),
-				ImageID:      e2evolume.GetDefaultTestImageID(),
+				SeLinuxLabel: e2epod.GetLinuxLabel(),
+				ImageID:      e2epod.GetDefaultTestImageID(),
 			}
 			pod, err := e2epod.MakeSecPod(&podConfig)
 			framework.ExpectNoError(err)
@@ -231,7 +231,7 @@ func testScriptInPod(
 		Containers: []v1.Container{
 			{
 				Name:    fmt.Sprintf("exec-container-%s", suffix),
-				Image:   e2evolume.GetTestImage(imageutils.Nginx),
+				Image:   e2epod.GetTestImage(imageutils.Nginx),
 				Command: command,
 				VolumeMounts: []v1.VolumeMount{
 					{
@@ -359,18 +359,18 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				ginkgo.By("Testing " + test.Name)
 				suffix := fmt.Sprintf("%d", i)
 				test.Client = c
-				test.Class = newStorageClass(test, ns, suffix)
+
+				// overwrite StorageClass spec with provisioned StorageClass
+				storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, suffix))
+				defer clearStorageClass()
+
+				test.Class = storageClass
 				test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 					ClaimSize:        test.ClaimSize,
 					StorageClassName: &test.Class.Name,
 					VolumeMode:       &test.VolumeMode,
 				}, ns)

-				// overwrite StorageClass spec with provisioned StorageClass
-				class, clearStorageClass := testsuites.SetupStorageClass(test.Client, test.Class)
-				test.Class = class
-				defer clearStorageClass()
-
 				test.TestDynamicProvisioning()
 			}
@@ -425,13 +425,15 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			test.Class = newStorageClass(test, ns, "reclaimpolicy")
 			retain := v1.PersistentVolumeReclaimRetain
 			test.Class.ReclaimPolicy = &retain
+			storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, test.Class)
+			defer clearStorageClass()
+			test.Class = storageClass
+
 			test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 				ClaimSize:        test.ClaimSize,
 				StorageClassName: &test.Class.Name,
 				VolumeMode:       &test.VolumeMode,
 			}, ns)
-			_, clearStorageClass := testsuites.SetupStorageClass(test.Client, test.Class)
-			defer clearStorageClass()

 			pv := test.TestDynamicProvisioning()
@@ -448,7 +450,6 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {

 	ginkgo.It("should not provision a volume in an unmanaged GCE zone.", func() {
 		e2eskipper.SkipUnlessProviderIs("gce", "gke")
-		var suffix string = "unmananged"

 		ginkgo.By("Discovering an unmanaged zone")
 		allZones := sets.NewString() // all zones in the project
@@ -484,7 +485,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			Parameters: map[string]string{"zone": unmanagedZone},
 			ClaimSize:  "1Gi",
 		}
-		sc := newStorageClass(test, ns, suffix)
+		sc := newStorageClass(test, ns, "unmanaged")
 		sc, err = c.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 		defer deleteStorageClass(c, sc.Name)
@@ -670,18 +671,17 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				ClaimSize:    "1500Mi",
 				ExpectedSize: "1500Mi",
 			}
-			test.Class = newStorageClass(test, ns, "external")
+
+			storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, "external"))
+			defer clearStorageClass()
+			test.Class = storageClass
+
 			test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 				ClaimSize:        test.ClaimSize,
 				StorageClassName: &test.Class.Name,
 				VolumeMode:       &test.VolumeMode,
 			}, ns)

-			// rewrite the storageClass with the computed storageClass
-			storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, test.Class)
-			defer clearStorageClass()
-			test.Class = storageClass
-
 			ginkgo.By("creating a claim with a external provisioning annotation")

 			test.TestDynamicProvisioning()
@@ -806,8 +806,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				ExpectedSize: "2Gi",
 				Parameters:   map[string]string{"resturl": serverURL},
 			}
-			suffix := fmt.Sprintf("glusterdptest")
-			test.Class = newStorageClass(test, ns, suffix)
+			storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, "glusterdptest"))
+			defer clearStorageClass()
+			test.Class = storageClass

 			ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner")
 			test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
@@ -816,8 +817,6 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				VolumeMode:       &test.VolumeMode,
 			}, ns)

-			_, clearStorageClass := testsuites.SetupStorageClass(test.Client, test.Class)
-			defer clearStorageClass()
 			test.TestDynamicProvisioning()
 		})
 	})
@@ -834,22 +833,17 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 		}

 		ginkgo.By("creating a StorageClass")
-		suffix := fmt.Sprintf("invalid-aws")
-		class := newStorageClass(test, ns, suffix)
-		class, err := c.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{})
-		framework.ExpectNoError(err)
-		defer func() {
-			framework.Logf("deleting storage class %s", class.Name)
-			framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(context.TODO(), class.Name, metav1.DeleteOptions{}))
-		}()
+		storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, "invalid-aws"))
+		defer clearStorageClass()
+		test.Class = storageClass

 		ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner")
 		claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 			ClaimSize:        test.ClaimSize,
-			StorageClassName: &class.Name,
+			StorageClassName: &test.Class.Name,
 			VolumeMode:       &test.VolumeMode,
 		}, ns)
-		claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim, metav1.CreateOptions{})
+		claim, err := c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 		defer func() {
 			framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
@@ -941,19 +935,31 @@ func getDefaultPluginName() string {
 	return ""
 }

-func newStorageClass(t testsuites.StorageClassTest, ns string, suffix string) *storagev1.StorageClass {
+func newStorageClass(t testsuites.StorageClassTest, ns string, prefix string) *storagev1.StorageClass {
 	pluginName := t.Provisioner
 	if pluginName == "" {
 		pluginName = getDefaultPluginName()
 	}
-	if suffix == "" {
-		suffix = "sc"
+	if prefix == "" {
+		prefix = "sc"
 	}
 	bindingMode := storagev1.VolumeBindingImmediate
 	if t.DelayBinding {
 		bindingMode = storagev1.VolumeBindingWaitForFirstConsumer
 	}
-	sc := getStorageClass(pluginName, t.Parameters, &bindingMode, ns, suffix)
+	if t.Parameters == nil {
+		t.Parameters = make(map[string]string)
+	}
+
+	if framework.NodeOSDistroIs("windows") {
+		// fstype might be forced from outside, in that case skip setting a default
+		if _, exists := t.Parameters["fstype"]; !exists {
+			t.Parameters["fstype"] = e2epv.GetDefaultFSType()
+			framework.Logf("setting a default fsType=%s in the storage class", t.Parameters["fstype"])
+		}
+	}
+
+	sc := getStorageClass(pluginName, t.Parameters, &bindingMode, ns, prefix)
 	if t.AllowVolumeExpansion {
 		sc.AllowVolumeExpansion = &t.AllowVolumeExpansion
 	}
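With this defaulting, a Windows run fills in an fstype for any class whose test left parameters empty; roughly (values illustrative):

	test := testsuites.StorageClassTest{Parameters: make(map[string]string)}
	sc := newStorageClass(test, ns, "resizing")
	// under --node-os-distro=windows: sc.Parameters["fstype"] == "ntfs" (from e2epv.GetDefaultFSType)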
@@ -965,7 +971,7 @@ func getStorageClass(
 	parameters map[string]string,
 	bindingMode *storagev1.VolumeBindingMode,
 	ns string,
-	suffix string,
+	prefix string,
 ) *storagev1.StorageClass {
 	if bindingMode == nil {
 		defaultBindingMode := storagev1.VolumeBindingImmediate
@@ -976,8 +982,8 @@ func getStorageClass(
 			Kind: "StorageClass",
 		},
 		ObjectMeta: metav1.ObjectMeta{
-			// Name must be unique, so let's base it on namespace name
-			Name: ns + "-" + suffix,
+			// Name must be unique, so let's base it on namespace name and the prefix (the prefix is test specific)
+			GenerateName: ns + "-" + prefix,
 		},
 		Provisioner: provisioner,
 		Parameters:  parameters,
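Switching Name to GenerateName delegates uniqueness to the API server, which appends a random suffix on create, so tests that share a namespace and prefix no longer collide. Sketch of the effect (the generated suffix is illustrative):

	sc := getStorageClass(pluginName, nil, nil, "e2e-ns", "resizing")
	// sc.GenerateName == "e2e-ns-resizing"
	// after Create, the server assigns e.g. sc.Name == "e2e-ns-resizingx7k2q"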