e2e: use custom timeouts in all storage E2E tests

Fabio Bertinatto 2020-10-22 16:04:35 +02:00
parent f6e900f468
commit c82626f96f
50 changed files with 239 additions and 193 deletions
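Every hunk below follows the same pattern: a fixed package-level timeout (framework.PodStartTimeout, e2epv.ClaimBindingTimeout, framework.SnapshotDeleteTimeout, and so on) is replaced by a field of a per-test *framework.TimeoutContext, usually taken from f.Timeouts. As a reading aid, here is a minimal sketch of the TimeoutContext fields this commit exercises; the field names come from the call sites below, while the authoritative definition and its default values live in the framework package and are not part of this excerpt.

package framework

import "time"

// TimeoutContext (sketch, limited to the fields used in this commit):
// per-test timeout knobs replacing the fixed package-level constants.
type TimeoutContext struct {
	PodStart            time.Duration // pod reaching Running
	PodStartShort       time.Duration // shorter pod-start wait
	PodStartSlow        time.Duration // longer pod-start wait for slow volumes
	PodDelete           time.Duration // pod disappearing after deletion
	ClaimProvision      time.Duration // dynamically provisioned PVC reaching Bound
	ClaimProvisionShort time.Duration // shorter provisioning wait ("stays Pending" checks)
	ClaimBound          time.Duration // PVC reaching Bound
	PVBound             time.Duration // PV reaching Bound
	PVReclaim           time.Duration // PV reclaim after its PVC is deleted
	PVDelete            time.Duration // PV removal
	SnapshotCreate      time.Duration // VolumeSnapshot becoming ready
	SnapshotDelete      time.Duration // snapshot and content removal
}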

View File

@ -494,7 +494,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, f.Namespace.Name, false)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Namespace.Name, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, f.Namespace.Name, pv, pvc))
defer func() {
errs := e2epv.PVPVCCleanup(c, f.Namespace.Name, pv, pvc)

View File

@ -212,7 +212,7 @@ func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin
// DeletePVCandValidatePV deletes the PVC and waits for the PV to enter its expected phase. Validate that the PV
// has been reclaimed (assumption here about reclaimPolicy). Caller tells this func which
// phase value to expect for the pv bound to the to-be-deleted claim.
func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
func DeletePVCandValidatePV(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
pvname := pvc.Spec.VolumeName
framework.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
@ -222,7 +222,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
// Wait for the PV's phase to return to be `expectPVPhase`
framework.Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, PVReclaimingTimeout)
err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, timeouts.PVReclaim)
if err != nil {
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
}
@ -255,7 +255,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
// Available, Bound).
// Note: if there are more claims than pvs then some of the remaining claims may bind to just made
// available pvs.
func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error {
func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error {
var boundPVs, deletedPVCs int
for pvName := range pvols {
@ -276,7 +276,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
// get the pvc for the delete call below
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), cr.Name, metav1.GetOptions{})
if err == nil {
if err = DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase); err != nil {
if err = DeletePVCandValidatePV(c, timeouts, ns, pvc, pv, expectPVPhase); err != nil {
return err
}
} else if !apierrors.IsNotFound(err) {
@ -434,17 +434,17 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
}
// WaitOnPVandPVC waits for the pv and pvc to bind to each other.
func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
func WaitOnPVandPVC(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
// Wait for newly created PVC to bind to the PV
framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, ClaimBindingTimeout)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, timeouts.ClaimBound)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
}
// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
// since the PVC is already bound.
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, framework.Poll, PVBindingTimeout)
err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, framework.Poll, timeouts.PVBound)
if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err)
}
@ -482,7 +482,7 @@ func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, p
// to situations where the maximum wait times are reached several times in succession,
// extending test time. Thus, it is recommended to keep the delta between PVs and PVCs
// small.
func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, testExpected bool) error {
func WaitAndVerifyBinds(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, testExpected bool) error {
var actualBinds int
expectedBinds := len(pvols)
if expectedBinds > len(claims) { // want the min of # pvs or #pvcs
@ -490,7 +490,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
}
for pvName := range pvols {
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, PVBindingTimeout)
err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, timeouts.PVBound)
if err != nil && len(pvols) > len(claims) {
framework.Logf("WARN: pv %v is not bound after max wait", pvName)
framework.Logf(" This may be ok since there are more pvs than pvcs")
@ -513,7 +513,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
}
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, framework.Poll, ClaimBindingTimeout)
err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, framework.Poll, timeouts.ClaimBound)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err)
}
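Taken together, the e2epv changes above mean that every test binding and reclaiming volumes now threads its framework's TimeoutContext through these helpers. A minimal caller sketch (hypothetical helper name, mirroring the NFS persistent-volume tests later in this commit); WaitOnPVandPVC waits with Timeouts.ClaimBound and Timeouts.PVBound, DeletePVCandValidatePV with Timeouts.PVReclaim:

package storage

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// bindAndReclaim is a hypothetical helper showing the updated signatures in use.
func bindAndReclaim(f *framework.Framework, c clientset.Interface,
	pvCfg e2epv.PersistentVolumeConfig, pvcCfg e2epv.PersistentVolumeClaimConfig) {
	ns := f.Namespace.Name
	pv, pvc, err := e2epv.CreatePVPVC(c, pvCfg, pvcCfg, ns, false)
	framework.ExpectNoError(err)
	// Bind waits are bounded by f.Timeouts.ClaimBound / f.Timeouts.PVBound.
	framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))
	// The reclaim wait is bounded by f.Timeouts.PVReclaim.
	framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, f.Timeouts, ns, pvc, pv, v1.VolumeAvailable))
}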

View File

@ -362,7 +362,7 @@ func TestServerCleanup(f *framework.Framework, config TestConfig) {
gomega.Expect(err).To(gomega.BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
}
func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) {
func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutContext, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) {
ginkgo.By(fmt.Sprint("starting ", config.Prefix, "-", podSuffix))
var gracePeriod int64 = 1
var command string
@ -439,13 +439,13 @@ func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix
return nil, err
}
if slow {
err = e2epod.WaitForPodRunningInNamespaceSlow(client, clientPod.Name, clientPod.Namespace)
err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStartSlow)
} else {
err = e2epod.WaitForPodRunningInNamespace(client, clientPod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStart)
}
if err != nil {
e2epod.DeletePodOrFail(client, clientPod.Namespace, clientPod.Name)
e2epod.WaitForPodToDisappear(client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
e2epod.WaitForPodToDisappear(client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
return nil, err
}
return clientPod, nil
@ -514,13 +514,14 @@ func TestVolumeClientSlow(f *framework.Framework, config TestConfig, fsGroup *in
}
func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test, slow bool) {
clientPod, err := runVolumeTesterPod(f.ClientSet, config, "client", false, fsGroup, tests, slow)
timeouts := f.Timeouts
clientPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "client", false, fsGroup, tests, slow)
if err != nil {
framework.Failf("Failed to create client pod: %v", err)
}
defer func() {
e2epod.DeletePodOrFail(f.ClientSet, clientPod.Namespace, clientPod.Name)
e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
}()
testVolumeContent(f, clientPod, fsGroup, fsType, tests)
@ -531,17 +532,18 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
// The volume must be writable.
func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
privileged := true
timeouts := f.Timeouts
if framework.NodeOSDistroIs("windows") {
privileged = false
}
injectorPod, err := runVolumeTesterPod(f.ClientSet, config, "injector", privileged, fsGroup, tests, false /*slow*/)
injectorPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "injector", privileged, fsGroup, tests, false /*slow*/)
if err != nil {
framework.Failf("Failed to create injector pod: %v", err)
return
}
defer func() {
e2epod.DeletePodOrFail(f.ClientSet, injectorPod.Namespace, injectorPod.Name)
e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
}()
ginkgo.By("Writing text file contents in the container.")

View File

@ -231,7 +231,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
ginkgo.By("Waiting for all PVCs to be bound")
for _, config := range configs {
e2epv.WaitOnPVandPVC(c, ns, config.pv, config.pvc)
e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, config.pv, config.pvc)
}
ginkgo.By("Creating pods for each static PV")

View File

@ -194,6 +194,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
}
scTest := testsuites.StorageClassTest{
Name: m.driver.GetDriverInfo().Name,
Timeouts: f.Timeouts,
Provisioner: sc.Provisioner,
Parameters: sc.Parameters,
ClaimSize: "1Gi",
@ -383,7 +384,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
}.AsSelector().String()
msg := "AttachVolume.Attach failed for volume"
err = e2eevents.WaitTimeoutForEvent(m.cs, pod.Namespace, eventSelector, msg, framework.PodStartTimeout)
err = e2eevents.WaitTimeoutForEvent(m.cs, pod.Namespace, eventSelector, msg, f.Timeouts.PodStart)
if err != nil {
podErr := e2epod.WaitTimeoutForPodRunningInNamespace(m.cs, pod.Name, pod.Namespace, 10*time.Second)
framework.ExpectError(podErr, "Pod should not be in running status because attaching should failed")
@ -504,7 +505,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
csiInlineVolumesEnabled := test.expectEphemeral
if test.expectPodInfo {
ginkgo.By("checking for CSIInlineVolumes feature")
csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(m.cs, f.Namespace.Name)
csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(m.cs, f.Timeouts, f.Namespace.Name)
framework.ExpectNoError(err, "failed to test for CSIInlineVolumes")
}
@ -1187,7 +1188,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
sc, _, pod := createPod(false /* persistent volume, late binding as specified above */)
framework.ExpectEqual(sc.Name, scName, "pre-selected storage class name not used")
waitCtx, cancel := context.WithTimeout(context.Background(), podStartTimeout)
waitCtx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStart)
defer cancel()
condition := anyOf(
podRunning(waitCtx, f.ClientSet, pod.Name, pod.Namespace),
@ -1271,7 +1272,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
ginkgo.By("Creating snapshot")
// TODO: Test VolumeSnapshots with Retain policy
snapshotClass, snapshot := testsuites.CreateSnapshot(sDriver, m.config, testpatterns.DynamicSnapshotDelete, claim.Name, claim.Namespace)
snapshotClass, snapshot := testsuites.CreateSnapshot(sDriver, m.config, testpatterns.DynamicSnapshotDelete, claim.Name, claim.Namespace, f.Timeouts)
framework.ExpectNoError(err, "failed to create snapshot")
m.vsc[snapshotClass.GetName()] = snapshotClass
volumeSnapshotName := snapshot.GetName()

View File

@ -86,6 +86,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
test := testsuites.StorageClassTest{
Name: "flexvolume-resize",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
AllowVolumeExpansion: true,
Provisioner: "flex-expand",

View File

@ -79,6 +79,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
test := testsuites.StorageClassTest{
Name: "flexvolume-resize",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
AllowVolumeExpansion: true,
Provisioner: "flex-expand",

View File

@ -93,6 +93,7 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
var err error
test := testsuites.StorageClassTest{
Name: "default",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
}
pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
@ -112,7 +113,7 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
PVCs: pvcClaims,
SeLinuxLabel: e2epv.SELinuxLabel,
}
pod, err := e2epod.CreateSecPod(c, &podConfig, framework.PodStartTimeout)
pod, err := e2epod.CreateSecPod(c, &podConfig, f.Timeouts.PodStart)
framework.ExpectNoError(err, "While creating pods for kubelet restart test")
return pod, pvc, pvs[0]
}

View File

@ -462,7 +462,7 @@ func verifyPodHostPathTypeFailure(f *framework.Framework, nodeSelector map[strin
}.AsSelector().String()
msg := "hostPath type check failed"
err = e2eevents.WaitTimeoutForEvent(f.ClientSet, f.Namespace.Name, eventSelector, msg, framework.PodStartTimeout)
err = e2eevents.WaitTimeoutForEvent(f.ClientSet, f.Namespace.Name, eventSelector, msg, f.Timeouts.PodStart)
// Events are unreliable, don't depend on the event. It's used only to speed up the test.
if err != nil {
framework.Logf("Warning: did not get event about FailedMountVolume")
@ -480,7 +480,7 @@ func verifyPodHostPathType(f *framework.Framework, nodeSelector map[string]strin
newPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(),
newHostPathTypeTestPod(nodeSelector, hostDir, "/mnt/test", hostPathType), metav1.CreateOptions{})
framework.ExpectNoError(err)
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, newPod.Name, newPod.Namespace, framework.PodStartShortTimeout))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, newPod.Name, newPod.Namespace, f.Timeouts.PodStartShort))
f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), newPod.Name, *metav1.NewDeleteOptions(0))
}

View File

@ -78,6 +78,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
test := testsuites.StorageClassTest{
Name: "default",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
AllowVolumeExpansion: true,
DelayBinding: true,

View File

@ -163,7 +163,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
}
pv1, pvc1, err = e2epv.CreatePVPVC(c, pvConfig1, pvcConfig, ns, false)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv1, pvc1))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv1, pvc1))
ginkgo.By("Initializing second PD with PVPVC binding")
pvSource2, diskName2 = createGCEVolume()
@ -176,7 +176,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
}
pv2, pvc2, err = e2epv.CreatePVPVC(c, pvConfig2, pvcConfig, ns, false)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv2, pvc2))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv2, pvc2))
ginkgo.By("Attaching both PVC's to a single pod")
clientPod, err = e2epod.CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "")
@ -312,7 +312,7 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv.
e2epod.DeletePodWithWait(c, pod)
}
}()
err = e2epod.WaitForPodRunningInNamespace(c, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, fmt.Sprintf("Pod %q timed out waiting for phase: Running", pod.Name))
// Return created api objects
pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})

View File

@ -148,7 +148,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
fmtPod = testPDPod([]string{diskName}, host0Name, false, 1)
_, err = podClient.Create(context.TODO(), fmtPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create fmtPod")
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, fmtPod.Name, f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, fmtPod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
ginkgo.By("deleting the fmtPod")
framework.ExpectNoError(podClient.Delete(context.TODO(), fmtPod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete fmtPod")
@ -176,7 +176,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
ginkgo.By("creating host0Pod on node0")
_, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, host0Pod.Name, f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
framework.Logf("host0Pod: %q, node0: %q", host0Pod.Name, host0Name)
var containerName, testFile, testFileContents string
@ -200,7 +200,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
ginkgo.By("creating host1Pod on node1")
_, err = podClient.Create(context.TODO(), host1Pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create host1Pod")
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, host1Pod.Name, f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host1Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
framework.Logf("host1Pod: %q, node1: %q", host1Pod.Name, host1Name)
if readOnly {
@ -282,7 +282,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
host0Pod = testPDPod(diskNames, host0Name, false /* readOnly */, numContainers)
_, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, host0Pod.Name, f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
ginkgo.By(fmt.Sprintf("writing %d file(s) via a container", numPDs))
containerName := "mycontainer"
@ -385,7 +385,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
_, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
ginkgo.By("waiting for host0Pod to be running")
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, host0Pod.Name, f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
ginkgo.By("writing content to host0Pod")
testFile := "/testpd1/tracker"
@ -474,7 +474,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
ginkgo.By("Creating test pod with same volume")
_, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create pod")
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, pod.Name, f.Namespace.Name))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
ginkgo.By("deleting the pod")
framework.ExpectNoError(podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete pod")

View File

@ -44,11 +44,11 @@ func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
}
// initializeGCETestSpec creates a PV, PVC, and ClientPod that will run until killed by test or clean up.
func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
func initializeGCETestSpec(c clientset.Interface, t *framework.TimeoutContext, ns string, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
ginkgo.By("Creating the PV and PVC")
pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, isPrebound)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, t, ns, pv, pvc))
ginkgo.By("Creating the Client Pod")
clientPod, err := e2epod.CreateClientPod(c, ns, pvc)
@ -103,7 +103,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
Selector: selector,
StorageClassName: &emptyStorageClass,
}
clientPod, pv, pvc = initializeGCETestSpec(c, ns, pvConfig, pvcConfig, false)
clientPod, pv, pvc = initializeGCETestSpec(c, f.Timeouts, ns, pvConfig, pvcConfig, false)
node = types.NodeName(clientPod.Spec.NodeName)
})

View File

@ -54,6 +54,7 @@ type localTestConfig struct {
nodes []v1.Node
node0 *v1.Node
client clientset.Interface
timeouts *framework.TimeoutContext
scName string
discoveryDir string
hostExec utils.HostExec
@ -165,6 +166,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
config = &localTestConfig{
ns: f.Namespace.Name,
client: f.ClientSet,
timeouts: f.Timeouts,
nodes: nodes.Items,
node0: node0,
scName: scName,
@ -312,7 +314,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
createLocalPVCsPVs(config, []*localTestVolume{testVol}, immediateMode)
pod, err := createLocalPod(config, testVol, nil)
framework.ExpectError(err)
err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, f.Timeouts.PodStartShort)
framework.ExpectError(err)
cleanupLocalPVCsPVs(config, []*localTestVolume{testVol})
})
@ -329,7 +331,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, f.Timeouts.PodStartShort)
framework.ExpectError(err)
cleanupLocalVolumes(config, []*localTestVolume{testVol})
@ -854,7 +856,7 @@ func cleanupLocalVolumes(config *localTestConfig, volumes []*localTestVolume) {
}
func verifyLocalVolume(config *localTestConfig, volume *localTestVolume) {
framework.ExpectNoError(e2epv.WaitOnPVandPVC(config.client, config.ns, volume.pv, volume.pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(config.client, config.timeouts, config.ns, volume.pv, volume.pvc))
}
func verifyLocalPod(config *localTestConfig, volume *localTestVolume, pod *v1.Pod, expectedNodeName string) {
@ -1031,7 +1033,7 @@ func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *i
SeLinuxLabel: selinuxLabel,
FsGroup: fsGroup,
}
return e2epod.CreateSecPod(config.client, &podConfig, framework.PodStartShortTimeout)
return e2epod.CreateSecPod(config.client, &podConfig, config.timeouts.PodStartShort)
}
func createWriteCmd(testDir string, testFile string, writeTestFileContent string, volumeType localVolumeType) string {

View File

@ -44,7 +44,7 @@ import (
func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
// 1. verify that the PV and PVC have bound correctly
ginkgo.By("Validating the PV-PVC binding")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))
// 2. create the nfs writer pod, test if the write was successful,
// then delete the pod and verify that it was deleted
@ -53,7 +53,7 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
// 3. delete the PVC, wait for PV to become "Released"
ginkgo.By("Deleting the PVC to invoke the reclaim policy.")
framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeReleased))
framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, f.Timeouts, ns, pvc, pv, v1.VolumeReleased))
}
// Validate pairs of PVs and PVCs, create and verify writer pod, delete PVC and validate
@ -87,7 +87,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
// 2. delete each PVC, wait for its bound PV to reach `expectedPhase`
ginkgo.By("Deleting PVCs to invoke reclaim policy")
if err = e2epv.DeletePVCandValidatePVGroup(c, ns, pvols, claims, expectPhase); err != nil {
if err = e2epv.DeletePVCandValidatePVGroup(c, f.Timeouts, ns, pvols, claims, expectPhase); err != nil {
return err
}
return nil
@ -234,7 +234,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
numPVs, numPVCs := 2, 4
pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, ns, pvols, claims, true))
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true))
framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
})
@ -244,7 +244,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
numPVs, numPVCs := 3, 3
pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, ns, pvols, claims, true))
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true))
framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
})
@ -254,7 +254,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
numPVs, numPVCs := 4, 2
pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, ns, pvols, claims, true))
framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true))
framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
})
})
@ -267,7 +267,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRecycle
pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err, "BeforeEach: Failed to create PV/PVC")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed")
})
ginkgo.AfterEach(func() {
@ -289,7 +289,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
ginkgo.By("Deleting the claim")
framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable))
framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, f.Timeouts, ns, pvc, pv, v1.VolumeAvailable))
ginkgo.By("Re-mounting the volume.")
pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, ns)
@ -310,7 +310,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
// Delete the PVC and wait for the recycler to finish before the NFS server gets shutdown during cleanup.
framework.Logf("Removing second PVC, waiting for the recycler to finish before cleanup.")
framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable))
framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, f.Timeouts, ns, pvc, pv, v1.VolumeAvailable))
pvc = nil
})
})

View File

@ -100,7 +100,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
ginkgo.By("Deleting the PV")
err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, *metav1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting PV")
err = e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout)
err = e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, f.Timeouts.PVDelete)
framework.ExpectNoError(err, "waiting for PV to be deleted")
})
@ -111,7 +111,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
framework.ExpectNoError(err, "Error creating PVC")
ginkgo.By("Waiting for PVC to become Bound")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, e2epv.ClaimBindingTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, f.Timeouts.ClaimBound)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
ginkgo.By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC")
@ -128,7 +128,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
framework.ExpectNoError(err, "Error deleting PVC")
ginkgo.By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC")
err = e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout)
err = e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, f.Timeouts.PVDelete)
framework.ExpectNoError(err, "waiting for PV to be deleted")
})
})

View File

@ -18,12 +18,13 @@ package storage
import (
"context"
"github.com/onsi/ginkgo"
"fmt"
"time"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
@ -77,6 +78,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
prefix := "pvc-protection"
e2epv.SkipIfNoDefaultStorageClass(client)
t := testsuites.StorageClassTest{
Timeouts: f.Timeouts,
ClaimSize: "1Gi",
}
pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
@ -94,7 +96,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
framework.ExpectNoError(err, "While creating pod that uses the PVC or waiting for the Pod to become Running")
ginkgo.By("Waiting for PVC to become Bound")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, e2epv.ClaimBindingTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, f.Timeouts.ClaimBound)
framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
ginkgo.By("Checking that PVC Protection finalizer is set")

View File

@ -18,6 +18,7 @@ package storage
import (
"context"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@ -107,6 +108,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
Name: "HDD Regional PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Timeouts: framework.NewTimeoutContextWithDefaults(),
Parameters: map[string]string{
"type": "pd-standard",
"zones": strings.Join(cloudZones, ","),
@ -129,6 +131,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
Name: "HDD Regional PD with auto zone selection on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Timeouts: framework.NewTimeoutContextWithDefaults(),
Parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
@ -166,6 +169,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
testSpec := testsuites.StorageClassTest{
Name: "Regional PD Failover on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Timeouts: framework.NewTimeoutContextWithDefaults(),
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",
@ -326,6 +330,7 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int)
Client: c,
Name: "Regional PD storage class with waitForFirstConsumer test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Timeouts: framework.NewTimeoutContextWithDefaults(),
Parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
@ -362,6 +367,7 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
test := testsuites.StorageClassTest{
Name: "Regional PD storage class with allowedTopologies test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Timeouts: framework.NewTimeoutContextWithDefaults(),
Parameters: map[string]string{
"type": "pd-standard",
"replication-type": "regional-pd",
@ -389,6 +395,7 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns string, pvcCount int) {
test := testsuites.StorageClassTest{
Client: c,
Timeouts: framework.NewTimeoutContextWithDefaults(),
Name: "Regional PD storage class with allowedTopologies and waitForFirstConsumer test on GCE",
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{

View File

@ -420,7 +420,7 @@ func createPVCPV(
pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
framework.ExpectNoError(err, "PVC, PV creation failed")
err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc)
err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Timeouts, f.Namespace.Name, pv, pvc)
framework.ExpectNoError(err, "PVC, PV failed to bind")
return pv, pvc
@ -453,7 +453,7 @@ func createPVCPVFromDynamicProvisionSC(
framework.ExpectNoError(err)
if !isDelayedBinding(sc) {
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
}

View File

@ -82,7 +82,7 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("disruptive")
f := framework.NewFrameworkWithCustomTimeouts("disruptive", getDriverTimeouts(driver))
init := func() {
l = local{}
@ -169,7 +169,7 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern
NodeSelection: l.config.ClientNodeSelection,
ImageID: e2evolume.GetDefaultTestImageID(),
}
l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, &podConfig, framework.PodStartTimeout)
l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, &podConfig, f.Timeouts.PodStart)
framework.ExpectNoError(err, "While creating pods for kubelet restart test")
if pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil {
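The test suites in this and the following files switch from framework.NewDefaultFramework(name) to framework.NewFrameworkWithCustomTimeouts(name, getDriverTimeouts(driver)), so a storage driver can stretch or shrink the waits for every test it runs. getDriverTimeouts itself is not part of this excerpt; a plausible sketch, assuming a driver may optionally advertise its own timeouts and everything else falls back to the defaults:

package testsuites

import "k8s.io/kubernetes/test/e2e/framework"

// CustomTimeoutsTestDriver is assumed here: a driver that wants non-default
// timeouts implements it on top of the suite's existing TestDriver interface.
type CustomTimeoutsTestDriver interface {
	TestDriver
	GetTimeouts() *framework.TimeoutContext
}

// getDriverTimeouts (sketch): per-driver timeouts if offered, defaults otherwise.
func getDriverTimeouts(driver TestDriver) *framework.TimeoutContext {
	if d, ok := driver.(CustomTimeoutsTestDriver); ok {
		return d.GetTimeouts()
	}
	return framework.NewTimeoutContextWithDefaults()
}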

View File

@ -106,11 +106,11 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("ephemeral")
f := framework.NewFrameworkWithCustomTimeouts("ephemeral", getDriverTimeouts(driver))
init := func() {
if pattern.VolType == testpatterns.GenericEphemeralVolume {
enabled, err := GenericEphemeralVolumesEnabled(f.ClientSet, f.Namespace.Name)
enabled, err := GenericEphemeralVolumesEnabled(f.ClientSet, f.Timeouts, f.Namespace.Name)
framework.ExpectNoError(err, "check GenericEphemeralVolume feature")
if !enabled {
e2eskipper.Skipf("Cluster doesn't support %q volumes -- skipping", pattern.VolType)
@ -127,6 +127,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
case testpatterns.CSIInlineVolume:
l.testCase = &EphemeralTest{
Client: l.config.Framework.ClientSet,
Timeouts: f.Timeouts,
Namespace: f.Namespace.Name,
DriverName: eDriver.GetCSIDriverName(l.config),
Node: l.config.ClientNodeSelection,
@ -137,6 +138,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
case testpatterns.GenericEphemeralVolume:
l.testCase = &EphemeralTest{
Client: l.config.Framework.ClientSet,
Timeouts: f.Timeouts,
Namespace: f.Namespace.Name,
Node: l.config.ClientNodeSelection,
VolSource: l.resource.VolSource,
@ -194,7 +196,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
[]v1.VolumeSource{pod.Spec.Volumes[0].VolumeSource},
readOnly,
l.testCase.Node)
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, pod2.Name, pod2.Namespace), "waiting for second pod with inline volume")
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod2.Name, pod2.Namespace, f.Timeouts.PodStartSlow), "waiting for second pod with inline volume")
// If (and only if) we were able to mount
// read/write and volume data is not shared
@ -207,7 +209,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
storageutils.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
}
defer StopPodAndDependents(f.ClientSet, pod2)
defer StopPodAndDependents(f.ClientSet, f.Timeouts, pod2)
return nil
}
@ -232,6 +234,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
// Not all parameters are used by all tests.
type EphemeralTest struct {
Client clientset.Interface
Timeouts *framework.TimeoutContext
Namespace string
DriverName string
VolSource *v1.VolumeSource
@ -307,9 +310,9 @@ func (t EphemeralTest) TestEphemeral() {
pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node)
defer func() {
// pod might be nil now.
StopPodAndDependents(client, pod)
StopPodAndDependents(client, t.Timeouts, pod)
}()
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume")
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(client, pod.Name, pod.Namespace, t.Timeouts.PodStartSlow), "waiting for pod with inline volume")
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName
@ -320,7 +323,7 @@ func (t EphemeralTest) TestEphemeral() {
runningPodData = t.RunningPodCheck(pod)
}
StopPodAndDependents(client, pod)
StopPodAndDependents(client, t.Timeouts, pod)
pod = nil // Don't stop twice.
// There should be no dangling PVCs in the namespace now. There might be for
@ -383,8 +386,8 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
// CSIInlineVolumesEnabled checks whether the running cluster has the CSIInlineVolumes feature gate enabled.
// It does that by trying to create a pod that uses that feature.
func CSIInlineVolumesEnabled(c clientset.Interface, ns string) (bool, error) {
return VolumeSourceEnabled(c, ns, v1.VolumeSource{
func CSIInlineVolumesEnabled(c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) {
return VolumeSourceEnabled(c, t, ns, v1.VolumeSource{
CSI: &v1.CSIVolumeSource{
Driver: "no-such-driver.example.com",
},
@ -393,9 +396,9 @@ func CSIInlineVolumesEnabled(c clientset.Interface, ns string) (bool, error) {
// GenericEphemeralVolumesEnabled checks whether the running cluster has the GenericEphemeralVolume feature gate enabled.
// It does that by trying to create a pod that uses that feature.
func GenericEphemeralVolumesEnabled(c clientset.Interface, ns string) (bool, error) {
func GenericEphemeralVolumesEnabled(c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) {
storageClassName := "no-such-storage-class"
return VolumeSourceEnabled(c, ns, v1.VolumeSource{
return VolumeSourceEnabled(c, t, ns, v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: v1.PersistentVolumeClaimSpec{
@ -414,7 +417,7 @@ func GenericEphemeralVolumesEnabled(c clientset.Interface, ns string) (bool, err
// VolumeSourceEnabled checks whether a certain kind of volume source is enabled by trying
// to create a pod that uses it.
func VolumeSourceEnabled(c clientset.Interface, ns string, volume v1.VolumeSource) (bool, error) {
func VolumeSourceEnabled(c clientset.Interface, t *framework.TimeoutContext, ns string, volume v1.VolumeSource) (bool, error) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@ -451,7 +454,7 @@ func VolumeSourceEnabled(c clientset.Interface, ns string, volume v1.VolumeSourc
switch {
case err == nil:
// Pod was created, feature supported.
StopPodAndDependents(c, pod)
StopPodAndDependents(c, t, pod)
return true, nil
case apierrors.IsInvalid(err):
// "Invalid" because it uses a feature that isn't supported.

View File

@ -102,7 +102,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver TestDriver, pattern te
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("fsgroupchangepolicy")
f := framework.NewFrameworkWithCustomTimeouts("fsgroupchangepolicy", getDriverTimeouts(driver))
init := func() {
e2eskipper.SkipIfNodeOSDistroIs("windows")
@ -244,7 +244,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver TestDriver, pattern te
func createPodAndVerifyContentGid(f *framework.Framework, podConfig *e2epod.Config, createInitialFiles bool, expectedRootDirFileOwnership, expectedSubDirFileOwnership string) *v1.Pod {
podFsGroup := strconv.FormatInt(*podConfig.FsGroup, 10)
ginkgo.By(fmt.Sprintf("Creating Pod in namespace %s with fsgroup %s", podConfig.NS, podFsGroup))
pod, err := e2epod.CreateSecPodWithNodeSelection(f.ClientSet, podConfig, framework.PodStartTimeout)
pod, err := e2epod.CreateSecPodWithNodeSelection(f.ClientSet, podConfig, f.Timeouts.PodStart)
framework.ExpectNoError(err)
framework.Logf("Pod %s/%s started successfully", pod.Namespace, pod.Name)

View File

@ -97,7 +97,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("multivolume")
f := framework.NewFrameworkWithCustomTimeouts("multivolume", getDriverTimeouts(driver))
init := func() {
l = local{}
@ -346,7 +346,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
l.resources = append(l.resources, resource)
// Initialize the volume with a filesystem - it's going to be mounted as read-only below.
initializeVolume(l.cs, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection)
initializeVolume(l.cs, f.Timeouts, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection)
// Test access to the volume from pods on a single node
TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
@ -408,7 +408,7 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n
NodeSelection: node,
ImageID: e2evolume.GetDefaultTestImageID(),
}
pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, f.Timeouts.PodStart)
defer func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
}()
@ -488,7 +488,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
PVCsReadOnly: readOnly,
ImageID: e2evolume.GetTestImageID(imageutils.DebianIptables),
}
pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout)
pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, f.Timeouts.PodStart)
defer func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
}()
@ -637,7 +637,7 @@ func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, nodes *v1.N
}
// initializeVolume creates a filesystem on given volume, so it can be used as read-only later
func initializeVolume(cs clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
func initializeVolume(cs clientset.Interface, t *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {
// Block volumes do not need to be initialized.
return
@ -653,7 +653,7 @@ func initializeVolume(cs clientset.Interface, ns string, pvc *v1.PersistentVolum
NodeSelection: node,
ImageID: e2evolume.GetDefaultTestImageID(),
}
pod, err := e2epod.CreateSecPod(cs, &podConfig, framework.PodStartTimeout)
pod, err := e2epod.CreateSecPod(cs, &podConfig, t.PodStart)
defer func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
}()

View File

@ -45,6 +45,7 @@ import (
// Not all parameters are used by all tests.
type StorageClassTest struct {
Client clientset.Interface
Timeouts *framework.TimeoutContext
Claim *v1.PersistentVolumeClaim
SourceClaim *v1.PersistentVolumeClaim
Class *storagev1.StorageClass
@ -129,7 +130,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("provisioning")
f := framework.NewFrameworkWithCustomTimeouts("provisioning", getDriverTimeouts(driver))
init := func() {
l = local{}
@ -160,6 +161,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
framework.Logf("In creating storage class object and pvc objects for driver - sc: %v, pvc: %v, src-pvc: %v", l.sc, l.pvc, l.sourcePVC)
l.testCase = &StorageClassTest{
Client: l.config.Framework.ClientSet,
Timeouts: f.Timeouts,
Claim: l.pvc,
SourceClaim: l.sourcePVC,
Class: l.sc,
@ -406,7 +408,7 @@ func getBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.
// checkProvisioning verifies that the claim is bound and has the correct properties
func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
ginkgo.By("checking the claim")
@ -595,7 +597,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
// Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out
ginkgo.By("checking the claims are in pending state")
err = e2epv.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true)
err = e2epv.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true)
framework.ExpectError(err)
verifyPVCsPending(t.Client, createdClaims)
@ -610,7 +612,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
framework.ExpectNoError(err)
defer func() {
e2epod.DeletePodOrFail(t.Client, pod.Namespace, pod.Name)
e2epod.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
e2epod.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, t.Timeouts.PodDelete)
}()
if expectUnschedulable {
// Verify that no claims are provisioned.
@ -629,7 +631,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
// make sure claim did bind
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
pv, err := t.Client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{})
@ -715,7 +717,7 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
// StopPodAndDependents first tries to log the output of the pod's container,
// then deletes the pod and waits for that to succeed. Also waits for all owned
// resources to be deleted.
func StopPodAndDependents(c clientset.Interface, pod *v1.Pod) {
func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutContext, pod *v1.Pod) {
if pod == nil {
return
}
@ -762,14 +764,14 @@ func StopPodAndDependents(c clientset.Interface, pod *v1.Pod) {
}
framework.Logf("pod Delete API error: %v", err)
}
framework.Logf("Wait up to %v for pod %q to be fully deleted", e2epod.PodDeleteTimeout, pod.Name)
e2epod.WaitForPodNotFoundInNamespace(c, pod.Name, pod.Namespace, e2epod.PodDeleteTimeout)
framework.Logf("Wait up to %v for pod %q to be fully deleted", timeouts.PodDelete, pod.Name)
e2epod.WaitForPodNotFoundInNamespace(c, pod.Name, pod.Namespace, timeouts.PodDelete)
if len(podPVs) > 0 {
for _, pv := range podPVs {
// As with CSI inline volumes, we use the pod delete timeout here because conceptually
// the volume deletion needs to be that fast (whatever "that" is).
framework.Logf("Wait up to %v for pod PV %s to be fully deleted", e2epod.PodDeleteTimeout, pv.Name)
e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, e2epod.PodDeleteTimeout)
framework.Logf("Wait up to %v for pod PV %s to be fully deleted", timeouts.PodDelete, pv.Name)
e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, timeouts.PodDelete)
}
}
}
@ -818,7 +820,7 @@ func prepareSnapshotDataSourceForProvisioning(
}
e2evolume.InjectContent(f, config, nil, "", tests)
snapshotResource := CreateSnapshotResource(sDriver, perTestConfig, pattern, updatedClaim.GetName(), updatedClaim.GetNamespace())
snapshotResource := CreateSnapshotResource(sDriver, perTestConfig, pattern, updatedClaim.GetName(), updatedClaim.GetNamespace(), f.Timeouts)
group := "snapshot.storage.k8s.io"
dataSourceRef := &v1.TypedLocalObjectReference{
@ -834,7 +836,7 @@ func prepareSnapshotDataSourceForProvisioning(
framework.Failf("Error deleting initClaim %q. Error: %v", updatedClaim.Name, err)
}
err = snapshotResource.CleanupResource()
err = snapshotResource.CleanupResource(f.Timeouts)
framework.ExpectNoError(err)
ginkgo.By("deleting StorageClass " + class.Name)

View File

@ -158,7 +158,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
RunInPodWithVolume(cs, pvc.Namespace, pvc.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
ginkgo.By("checking the claim")
// Get new copy of the claim
@ -203,9 +203,9 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
ginkgo.BeforeEach(func() {
var sr *SnapshotResource
cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(sr.CleanupResource())
framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
})
sr = CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace())
sr = CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace(), f.Timeouts)
vs = sr.Vs
vscontent = sr.Vscontent
vsc = sr.Vsclass
@ -275,19 +275,19 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
cleanupSteps = append(cleanupSteps, func() {
StopPod(cs, restoredPod)
})
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(cs, restoredPod.Name, restoredPod.Namespace))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
commands := e2evolume.GenerateReadFileCmd(datapath)
_, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
framework.ExpectNoError(err)
ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy")
err = DeleteAndWaitSnapshot(dc, vs.GetNamespace(), vs.GetName(), framework.Poll, framework.SnapshotDeleteTimeout)
err = DeleteAndWaitSnapshot(dc, vs.GetNamespace(), vs.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
framework.ExpectNoError(err)
switch pattern.SnapshotDeletionPolicy {
case testpatterns.DeleteSnapshot:
ginkgo.By("checking the SnapshotContent has been deleted")
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, vscontent.GetName(), framework.Poll, framework.SnapshotDeleteTimeout)
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
framework.ExpectNoError(err)
case testpatterns.RetainSnapshot:
ginkgo.By("checking the SnapshotContent has not been deleted")
@ -358,7 +358,7 @@ type SnapshotResource struct {
// CreateSnapshot creates a VolumeSnapshotClass with given SnapshotDeletionPolicy and a VolumeSnapshot
// from the VolumeSnapshotClass using a dynamic client.
// Returns the unstructured VolumeSnapshotClass and VolumeSnapshot objects.
func CreateSnapshot(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string) (*unstructured.Unstructured, *unstructured.Unstructured) {
func CreateSnapshot(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext) (*unstructured.Unstructured, *unstructured.Unstructured) {
defer ginkgo.GinkgoRecover()
var err error
if pattern.SnapshotType != testpatterns.DynamicCreatedSnapshot && pattern.SnapshotType != testpatterns.PreprovisionedCreatedSnapshot {
@ -414,13 +414,13 @@ func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured
// CreateSnapshotResource creates a snapshot resource for the current test. It knows how to deal with
// different test pattern snapshot provisioning and deletion policy
func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string) *SnapshotResource {
func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext) *SnapshotResource {
var err error
r := SnapshotResource{
Config: config,
Pattern: pattern,
}
r.Vsclass, r.Vs = CreateSnapshot(sDriver, config, pattern, pvcName, pvcNamespace)
r.Vsclass, r.Vs = CreateSnapshot(sDriver, config, pattern, pvcName, pvcNamespace, timeouts)
dc := r.Config.Framework.DynamicClient
@ -456,7 +456,7 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
framework.ExpectNoError(err)
ginkgo.By("checking the Snapshot has been deleted")
err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, r.Vs.GetName(), r.Vs.GetNamespace(), framework.Poll, framework.SnapshotDeleteTimeout)
err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, r.Vs.GetName(), r.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err)
err = dc.Resource(SnapshotContentGVR).Delete(context.TODO(), r.Vscontent.GetName(), metav1.DeleteOptions{})
@ -466,7 +466,7 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
framework.ExpectNoError(err)
ginkgo.By("checking the Snapshot content has been deleted")
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, r.Vscontent.GetName(), framework.Poll, framework.SnapshotDeleteTimeout)
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, r.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err)
ginkgo.By("creating a snapshot content with the snapshot handle")
@@ -484,7 +484,7 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
r.Vs, err = dc.Resource(SnapshotGVR).Namespace(r.Vs.GetNamespace()).Create(context.TODO(), r.Vs, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = WaitForSnapshotReady(dc, r.Vs.GetNamespace(), r.Vs.GetName(), framework.Poll, framework.SnapshotCreateTimeout)
err = WaitForSnapshotReady(dc, r.Vs.GetNamespace(), r.Vs.GetName(), framework.Poll, timeouts.SnapshotCreate)
framework.ExpectNoError(err)
ginkgo.By("getting the snapshot and snapshot content")
@@ -498,7 +498,7 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
}
// CleanupResource cleans up the snapshot resource and ignores not found errors
func (sr *SnapshotResource) CleanupResource() error {
func (sr *SnapshotResource) CleanupResource(timeouts *framework.TimeoutContext) error {
var err error
var cleanupErrs []error
@@ -532,7 +532,7 @@ func (sr *SnapshotResource) CleanupResource() error {
}
framework.ExpectNoError(err)
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, boundVsContent.GetName(), framework.Poll, framework.SnapshotDeleteTimeout)
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, boundVsContent.GetName(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err)
case apierrors.IsNotFound(err):
@@ -543,7 +543,7 @@ func (sr *SnapshotResource) CleanupResource() error {
}
framework.ExpectNoError(err)
err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, sr.Vs.GetName(), sr.Vs.GetNamespace(), framework.Poll, framework.SnapshotDeleteTimeout)
err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, sr.Vs.GetName(), sr.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err)
default:
cleanupErrs = append(cleanupErrs, err)
@@ -574,7 +574,7 @@ func (sr *SnapshotResource) CleanupResource() error {
}
framework.ExpectNoError(err)
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, sr.Vscontent.GetName(), framework.Poll, framework.SnapshotDeleteTimeout)
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, sr.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err)
case apierrors.IsNotFound(err):
// Hope the underlying physical snapshot resource has been deleted already
@@ -589,7 +589,7 @@ func (sr *SnapshotResource) CleanupResource() error {
if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting snapshot class %q. Error: %v", sr.Vsclass.GetName(), err)
}
err = utils.WaitForGVRDeletion(dc, SnapshotClassGVR, sr.Vsclass.GetName(), framework.Poll, framework.SnapshotDeleteTimeout)
err = utils.WaitForGVRDeletion(dc, SnapshotClassGVR, sr.Vsclass.GetName(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err)
}
return utilerrors.NewAggregate(cleanupErrs)
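Taken together, these changes make the snapshot helpers fully driven by the caller's framework.TimeoutContext: creation waits on SnapshotCreate and every deletion wait uses SnapshotDelete. A minimal sketch of the intended call pattern, assuming a test body where sDriver, config, pattern, f and a bound pvc already exist; only the calls shown in this file are real, the surrounding names are illustrative:

// Create the snapshot resource with the per-driver timeouts and make sure the
// cleanup path gets the same timeouts for its deletion waits.
sr := CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace(), f.Timeouts)
defer func() {
	framework.ExpectNoError(sr.CleanupResource(f.Timeouts), "while cleaning up snapshot resource")
}()
framework.Logf("VolumeSnapshot %s/%s is ready", sr.Vs.GetNamespace(), sr.Vs.GetName())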

View File

@@ -202,7 +202,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te
defer wg.Done()
framework.Logf("Deleting snapshot %s/%s", snapshot.Vs.GetNamespace(), snapshot.Vs.GetName())
err := snapshot.CleanupResource()
err := snapshot.CleanupResource(f.Timeouts)
mu.Lock()
defer mu.Unlock()
errs = append(errs, err)
@@ -275,7 +275,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te
return
default:
framework.Logf("Pod-%d [%s], Iteration %d/%d", podIndex, pod.Name, snapshotIndex, stressTest.testOptions.NumSnapshots-1)
snapshot := CreateSnapshotResource(snapshottableDriver, stressTest.config, pattern, volume.Pvc.GetName(), volume.Pvc.GetNamespace())
snapshot := CreateSnapshotResource(snapshottableDriver, stressTest.config, pattern, volume.Pvc.GetName(), volume.Pvc.GetNamespace(), f.Timeouts)
stressTest.snapshotsMutex.Lock()
defer stressTest.snapshotsMutex.Unlock()
stressTest.snapshots = append(stressTest.snapshots, snapshot)
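The stress suite forwards the same timeouts from inside its goroutines, since the shared helpers no longer read the global constants. A rough sketch of the cleanup fan-out under that assumption; wg, mu and errs are illustrative locals mirroring the hunk above:

var wg sync.WaitGroup
var mu sync.Mutex
var errs []error
for _, snapshot := range stressTest.snapshots {
	wg.Add(1)
	go func(snapshot *SnapshotResource) {
		defer ginkgo.GinkgoRecover()
		defer wg.Done()
		// Each deletion wait inside CleanupResource is bounded by SnapshotDelete.
		err := snapshot.CleanupResource(f.Timeouts)
		mu.Lock()
		defer mu.Unlock()
		errs = append(errs, err)
	}(snapshot)
}
wg.Wait()
framework.ExpectNoError(utilerrors.NewAggregate(errs), "while cleaning up snapshots")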

View File

@@ -112,7 +112,7 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("provisioning")
f := framework.NewFrameworkWithCustomTimeouts("provisioning", getDriverTimeouts(driver))
init := func() {
l = local{}
@@ -457,7 +457,7 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
}()
// Wait for pod to be running
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, l.pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for pod to be running")
// Exec into container that mounted the volume, delete subpath directory
@@ -727,7 +727,7 @@ func waitForPodSubpathError(f *framework.Framework, pod *v1.Pod, allowContainerT
return fmt.Errorf("failed to find container that uses subpath")
}
waitErr := wait.PollImmediate(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
waitErr := wait.PollImmediate(framework.Poll, f.Timeouts.PodStart, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
@@ -805,7 +805,7 @@ func testPodContainerRestartWithHooks(f *framework.Framework, pod *v1.Pod, hooks
defer func() {
e2epod.DeletePodWithWait(f.ClientSet, pod)
}()
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for pod to be running")
ginkgo.By("Failing liveness probe")
@@ -978,8 +978,7 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec,
removeUnusedContainers(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating pod")
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for pod to be running")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
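All of the per-driver suites in this commit switch from framework.NewDefaultFramework to framework.NewFrameworkWithCustomTimeouts and then consume f.Timeouts instead of the package-level constants. A condensed sketch of that pattern, with the suite name and the pod variable chosen for illustration:

// Build the framework with the driver's own TimeoutContext.
f := framework.NewFrameworkWithCustomTimeouts("provisioning", getDriverTimeouts(driver))

ginkgo.It("waits for the test pod using the driver's PodStart budget", func() {
	// pod is assumed to have been created earlier in the It body.
	err := e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
	framework.ExpectNoError(err, "while waiting for pod to be running")
})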

View File

@@ -103,7 +103,7 @@ func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("topology")
f := framework.NewFrameworkWithCustomTimeouts("topology", getDriverTimeouts(driver))
init := func() topologyTest {
@@ -176,7 +176,7 @@ func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
t.createResources(cs, &l, nil)
err = e2epod.WaitForPodRunningInNamespace(cs, l.pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err)
ginkgo.By("Verifying pod scheduled to correct node")

View File

@@ -111,7 +111,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volume-expand")
f := framework.NewFrameworkWithCustomTimeouts("volume-expand", getDriverTimeouts(driver))
init := func() {
l = local{}
@@ -179,7 +179,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
NodeSelection: l.config.ClientNodeSelection,
ImageID: e2evolume.GetDefaultTestImageID(),
}
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, framework.PodStartTimeout)
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart)
defer func() {
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
@@ -251,7 +251,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
NodeSelection: l.config.ClientNodeSelection,
ImageID: e2evolume.GetDefaultTestImageID(),
}
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, framework.PodStartTimeout)
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart)
defer func() {
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")

View File

@@ -108,7 +108,7 @@ func (t *volumeIOTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volumeio")
f := framework.NewFrameworkWithCustomTimeouts("volumeio", getDriverTimeouts(driver))
init := func() {
l = local{}
@@ -338,7 +338,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolu
}
}()
err = e2epod.WaitForPodRunningInNamespace(cs, clientPod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, clientPod.Name, clientPod.Namespace, f.Timeouts.PodStart)
if err != nil {
return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err)
}

View File

@@ -109,7 +109,7 @@ func (t *volumeStressTestSuite) DefineTests(driver TestDriver, pattern testpatte
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volume-stress")
f := framework.NewFrameworkWithCustomTimeouts("stress", getDriverTimeouts(driver))
init := func() {
cs = f.ClientSet
@@ -194,7 +194,7 @@ func (t *volumeStressTestSuite) DefineTests(driver TestDriver, pattern testpatte
framework.Failf("Failed to create pod-%v [%+v]. Error: %v", podIndex, pod, err)
}
err = e2epod.WaitForPodRunningInNamespace(cs, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
if err != nil {
l.cancel()
framework.Failf("Failed to wait for pod-%v [%+v] turn into running status. Error: %v", podIndex, pod, err)

View File

@@ -100,7 +100,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
)
// No preconditions to test. Normally they would be in a BeforeEach here.
f := framework.NewDefaultFramework("volumelimits")
f := framework.NewFrameworkWithCustomTimeouts("volumelimits", getDriverTimeouts(driver))
// This checks that CSIMaxVolumeLimitChecker works as expected.
// A randomly chosen node should be able to handle as many CSI volumes as
@@ -122,6 +122,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
l.ns = f.Namespace
l.cs = f.ClientSet
l.config, l.testCleanup = driver.PrepareTest(f)
defer l.testCleanup()
@@ -153,7 +154,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
framework.ExpectNoError(err, "while cleaning up resource")
}()
defer func() {
cleanupTest(l.cs, l.ns.Name, l.runningPod.Name, l.unschedulablePod.Name, l.pvcs, l.pvNames)
cleanupTest(l.cs, l.ns.Name, l.runningPod.Name, l.unschedulablePod.Name, l.pvcs, l.pvNames, testSlowMultiplier*f.Timeouts.PVDelete)
}()
// Create <limit> PVCs for one gigantic pod.
@@ -183,11 +184,11 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
framework.ExpectNoError(err)
ginkgo.By("Waiting for all PVCs to get Bound")
l.pvNames, err = waitForAllPVCsBound(l.cs, testSlowMultiplier*e2epv.PVBindingTimeout, l.pvcs)
l.pvNames, err = waitForAllPVCsBound(l.cs, testSlowMultiplier*f.Timeouts.PVBound, l.pvcs)
framework.ExpectNoError(err)
ginkgo.By("Waiting for the pod Running")
err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, l.runningPod.Name, l.ns.Name, testSlowMultiplier*framework.PodStartTimeout)
err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, l.runningPod.Name, l.ns.Name, testSlowMultiplier*f.Timeouts.PodStart)
framework.ExpectNoError(err)
ginkgo.By("Creating an extra pod with one volume to exceed the limit")
@@ -203,7 +204,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
framework.ExpectNoError(err, "Failed to create an extra pod with one volume to exceed the limit")
ginkgo.By("Waiting for the pod to get unschedulable with the right message")
err = e2epod.WaitForPodCondition(l.cs, l.ns.Name, l.unschedulablePod.Name, "Unschedulable", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) {
err = e2epod.WaitForPodCondition(l.cs, l.ns.Name, l.unschedulablePod.Name, "Unschedulable", f.Timeouts.PodStart, func(pod *v1.Pod) (bool, error) {
if pod.Status.Phase == v1.PodPending {
reg, err := regexp.Compile(`max.+volume.+count`)
if err != nil {
@@ -225,7 +226,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
})
}
func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulablePodName string, pvcs []*v1.PersistentVolumeClaim, pvNames sets.String) error {
func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulablePodName string, pvcs []*v1.PersistentVolumeClaim, pvNames sets.String, timeout time.Duration) error {
var cleanupErrors []string
if runningPodName != "" {
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), runningPodName, metav1.DeleteOptions{})
@@ -248,7 +249,7 @@ func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulabl
// Wait for the PVs to be deleted. It includes also pod and PVC deletion because of PVC protection.
// We use PVs to make sure that the test does not leave orphan PVs when a CSI driver is destroyed
// just after the test ends.
err := wait.Poll(5*time.Second, testSlowMultiplier*e2epv.PVDeletingTimeout, func() (bool, error) {
err := wait.Poll(5*time.Second, timeout, func() (bool, error) {
existing := 0
for _, pvName := range pvNames.UnsortedList() {
_, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
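cleanupTest no longer hard-codes e2epv.PVDeletingTimeout; the caller derives the budget from the framework and scales it for this slow test. A sketch of both sides under that assumption, with the poll body trimmed to the existence check:

// Caller: scale the driver's PVDelete budget for the slow limits test.
defer func() {
	cleanupTest(l.cs, l.ns.Name, l.runningPod.Name, l.unschedulablePod.Name, l.pvcs, l.pvNames, testSlowMultiplier*f.Timeouts.PVDelete)
}()

// Helper: wait until every PV in pvNames is gone, bounded by the passed timeout.
err := wait.Poll(5*time.Second, timeout, func() (bool, error) {
	existing := 0
	for _, pvName := range pvNames.UnsortedList() {
		_, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
		if err == nil {
			existing++
		} else if !apierrors.IsNotFound(err) {
			return false, err
		}
	}
	return existing == 0, nil
})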

View File

@@ -102,7 +102,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volumemode")
f := framework.NewFrameworkWithCustomTimeouts("volumemode", getDriverTimeouts(driver))
init := func() {
l = local{}
@@ -209,7 +209,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), l.Pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create pvc")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, l.ns.Name, l.Pv, l.Pvc), "Failed to bind pv and pvc")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, f.Timeouts, l.ns.Name, l.Pv, l.Pvc), "Failed to bind pv and pvc")
ginkgo.By("Creating pod")
podConfig := e2epod.Config{
@@ -236,7 +236,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
}.AsSelector().String()
msg := "Unable to attach or mount volumes"
err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.PodStartTimeout)
err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.PodStart)
// Events are unreliable, don't depend on the event. It's used only to speed up the test.
if err != nil {
framework.Logf("Warning: did not get event about FailedMountVolume")
@@ -273,7 +273,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
}.AsSelector().String()
msg := "does not support block volume provisioning"
err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.ClaimProvisionTimeout)
err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.ClaimProvision)
// Events are unreliable, don't depend on the event. It's used only to speed up the test.
if err != nil {
framework.Logf("Warning: did not get event about provisioing failed")
@@ -332,7 +332,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
} else {
msg = "has volumeMode Filesystem, but is specified in volumeDevices"
}
err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.PodStartTimeout)
err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.PodStart)
// Events are unreliable, don't depend on them. They're used only to speed up the test.
if err != nil {
framework.Logf("Warning: did not get event about mismatched volume use")

View File

@@ -120,7 +120,7 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
// registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volume")
f := framework.NewFrameworkWithCustomTimeouts("volume", getDriverTimeouts(driver))
init := func() {
l = local{}

View File

@@ -106,7 +106,7 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
}
pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), hostExecPod, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(cs, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err)
return pod
}

View File

@@ -325,7 +325,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, framework.PodDeleteTimeout)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
if err != nil {
framework.ExpectNoError(err, "Expected pod to be not found.")
}
@@ -411,7 +411,7 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, framework.PodDeleteTimeout)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
framework.ExpectNoError(err, "Expected pod to be not found.")
if forceDelete {

View File

@@ -63,6 +63,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
test := testsuites.StorageClassTest{
Name: "default",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
}
@@ -125,7 +126,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(c, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod %s", pod.Name)
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
@@ -185,7 +186,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStartShort)
framework.ExpectError(err)
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
@@ -211,7 +212,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(c, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -268,7 +269,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(c, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -299,7 +300,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(c, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@@ -336,7 +337,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
// Create pod
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(c, pod)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
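The metrics tests use two different pod-start budgets: PodStart where the pod must come up, and PodStartShort where the pod is expected to stay Pending and the timed-out wait is itself the assertion. A condensed sketch of the two branches, with pod and c as in the hunks above:

// Pod is expected to run: wait the full PodStart budget.
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod %s", pod.Name)

// Pod is expected to stay Pending (invalid claim): use the short budget and
// assert that the wait times out.
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStartShort)
framework.ExpectError(err)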

View File

@@ -140,11 +140,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// filled in BeforeEach
var c clientset.Interface
var timeouts *framework.TimeoutContext
var ns string
ginkgo.BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
timeouts = f.Timeouts
})
ginkgo.Describe("DynamicProvisioner [Slow]", func() {
@@ -157,6 +159,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "SSD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-ssd",
@@ -175,6 +178,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "HDD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",
@@ -193,6 +197,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "gp2 EBS on AWS",
CloudProviders: []string{"aws"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "gp2",
@@ -211,6 +216,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "io1 EBS on AWS",
CloudProviders: []string{"aws"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "io1",
@@ -229,6 +235,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "sc1 EBS on AWS",
CloudProviders: []string{"aws"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "sc1",
@@ -246,6 +253,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "st1 EBS on AWS",
CloudProviders: []string{"aws"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "st1",
@@ -263,6 +271,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "encrypted EBS on AWS",
CloudProviders: []string{"aws"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"encrypted": "true",
@@ -281,6 +290,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "generic Cinder volume on OpenStack",
CloudProviders: []string{"openstack"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/cinder",
Parameters: map[string]string{},
ClaimSize: "1.5Gi",
@@ -292,6 +302,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "Cinder volume with empty volume type and zone on OpenStack",
CloudProviders: []string{"openstack"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/cinder",
Parameters: map[string]string{
"type": "",
@@ -307,6 +318,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "generic vSphere volume",
CloudProviders: []string{"vsphere"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/vsphere-volume",
Parameters: map[string]string{},
ClaimSize: "1.5Gi",
@@ -319,6 +331,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{
Name: "Azure disk volume with empty sku and location",
CloudProviders: []string{"azure"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/azure-disk",
Parameters: map[string]string{},
ClaimSize: "1Gi",
@@ -384,6 +397,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Name: "HDD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Timeouts: f.Timeouts,
Parameters: map[string]string{
"type": "pd-standard",
},
@@ -452,6 +466,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{
Name: "unmanaged_zone",
Provisioner: "kubernetes.io/gce-pd",
Timeouts: f.Timeouts,
Parameters: map[string]string{"zone": unmanagedZone},
ClaimSize: "1Gi",
}
@@ -473,7 +488,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}()
// The claim should timeout phase:Pending
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, timeouts.ClaimProvisionShort)
framework.ExpectError(err)
framework.Logf(err.Error())
})
@@ -492,6 +507,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{
Name: "deletion race",
Provisioner: "", // Use a native one based on current cloud provider
Timeouts: f.Timeouts,
ClaimSize: "1Gi",
}
@@ -572,7 +588,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.ExpectNoError(err)
ginkgo.By("waiting for the PV to get Released")
err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, e2epv.PVReclaimingTimeout)
err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, timeouts.PVReclaim)
framework.ExpectNoError(err)
ginkgo.By("deleting the PD")
@@ -587,7 +603,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.ExpectNoError(err)
ginkgo.By("waiting for the PV to get deleted")
err = e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, e2epv.PVDeletingTimeout)
err = e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, timeouts.PVDelete)
framework.ExpectNoError(err)
})
})
@@ -636,6 +652,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Client: c,
Name: "external provisioner test",
Provisioner: externalPluginName,
Timeouts: f.Timeouts,
ClaimSize: "1500Mi",
ExpectedSize: "1500Mi",
}
@@ -659,6 +676,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{
Client: c,
Name: "default",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
}
@@ -679,6 +697,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{
Name: "default",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
}
@@ -716,6 +735,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{
Name: "default",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
}
@@ -756,6 +776,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Client: c,
Name: "Gluster Dynamic provisioner test",
Provisioner: "kubernetes.io/glusterfs",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
Parameters: map[string]string{"resturl": serverURL},
@@ -780,6 +801,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{
Name: "AWS EBS with invalid KMS key",
Provisioner: "kubernetes.io/aws-ebs",
Timeouts: f.Timeouts,
ClaimSize: "2Gi",
Parameters: map[string]string{"kmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/55555555-5555-5555-5555-555555555555"},
}
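Two ideas repeat throughout this file: each testsuites.StorageClassTest now carries the framework's timeouts, and the BeforeEach captures f.Timeouts into the local timeouts variable so the PV/PVC waits can use ClaimProvisionShort, PVReclaim and PVDelete instead of the global constants. A minimal sketch of both, with illustrative names where the hunks above do not show them:

test := testsuites.StorageClassTest{
	Name:      "example entry (illustrative)",
	Timeouts:  f.Timeouts,
	ClaimSize: "2Gi",
}

// Claim expected to stay Pending: the short provisioning budget is enough.
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, timeouts.ClaimProvisionShort)
framework.ExpectError(err)

// Released and deleted PVs are waited on with the reclaim and delete budgets.
err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, timeouts.PVReclaim)
framework.ExpectNoError(err)
err = e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, timeouts.PVDelete)
framework.ExpectNoError(err)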

View File

@@ -96,7 +96,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func()
ginkgo.By("Creating the PV and PVC")
pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))
ginkgo.By("Creating the Client Pod")
clientPod, err = e2epod.CreateClientPod(c, ns, pvc)

View File

@@ -110,7 +110,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
framework.ExpectNoError(err)
// Wait for PV and PVC to Bind
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))
ginkgo.By("Creating the Pod")
pod, err := e2epod.CreateClientPod(c, ns, pvc)
@@ -196,7 +196,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
framework.ExpectNoError(err)
ginkgo.By("wait for the pv and pvc to bind")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))
verifyContentOfVSpherePV(c, pvc, volumeFileContent)
})
@@ -243,7 +243,7 @@ func deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolu
var err error
ginkgo.By("wait for the pv and pvc to bind")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))
ginkgo.By("delete pvc")
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)

View File

@@ -87,7 +87,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele
framework.ExpectNoError(err)
ginkgo.By("wait for the pvcSsd to bind with pvSsd")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pvSsd, pvcSsd))
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pvSsd, pvcSsd))
ginkgo.By("Verify status of pvcVvol is pending")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvcVvol.Name, 3*time.Second, 300*time.Second)

View File

@@ -149,7 +149,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
volumeCountPerInstance = volumeCount
}
volumeCount = volumeCount - volumeCountPerInstance
go VolumeCreateAndAttach(client, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
go VolumeCreateAndAttach(client, f.Timeouts, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
}
// Get the list of all volumes attached to each node from the go routines by reading the data from the channel
@@ -189,7 +189,7 @@ func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
}
// VolumeCreateAndAttach peforms create and attach operations of vSphere persistent volumes at scale
func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storagev1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
func VolumeCreateAndAttach(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, sc []*storagev1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
defer ginkgo.GinkgoRecover()
nodeVolumeMap := make(map[string][]string)
nodeSelectorIndex := 0
@@ -206,7 +206,7 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s
}
ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
framework.ExpectNoError(err)
ginkgo.By("Creating pod to attach PV to the node")

View File

@@ -23,7 +23,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
@@ -140,7 +140,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name))
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name))
@@ -149,7 +149,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))
err = e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, pod.Name, f.Namespace.Name)
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow)
framework.ExpectNoError(err)
// Get the copy of the Pod to know the assigned node name.

View File

@@ -125,7 +125,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
}()
ginkgo.By("Waiting for claim to be in bound phase")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
// Get new copy of the claim

View File

@@ -103,7 +103,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam
// Create Persistent Volume
ginkgo.By("Creating Storage Class With Fstype")
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)
pvclaim, persistentvolumes := createVolume(client, f.Timeouts, namespace, scParameters)
// Create Pod and verify the persistent volume is accessible
pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes)
@@ -122,7 +122,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
// Create Persistent Volume
ginkgo.By("Creating Storage Class With Invalid Fstype")
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)
pvclaim, persistentvolumes := createVolume(client, f.Timeouts, namespace, scParameters)
ginkgo.By("Creating pod to attach PV to the node")
var pvclaims []*v1.PersistentVolumeClaim
@@ -150,7 +150,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
framework.ExpectEqual(isFound, true, "Unable to verify MountVolume.MountDevice failure")
}
func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
func createVolume(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("fstype", scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
@@ -162,7 +162,7 @@ func createVolume(client clientset.Interface, namespace string, scParameters map
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
framework.ExpectNoError(err)
return pvclaim, persistentvolumes
}

View File

@@ -92,7 +92,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
ginkgo.By("Waiting for PVC to be in bound phase")
pvclaims := []*v1.PersistentVolumeClaim{pvclaim}
pvs, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
pvs, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err, fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err))
volumePath := pvs[0].Spec.VsphereVolume.VolumePath

View File

@@ -24,7 +24,7 @@ import (
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
@@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
}
ginkgo.By("Waiting for all claims to be in bound phase")
persistentvolumes, err = e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err = e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
ginkgo.By("Creating pod to attach PVs to the node")

View File

@@ -184,7 +184,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
totalpvclaims = append(totalpvclaims, pvclaims)
}
for _, pvclaims := range totalpvclaims {
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
totalpvs = append(totalpvs, persistentvolumes)
}

View File

@@ -269,7 +269,7 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
ginkgo.By("Creating pod to attach PV to the node")

View File

@@ -127,14 +127,14 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
ginkgo.It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s", zoneA))
zones = append(zones, zoneA)
verifyPVZoneLabels(client, namespace, nil, zones)
verifyPVZoneLabels(client, f.Timeouts, namespace, nil, zones)
})
ginkgo.It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s, %s", zoneA, zoneB))
zones = append(zones, zoneA)
zones = append(zones, zoneB)
verifyPVZoneLabels(client, namespace, nil, zones)
verifyPVZoneLabels(client, f.Timeouts, namespace, nil, zones)
})
ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func() {
@@ -151,21 +151,21 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s", zoneA))
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, nil, zones, "")
})
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB))
zones = append(zones, zoneA)
zones = append(zones, zoneB)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, nil, zones, "")
})
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, vsanDatastore1))
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
})
ginkgo.It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func() {
@@ -183,14 +183,14 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
})
ginkgo.It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy
zones = append(zones, zoneB)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
})
ginkgo.It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func() {
@@ -209,7 +209,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
scParameters[SpbmStoragePolicy] = compatPolicy
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
})
ginkgo.It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func() {
@@ -314,40 +314,40 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
})
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones specified in storage class when the datastore under the zone is present in another datacenter", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneD))
zones = append(zones, zoneD)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
})
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class when there are multiple datastores with the same name under different zones across datacenters", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore name :%s", zoneD, localDatastore))
scParameters[Datastore] = localDatastore
zones = append(zones, zoneD)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
})
ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and storage policy :%s", compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, nil, storagev1.VolumeBindingWaitForFirstConsumer)
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, nil, storagev1.VolumeBindingWaitForFirstConsumer)
})
ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with allowedTopologies", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode, storage policy :%s and zone :%s", compatPolicy, zoneA))
scParameters[SpbmStoragePolicy] = compatPolicy
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer)
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer)
})
ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with multiple allowedTopologies", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and zones : %s, %s", zoneA, zoneB))
zones = append(zones, zoneA)
zones = append(zones, zoneB)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones, storagev1.VolumeBindingWaitForFirstConsumer)
verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, nil, zones, storagev1.VolumeBindingWaitForFirstConsumer)
})
ginkgo.It("Verify a PVC creation fails when multiple zones are specified in the storage class without shared datastores among the zones in waitForFirstConsumer binding mode", func() {
@@ -375,7 +375,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
})
})
func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) {
func verifyPVCAndPodCreationSucceeds(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
@@ -391,7 +391,7 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin
var persistentvolumes []*v1.PersistentVolume
// If WaitForFirstConsumer mode, verify pvc binding status after pod creation. For immediate mode, do now.
if volumeBindingMode != storagev1.VolumeBindingWaitForFirstConsumer {
persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
}
ginkgo.By("Creating pod to attach PV to the node")
@@ -399,7 +399,7 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin
framework.ExpectNoError(err)
if volumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
}
if zones != nil {
@@ -499,7 +499,7 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
}
func verifyPVZoneLabels(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) {
func verifyPVZoneLabels(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", nil, zones, ""), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
@@ -512,7 +512,7 @@ func verifyPVZoneLabels(client clientset.Interface, namespace string, scParamete
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
framework.ExpectNoError(err)
ginkgo.By("Verify zone information is present in the volume labels")