e2e: use custom timeouts in all storage E2E tests

Fabio Bertinatto 2020-10-22 16:04:35 +02:00
parent f6e900f468
commit c82626f96f
50 changed files with 239 additions and 193 deletions
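The change repeated across the hunks below is mechanical: helpers stop reading package-level constants such as framework.PodStartTimeout, ClaimBindingTimeout or PVReclaimingTimeout and instead receive the per-suite *framework.TimeoutContext (usually f.Timeouts), either as an explicit parameter or through a Timeouts field on structs like StorageClassTest and EphemeralTest. Below is a minimal caller-side sketch, not part of the commit, assuming the TimeoutContext fields and helper signatures shown in the hunks; the import paths follow the usual test/e2e/framework layout.

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForTestPod waits for a pod using the suite-scoped timeout instead of the
// old package-level framework.PodStartTimeout constant.
func waitForTestPod(f *framework.Framework, podName string) error {
	// f.Timeouts is a *framework.TimeoutContext; PodStart is one of its fields.
	return e2epod.WaitTimeoutForPodRunningInNamespace(
		f.ClientSet, podName, f.Namespace.Name, f.Timeouts.PodStart)
}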

View File

@@ -494,7 +494,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, f.Namespace.Name, false)
 framework.ExpectNoError(err)
-framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Namespace.Name, pv, pvc))
+framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, f.Namespace.Name, pv, pvc))
 defer func() {
 errs := e2epv.PVPVCCleanup(c, f.Namespace.Name, pv, pvc)

View File

@@ -212,7 +212,7 @@ func DeletePersistentVolumeClaim(c clientset.Interface, pvcName string, ns strin
 // DeletePVCandValidatePV deletes the PVC and waits for the PV to enter its expected phase. Validate that the PV
 // has been reclaimed (assumption here about reclaimPolicy). Caller tells this func which
 // phase value to expect for the pv bound to the to-be-deleted claim.
-func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
+func DeletePVCandValidatePV(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expectPVPhase v1.PersistentVolumePhase) error {
 pvname := pvc.Spec.VolumeName
 framework.Logf("Deleting PVC %v to trigger reclamation of PV %v", pvc.Name, pvname)
 err := DeletePersistentVolumeClaim(c, pvc.Name, ns)
@@ -222,7 +222,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
 // Wait for the PV's phase to return to be `expectPVPhase`
 framework.Logf("Waiting for reclaim process to complete.")
-err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, PVReclaimingTimeout)
+err = WaitForPersistentVolumePhase(expectPVPhase, c, pv.Name, framework.Poll, timeouts.PVReclaim)
 if err != nil {
 return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
 }
@@ -255,7 +255,7 @@ func DeletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.Persistent
 // Available, Bound).
 // Note: if there are more claims than pvs then some of the remaining claims may bind to just made
 // available pvs.
-func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error {
+func DeletePVCandValidatePVGroup(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, expectPVPhase v1.PersistentVolumePhase) error {
 var boundPVs, deletedPVCs int
 for pvName := range pvols {
@@ -276,7 +276,7 @@ func DeletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols PVMap,
 // get the pvc for the delete call below
 pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(context.TODO(), cr.Name, metav1.GetOptions{})
 if err == nil {
-if err = DeletePVCandValidatePV(c, ns, pvc, pv, expectPVPhase); err != nil {
+if err = DeletePVCandValidatePV(c, timeouts, ns, pvc, pv, expectPVPhase); err != nil {
 return err
 }
 } else if !apierrors.IsNotFound(err) {
@@ -434,17 +434,17 @@ func CreatePVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns string, pvConf
 }
 // WaitOnPVandPVC waits for the pv and pvc to bind to each other.
-func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
+func WaitOnPVandPVC(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
 // Wait for newly created PVC to bind to the PV
 framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
-err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, ClaimBindingTimeout)
+err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, framework.Poll, timeouts.ClaimBound)
 if err != nil {
 return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
 }
 // Wait for PersistentVolume.Status.Phase to be Bound, which it should be
 // since the PVC is already bound.
-err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, framework.Poll, PVBindingTimeout)
+err = WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, framework.Poll, timeouts.PVBound)
 if err != nil {
 return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err)
 }
@@ -482,7 +482,7 @@ func WaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, p
 // to situations where the maximum wait times are reached several times in succession,
 // extending test time. Thus, it is recommended to keep the delta between PVs and PVCs
 // small.
-func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PVCMap, testExpected bool) error {
+func WaitAndVerifyBinds(c clientset.Interface, timeouts *framework.TimeoutContext, ns string, pvols PVMap, claims PVCMap, testExpected bool) error {
 var actualBinds int
 expectedBinds := len(pvols)
 if expectedBinds > len(claims) { // want the min of # pvs or #pvcs
@@ -490,7 +490,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
 }
 for pvName := range pvols {
-err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, PVBindingTimeout)
+err := WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, framework.Poll, timeouts.PVBound)
 if err != nil && len(pvols) > len(claims) {
 framework.Logf("WARN: pv %v is not bound after max wait", pvName)
 framework.Logf(" This may be ok since there are more pvs than pvcs")
@@ -513,7 +513,7 @@ func WaitAndVerifyBinds(c clientset.Interface, ns string, pvols PVMap, claims PV
 return fmt.Errorf("internal: claims map is missing pvc %q", pvcKey)
 }
-err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, framework.Poll, ClaimBindingTimeout)
+err := WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, framework.Poll, timeouts.ClaimBound)
 if err != nil {
 return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err)
 }
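For reference, a hedged caller-side sketch of the updated e2epv helpers above; the signatures come from this hunk, while the wrapper function itself is illustrative only and not part of the commit.

package example

import (
	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
)

// bindAndReclaim binds a pre-created PV/PVC pair and then deletes the claim,
// threading the suite's TimeoutContext through to the helpers.
func bindAndReclaim(f *framework.Framework, c clientset.Interface, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) error {
	// Binding now honours f.Timeouts.ClaimBound and f.Timeouts.PVBound.
	if err := e2epv.WaitOnPVandPVC(c, f.Timeouts, f.Namespace.Name, pv, pvc); err != nil {
		return err
	}
	// Reclaim waits up to f.Timeouts.PVReclaim for the PV to reach the expected phase.
	return e2epv.DeletePVCandValidatePV(c, f.Timeouts, f.Namespace.Name, pvc, pv, v1.VolumeReleased)
}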

View File

@@ -362,7 +362,7 @@ func TestServerCleanup(f *framework.Framework, config TestConfig) {
 gomega.Expect(err).To(gomega.BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
 }
-func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) {
+func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutContext, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) {
 ginkgo.By(fmt.Sprint("starting ", config.Prefix, "-", podSuffix))
 var gracePeriod int64 = 1
 var command string
@@ -439,13 +439,13 @@ func runVolumeTesterPod(client clientset.Interface, config TestConfig, podSuffix
 return nil, err
 }
 if slow {
-err = e2epod.WaitForPodRunningInNamespaceSlow(client, clientPod.Name, clientPod.Namespace)
+err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStartSlow)
 } else {
-err = e2epod.WaitForPodRunningInNamespace(client, clientPod)
+err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStart)
 }
 if err != nil {
 e2epod.DeletePodOrFail(client, clientPod.Namespace, clientPod.Name)
-e2epod.WaitForPodToDisappear(client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
+e2epod.WaitForPodToDisappear(client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
 return nil, err
 }
 return clientPod, nil
@@ -514,13 +514,14 @@ func TestVolumeClientSlow(f *framework.Framework, config TestConfig, fsGroup *in
 }
 func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test, slow bool) {
-clientPod, err := runVolumeTesterPod(f.ClientSet, config, "client", false, fsGroup, tests, slow)
+timeouts := f.Timeouts
+clientPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "client", false, fsGroup, tests, slow)
 if err != nil {
 framework.Failf("Failed to create client pod: %v", err)
 }
 defer func() {
 e2epod.DeletePodOrFail(f.ClientSet, clientPod.Namespace, clientPod.Name)
-e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
+e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
 }()
 testVolumeContent(f, clientPod, fsGroup, fsType, tests)
@@ -531,17 +532,18 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
 // The volume must be writable.
 func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
 privileged := true
+timeouts := f.Timeouts
 if framework.NodeOSDistroIs("windows") {
 privileged = false
 }
-injectorPod, err := runVolumeTesterPod(f.ClientSet, config, "injector", privileged, fsGroup, tests, false /*slow*/)
+injectorPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "injector", privileged, fsGroup, tests, false /*slow*/)
 if err != nil {
 framework.Failf("Failed to create injector pod: %v", err)
 return
 }
 defer func() {
 e2epod.DeletePodOrFail(f.ClientSet, injectorPod.Namespace, injectorPod.Name)
-e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout)
+e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
 }()
 ginkgo.By("Writing text file contents in the container.")

View File

@@ -231,7 +231,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
 ginkgo.By("Waiting for all PVCs to be bound")
 for _, config := range configs {
-e2epv.WaitOnPVandPVC(c, ns, config.pv, config.pvc)
+e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, config.pv, config.pvc)
 }
 ginkgo.By("Creating pods for each static PV")

View File

@@ -194,6 +194,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }
 scTest := testsuites.StorageClassTest{
 Name: m.driver.GetDriverInfo().Name,
+Timeouts: f.Timeouts,
 Provisioner: sc.Provisioner,
 Parameters: sc.Parameters,
 ClaimSize: "1Gi",
@@ -383,7 +384,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }.AsSelector().String()
 msg := "AttachVolume.Attach failed for volume"
-err = e2eevents.WaitTimeoutForEvent(m.cs, pod.Namespace, eventSelector, msg, framework.PodStartTimeout)
+err = e2eevents.WaitTimeoutForEvent(m.cs, pod.Namespace, eventSelector, msg, f.Timeouts.PodStart)
 if err != nil {
 podErr := e2epod.WaitTimeoutForPodRunningInNamespace(m.cs, pod.Name, pod.Namespace, 10*time.Second)
 framework.ExpectError(podErr, "Pod should not be in running status because attaching should failed")
@@ -504,7 +505,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 csiInlineVolumesEnabled := test.expectEphemeral
 if test.expectPodInfo {
 ginkgo.By("checking for CSIInlineVolumes feature")
-csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(m.cs, f.Namespace.Name)
+csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(m.cs, f.Timeouts, f.Namespace.Name)
 framework.ExpectNoError(err, "failed to test for CSIInlineVolumes")
 }
@@ -1187,7 +1188,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 sc, _, pod := createPod(false /* persistent volume, late binding as specified above */)
 framework.ExpectEqual(sc.Name, scName, "pre-selected storage class name not used")
-waitCtx, cancel := context.WithTimeout(context.Background(), podStartTimeout)
+waitCtx, cancel := context.WithTimeout(context.Background(), f.Timeouts.PodStart)
 defer cancel()
 condition := anyOf(
 podRunning(waitCtx, f.ClientSet, pod.Name, pod.Namespace),
@@ -1271,7 +1272,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 ginkgo.By("Creating snapshot")
 // TODO: Test VolumeSnapshots with Retain policy
-snapshotClass, snapshot := testsuites.CreateSnapshot(sDriver, m.config, testpatterns.DynamicSnapshotDelete, claim.Name, claim.Namespace)
+snapshotClass, snapshot := testsuites.CreateSnapshot(sDriver, m.config, testpatterns.DynamicSnapshotDelete, claim.Name, claim.Namespace, f.Timeouts)
 framework.ExpectNoError(err, "failed to create snapshot")
 m.vsc[snapshotClass.GetName()] = snapshotClass
 volumeSnapshotName := snapshot.GetName()

View File

@@ -86,6 +86,7 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
 test := testsuites.StorageClassTest{
 Name: "flexvolume-resize",
+Timeouts: f.Timeouts,
 ClaimSize: "2Gi",
 AllowVolumeExpansion: true,
 Provisioner: "flex-expand",

View File

@@ -79,6 +79,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
 test := testsuites.StorageClassTest{
 Name: "flexvolume-resize",
+Timeouts: f.Timeouts,
 ClaimSize: "2Gi",
 AllowVolumeExpansion: true,
 Provisioner: "flex-expand",

View File

@@ -93,6 +93,7 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
 var err error
 test := testsuites.StorageClassTest{
 Name: "default",
+Timeouts: f.Timeouts,
 ClaimSize: "2Gi",
 }
 pvc := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
@@ -112,7 +113,7 @@ func createPodPVCFromSC(f *framework.Framework, c clientset.Interface, ns string
 PVCs: pvcClaims,
 SeLinuxLabel: e2epv.SELinuxLabel,
 }
-pod, err := e2epod.CreateSecPod(c, &podConfig, framework.PodStartTimeout)
+pod, err := e2epod.CreateSecPod(c, &podConfig, f.Timeouts.PodStart)
 framework.ExpectNoError(err, "While creating pods for kubelet restart test")
 return pod, pvc, pvs[0]
 }

View File

@@ -462,7 +462,7 @@ func verifyPodHostPathTypeFailure(f *framework.Framework, nodeSelector map[strin
 }.AsSelector().String()
 msg := "hostPath type check failed"
-err = e2eevents.WaitTimeoutForEvent(f.ClientSet, f.Namespace.Name, eventSelector, msg, framework.PodStartTimeout)
+err = e2eevents.WaitTimeoutForEvent(f.ClientSet, f.Namespace.Name, eventSelector, msg, f.Timeouts.PodStart)
 // Events are unreliable, don't depend on the event. It's used only to speed up the test.
 if err != nil {
 framework.Logf("Warning: did not get event about FailedMountVolume")
@@ -480,7 +480,7 @@ func verifyPodHostPathType(f *framework.Framework, nodeSelector map[string]strin
 newPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(),
 newHostPathTypeTestPod(nodeSelector, hostDir, "/mnt/test", hostPathType), metav1.CreateOptions{})
 framework.ExpectNoError(err)
-framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, newPod.Name, newPod.Namespace, framework.PodStartShortTimeout))
+framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, newPod.Name, newPod.Namespace, f.Timeouts.PodStartShort))
 f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), newPod.Name, *metav1.NewDeleteOptions(0))
 }

View File

@@ -78,6 +78,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() {
 test := testsuites.StorageClassTest{
 Name: "default",
+Timeouts: f.Timeouts,
 ClaimSize: "2Gi",
 AllowVolumeExpansion: true,
 DelayBinding: true,

View File

@@ -163,7 +163,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 }
 pv1, pvc1, err = e2epv.CreatePVPVC(c, pvConfig1, pvcConfig, ns, false)
 framework.ExpectNoError(err)
-framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv1, pvc1))
+framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv1, pvc1))
 ginkgo.By("Initializing second PD with PVPVC binding")
 pvSource2, diskName2 = createGCEVolume()
@@ -176,7 +176,7 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
 }
 pv2, pvc2, err = e2epv.CreatePVPVC(c, pvConfig2, pvcConfig, ns, false)
 framework.ExpectNoError(err)
-framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv2, pvc2))
+framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv2, pvc2))
 ginkgo.By("Attaching both PVC's to a single pod")
 clientPod, err = e2epod.CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "")
@@ -312,7 +312,7 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig e2epv.
 e2epod.DeletePodWithWait(c, pod)
 }
 }()
-err = e2epod.WaitForPodRunningInNamespace(c, pod)
+err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
 framework.ExpectNoError(err, fmt.Sprintf("Pod %q timed out waiting for phase: Running", pod.Name))
 // Return created api objects
 pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})

View File

@@ -148,7 +148,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 fmtPod = testPDPod([]string{diskName}, host0Name, false, 1)
 _, err = podClient.Create(context.TODO(), fmtPod, metav1.CreateOptions{})
 framework.ExpectNoError(err, "Failed to create fmtPod")
-framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, fmtPod.Name, f.Namespace.Name))
+framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, fmtPod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
 ginkgo.By("deleting the fmtPod")
 framework.ExpectNoError(podClient.Delete(context.TODO(), fmtPod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete fmtPod")
@@ -176,7 +176,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 ginkgo.By("creating host0Pod on node0")
 _, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{})
 framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
-framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, host0Pod.Name, f.Namespace.Name))
+framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
 framework.Logf("host0Pod: %q, node0: %q", host0Pod.Name, host0Name)
 var containerName, testFile, testFileContents string
@@ -200,7 +200,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 ginkgo.By("creating host1Pod on node1")
 _, err = podClient.Create(context.TODO(), host1Pod, metav1.CreateOptions{})
 framework.ExpectNoError(err, "Failed to create host1Pod")
-framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, host1Pod.Name, f.Namespace.Name))
+framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host1Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
 framework.Logf("host1Pod: %q, node1: %q", host1Pod.Name, host1Name)
 if readOnly {
@@ -282,7 +282,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 host0Pod = testPDPod(diskNames, host0Name, false /* readOnly */, numContainers)
 _, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{})
 framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
-framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, host0Pod.Name, f.Namespace.Name))
+framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
 ginkgo.By(fmt.Sprintf("writing %d file(s) via a container", numPDs))
 containerName := "mycontainer"
@@ -385,7 +385,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 _, err = podClient.Create(context.TODO(), host0Pod, metav1.CreateOptions{})
 framework.ExpectNoError(err, fmt.Sprintf("Failed to create host0Pod: %v", err))
 ginkgo.By("waiting for host0Pod to be running")
-framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, host0Pod.Name, f.Namespace.Name))
+framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, host0Pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
 ginkgo.By("writing content to host0Pod")
 testFile := "/testpd1/tracker"
@@ -474,7 +474,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() {
 ginkgo.By("Creating test pod with same volume")
 _, err = podClient.Create(context.TODO(), pod, metav1.CreateOptions{})
 framework.ExpectNoError(err, "Failed to create pod")
-framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, pod.Name, f.Namespace.Name))
+framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow))
 ginkgo.By("deleting the pod")
 framework.ExpectNoError(podClient.Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0)), "Failed to delete pod")

View File

@@ -44,11 +44,11 @@ func verifyGCEDiskAttached(diskName string, nodeName types.NodeName) bool {
 }
 // initializeGCETestSpec creates a PV, PVC, and ClientPod that will run until killed by test or clean up.
-func initializeGCETestSpec(c clientset.Interface, ns string, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
+func initializeGCETestSpec(c clientset.Interface, t *framework.TimeoutContext, ns string, pvConfig e2epv.PersistentVolumeConfig, pvcConfig e2epv.PersistentVolumeClaimConfig, isPrebound bool) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
 ginkgo.By("Creating the PV and PVC")
 pv, pvc, err := e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, isPrebound)
 framework.ExpectNoError(err)
-framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
+framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, t, ns, pv, pvc))
 ginkgo.By("Creating the Client Pod")
 clientPod, err := e2epod.CreateClientPod(c, ns, pvc)
@@ -103,7 +103,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() {
 Selector: selector,
 StorageClassName: &emptyStorageClass,
 }
-clientPod, pv, pvc = initializeGCETestSpec(c, ns, pvConfig, pvcConfig, false)
+clientPod, pv, pvc = initializeGCETestSpec(c, f.Timeouts, ns, pvConfig, pvcConfig, false)
 node = types.NodeName(clientPod.Spec.NodeName)
 })

View File

@@ -54,6 +54,7 @@ type localTestConfig struct {
 nodes []v1.Node
 node0 *v1.Node
 client clientset.Interface
+timeouts *framework.TimeoutContext
 scName string
 discoveryDir string
 hostExec utils.HostExec
@@ -165,6 +166,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 config = &localTestConfig{
 ns: f.Namespace.Name,
 client: f.ClientSet,
+timeouts: f.Timeouts,
 nodes: nodes.Items,
 node0: node0,
 scName: scName,
@@ -312,7 +314,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 createLocalPVCsPVs(config, []*localTestVolume{testVol}, immediateMode)
 pod, err := createLocalPod(config, testVol, nil)
 framework.ExpectError(err)
-err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
+err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, f.Timeouts.PodStartShort)
 framework.ExpectError(err)
 cleanupLocalPVCsPVs(config, []*localTestVolume{testVol})
 })
@@ -329,7 +331,7 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 pod, err := config.client.CoreV1().Pods(config.ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 framework.ExpectNoError(err)
-err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, framework.PodStartShortTimeout)
+err = e2epod.WaitTimeoutForPodRunningInNamespace(config.client, pod.Name, pod.Namespace, f.Timeouts.PodStartShort)
 framework.ExpectError(err)
 cleanupLocalVolumes(config, []*localTestVolume{testVol})
@@ -854,7 +856,7 @@ func cleanupLocalVolumes(config *localTestConfig, volumes []*localTestVolume) {
 }
 func verifyLocalVolume(config *localTestConfig, volume *localTestVolume) {
-framework.ExpectNoError(e2epv.WaitOnPVandPVC(config.client, config.ns, volume.pv, volume.pvc))
+framework.ExpectNoError(e2epv.WaitOnPVandPVC(config.client, config.timeouts, config.ns, volume.pv, volume.pvc))
 }
 func verifyLocalPod(config *localTestConfig, volume *localTestVolume, pod *v1.Pod, expectedNodeName string) {
@@ -1031,7 +1033,7 @@ func createLocalPod(config *localTestConfig, volume *localTestVolume, fsGroup *i
 SeLinuxLabel: selinuxLabel,
 FsGroup: fsGroup,
 }
-return e2epod.CreateSecPod(config.client, &podConfig, framework.PodStartShortTimeout)
+return e2epod.CreateSecPod(config.client, &podConfig, config.timeouts.PodStartShort)
 }
 func createWriteCmd(testDir string, testFile string, writeTestFileContent string, volumeType localVolumeType) string {

View File

@@ -44,7 +44,7 @@ import (
 func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
 // 1. verify that the PV and PVC have bound correctly
 ginkgo.By("Validating the PV-PVC binding")
-framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc))
+framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))
 // 2. create the nfs writer pod, test if the write was successful,
 // then delete the pod and verify that it was deleted
@@ -53,7 +53,7 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
 // 3. delete the PVC, wait for PV to become "Released"
 ginkgo.By("Deleting the PVC to invoke the reclaim policy.")
-framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeReleased))
+framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, f.Timeouts, ns, pvc, pv, v1.VolumeReleased))
 }
 // Validate pairs of PVs and PVCs, create and verify writer pod, delete PVC and validate
@@ -87,7 +87,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
 // 2. delete each PVC, wait for its bound PV to reach `expectedPhase`
 ginkgo.By("Deleting PVCs to invoke reclaim policy")
-if err = e2epv.DeletePVCandValidatePVGroup(c, ns, pvols, claims, expectPhase); err != nil {
+if err = e2epv.DeletePVCandValidatePVGroup(c, f.Timeouts, ns, pvols, claims, expectPhase); err != nil {
 return err
 }
 return nil
@@ -234,7 +234,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 numPVs, numPVCs := 2, 4
 pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
 framework.ExpectNoError(err)
-framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, ns, pvols, claims, true))
+framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true))
 framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
 })
@@ -244,7 +244,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 numPVs, numPVCs := 3, 3
 pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
 framework.ExpectNoError(err)
-framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, ns, pvols, claims, true))
+framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true))
 framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
 })
@@ -254,7 +254,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 numPVs, numPVCs := 4, 2
 pvols, claims, err = e2epv.CreatePVsPVCs(numPVs, numPVCs, c, ns, pvConfig, pvcConfig)
 framework.ExpectNoError(err)
-framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, ns, pvols, claims, true))
+framework.ExpectNoError(e2epv.WaitAndVerifyBinds(c, f.Timeouts, ns, pvols, claims, true))
 framework.ExpectNoError(completeMultiTest(f, c, ns, pvols, claims, v1.VolumeReleased))
 })
 })
@@ -267,7 +267,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 pvConfig.ReclaimPolicy = v1.PersistentVolumeReclaimRecycle
 pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
 framework.ExpectNoError(err, "BeforeEach: Failed to create PV/PVC")
-framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed")
+framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc), "BeforeEach: WaitOnPVandPVC failed")
 })
 ginkgo.AfterEach(func() {
@@ -289,7 +289,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 ginkgo.By("Deleting the claim")
 framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
-framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable))
+framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, f.Timeouts, ns, pvc, pv, v1.VolumeAvailable))
 ginkgo.By("Re-mounting the volume.")
 pvc = e2epv.MakePersistentVolumeClaim(pvcConfig, ns)
@@ -310,7 +310,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
 // Delete the PVC and wait for the recycler to finish before the NFS server gets shutdown during cleanup.
 framework.Logf("Removing second PVC, waiting for the recycler to finish before cleanup.")
-framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable))
+framework.ExpectNoError(e2epv.DeletePVCandValidatePV(c, f.Timeouts, ns, pvc, pv, v1.VolumeAvailable))
 pvc = nil
 })
 })

View File

@@ -100,7 +100,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
 ginkgo.By("Deleting the PV")
 err = client.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, *metav1.NewDeleteOptions(0))
 framework.ExpectNoError(err, "Error deleting PV")
-err = e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout)
+err = e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, f.Timeouts.PVDelete)
 framework.ExpectNoError(err, "waiting for PV to be deleted")
 })
@@ -111,7 +111,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
 framework.ExpectNoError(err, "Error creating PVC")
 ginkgo.By("Waiting for PVC to become Bound")
-err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, e2epv.ClaimBindingTimeout)
+err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, f.Timeouts.ClaimBound)
 framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
 ginkgo.By("Deleting the PV, however, the PV must not be removed from the system as it's bound to a PVC")
@@ -128,7 +128,7 @@ var _ = utils.SIGDescribe("PV Protection", func() {
 framework.ExpectNoError(err, "Error deleting PVC")
 ginkgo.By("Checking that the PV is automatically removed from the system because it's no longer bound to a PVC")
-err = e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, e2epv.PVDeletingTimeout)
+err = e2epv.WaitForPersistentVolumeDeleted(client, pv.Name, framework.Poll, f.Timeouts.PVDelete)
 framework.ExpectNoError(err, "waiting for PV to be deleted")
 })
 })

View File

@@ -18,12 +18,13 @@ package storage
 import (
 "context"
 "github.com/onsi/ginkgo"
 "fmt"
 "time"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
@@ -77,6 +78,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 prefix := "pvc-protection"
 e2epv.SkipIfNoDefaultStorageClass(client)
 t := testsuites.StorageClassTest{
+Timeouts: f.Timeouts,
 ClaimSize: "1Gi",
 }
 pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
@@ -94,7 +96,7 @@ var _ = utils.SIGDescribe("PVC Protection", func() {
 framework.ExpectNoError(err, "While creating pod that uses the PVC or waiting for the Pod to become Running")
 ginkgo.By("Waiting for PVC to become Bound")
-err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, e2epv.ClaimBindingTimeout)
+err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, nameSpace, pvc.Name, framework.Poll, f.Timeouts.ClaimBound)
 framework.ExpectNoError(err, "Failed waiting for PVC to be bound %v", err)
 ginkgo.By("Checking that PVC Protection finalizer is set")

View File

@@ -18,6 +18,7 @@ package storage
 import (
 "context"
 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
@@ -107,6 +108,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
 Name: "HDD Regional PD on GCE/GKE",
 CloudProviders: []string{"gce", "gke"},
 Provisioner: "kubernetes.io/gce-pd",
+Timeouts: framework.NewTimeoutContextWithDefaults(),
 Parameters: map[string]string{
 "type": "pd-standard",
 "zones": strings.Join(cloudZones, ","),
@@ -129,6 +131,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
 Name: "HDD Regional PD with auto zone selection on GCE/GKE",
 CloudProviders: []string{"gce", "gke"},
 Provisioner: "kubernetes.io/gce-pd",
+Timeouts: framework.NewTimeoutContextWithDefaults(),
 Parameters: map[string]string{
 "type": "pd-standard",
 "replication-type": "regional-pd",
@@ -166,6 +169,7 @@ func testZonalFailover(c clientset.Interface, ns string) {
 testSpec := testsuites.StorageClassTest{
 Name: "Regional PD Failover on GCE/GKE",
 CloudProviders: []string{"gce", "gke"},
+Timeouts: framework.NewTimeoutContextWithDefaults(),
 Provisioner: "kubernetes.io/gce-pd",
 Parameters: map[string]string{
 "type": "pd-standard",
@@ -326,6 +330,7 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int)
 Client: c,
 Name: "Regional PD storage class with waitForFirstConsumer test on GCE",
 Provisioner: "kubernetes.io/gce-pd",
+Timeouts: framework.NewTimeoutContextWithDefaults(),
 Parameters: map[string]string{
 "type": "pd-standard",
 "replication-type": "regional-pd",
@@ -362,6 +367,7 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
 test := testsuites.StorageClassTest{
 Name: "Regional PD storage class with allowedTopologies test on GCE",
 Provisioner: "kubernetes.io/gce-pd",
+Timeouts: framework.NewTimeoutContextWithDefaults(),
 Parameters: map[string]string{
 "type": "pd-standard",
 "replication-type": "regional-pd",
@@ -389,6 +395,7 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
 func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns string, pvcCount int) {
 test := testsuites.StorageClassTest{
 Client: c,
+Timeouts: framework.NewTimeoutContextWithDefaults(),
 Name: "Regional PD storage class with allowedTopologies and waitForFirstConsumer test on GCE",
 Provisioner: "kubernetes.io/gce-pd",
 Parameters: map[string]string{

View File

@@ -420,7 +420,7 @@ func createPVCPV(
 pv, pvc, err := e2epv.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
 framework.ExpectNoError(err, "PVC, PV creation failed")
-err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc)
+err = e2epv.WaitOnPVandPVC(f.ClientSet, f.Timeouts, f.Namespace.Name, pv, pvc)
 framework.ExpectNoError(err, "PVC, PV failed to bind")
 return pv, pvc
@@ -453,7 +453,7 @@ func createPVCPVFromDynamicProvisionSC(
 framework.ExpectNoError(err)
 if !isDelayedBinding(sc) {
-err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout)
+err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision)
 framework.ExpectNoError(err)
 }

View File

@@ -82,7 +82,7 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern
 // registers its own BeforeEach which creates the namespace. Beware that it
 // also registers an AfterEach which renders f unusable. Any code using
 // f must run inside an It or Context callback.
-f := framework.NewDefaultFramework("disruptive")
+f := framework.NewFrameworkWithCustomTimeouts("disruptive", getDriverTimeouts(driver))
 init := func() {
 l = local{}
@@ -169,7 +169,7 @@ func (s *disruptiveTestSuite) DefineTests(driver TestDriver, pattern testpattern
 NodeSelection: l.config.ClientNodeSelection,
 ImageID: e2evolume.GetDefaultTestImageID(),
 }
-l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, &podConfig, framework.PodStartTimeout)
+l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, &podConfig, f.Timeouts.PodStart)
 framework.ExpectNoError(err, "While creating pods for kubelet restart test")
 if pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil {
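Test suites such as this one switch from framework.NewDefaultFramework to framework.NewFrameworkWithCustomTimeouts so a driver can stretch the default limits. A hedged sketch of that construction follows, assuming only the two constructors shown in this commit; the helper name newSlowDriverFramework and the override values are illustrative and are not the commit's getDriverTimeouts.

package example

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
)

// newSlowDriverFramework builds a framework whose TimeoutContext stretches a
// few of the default limits; the values here are assumptions, not defaults.
func newSlowDriverFramework(name string) *framework.Framework {
	timeouts := framework.NewTimeoutContextWithDefaults()
	timeouts.PodStartSlow = 30 * time.Minute    // assumed headroom for a slow backend
	timeouts.ClaimProvision = 10 * time.Minute  // assumed headroom for slow provisioning
	return framework.NewFrameworkWithCustomTimeouts(name, timeouts)
}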

View File

@ -106,11 +106,11 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
// registers its own BeforeEach which creates the namespace. Beware that it // registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using // also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback. // f must run inside an It or Context callback.
f := framework.NewDefaultFramework("ephemeral") f := framework.NewFrameworkWithCustomTimeouts("ephemeral", getDriverTimeouts(driver))
init := func() { init := func() {
if pattern.VolType == testpatterns.GenericEphemeralVolume { if pattern.VolType == testpatterns.GenericEphemeralVolume {
enabled, err := GenericEphemeralVolumesEnabled(f.ClientSet, f.Namespace.Name) enabled, err := GenericEphemeralVolumesEnabled(f.ClientSet, f.Timeouts, f.Namespace.Name)
framework.ExpectNoError(err, "check GenericEphemeralVolume feature") framework.ExpectNoError(err, "check GenericEphemeralVolume feature")
if !enabled { if !enabled {
e2eskipper.Skipf("Cluster doesn't support %q volumes -- skipping", pattern.VolType) e2eskipper.Skipf("Cluster doesn't support %q volumes -- skipping", pattern.VolType)
@ -127,6 +127,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
case testpatterns.CSIInlineVolume: case testpatterns.CSIInlineVolume:
l.testCase = &EphemeralTest{ l.testCase = &EphemeralTest{
Client: l.config.Framework.ClientSet, Client: l.config.Framework.ClientSet,
Timeouts: f.Timeouts,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
DriverName: eDriver.GetCSIDriverName(l.config), DriverName: eDriver.GetCSIDriverName(l.config),
Node: l.config.ClientNodeSelection, Node: l.config.ClientNodeSelection,
@ -137,6 +138,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
case testpatterns.GenericEphemeralVolume: case testpatterns.GenericEphemeralVolume:
l.testCase = &EphemeralTest{ l.testCase = &EphemeralTest{
Client: l.config.Framework.ClientSet, Client: l.config.Framework.ClientSet,
Timeouts: f.Timeouts,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Node: l.config.ClientNodeSelection, Node: l.config.ClientNodeSelection,
VolSource: l.resource.VolSource, VolSource: l.resource.VolSource,
@ -194,7 +196,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
[]v1.VolumeSource{pod.Spec.Volumes[0].VolumeSource}, []v1.VolumeSource{pod.Spec.Volumes[0].VolumeSource},
readOnly, readOnly,
l.testCase.Node) l.testCase.Node)
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, pod2.Name, pod2.Namespace), "waiting for second pod with inline volume") framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod2.Name, pod2.Namespace, f.Timeouts.PodStartSlow), "waiting for second pod with inline volume")
// If (and only if) we were able to mount // If (and only if) we were able to mount
// read/write and volume data is not shared // read/write and volume data is not shared
@ -207,7 +209,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
storageutils.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]") storageutils.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
} }
defer StopPodAndDependents(f.ClientSet, pod2) defer StopPodAndDependents(f.ClientSet, f.Timeouts, pod2)
return nil return nil
} }
@ -232,6 +234,7 @@ func (p *ephemeralTestSuite) DefineTests(driver TestDriver, pattern testpatterns
// Not all parameters are used by all tests. // Not all parameters are used by all tests.
type EphemeralTest struct { type EphemeralTest struct {
Client clientset.Interface Client clientset.Interface
Timeouts *framework.TimeoutContext
Namespace string Namespace string
DriverName string DriverName string
VolSource *v1.VolumeSource VolSource *v1.VolumeSource
@ -307,9 +310,9 @@ func (t EphemeralTest) TestEphemeral() {
pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node) pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node)
defer func() { defer func() {
// pod might be nil now. // pod might be nil now.
StopPodAndDependents(client, pod) StopPodAndDependents(client, t.Timeouts, pod)
}() }()
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(client, pod.Name, pod.Namespace), "waiting for pod with inline volume") framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(client, pod.Name, pod.Namespace, t.Timeouts.PodStartSlow), "waiting for pod with inline volume")
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod") framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName actualNodeName := runningPod.Spec.NodeName
@ -320,7 +323,7 @@ func (t EphemeralTest) TestEphemeral() {
runningPodData = t.RunningPodCheck(pod) runningPodData = t.RunningPodCheck(pod)
} }
StopPodAndDependents(client, pod) StopPodAndDependents(client, t.Timeouts, pod)
pod = nil // Don't stop twice. pod = nil // Don't stop twice.
// There should be no dangling PVCs in the namespace now. There might be for // There should be no dangling PVCs in the namespace now. There might be for
@ -383,8 +386,8 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
// CSIInlineVolumesEnabled checks whether the running cluster has the CSIInlineVolumes feature gate enabled. // CSIInlineVolumesEnabled checks whether the running cluster has the CSIInlineVolumes feature gate enabled.
// It does that by trying to create a pod that uses that feature. // It does that by trying to create a pod that uses that feature.
func CSIInlineVolumesEnabled(c clientset.Interface, ns string) (bool, error) { func CSIInlineVolumesEnabled(c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) {
return VolumeSourceEnabled(c, ns, v1.VolumeSource{ return VolumeSourceEnabled(c, t, ns, v1.VolumeSource{
CSI: &v1.CSIVolumeSource{ CSI: &v1.CSIVolumeSource{
Driver: "no-such-driver.example.com", Driver: "no-such-driver.example.com",
}, },
@ -393,9 +396,9 @@ func CSIInlineVolumesEnabled(c clientset.Interface, ns string) (bool, error) {
// GenericEphemeralVolumesEnabled checks whether the running cluster has the GenericEphemeralVolume feature gate enabled. // GenericEphemeralVolumesEnabled checks whether the running cluster has the GenericEphemeralVolume feature gate enabled.
// It does that by trying to create a pod that uses that feature. // It does that by trying to create a pod that uses that feature.
func GenericEphemeralVolumesEnabled(c clientset.Interface, ns string) (bool, error) { func GenericEphemeralVolumesEnabled(c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) {
storageClassName := "no-such-storage-class" storageClassName := "no-such-storage-class"
return VolumeSourceEnabled(c, ns, v1.VolumeSource{ return VolumeSourceEnabled(c, t, ns, v1.VolumeSource{
Ephemeral: &v1.EphemeralVolumeSource{ Ephemeral: &v1.EphemeralVolumeSource{
VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{ VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
Spec: v1.PersistentVolumeClaimSpec{ Spec: v1.PersistentVolumeClaimSpec{
@ -414,7 +417,7 @@ func GenericEphemeralVolumesEnabled(c clientset.Interface, ns string) (bool, err
// VolumeSourceEnabled checks whether a certain kind of volume source is enabled by trying // VolumeSourceEnabled checks whether a certain kind of volume source is enabled by trying
// to create a pod that uses it. // to create a pod that uses it.
func VolumeSourceEnabled(c clientset.Interface, ns string, volume v1.VolumeSource) (bool, error) { func VolumeSourceEnabled(c clientset.Interface, t *framework.TimeoutContext, ns string, volume v1.VolumeSource) (bool, error) {
pod := &v1.Pod{ pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
Kind: "Pod", Kind: "Pod",
@ -451,7 +454,7 @@ func VolumeSourceEnabled(c clientset.Interface, ns string, volume v1.VolumeSourc
switch { switch {
case err == nil: case err == nil:
// Pod was created, feature supported. // Pod was created, feature supported.
StopPodAndDependents(c, pod) StopPodAndDependents(c, t, pod)
return true, nil return true, nil
case apierrors.IsInvalid(err): case apierrors.IsInvalid(err):
// "Invalid" because it uses a feature that isn't supported. // "Invalid" because it uses a feature that isn't supported.


@ -102,7 +102,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver TestDriver, pattern te
// registers its own BeforeEach which creates the namespace. Beware that it // registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using // also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback. // f must run inside an It or Context callback.
f := framework.NewDefaultFramework("fsgroupchangepolicy") f := framework.NewFrameworkWithCustomTimeouts("fsgroupchangepolicy", getDriverTimeouts(driver))
init := func() { init := func() {
e2eskipper.SkipIfNodeOSDistroIs("windows") e2eskipper.SkipIfNodeOSDistroIs("windows")
@ -244,7 +244,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver TestDriver, pattern te
func createPodAndVerifyContentGid(f *framework.Framework, podConfig *e2epod.Config, createInitialFiles bool, expectedRootDirFileOwnership, expectedSubDirFileOwnership string) *v1.Pod { func createPodAndVerifyContentGid(f *framework.Framework, podConfig *e2epod.Config, createInitialFiles bool, expectedRootDirFileOwnership, expectedSubDirFileOwnership string) *v1.Pod {
podFsGroup := strconv.FormatInt(*podConfig.FsGroup, 10) podFsGroup := strconv.FormatInt(*podConfig.FsGroup, 10)
ginkgo.By(fmt.Sprintf("Creating Pod in namespace %s with fsgroup %s", podConfig.NS, podFsGroup)) ginkgo.By(fmt.Sprintf("Creating Pod in namespace %s with fsgroup %s", podConfig.NS, podFsGroup))
pod, err := e2epod.CreateSecPodWithNodeSelection(f.ClientSet, podConfig, framework.PodStartTimeout) pod, err := e2epod.CreateSecPodWithNodeSelection(f.ClientSet, podConfig, f.Timeouts.PodStart)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.Logf("Pod %s/%s started successfully", pod.Namespace, pod.Name) framework.Logf("Pod %s/%s started successfully", pod.Namespace, pod.Name)


@ -97,7 +97,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
// registers its own BeforeEach which creates the namespace. Beware that it // registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using // also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback. // f must run inside an It or Context callback.
f := framework.NewDefaultFramework("multivolume") f := framework.NewFrameworkWithCustomTimeouts("multivolume", getDriverTimeouts(driver))
init := func() { init := func() {
l = local{} l = local{}
@ -346,7 +346,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver TestDriver, pattern testpatter
l.resources = append(l.resources, resource) l.resources = append(l.resources, resource)
// Initialize the volume with a filesystem - it's going to be mounted as read-only below. // Initialize the volume with a filesystem - it's going to be mounted as read-only below.
initializeVolume(l.cs, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection) initializeVolume(l.cs, f.Timeouts, l.ns.Name, resource.Pvc, l.config.ClientNodeSelection)
// Test access to the volume from pods on a single node // Test access to the volume from pods on a single node
TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name, TestConcurrentAccessToSingleVolume(l.config.Framework, l.cs, l.ns.Name,
@ -408,7 +408,7 @@ func testAccessMultipleVolumes(f *framework.Framework, cs clientset.Interface, n
NodeSelection: node, NodeSelection: node,
ImageID: e2evolume.GetDefaultTestImageID(), ImageID: e2evolume.GetDefaultTestImageID(),
} }
pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout) pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, f.Timeouts.PodStart)
defer func() { defer func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
}() }()
@ -488,7 +488,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int
PVCsReadOnly: readOnly, PVCsReadOnly: readOnly,
ImageID: e2evolume.GetTestImageID(imageutils.DebianIptables), ImageID: e2evolume.GetTestImageID(imageutils.DebianIptables),
} }
pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, framework.PodStartTimeout) pod, err := e2epod.CreateSecPodWithNodeSelection(cs, &podConfig, f.Timeouts.PodStart)
defer func() { defer func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
}() }()
@ -637,7 +637,7 @@ func ensureTopologyRequirements(nodeSelection *e2epod.NodeSelection, nodes *v1.N
} }
// initializeVolume creates a filesystem on given volume, so it can be used as read-only later // initializeVolume creates a filesystem on given volume, so it can be used as read-only later
func initializeVolume(cs clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) { func initializeVolume(cs clientset.Interface, t *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock { if pvc.Spec.VolumeMode != nil && *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock {
// Block volumes do not need to be initialized. // Block volumes do not need to be initialized.
return return
@ -653,7 +653,7 @@ func initializeVolume(cs clientset.Interface, ns string, pvc *v1.PersistentVolum
NodeSelection: node, NodeSelection: node,
ImageID: e2evolume.GetDefaultTestImageID(), ImageID: e2evolume.GetDefaultTestImageID(),
} }
pod, err := e2epod.CreateSecPod(cs, &podConfig, framework.PodStartTimeout) pod, err := e2epod.CreateSecPod(cs, &podConfig, t.PodStart)
defer func() { defer func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod)) framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
}() }()
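
initializeVolume now receives the suite's *framework.TimeoutContext, so the pod that pre-formats the volume is bounded by the driver's PodStart value. A sketch of the same pattern under the same package and imports as above (clientset is the k8s.io/client-go/kubernetes alias used in these hunks; Config fields beyond NS, PVCs and NodeSelection are omitted):

func formatVolume(cs clientset.Interface, t *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
	podConfig := e2epod.Config{
		NS:            ns,
		PVCs:          []*v1.PersistentVolumeClaim{pvc},
		NodeSelection: node,
	}
	// The formatting pod must come up within the driver-specific PodStart timeout.
	pod, err := e2epod.CreateSecPod(cs, &podConfig, t.PodStart)
	defer func() {
		framework.ExpectNoError(e2epod.DeletePodWithWait(cs, pod))
	}()
	framework.ExpectNoError(err)
}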


@ -45,6 +45,7 @@ import (
// Not all parameters are used by all tests. // Not all parameters are used by all tests.
type StorageClassTest struct { type StorageClassTest struct {
Client clientset.Interface Client clientset.Interface
Timeouts *framework.TimeoutContext
Claim *v1.PersistentVolumeClaim Claim *v1.PersistentVolumeClaim
SourceClaim *v1.PersistentVolumeClaim SourceClaim *v1.PersistentVolumeClaim
Class *storagev1.StorageClass Class *storagev1.StorageClass
@ -129,7 +130,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
// registers its own BeforeEach which creates the namespace. Beware that it // registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using // also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback. // f must run inside an It or Context callback.
f := framework.NewDefaultFramework("provisioning") f := framework.NewFrameworkWithCustomTimeouts("provisioning", getDriverTimeouts(driver))
init := func() { init := func() {
l = local{} l = local{}
@ -160,6 +161,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
framework.Logf("In creating storage class object and pvc objects for driver - sc: %v, pvc: %v, src-pvc: %v", l.sc, l.pvc, l.sourcePVC) framework.Logf("In creating storage class object and pvc objects for driver - sc: %v, pvc: %v, src-pvc: %v", l.sc, l.pvc, l.sourcePVC)
l.testCase = &StorageClassTest{ l.testCase = &StorageClassTest{
Client: l.config.Framework.ClientSet, Client: l.config.Framework.ClientSet,
Timeouts: f.Timeouts,
Claim: l.pvc, Claim: l.pvc,
SourceClaim: l.sourcePVC, SourceClaim: l.sourcePVC,
Class: l.sc, Class: l.sc,
@ -406,7 +408,7 @@ func getBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.
// checkProvisioning verifies that the claim is bound and has the correct properties // checkProvisioning verifies that the claim is bound and has the correct properties
func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume { func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("checking the claim") ginkgo.By("checking the claim")
@ -595,7 +597,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
// Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out // Wait for ClaimProvisionTimeout (across all PVCs in parallel) and make sure the phase did not become Bound i.e. the Wait errors out
ginkgo.By("checking the claims are in pending state") ginkgo.By("checking the claims are in pending state")
err = e2epv.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, framework.ClaimProvisionShortTimeout, true) err = e2epv.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true)
framework.ExpectError(err) framework.ExpectError(err)
verifyPVCsPending(t.Client, createdClaims) verifyPVCsPending(t.Client, createdClaims)
@ -610,7 +612,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
e2epod.DeletePodOrFail(t.Client, pod.Namespace, pod.Name) e2epod.DeletePodOrFail(t.Client, pod.Namespace, pod.Name)
e2epod.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, framework.PodDeleteTimeout) e2epod.WaitForPodToDisappear(t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, t.Timeouts.PodDelete)
}() }()
if expectUnschedulable { if expectUnschedulable {
// Verify that no claims are provisioned. // Verify that no claims are provisioned.
@ -629,7 +631,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// make sure claim did bind // make sure claim did bind
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout) err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
framework.ExpectNoError(err) framework.ExpectNoError(err)
pv, err := t.Client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{}) pv, err := t.Client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{})
@ -715,7 +717,7 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
// StopPodAndDependents first tries to log the output of the pod's container, // StopPodAndDependents first tries to log the output of the pod's container,
// then deletes the pod and waits for that to succeed. Also waits for all owned // then deletes the pod and waits for that to succeed. Also waits for all owned
// resources to be deleted. // resources to be deleted.
func StopPodAndDependents(c clientset.Interface, pod *v1.Pod) { func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutContext, pod *v1.Pod) {
if pod == nil { if pod == nil {
return return
} }
@ -762,14 +764,14 @@ func StopPodAndDependents(c clientset.Interface, pod *v1.Pod) {
} }
framework.Logf("pod Delete API error: %v", err) framework.Logf("pod Delete API error: %v", err)
} }
framework.Logf("Wait up to %v for pod %q to be fully deleted", e2epod.PodDeleteTimeout, pod.Name) framework.Logf("Wait up to %v for pod %q to be fully deleted", timeouts.PodDelete, pod.Name)
e2epod.WaitForPodNotFoundInNamespace(c, pod.Name, pod.Namespace, e2epod.PodDeleteTimeout) e2epod.WaitForPodNotFoundInNamespace(c, pod.Name, pod.Namespace, timeouts.PodDelete)
if len(podPVs) > 0 { if len(podPVs) > 0 {
for _, pv := range podPVs { for _, pv := range podPVs {
// As with CSI inline volumes, we use the pod delete timeout here because conceptually // As with CSI inline volumes, we use the pod delete timeout here because conceptually
// the volume deletion needs to be that fast (whatever "that" is). // the volume deletion needs to be that fast (whatever "that" is).
framework.Logf("Wait up to %v for pod PV %s to be fully deleted", e2epod.PodDeleteTimeout, pv.Name) framework.Logf("Wait up to %v for pod PV %s to be fully deleted", timeouts.PodDelete, pv.Name)
e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, e2epod.PodDeleteTimeout) e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, timeouts.PodDelete)
} }
} }
} }
@ -818,7 +820,7 @@ func prepareSnapshotDataSourceForProvisioning(
} }
e2evolume.InjectContent(f, config, nil, "", tests) e2evolume.InjectContent(f, config, nil, "", tests)
snapshotResource := CreateSnapshotResource(sDriver, perTestConfig, pattern, updatedClaim.GetName(), updatedClaim.GetNamespace()) snapshotResource := CreateSnapshotResource(sDriver, perTestConfig, pattern, updatedClaim.GetName(), updatedClaim.GetNamespace(), f.Timeouts)
group := "snapshot.storage.k8s.io" group := "snapshot.storage.k8s.io"
dataSourceRef := &v1.TypedLocalObjectReference{ dataSourceRef := &v1.TypedLocalObjectReference{
@ -834,7 +836,7 @@ func prepareSnapshotDataSourceForProvisioning(
framework.Failf("Error deleting initClaim %q. Error: %v", updatedClaim.Name, err) framework.Failf("Error deleting initClaim %q. Error: %v", updatedClaim.Name, err)
} }
err = snapshotResource.CleanupResource() err = snapshotResource.CleanupResource(f.Timeouts)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("deleting StorageClass " + class.Name) ginkgo.By("deleting StorageClass " + class.Name)


@ -158,7 +158,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
RunInPodWithVolume(cs, pvc.Namespace, pvc.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection) RunInPodWithVolume(cs, pvc.Namespace, pvc.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("checking the claim") ginkgo.By("checking the claim")
// Get new copy of the claim // Get new copy of the claim
@ -203,9 +203,9 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
var sr *SnapshotResource var sr *SnapshotResource
cleanupSteps = append(cleanupSteps, func() { cleanupSteps = append(cleanupSteps, func() {
framework.ExpectNoError(sr.CleanupResource()) framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
}) })
sr = CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace()) sr = CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace(), f.Timeouts)
vs = sr.Vs vs = sr.Vs
vscontent = sr.Vscontent vscontent = sr.Vscontent
vsc = sr.Vsclass vsc = sr.Vsclass
@ -275,19 +275,19 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
cleanupSteps = append(cleanupSteps, func() { cleanupSteps = append(cleanupSteps, func() {
StopPod(cs, restoredPod) StopPod(cs, restoredPod)
}) })
framework.ExpectNoError(e2epod.WaitForPodRunningInNamespaceSlow(cs, restoredPod.Name, restoredPod.Namespace)) framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
commands := e2evolume.GenerateReadFileCmd(datapath) commands := e2evolume.GenerateReadFileCmd(datapath)
_, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute) _, err = framework.LookForStringInPodExec(restoredPod.Namespace, restoredPod.Name, commands, originalMntTestData, time.Minute)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy") ginkgo.By("should delete the VolumeSnapshotContent according to its deletion policy")
err = DeleteAndWaitSnapshot(dc, vs.GetNamespace(), vs.GetName(), framework.Poll, framework.SnapshotDeleteTimeout) err = DeleteAndWaitSnapshot(dc, vs.GetNamespace(), vs.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
framework.ExpectNoError(err) framework.ExpectNoError(err)
switch pattern.SnapshotDeletionPolicy { switch pattern.SnapshotDeletionPolicy {
case testpatterns.DeleteSnapshot: case testpatterns.DeleteSnapshot:
ginkgo.By("checking the SnapshotContent has been deleted") ginkgo.By("checking the SnapshotContent has been deleted")
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, vscontent.GetName(), framework.Poll, framework.SnapshotDeleteTimeout) err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, vscontent.GetName(), framework.Poll, f.Timeouts.SnapshotDelete)
framework.ExpectNoError(err) framework.ExpectNoError(err)
case testpatterns.RetainSnapshot: case testpatterns.RetainSnapshot:
ginkgo.By("checking the SnapshotContent has not been deleted") ginkgo.By("checking the SnapshotContent has not been deleted")
@ -358,7 +358,7 @@ type SnapshotResource struct {
// CreateSnapshot creates a VolumeSnapshotClass with given SnapshotDeletionPolicy and a VolumeSnapshot // CreateSnapshot creates a VolumeSnapshotClass with given SnapshotDeletionPolicy and a VolumeSnapshot
// from the VolumeSnapshotClass using a dynamic client. // from the VolumeSnapshotClass using a dynamic client.
// Returns the unstructured VolumeSnapshotClass and VolumeSnapshot objects. // Returns the unstructured VolumeSnapshotClass and VolumeSnapshot objects.
func CreateSnapshot(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string) (*unstructured.Unstructured, *unstructured.Unstructured) { func CreateSnapshot(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext) (*unstructured.Unstructured, *unstructured.Unstructured) {
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
var err error var err error
if pattern.SnapshotType != testpatterns.DynamicCreatedSnapshot && pattern.SnapshotType != testpatterns.PreprovisionedCreatedSnapshot { if pattern.SnapshotType != testpatterns.DynamicCreatedSnapshot && pattern.SnapshotType != testpatterns.PreprovisionedCreatedSnapshot {
@ -414,13 +414,13 @@ func GetSnapshotContentFromSnapshot(dc dynamic.Interface, snapshot *unstructured
// CreateSnapshotResource creates a snapshot resource for the current test. It knows how to deal with // CreateSnapshotResource creates a snapshot resource for the current test. It knows how to deal with
// different test pattern snapshot provisioning and deletion policy // different test pattern snapshot provisioning and deletion policy
func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string) *SnapshotResource { func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvcName string, pvcNamespace string, timeouts *framework.TimeoutContext) *SnapshotResource {
var err error var err error
r := SnapshotResource{ r := SnapshotResource{
Config: config, Config: config,
Pattern: pattern, Pattern: pattern,
} }
r.Vsclass, r.Vs = CreateSnapshot(sDriver, config, pattern, pvcName, pvcNamespace) r.Vsclass, r.Vs = CreateSnapshot(sDriver, config, pattern, pvcName, pvcNamespace, timeouts)
dc := r.Config.Framework.DynamicClient dc := r.Config.Framework.DynamicClient
@ -456,7 +456,7 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("checking the Snapshot has been deleted") ginkgo.By("checking the Snapshot has been deleted")
err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, r.Vs.GetName(), r.Vs.GetNamespace(), framework.Poll, framework.SnapshotDeleteTimeout) err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, r.Vs.GetName(), r.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = dc.Resource(SnapshotContentGVR).Delete(context.TODO(), r.Vscontent.GetName(), metav1.DeleteOptions{}) err = dc.Resource(SnapshotContentGVR).Delete(context.TODO(), r.Vscontent.GetName(), metav1.DeleteOptions{})
@ -466,7 +466,7 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("checking the Snapshot content has been deleted") ginkgo.By("checking the Snapshot content has been deleted")
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, r.Vscontent.GetName(), framework.Poll, framework.SnapshotDeleteTimeout) err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, r.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("creating a snapshot content with the snapshot handle") ginkgo.By("creating a snapshot content with the snapshot handle")
@ -484,7 +484,7 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
r.Vs, err = dc.Resource(SnapshotGVR).Namespace(r.Vs.GetNamespace()).Create(context.TODO(), r.Vs, metav1.CreateOptions{}) r.Vs, err = dc.Resource(SnapshotGVR).Namespace(r.Vs.GetNamespace()).Create(context.TODO(), r.Vs, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = WaitForSnapshotReady(dc, r.Vs.GetNamespace(), r.Vs.GetName(), framework.Poll, framework.SnapshotCreateTimeout) err = WaitForSnapshotReady(dc, r.Vs.GetNamespace(), r.Vs.GetName(), framework.Poll, timeouts.SnapshotCreate)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("getting the snapshot and snapshot content") ginkgo.By("getting the snapshot and snapshot content")
@ -498,7 +498,7 @@ func CreateSnapshotResource(sDriver SnapshottableTestDriver, config *PerTestConf
} }
// CleanupResource cleans up the snapshot resource and ignores not found errors // CleanupResource cleans up the snapshot resource and ignores not found errors
func (sr *SnapshotResource) CleanupResource() error { func (sr *SnapshotResource) CleanupResource(timeouts *framework.TimeoutContext) error {
var err error var err error
var cleanupErrs []error var cleanupErrs []error
@ -532,7 +532,7 @@ func (sr *SnapshotResource) CleanupResource() error {
} }
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, boundVsContent.GetName(), framework.Poll, framework.SnapshotDeleteTimeout) err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, boundVsContent.GetName(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err) framework.ExpectNoError(err)
case apierrors.IsNotFound(err): case apierrors.IsNotFound(err):
@ -543,7 +543,7 @@ func (sr *SnapshotResource) CleanupResource() error {
} }
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, sr.Vs.GetName(), sr.Vs.GetNamespace(), framework.Poll, framework.SnapshotDeleteTimeout) err = utils.WaitForNamespacedGVRDeletion(dc, SnapshotGVR, sr.Vs.GetName(), sr.Vs.GetNamespace(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err) framework.ExpectNoError(err)
default: default:
cleanupErrs = append(cleanupErrs, err) cleanupErrs = append(cleanupErrs, err)
@ -574,7 +574,7 @@ func (sr *SnapshotResource) CleanupResource() error {
} }
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, sr.Vscontent.GetName(), framework.Poll, framework.SnapshotDeleteTimeout) err = utils.WaitForGVRDeletion(dc, SnapshotContentGVR, sr.Vscontent.GetName(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err) framework.ExpectNoError(err)
case apierrors.IsNotFound(err): case apierrors.IsNotFound(err):
// Hope the underlying physical snapshot resource has been deleted already // Hope the underlying physical snapshot resource has been deleted already
@ -589,7 +589,7 @@ func (sr *SnapshotResource) CleanupResource() error {
if err != nil && !apierrors.IsNotFound(err) { if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting snapshot class %q. Error: %v", sr.Vsclass.GetName(), err) framework.Failf("Error deleting snapshot class %q. Error: %v", sr.Vsclass.GetName(), err)
} }
err = utils.WaitForGVRDeletion(dc, SnapshotClassGVR, sr.Vsclass.GetName(), framework.Poll, framework.SnapshotDeleteTimeout) err = utils.WaitForGVRDeletion(dc, SnapshotClassGVR, sr.Vsclass.GetName(), framework.Poll, timeouts.SnapshotDelete)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
return utilerrors.NewAggregate(cleanupErrs) return utilerrors.NewAggregate(cleanupErrs)
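
CreateSnapshot, CreateSnapshotResource and CleanupResource all take the TimeoutContext now, so snapshot readiness and deletion are bounded by SnapshotCreate and SnapshotDelete from the per-driver settings. A sketch of the lifecycle, with sDriver, config and pattern assumed to come from the surrounding suite (same package and imports as above, plus the testpatterns alias used in this file):

func snapshotRoundTrip(f *framework.Framework, sDriver SnapshottableTestDriver, config *PerTestConfig, pattern testpatterns.TestPattern, pvc *v1.PersistentVolumeClaim) {
	// Creates the VolumeSnapshotClass and VolumeSnapshot, waiting up to f.Timeouts.SnapshotCreate for readiness.
	sr := CreateSnapshotResource(sDriver, config, pattern, pvc.GetName(), pvc.GetNamespace(), f.Timeouts)
	defer func() {
		// Deletion waits up to f.Timeouts.SnapshotDelete for the snapshot objects to disappear.
		framework.ExpectNoError(sr.CleanupResource(f.Timeouts))
	}()
	// ... restore a PVC from sr.Vs / sr.Vscontent here ...
}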


@ -202,7 +202,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te
defer wg.Done() defer wg.Done()
framework.Logf("Deleting snapshot %s/%s", snapshot.Vs.GetNamespace(), snapshot.Vs.GetName()) framework.Logf("Deleting snapshot %s/%s", snapshot.Vs.GetNamespace(), snapshot.Vs.GetName())
err := snapshot.CleanupResource() err := snapshot.CleanupResource(f.Timeouts)
mu.Lock() mu.Lock()
defer mu.Unlock() defer mu.Unlock()
errs = append(errs, err) errs = append(errs, err)
@ -275,7 +275,7 @@ func (t *snapshottableStressTestSuite) DefineTests(driver TestDriver, pattern te
return return
default: default:
framework.Logf("Pod-%d [%s], Iteration %d/%d", podIndex, pod.Name, snapshotIndex, stressTest.testOptions.NumSnapshots-1) framework.Logf("Pod-%d [%s], Iteration %d/%d", podIndex, pod.Name, snapshotIndex, stressTest.testOptions.NumSnapshots-1)
snapshot := CreateSnapshotResource(snapshottableDriver, stressTest.config, pattern, volume.Pvc.GetName(), volume.Pvc.GetNamespace()) snapshot := CreateSnapshotResource(snapshottableDriver, stressTest.config, pattern, volume.Pvc.GetName(), volume.Pvc.GetNamespace(), f.Timeouts)
stressTest.snapshotsMutex.Lock() stressTest.snapshotsMutex.Lock()
defer stressTest.snapshotsMutex.Unlock() defer stressTest.snapshotsMutex.Unlock()
stressTest.snapshots = append(stressTest.snapshots, snapshot) stressTest.snapshots = append(stressTest.snapshots, snapshot)


@ -112,7 +112,7 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
// registers its own BeforeEach which creates the namespace. Beware that it // registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using // also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback. // f must run inside an It or Context callback.
f := framework.NewDefaultFramework("provisioning") f := framework.NewFrameworkWithCustomTimeouts("provisioning", getDriverTimeouts(driver))
init := func() { init := func() {
l = local{} l = local{}
@ -457,7 +457,7 @@ func (s *subPathTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
}() }()
// Wait for pod to be running // Wait for pod to be running
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, l.pod) err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for pod to be running") framework.ExpectNoError(err, "while waiting for pod to be running")
// Exec into container that mounted the volume, delete subpath directory // Exec into container that mounted the volume, delete subpath directory
@ -727,7 +727,7 @@ func waitForPodSubpathError(f *framework.Framework, pod *v1.Pod, allowContainerT
return fmt.Errorf("failed to find container that uses subpath") return fmt.Errorf("failed to find container that uses subpath")
} }
waitErr := wait.PollImmediate(framework.Poll, framework.PodStartTimeout, func() (bool, error) { waitErr := wait.PollImmediate(framework.Poll, f.Timeouts.PodStart, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, err return false, err
@ -805,7 +805,7 @@ func testPodContainerRestartWithHooks(f *framework.Framework, pod *v1.Pod, hooks
defer func() { defer func() {
e2epod.DeletePodWithWait(f.ClientSet, pod) e2epod.DeletePodWithWait(f.ClientSet, pod)
}() }()
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod) err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for pod to be running") framework.ExpectNoError(err, "while waiting for pod to be running")
ginkgo.By("Failing liveness probe") ginkgo.By("Failing liveness probe")
@ -978,8 +978,7 @@ func testSubpathReconstruction(f *framework.Framework, hostExec utils.HostExec,
removeUnusedContainers(pod) removeUnusedContainers(pod)
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating pod") framework.ExpectNoError(err, "while creating pod")
err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, pod)
framework.ExpectNoError(err, "while waiting for pod to be running") framework.ExpectNoError(err, "while waiting for pod to be running")
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{}) pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), pod.Name, metav1.GetOptions{})
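
The subpath suite replaces the global PodStartTimeout with f.Timeouts.PodStart in its pod waits and poll loops. A sketch of the polling pattern, using the wait, context and metav1 packages already visible in these hunks; the condition is simplified for the sketch, the real suite inspects container statuses instead:

func waitForSubpathFailure(f *framework.Framework, pod *v1.Pod) error {
	// Poll until the expected subpath error appears, bounded by the driver's PodStart timeout.
	return wait.PollImmediate(framework.Poll, f.Timeouts.PodStart, func() (bool, error) {
		latest, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// Simplified check for the sketch only.
		return latest.Status.Phase == v1.PodFailed, nil
	})
}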


@ -103,7 +103,7 @@ func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
// registers its own BeforeEach which creates the namespace. Beware that it // registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using // also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback. // f must run inside an It or Context callback.
f := framework.NewDefaultFramework("topology") f := framework.NewFrameworkWithCustomTimeouts("topology", getDriverTimeouts(driver))
init := func() topologyTest { init := func() topologyTest {
@ -176,7 +176,7 @@ func (t *topologyTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
t.createResources(cs, &l, nil) t.createResources(cs, &l, nil)
err = e2epod.WaitForPodRunningInNamespace(cs, l.pod) err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, l.pod.Name, l.pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Verifying pod scheduled to correct node") ginkgo.By("Verifying pod scheduled to correct node")


@ -111,7 +111,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
// registers its own BeforeEach which creates the namespace. Beware that it // registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using // also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback. // f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volume-expand") f := framework.NewFrameworkWithCustomTimeouts("volume-expand", getDriverTimeouts(driver))
init := func() { init := func() {
l = local{} l = local{}
@ -179,7 +179,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
NodeSelection: l.config.ClientNodeSelection, NodeSelection: l.config.ClientNodeSelection,
ImageID: e2evolume.GetDefaultTestImageID(), ImageID: e2evolume.GetDefaultTestImageID(),
} }
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, framework.PodStartTimeout) l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart)
defer func() { defer func() {
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod) err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test") framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")
@ -251,7 +251,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver TestDriver, pattern testpatte
NodeSelection: l.config.ClientNodeSelection, NodeSelection: l.config.ClientNodeSelection,
ImageID: e2evolume.GetDefaultTestImageID(), ImageID: e2evolume.GetDefaultTestImageID(),
} }
l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, framework.PodStartTimeout) l.pod, err = e2epod.CreateSecPodWithNodeSelection(f.ClientSet, &podConfig, f.Timeouts.PodStart)
defer func() { defer func() {
err = e2epod.DeletePodWithWait(f.ClientSet, l.pod) err = e2epod.DeletePodWithWait(f.ClientSet, l.pod)
framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test") framework.ExpectNoError(err, "while cleaning up pod already deleted in resize test")


@ -108,7 +108,7 @@ func (t *volumeIOTestSuite) DefineTests(driver TestDriver, pattern testpatterns.
// registers its own BeforeEach which creates the namespace. Beware that it // registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using // also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback. // f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volumeio") f := framework.NewFrameworkWithCustomTimeouts("volumeio", getDriverTimeouts(driver))
init := func() { init := func() {
l = local{} l = local{}
@ -338,7 +338,7 @@ func testVolumeIO(f *framework.Framework, cs clientset.Interface, config e2evolu
} }
}() }()
err = e2epod.WaitForPodRunningInNamespace(cs, clientPod) err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, clientPod.Name, clientPod.Namespace, f.Timeouts.PodStart)
if err != nil { if err != nil {
return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err) return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err)
} }


@ -109,7 +109,7 @@ func (t *volumeStressTestSuite) DefineTests(driver TestDriver, pattern testpatte
// registers its own BeforeEach which creates the namespace. Beware that it // registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using // also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback. // f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volume-stress") f := framework.NewFrameworkWithCustomTimeouts("stress", getDriverTimeouts(driver))
init := func() { init := func() {
cs = f.ClientSet cs = f.ClientSet
@ -194,7 +194,7 @@ func (t *volumeStressTestSuite) DefineTests(driver TestDriver, pattern testpatte
framework.Failf("Failed to create pod-%v [%+v]. Error: %v", podIndex, pod, err) framework.Failf("Failed to create pod-%v [%+v]. Error: %v", podIndex, pod, err)
} }
err = e2epod.WaitForPodRunningInNamespace(cs, pod) err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
if err != nil { if err != nil {
l.cancel() l.cancel()
framework.Failf("Failed to wait for pod-%v [%+v] turn into running status. Error: %v", podIndex, pod, err) framework.Failf("Failed to wait for pod-%v [%+v] turn into running status. Error: %v", podIndex, pod, err)


@ -100,7 +100,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
) )
// No preconditions to test. Normally they would be in a BeforeEach here. // No preconditions to test. Normally they would be in a BeforeEach here.
f := framework.NewDefaultFramework("volumelimits") f := framework.NewFrameworkWithCustomTimeouts("volumelimits", getDriverTimeouts(driver))
// This checks that CSIMaxVolumeLimitChecker works as expected. // This checks that CSIMaxVolumeLimitChecker works as expected.
// A randomly chosen node should be able to handle as many CSI volumes as // A randomly chosen node should be able to handle as many CSI volumes as
@ -122,6 +122,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
l.ns = f.Namespace l.ns = f.Namespace
l.cs = f.ClientSet l.cs = f.ClientSet
l.config, l.testCleanup = driver.PrepareTest(f) l.config, l.testCleanup = driver.PrepareTest(f)
defer l.testCleanup() defer l.testCleanup()
@ -153,7 +154,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
framework.ExpectNoError(err, "while cleaning up resource") framework.ExpectNoError(err, "while cleaning up resource")
}() }()
defer func() { defer func() {
cleanupTest(l.cs, l.ns.Name, l.runningPod.Name, l.unschedulablePod.Name, l.pvcs, l.pvNames) cleanupTest(l.cs, l.ns.Name, l.runningPod.Name, l.unschedulablePod.Name, l.pvcs, l.pvNames, testSlowMultiplier*f.Timeouts.PVDelete)
}() }()
// Create <limit> PVCs for one gigantic pod. // Create <limit> PVCs for one gigantic pod.
@ -183,11 +184,11 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Waiting for all PVCs to get Bound") ginkgo.By("Waiting for all PVCs to get Bound")
l.pvNames, err = waitForAllPVCsBound(l.cs, testSlowMultiplier*e2epv.PVBindingTimeout, l.pvcs) l.pvNames, err = waitForAllPVCsBound(l.cs, testSlowMultiplier*f.Timeouts.PVBound, l.pvcs)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Waiting for the pod Running") ginkgo.By("Waiting for the pod Running")
err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, l.runningPod.Name, l.ns.Name, testSlowMultiplier*framework.PodStartTimeout) err = e2epod.WaitTimeoutForPodRunningInNamespace(l.cs, l.runningPod.Name, l.ns.Name, testSlowMultiplier*f.Timeouts.PodStart)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Creating an extra pod with one volume to exceed the limit") ginkgo.By("Creating an extra pod with one volume to exceed the limit")
@ -203,7 +204,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
framework.ExpectNoError(err, "Failed to create an extra pod with one volume to exceed the limit") framework.ExpectNoError(err, "Failed to create an extra pod with one volume to exceed the limit")
ginkgo.By("Waiting for the pod to get unschedulable with the right message") ginkgo.By("Waiting for the pod to get unschedulable with the right message")
err = e2epod.WaitForPodCondition(l.cs, l.ns.Name, l.unschedulablePod.Name, "Unschedulable", framework.PodStartTimeout, func(pod *v1.Pod) (bool, error) { err = e2epod.WaitForPodCondition(l.cs, l.ns.Name, l.unschedulablePod.Name, "Unschedulable", f.Timeouts.PodStart, func(pod *v1.Pod) (bool, error) {
if pod.Status.Phase == v1.PodPending { if pod.Status.Phase == v1.PodPending {
reg, err := regexp.Compile(`max.+volume.+count`) reg, err := regexp.Compile(`max.+volume.+count`)
if err != nil { if err != nil {
@ -225,7 +226,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver TestDriver, pattern testpatte
}) })
} }
func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulablePodName string, pvcs []*v1.PersistentVolumeClaim, pvNames sets.String) error { func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulablePodName string, pvcs []*v1.PersistentVolumeClaim, pvNames sets.String, timeout time.Duration) error {
var cleanupErrors []string var cleanupErrors []string
if runningPodName != "" { if runningPodName != "" {
err := cs.CoreV1().Pods(ns).Delete(context.TODO(), runningPodName, metav1.DeleteOptions{}) err := cs.CoreV1().Pods(ns).Delete(context.TODO(), runningPodName, metav1.DeleteOptions{})
@ -248,7 +249,7 @@ func cleanupTest(cs clientset.Interface, ns string, runningPodName, unschedulabl
// Wait for the PVs to be deleted. It also includes pod and PVC deletion because of PVC protection. // Wait for the PVs to be deleted. It also includes pod and PVC deletion because of PVC protection.
// We use PVs to make sure that the test does not leave orphan PVs when a CSI driver is destroyed // We use PVs to make sure that the test does not leave orphan PVs when a CSI driver is destroyed
// just after the test ends. // just after the test ends.
err := wait.Poll(5*time.Second, testSlowMultiplier*e2epv.PVDeletingTimeout, func() (bool, error) { err := wait.Poll(5*time.Second, timeout, func() (bool, error) {
existing := 0 existing := 0
for _, pvName := range pvNames.UnsortedList() { for _, pvName := range pvNames.UnsortedList() {
_, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{}) _, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
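
The volumelimits suite scales the per-driver timeouts by testSlowMultiplier and now passes the scaled PVDelete budget into cleanupTest. A sketch of that scaling, same package and imports as above; testSlowMultiplier and the unexported helpers are the ones defined in this file, and the empty pod names are a simplification for the sketch:

func waitScaled(f *framework.Framework, cs clientset.Interface, ns string, pvcs []*v1.PersistentVolumeClaim) {
	// Binding many PVCs at once gets a multiple of the normal PVBound timeout.
	pvNames, err := waitForAllPVCsBound(cs, testSlowMultiplier*f.Timeouts.PVBound, pvcs)
	framework.ExpectNoError(err)
	// Cleanup gets the same scaled budget for PV deletion.
	framework.ExpectNoError(cleanupTest(cs, ns, "", "", pvcs, pvNames, testSlowMultiplier*f.Timeouts.PVDelete))
}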


@ -102,7 +102,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
// registers its own BeforeEach which creates the namespace. Beware that it // registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using // also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback. // f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volumemode") f := framework.NewFrameworkWithCustomTimeouts("volumemode", getDriverTimeouts(driver))
init := func() { init := func() {
l = local{} l = local{}
@ -209,7 +209,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), l.Pvc, metav1.CreateOptions{}) l.Pvc, err = l.cs.CoreV1().PersistentVolumeClaims(l.ns.Name).Create(context.TODO(), l.Pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create pvc") framework.ExpectNoError(err, "Failed to create pvc")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, l.ns.Name, l.Pv, l.Pvc), "Failed to bind pv and pvc") framework.ExpectNoError(e2epv.WaitOnPVandPVC(l.cs, f.Timeouts, l.ns.Name, l.Pv, l.Pvc), "Failed to bind pv and pvc")
ginkgo.By("Creating pod") ginkgo.By("Creating pod")
podConfig := e2epod.Config{ podConfig := e2epod.Config{
@ -236,7 +236,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
}.AsSelector().String() }.AsSelector().String()
msg := "Unable to attach or mount volumes" msg := "Unable to attach or mount volumes"
err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.PodStartTimeout) err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.PodStart)
// Events are unreliable, don't depend on the event. It's used only to speed up the test. // Events are unreliable, don't depend on the event. It's used only to speed up the test.
if err != nil { if err != nil {
framework.Logf("Warning: did not get event about FailedMountVolume") framework.Logf("Warning: did not get event about FailedMountVolume")
@ -273,7 +273,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
}.AsSelector().String() }.AsSelector().String()
msg := "does not support block volume provisioning" msg := "does not support block volume provisioning"
err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.ClaimProvisionTimeout) err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.ClaimProvision)
// Events are unreliable, don't depend on the event. It's used only to speed up the test. // Events are unreliable, don't depend on the event. It's used only to speed up the test.
if err != nil { if err != nil {
framework.Logf("Warning: did not get event about provisioing failed") framework.Logf("Warning: did not get event about provisioing failed")
@ -332,7 +332,7 @@ func (t *volumeModeTestSuite) DefineTests(driver TestDriver, pattern testpattern
} else { } else {
msg = "has volumeMode Filesystem, but is specified in volumeDevices" msg = "has volumeMode Filesystem, but is specified in volumeDevices"
} }
err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, framework.PodStartTimeout) err = e2eevents.WaitTimeoutForEvent(l.cs, l.ns.Name, eventSelector, msg, f.Timeouts.PodStart)
// Events are unreliable, don't depend on them. They're used only to speed up the test. // Events are unreliable, don't depend on them. They're used only to speed up the test.
if err != nil { if err != nil {
framework.Logf("Warning: did not get event about mismatched volume use") framework.Logf("Warning: did not get event about mismatched volume use")


@ -120,7 +120,7 @@ func (t *volumesTestSuite) DefineTests(driver TestDriver, pattern testpatterns.T
// registers its own BeforeEach which creates the namespace. Beware that it // registers its own BeforeEach which creates the namespace. Beware that it
// also registers an AfterEach which renders f unusable. Any code using // also registers an AfterEach which renders f unusable. Any code using
// f must run inside an It or Context callback. // f must run inside an It or Context callback.
f := framework.NewDefaultFramework("volume") f := framework.NewFrameworkWithCustomTimeouts("volume", getDriverTimeouts(driver))
init := func() { init := func() {
l = local{} l = local{}


@ -106,7 +106,7 @@ func (h *hostExecutor) launchNodeExecPod(node string) *v1.Pod {
} }
pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), hostExecPod, metav1.CreateOptions{}) pod, err := cs.CoreV1().Pods(ns.Name).Create(context.TODO(), hostExecPod, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(cs, pod) err = e2epod.WaitTimeoutForPodRunningInNamespace(cs, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err) framework.ExpectNoError(err)
return pod return pod
} }


@ -325,7 +325,7 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
ginkgo.By("Starting the kubelet and waiting for pod to delete.") ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod) KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, framework.PodDeleteTimeout) err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
if err != nil { if err != nil {
framework.ExpectNoError(err, "Expected pod to be not found.") framework.ExpectNoError(err, "Expected pod to be not found.")
} }
@ -411,7 +411,7 @@ func TestVolumeUnmapsFromDeletedPodWithForceOption(c clientset.Interface, f *fra
ginkgo.By("Starting the kubelet and waiting for pod to delete.") ginkgo.By("Starting the kubelet and waiting for pod to delete.")
KubeletCommand(KStart, c, clientPod) KubeletCommand(KStart, c, clientPod)
err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, framework.PodDeleteTimeout) err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, clientPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
framework.ExpectNoError(err, "Expected pod to be not found.") framework.ExpectNoError(err, "Expected pod to be not found.")
if forceDelete { if forceDelete {


@ -63,6 +63,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Name: "default", Name: "default",
Timeouts: f.Timeouts,
ClaimSize: "2Gi", ClaimSize: "2Gi",
} }
@ -125,7 +126,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(c, pod) err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod %s", pod.Name) framework.ExpectNoError(err, "Error starting pod %s", pod.Name)
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
@ -185,7 +186,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name) framework.ExpectNoError(err, "failed to create Pod %s/%s", pod.Namespace, pod.Name)
err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, framework.PodStartShortTimeout) err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStartShort)
framework.ExpectError(err) framework.ExpectError(err)
framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name) framework.Logf("Deleting pod %q/%q", pod.Namespace, pod.Name)
@ -211,7 +212,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(c, pod) err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod ", pod.Name) framework.ExpectNoError(err, "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@ -268,7 +269,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(c, pod) err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod ", pod.Name) framework.ExpectNoError(err, "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@ -299,7 +300,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(c, pod) err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod ", pod.Name) framework.ExpectNoError(err, "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
@ -336,7 +337,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() {
// Create pod // Create pod
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2epod.WaitForPodRunningInNamespace(c, pod) err = e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "Error starting pod ", pod.Name) framework.ExpectNoError(err, "Error starting pod ", pod.Name)
pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{}) pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
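
For pods that are expected never to reach Running, the metrics test now waits only PodStartShort and asserts that the wait fails. A sketch, same imports as above:

func expectPodStaysPending(f *framework.Framework, c clientset.Interface, pod *v1.Pod) {
	// A short, driver-aware timeout: the pod should still be Pending when it expires.
	err := e2epod.WaitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, f.Timeouts.PodStartShort)
	framework.ExpectError(err, "pod %s unexpectedly reached Running", pod.Name)
}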

View File

@@ -140,11 +140,13 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// filled in BeforeEach // filled in BeforeEach
var c clientset.Interface var c clientset.Interface
var timeouts *framework.TimeoutContext
var ns string var ns string
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
c = f.ClientSet c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
timeouts = f.Timeouts
}) })
ginkgo.Describe("DynamicProvisioner [Slow]", func() { ginkgo.Describe("DynamicProvisioner [Slow]", func() {
@@ -157,6 +159,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{ {
Name: "SSD PD on GCE/GKE", Name: "SSD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"}, CloudProviders: []string{"gce", "gke"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/gce-pd", Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{ Parameters: map[string]string{
"type": "pd-ssd", "type": "pd-ssd",
@@ -175,6 +178,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{ {
Name: "HDD PD on GCE/GKE", Name: "HDD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"}, CloudProviders: []string{"gce", "gke"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/gce-pd", Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{ Parameters: map[string]string{
"type": "pd-standard", "type": "pd-standard",
@@ -193,6 +197,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{ {
Name: "gp2 EBS on AWS", Name: "gp2 EBS on AWS",
CloudProviders: []string{"aws"}, CloudProviders: []string{"aws"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/aws-ebs", Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{ Parameters: map[string]string{
"type": "gp2", "type": "gp2",
@@ -211,6 +216,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{ {
Name: "io1 EBS on AWS", Name: "io1 EBS on AWS",
CloudProviders: []string{"aws"}, CloudProviders: []string{"aws"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/aws-ebs", Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{ Parameters: map[string]string{
"type": "io1", "type": "io1",
@@ -229,6 +235,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{ {
Name: "sc1 EBS on AWS", Name: "sc1 EBS on AWS",
CloudProviders: []string{"aws"}, CloudProviders: []string{"aws"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/aws-ebs", Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{ Parameters: map[string]string{
"type": "sc1", "type": "sc1",
@@ -246,6 +253,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{ {
Name: "st1 EBS on AWS", Name: "st1 EBS on AWS",
CloudProviders: []string{"aws"}, CloudProviders: []string{"aws"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/aws-ebs", Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{ Parameters: map[string]string{
"type": "st1", "type": "st1",
@@ -263,6 +271,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{ {
Name: "encrypted EBS on AWS", Name: "encrypted EBS on AWS",
CloudProviders: []string{"aws"}, CloudProviders: []string{"aws"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/aws-ebs", Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{ Parameters: map[string]string{
"encrypted": "true", "encrypted": "true",
@@ -281,6 +290,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{ {
Name: "generic Cinder volume on OpenStack", Name: "generic Cinder volume on OpenStack",
CloudProviders: []string{"openstack"}, CloudProviders: []string{"openstack"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/cinder", Provisioner: "kubernetes.io/cinder",
Parameters: map[string]string{}, Parameters: map[string]string{},
ClaimSize: "1.5Gi", ClaimSize: "1.5Gi",
@@ -292,6 +302,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{ {
Name: "Cinder volume with empty volume type and zone on OpenStack", Name: "Cinder volume with empty volume type and zone on OpenStack",
CloudProviders: []string{"openstack"}, CloudProviders: []string{"openstack"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/cinder", Provisioner: "kubernetes.io/cinder",
Parameters: map[string]string{ Parameters: map[string]string{
"type": "", "type": "",
@@ -307,6 +318,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{ {
Name: "generic vSphere volume", Name: "generic vSphere volume",
CloudProviders: []string{"vsphere"}, CloudProviders: []string{"vsphere"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/vsphere-volume", Provisioner: "kubernetes.io/vsphere-volume",
Parameters: map[string]string{}, Parameters: map[string]string{},
ClaimSize: "1.5Gi", ClaimSize: "1.5Gi",
@@ -319,6 +331,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
{ {
Name: "Azure disk volume with empty sku and location", Name: "Azure disk volume with empty sku and location",
CloudProviders: []string{"azure"}, CloudProviders: []string{"azure"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/azure-disk", Provisioner: "kubernetes.io/azure-disk",
Parameters: map[string]string{}, Parameters: map[string]string{},
ClaimSize: "1Gi", ClaimSize: "1Gi",
@@ -384,6 +397,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Name: "HDD PD on GCE/GKE", Name: "HDD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"}, CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd", Provisioner: "kubernetes.io/gce-pd",
Timeouts: f.Timeouts,
Parameters: map[string]string{ Parameters: map[string]string{
"type": "pd-standard", "type": "pd-standard",
}, },
@@ -452,6 +466,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Name: "unmanaged_zone", Name: "unmanaged_zone",
Provisioner: "kubernetes.io/gce-pd", Provisioner: "kubernetes.io/gce-pd",
Timeouts: f.Timeouts,
Parameters: map[string]string{"zone": unmanagedZone}, Parameters: map[string]string{"zone": unmanagedZone},
ClaimSize: "1Gi", ClaimSize: "1Gi",
} }
@@ -473,7 +488,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}() }()
// The claim should timeout phase:Pending // The claim should timeout phase:Pending
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout) err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, timeouts.ClaimProvisionShort)
framework.ExpectError(err) framework.ExpectError(err)
framework.Logf(err.Error()) framework.Logf(err.Error())
}) })
@@ -492,6 +507,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Name: "deletion race", Name: "deletion race",
Provisioner: "", // Use a native one based on current cloud provider Provisioner: "", // Use a native one based on current cloud provider
Timeouts: f.Timeouts,
ClaimSize: "1Gi", ClaimSize: "1Gi",
} }
@@ -572,7 +588,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("waiting for the PV to get Released") ginkgo.By("waiting for the PV to get Released")
err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, e2epv.PVReclaimingTimeout) err = e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, timeouts.PVReclaim)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("deleting the PD") ginkgo.By("deleting the PD")
@@ -587,7 +603,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("waiting for the PV to get deleted") ginkgo.By("waiting for the PV to get deleted")
err = e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, e2epv.PVDeletingTimeout) err = e2epv.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, timeouts.PVDelete)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
}) })
@@ -636,6 +652,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Client: c, Client: c,
Name: "external provisioner test", Name: "external provisioner test",
Provisioner: externalPluginName, Provisioner: externalPluginName,
Timeouts: f.Timeouts,
ClaimSize: "1500Mi", ClaimSize: "1500Mi",
ExpectedSize: "1500Mi", ExpectedSize: "1500Mi",
} }
@@ -659,6 +676,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Client: c, Client: c,
Name: "default", Name: "default",
Timeouts: f.Timeouts,
ClaimSize: "2Gi", ClaimSize: "2Gi",
ExpectedSize: "2Gi", ExpectedSize: "2Gi",
} }
@@ -679,6 +697,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Name: "default", Name: "default",
Timeouts: f.Timeouts,
ClaimSize: "2Gi", ClaimSize: "2Gi",
} }
@@ -716,6 +735,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Name: "default", Name: "default",
Timeouts: f.Timeouts,
ClaimSize: "2Gi", ClaimSize: "2Gi",
} }
@@ -756,6 +776,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Client: c, Client: c,
Name: "Gluster Dynamic provisioner test", Name: "Gluster Dynamic provisioner test",
Provisioner: "kubernetes.io/glusterfs", Provisioner: "kubernetes.io/glusterfs",
Timeouts: f.Timeouts,
ClaimSize: "2Gi", ClaimSize: "2Gi",
ExpectedSize: "2Gi", ExpectedSize: "2Gi",
Parameters: map[string]string{"resturl": serverURL}, Parameters: map[string]string{"resturl": serverURL},
@@ -780,6 +801,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Name: "AWS EBS with invalid KMS key", Name: "AWS EBS with invalid KMS key",
Provisioner: "kubernetes.io/aws-ebs", Provisioner: "kubernetes.io/aws-ebs",
Timeouts: f.Timeouts,
ClaimSize: "2Gi", ClaimSize: "2Gi",
Parameters: map[string]string{"kmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/55555555-5555-5555-5555-555555555555"}, Parameters: map[string]string{"kmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/55555555-5555-5555-5555-555555555555"},
} }
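Across the Dynamic Provisioning cases above, the only structural change is the new Timeouts field on testsuites.StorageClassTest. A condensed, illustrative literal showing where the field sits (provisioner, parameters, and sizes here are placeholders, not a specific case from this file):

test := testsuites.StorageClassTest{
	Client:       c,
	Name:         "example dynamic provisioning test", // placeholder name
	Provisioner:  "kubernetes.io/gce-pd",
	Timeouts:     f.Timeouts, // new: per-suite timeouts threaded from the framework
	Parameters:   map[string]string{"type": "pd-standard"},
	ClaimSize:    "1Gi",
	ExpectedSize: "1Gi",
}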

View File

@@ -96,7 +96,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func()
ginkgo.By("Creating the PV and PVC") ginkgo.By("Creating the PV and PVC")
pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false) pv, pvc, err = e2epv.CreatePVPVC(c, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc)) framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))
ginkgo.By("Creating the Client Pod") ginkgo.By("Creating the Client Pod")
clientPod, err = e2epod.CreateClientPod(c, ns, pvc) clientPod, err = e2epod.CreateClientPod(c, ns, pvc)

View File

@@ -110,7 +110,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete) volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Wait for PV and PVC to Bind // Wait for PV and PVC to Bind
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc)) framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))
ginkgo.By("Creating the Pod") ginkgo.By("Creating the Pod")
pod, err := e2epod.CreateClientPod(c, ns, pvc) pod, err := e2epod.CreateClientPod(c, ns, pvc)
@@ -196,7 +196,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("wait for the pv and pvc to bind") ginkgo.By("wait for the pv and pvc to bind")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc)) framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))
verifyContentOfVSpherePV(c, pvc, volumeFileContent) verifyContentOfVSpherePV(c, pvc, volumeFileContent)
}) })
@@ -243,7 +243,7 @@ func deletePVCAfterBind(c clientset.Interface, ns string, pvc *v1.PersistentVolu
var err error var err error
ginkgo.By("wait for the pv and pvc to bind") ginkgo.By("wait for the pv and pvc to bind")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv, pvc)) framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))
ginkgo.By("delete pvc") ginkgo.By("delete pvc")
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)

View File

@@ -87,7 +87,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("wait for the pvcSsd to bind with pvSsd") ginkgo.By("wait for the pvcSsd to bind with pvSsd")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pvSsd, pvcSsd)) framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pvSsd, pvcSsd))
ginkgo.By("Verify status of pvcVvol is pending") ginkgo.By("Verify status of pvcVvol is pending")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvcVvol.Name, 3*time.Second, 300*time.Second) err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvcVvol.Name, 3*time.Second, 300*time.Second)

View File

@@ -149,7 +149,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
volumeCountPerInstance = volumeCount volumeCountPerInstance = volumeCount
} }
volumeCount = volumeCount - volumeCountPerInstance volumeCount = volumeCount - volumeCountPerInstance
go VolumeCreateAndAttach(client, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan) go VolumeCreateAndAttach(client, f.Timeouts, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
} }
// Get the list of all volumes attached to each node from the go routines by reading the data from the channel // Get the list of all volumes attached to each node from the go routines by reading the data from the channel
@@ -189,7 +189,7 @@ func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
} }
// VolumeCreateAndAttach performs create and attach operations of vSphere persistent volumes at scale // VolumeCreateAndAttach performs create and attach operations of vSphere persistent volumes at scale
func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storagev1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) { func VolumeCreateAndAttach(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, sc []*storagev1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
nodeVolumeMap := make(map[string][]string) nodeVolumeMap := make(map[string][]string)
nodeSelectorIndex := 0 nodeSelectorIndex := 0
@@ -206,7 +206,7 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s
} }
ginkgo.By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Creating pod to attach PV to the node") ginkgo.By("Creating pod to attach PV to the node")

View File

@@ -23,7 +23,7 @@ import (
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"github.com/onsi/gomega" "github.com/onsi/gomega"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1" storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
@@ -140,7 +140,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
ginkgo.By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name)) ginkgo.By(fmt.Sprintf("%v Waiting for claim: %v to be in bound phase", logPrefix, pvclaim.Name))
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name)) ginkgo.By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name))
@@ -149,7 +149,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name)) ginkgo.By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))
err = e2epod.WaitForPodRunningInNamespaceSlow(f.ClientSet, pod.Name, f.Namespace.Name) err = e2epod.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name, f.Timeouts.PodStartSlow)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Get the copy of the Pod to know the assigned node name. // Get the copy of the Pod to know the assigned node name.

View File

@@ -125,7 +125,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
}() }()
ginkgo.By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, framework.ClaimProvisionTimeout) err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Get new copy of the claim // Get new copy of the claim

View File

@@ -103,7 +103,7 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam
// Create Persistent Volume // Create Persistent Volume
ginkgo.By("Creating Storage Class With Fstype") ginkgo.By("Creating Storage Class With Fstype")
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters) pvclaim, persistentvolumes := createVolume(client, f.Timeouts, namespace, scParameters)
// Create Pod and verify the persistent volume is accessible // Create Pod and verify the persistent volume is accessible
pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes) pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes)
@@ -122,7 +122,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
// Create Persistent Volume // Create Persistent Volume
ginkgo.By("Creating Storage Class With Invalid Fstype") ginkgo.By("Creating Storage Class With Invalid Fstype")
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters) pvclaim, persistentvolumes := createVolume(client, f.Timeouts, namespace, scParameters)
ginkgo.By("Creating pod to attach PV to the node") ginkgo.By("Creating pod to attach PV to the node")
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
@@ -150,7 +150,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
framework.ExpectEqual(isFound, true, "Unable to verify MountVolume.MountDevice failure") framework.ExpectEqual(isFound, true, "Unable to verify MountVolume.MountDevice failure")
} }
func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) { func createVolume(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("fstype", scParameters, nil, ""), metav1.CreateOptions{}) storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("fstype", scParameters, nil, ""), metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
@@ -162,7 +162,7 @@ func createVolume(client clientset.Interface, namespace string, scParameters map
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
framework.ExpectNoError(err) framework.ExpectNoError(err)
return pvclaim, persistentvolumes return pvclaim, persistentvolumes
} }

View File

@@ -92,7 +92,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
ginkgo.By("Waiting for PVC to be in bound phase") ginkgo.By("Waiting for PVC to be in bound phase")
pvclaims := []*v1.PersistentVolumeClaim{pvclaim} pvclaims := []*v1.PersistentVolumeClaim{pvclaim}
pvs, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) pvs, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err, fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to wait until PVC phase set to bound: %v", err))
volumePath := pvs[0].Spec.VsphereVolume.VolumePath volumePath := pvs[0].Spec.VsphereVolume.VolumePath

View File

@@ -24,7 +24,7 @@ import (
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
"github.com/onsi/gomega" "github.com/onsi/gomega"
"k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1" storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
@@ -104,7 +104,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
} }
ginkgo.By("Waiting for all claims to be in bound phase") ginkgo.By("Waiting for all claims to be in bound phase")
persistentvolumes, err = e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err = e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Creating pod to attach PVs to the node") ginkgo.By("Creating pod to attach PVs to the node")

View File

@@ -184,7 +184,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
totalpvclaims = append(totalpvclaims, pvclaims) totalpvclaims = append(totalpvclaims, pvclaims)
} }
for _, pvclaims := range totalpvclaims { for _, pvclaims := range totalpvclaims {
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err) framework.ExpectNoError(err)
totalpvs = append(totalpvs, persistentvolumes) totalpvs = append(totalpvs, persistentvolumes)
} }

View File

@@ -269,7 +269,7 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Creating pod to attach PV to the node") ginkgo.By("Creating pod to attach PV to the node")

View File

@@ -127,14 +127,14 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
ginkgo.It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func() { ginkgo.It("Verify dynamically created pv with allowed zones specified in storage class, shows the right zone information on its labels", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s", zoneA)) ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s", zoneA))
zones = append(zones, zoneA) zones = append(zones, zoneA)
verifyPVZoneLabels(client, namespace, nil, zones) verifyPVZoneLabels(client, f.Timeouts, namespace, nil, zones)
}) })
ginkgo.It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func() { ginkgo.It("Verify dynamically created pv with multiple zones specified in the storage class, shows both the zones on its labels", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s, %s", zoneA, zoneB)) ginkgo.By(fmt.Sprintf("Creating storage class with the following zones : %s, %s", zoneA, zoneB))
zones = append(zones, zoneA) zones = append(zones, zoneA)
zones = append(zones, zoneB) zones = append(zones, zoneB)
verifyPVZoneLabels(client, namespace, nil, zones) verifyPVZoneLabels(client, f.Timeouts, namespace, nil, zones)
}) })
ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func() { ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func() {
@@ -151,21 +151,21 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s", zoneA)) ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s", zoneA))
zones = append(zones, zoneA) zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones, "") verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, nil, zones, "")
}) })
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB)) ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB))
zones = append(zones, zoneA) zones = append(zones, zoneA)
zones = append(zones, zoneB) zones = append(zones, zoneB)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones, "") verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, nil, zones, "")
}) })
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, vsanDatastore1)) ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, vsanDatastore1))
scParameters[Datastore] = vsanDatastore1 scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA) zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "") verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
}) })
ginkgo.It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func() { ginkgo.It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func() {
@@ -183,14 +183,14 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy)) ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy scParameters[SpbmStoragePolicy] = compatPolicy
zones = append(zones, zoneA) zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "") verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
}) })
ginkgo.It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() { ginkgo.It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy)) ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy scParameters[SpbmStoragePolicy] = compatPolicy
zones = append(zones, zoneB) zones = append(zones, zoneB)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "") verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
}) })
ginkgo.It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func() { ginkgo.It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func() {
@@ -209,7 +209,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
scParameters[SpbmStoragePolicy] = compatPolicy scParameters[SpbmStoragePolicy] = compatPolicy
scParameters[Datastore] = vsanDatastore1 scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA) zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "") verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
}) })
ginkgo.It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func() { ginkgo.It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func() {
@@ -314,40 +314,40 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal
scParameters[Datastore] = vsanDatastore1 scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA) zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "") verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
}) })
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones specified in storage class when the datastore under the zone is present in another datacenter", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones specified in storage class when the datastore under the zone is present in another datacenter", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneD)) ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneD))
zones = append(zones, zoneD) zones = append(zones, zoneD)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "") verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
}) })
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class when there are multiple datastores with the same name under different zones across datacenters", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class when there are multiple datastores with the same name under different zones across datacenters", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore name :%s", zoneD, localDatastore)) ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore name :%s", zoneD, localDatastore))
scParameters[Datastore] = localDatastore scParameters[Datastore] = localDatastore
zones = append(zones, zoneD) zones = append(zones, zoneD)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "") verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, "")
}) })
ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and storage policy :%s", compatPolicy)) ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and storage policy :%s", compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy scParameters[SpbmStoragePolicy] = compatPolicy
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, nil, storagev1.VolumeBindingWaitForFirstConsumer) verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, nil, storagev1.VolumeBindingWaitForFirstConsumer)
}) })
ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with allowedTopologies", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with allowedTopologies", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode, storage policy :%s and zone :%s", compatPolicy, zoneA)) ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode, storage policy :%s and zone :%s", compatPolicy, zoneA))
scParameters[SpbmStoragePolicy] = compatPolicy scParameters[SpbmStoragePolicy] = compatPolicy
zones = append(zones, zoneA) zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer) verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, scParameters, zones, storagev1.VolumeBindingWaitForFirstConsumer)
}) })
ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with multiple allowedTopologies", func() { ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with multiple allowedTopologies", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and zones : %s, %s", zoneA, zoneB)) ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and zones : %s, %s", zoneA, zoneB))
zones = append(zones, zoneA) zones = append(zones, zoneA)
zones = append(zones, zoneB) zones = append(zones, zoneB)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones, storagev1.VolumeBindingWaitForFirstConsumer) verifyPVCAndPodCreationSucceeds(client, f.Timeouts, namespace, nil, zones, storagev1.VolumeBindingWaitForFirstConsumer)
}) })
ginkgo.It("Verify a PVC creation fails when multiple zones are specified in the storage class without shared datastores among the zones in waitForFirstConsumer binding mode", func() { ginkgo.It("Verify a PVC creation fails when multiple zones are specified in the storage class without shared datastores among the zones in waitForFirstConsumer binding mode", func() {
@@ -375,7 +375,7 @@ var _ = utils.SIGDescribe("Zone Support [Feature:vsphere]", func() {
}) })
}) })
func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) { func verifyPVCAndPodCreationSucceeds(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{}) storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
@@ -391,7 +391,7 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin
var persistentvolumes []*v1.PersistentVolume var persistentvolumes []*v1.PersistentVolume
// If WaitForFirstConsumer mode, verify pvc binding status after pod creation. For immediate mode, do now. // If WaitForFirstConsumer mode, verify pvc binding status after pod creation. For immediate mode, do now.
if volumeBindingMode != storagev1.VolumeBindingWaitForFirstConsumer { if volumeBindingMode != storagev1.VolumeBindingWaitForFirstConsumer {
persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
} }
ginkgo.By("Creating pod to attach PV to the node") ginkgo.By("Creating pod to attach PV to the node")
@@ -399,7 +399,7 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin
framework.ExpectNoError(err) framework.ExpectNoError(err)
if volumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer { if volumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer {
persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
} }
if zones != nil { if zones != nil {
@@ -499,7 +499,7 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message) return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
} }
func verifyPVZoneLabels(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) { func verifyPVZoneLabels(client clientset.Interface, timeouts *framework.TimeoutContext, namespace string, scParameters map[string]string, zones []string) {
storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", nil, zones, ""), metav1.CreateOptions{}) storageclass, err := client.StorageV1().StorageClasses().Create(context.TODO(), getVSphereStorageClassSpec("zone-sc", nil, zones, ""), metav1.CreateOptions{})
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err)) framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{}) defer client.StorageV1().StorageClasses().Delete(context.TODO(), storageclass.Name, metav1.DeleteOptions{})
@@ -512,7 +512,7 @@ func verifyPVZoneLabels(client clientset.Interface, namespace string, scParamete
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for claim to be in bound phase") ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout) persistentvolumes, err := e2epv.WaitForPVClaimBoundPhase(client, pvclaims, timeouts.ClaimProvision)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Verify zone information is present in the volume labels") ginkgo.By("Verify zone information is present in the volume labels")