e2e: promote use of functions that support custom timeouts in storage tests.

Calls to WaitForPodSuccessInNamespace and WaitForPodSuccessInNamespaceSlow are replaced by WaitForPodSuccessInNamespaceTimeout(),
so that custom timeouts (taken from the framework's TimeoutContext) are used instead of the hardcoded ones.
Fabio Bertinatto 2020-12-01 15:44:08 -03:00
parent c28dba5494
commit ee082985c2
12 changed files with 51 additions and 51 deletions
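For reference, the call-site migration this commit performs looks like the following sketch. It assumes the usual k8s e2e test layout (framework.NewDefaultFramework, the e2epod alias for the framework's pod package, Ginkgo); the "custom-timeouts" namespace and the pvc-volume-tester pod name are illustrative only, not taken from the diff.

package storage

import (
	"github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

var _ = ginkgo.Describe("custom pod-start timeouts", func() {
	f := framework.NewDefaultFramework("custom-timeouts")

	ginkgo.It("waits for pod success using the suite's TimeoutContext", func() {
		podName := "pvc-volume-tester" // pod assumed to have been created earlier in the test

		// Before: the timeout was hardcoded inside the helper.
		//   err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podName, f.Namespace.Name)
		//   err := e2epod.WaitForPodSuccessInNamespaceSlow(f.ClientSet, podName, f.Namespace.Name)

		// After: the caller picks the timeout, here the framework's PodStart value.
		err := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, podName, f.Namespace.Name, f.Timeouts.PodStart)
		framework.ExpectNoError(err, "while waiting for pod to succeed")
	})
})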

View File

@@ -245,8 +245,8 @@ func WaitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, nam
})
}
-// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
-func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
+// WaitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
+func WaitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return WaitForPodCondition(c, namespace, podName, fmt.Sprintf("%s or %s", v1.PodSucceeded, v1.PodFailed), timeout, func(pod *v1.Pod) (bool, error) {
if pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
@@ -365,12 +365,12 @@ func WaitForPodNotPending(c clientset.Interface, ns, podName string) error {
// WaitForPodSuccessInNamespace returns nil if the pod reached state success, or an error if it reached failure or until podStartupTimeout.
func WaitForPodSuccessInNamespace(c clientset.Interface, podName string, namespace string) error {
-return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, podStartTimeout)
+return WaitForPodSuccessInNamespaceTimeout(c, podName, namespace, podStartTimeout)
}
// WaitForPodSuccessInNamespaceSlow returns nil if the pod reached state success, or an error if it reached failure or until slowPodStartupTimeout.
func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, namespace string) error {
-return waitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
+return WaitForPodSuccessInNamespaceTimeout(c, podName, namespace, slowPodStartTimeout)
}
// WaitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate.

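Because the exported helper takes an explicit time.Duration, callers are not limited to the framework defaults. A minimal sketch, assuming the standard clientset and e2epod import aliases; the waitForSlowDriverPod helper and the five-minute value are hypothetical:

package storage

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForSlowDriverPod illustrates passing a caller-chosen timeout for a driver
// that is known to need longer than the default pod-start timeout.
func waitForSlowDriverPod(c clientset.Interface, podName, ns string) error {
	return e2epod.WaitForPodSuccessInNamespaceTimeout(c, podName, ns, 5*time.Minute)
}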
View File

@@ -822,7 +822,7 @@ func (f *Framework) MatchContainerOutput(
}()
// Wait for client pod to complete.
-podErr := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, createdPod.Name, ns)
+podErr := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, createdPod.Name, ns, f.Timeouts.PodStart)
// Grab its logs. Get host first.
podStatus, err := podClient.Get(context.TODO(), createdPod.Name, metav1.GetOptions{})

View File

@@ -953,7 +953,7 @@ func (h *hostPathSymlinkDriver) CreateVolume(config *testsuites.PerTestConfig, v
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), prepPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating hostPath init pod")
-err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
+err = e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for hostPath init pod to succeed")
err = e2epod.DeletePodWithWait(f.ClientSet, pod)
@@ -975,7 +975,7 @@ func (v *hostPathSymlinkVolume) DeleteVolume() {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), v.prepPod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating hostPath teardown pod")
-err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
+err = e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for hostPath teardown pod to succeed")
err = e2epod.DeletePodWithWait(f.ClientSet, pod)

View File

@@ -49,7 +49,7 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
// 2. create the nfs writer pod, test if the write was successful,
// then delete the pod and verify that it was deleted
ginkgo.By("Checking pod has write access to PersistentVolume")
-framework.ExpectNoError(createWaitAndDeletePod(c, ns, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"))
+framework.ExpectNoError(createWaitAndDeletePod(c, f.Timeouts, ns, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"))
// 3. delete the PVC, wait for PV to become "Released"
ginkgo.By("Deleting the PVC to invoke the reclaim policy.")
@@ -80,7 +80,7 @@ func completeMultiTest(f *framework.Framework, c clientset.Interface, ns string,
return fmt.Errorf("internal: pvols map is missing volume %q", pvc.Spec.VolumeName)
}
// TODO: currently a serialized test of each PV
-if err = createWaitAndDeletePod(c, pvcKey.Namespace, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"); err != nil {
+if err = createWaitAndDeletePod(c, f.Timeouts, pvcKey.Namespace, pvc, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')"); err != nil {
return err
}
}
@@ -285,7 +285,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
-framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns))
+framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, ns, f.Timeouts.PodStart))
ginkgo.By("Deleting the claim")
framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
@@ -303,7 +303,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() {
pod = e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
pod, err = c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err)
-framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns))
+framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, ns, f.Timeouts.PodStart))
framework.ExpectNoError(e2epod.DeletePodWithWait(c, pod))
framework.Logf("Pod exited without failure; the volume has been recycled.")
@@ -437,7 +437,7 @@ func makeStatefulSetWithPVCs(ns, cmd string, mounts []v1.VolumeMount, claims []v
// createWaitAndDeletePod creates the test pod, wait for (hopefully) success, and then delete the pod.
// Note: need named return value so that the err assignment in the defer sets the returned error.
// Has been shown to be necessary using Go 1.7.
-func createWaitAndDeletePod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) {
+func createWaitAndDeletePod(c clientset.Interface, t *framework.TimeoutContext, ns string, pvc *v1.PersistentVolumeClaim, command string) (err error) {
framework.Logf("Creating nfs test pod")
pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command)
runPod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
@@ -451,7 +451,7 @@ func createWaitAndDeletePod(c clientset.Interface, ns string, pvc *v1.Persistent
}
}()
-err = testPodSuccessOrFail(c, ns, runPod)
+err = testPodSuccessOrFail(c, t, ns, runPod)
if err != nil {
return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err)
}
@@ -459,9 +459,9 @@ func createWaitAndDeletePod(c clientset.Interface, ns string, pvc *v1.Persistent
}
// testPodSuccessOrFail tests whether the pod's exit code is zero.
-func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) error {
+func testPodSuccessOrFail(c clientset.Interface, t *framework.TimeoutContext, ns string, pod *v1.Pod) error {
framework.Logf("Pod should terminate with exitcode 0 (success)")
-if err := e2epod.WaitForPodSuccessInNamespace(c, pod.Name, ns); err != nil {
+if err := e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, ns, t.PodStart); err != nil {
return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
}
framework.Logf("Pod %v succeeded ", pod.Name)

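The pattern above repeats throughout the commit: helpers gain a *framework.TimeoutContext parameter and the Ginkgo test body threads f.Timeouts through, so the timeout is decided once per suite. A condensed sketch of that plumbing; only the framework and e2epod calls are taken from the diff, the helper name is invented for illustration:

package storage

import (
	"fmt"

	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForTestPod mirrors testPodSuccessOrFail above: the caller supplies its
// TimeoutContext instead of the helper reaching for a package-level constant.
func waitForTestPod(c clientset.Interface, t *framework.TimeoutContext, ns, podName string) error {
	if err := e2epod.WaitForPodSuccessInNamespaceTimeout(c, podName, ns, t.PodStart); err != nil {
		return fmt.Errorf("pod %q failed to reach Success: %v", podName, err)
	}
	return nil
}

// In the test body the framework's TimeoutContext is passed straight through:
//   framework.ExpectNoError(waitForTestPod(c, f.Timeouts, ns, pod.Name))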
View File

@@ -75,7 +75,7 @@ var _ = utils.SIGDescribe("Regional PD", func() {
ginkgo.Describe("RegionalPD", func() {
ginkgo.It("should provision storage [Slow]", func() {
-testVolumeProvisioning(c, ns)
+testVolumeProvisioning(c, f.Timeouts, ns)
})
ginkgo.It("should provision storage with delayed binding [Slow]", func() {
@@ -98,7 +98,7 @@ var _ = utils.SIGDescribe("Regional PD", func() {
})
})
-func testVolumeProvisioning(c clientset.Interface, ns string) {
+func testVolumeProvisioning(c clientset.Interface, t *framework.TimeoutContext, ns string) {
cloudZones := getTwoRandomZones(c)
// This test checks that dynamic provisioning can provision a volume
@@ -117,7 +117,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
ClaimSize: repdMinSize,
ExpectedSize: repdMinSize,
PvCheck: func(claim *v1.PersistentVolumeClaim) {
-volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
+volume := testsuites.PVWriteReadSingleNodeCheck(c, t, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil())
err := checkGCEPD(volume, "pd-standard")
@@ -139,7 +139,7 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
ClaimSize: repdMinSize,
ExpectedSize: repdMinSize,
PvCheck: func(claim *v1.PersistentVolumeClaim) {
-volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
+volume := testsuites.PVWriteReadSingleNodeCheck(c, t, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil())
err := checkGCEPD(volume, "pd-standard")

View File

@@ -192,7 +192,7 @@ func (p *provisioningTestSuite) DefineTests(driver TestDriver, pattern testpatte
l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
-PVWriteReadSingleNodeCheck(l.cs, claim, l.config.ClientNodeSelection)
+PVWriteReadSingleNodeCheck(l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
}
l.testCase.TestDynamicProvisioning()
})
@@ -469,7 +469,7 @@ func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v
// persistent across pods.
//
// This is a common test that can be called from a StorageClassTest.PvCheck.
-func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) *v1.PersistentVolume {
+func PVWriteReadSingleNodeCheck(client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) *v1.PersistentVolume {
ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data"
pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
@@ -477,7 +477,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
// pod might be nil now.
StopPod(client, pod)
}()
-framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
+framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName
@@ -502,7 +502,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
if framework.NodeOSDistroIs("windows") {
command = "select-string 'hello world' /mnt/test/data"
}
-RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName})
+RunInPodWithVolume(client, timeouts, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName})
return e2evolume
}
@@ -521,7 +521,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
// persistent across pods and across nodes.
//
// This is a common test that can be called from a StorageClassTest.PvCheck.
-func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
+func PVMultiNodeCheck(client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
framework.ExpectEqual(node.Name, "", "this test only works when not locked onto a single node")
var pod *v1.Pod
@@ -533,7 +533,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data"
pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
-framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
+framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName
@@ -549,7 +549,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
command = "select-string 'hello world' /mnt/test/data"
}
pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode)
-framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
+framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod")
framework.ExpectNotEqual(runningPod.Spec.NodeName, actualNodeName, "second pod should have run on a different node")
@@ -644,10 +644,10 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
// It starts, checks, collects output and stops it.
-func RunInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node e2epod.NodeSelection) {
+func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns, claimName, podName, command string, node e2epod.NodeSelection) {
pod := StartInPodWithVolume(c, ns, claimName, podName, command, node)
defer StopPod(c, pod)
-framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
+framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow))
}
// StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory

View File

@@ -156,7 +156,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
originalMntTestData = fmt.Sprintf("hello from %s namespace", pvc.GetNamespace())
command := fmt.Sprintf("echo '%s' > %s", originalMntTestData, datapath)
-RunInPodWithVolume(cs, pvc.Namespace, pvc.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
+RunInPodWithVolume(cs, f.Timeouts, pvc.Namespace, pvc.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, f.Timeouts.ClaimProvision)
framework.ExpectNoError(err)
@@ -242,7 +242,7 @@ func (s *snapshottableTestSuite) DefineTests(driver TestDriver, pattern testpatt
ginkgo.By("modifying the data in the source PVC")
command := fmt.Sprintf("echo '%s' > %s", modifiedMntTestData, datapath)
-RunInPodWithVolume(cs, pvc.Namespace, pvc.Name, "pvc-snapshottable-data-tester", command, config.ClientNodeSelection)
+RunInPodWithVolume(cs, f.Timeouts, pvc.Namespace, pvc.Name, "pvc-snapshottable-data-tester", command, config.ClientNodeSelection)
ginkgo.By("creating a pvc from the snapshot")
restoredPVC = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{

View File

@@ -1009,7 +1009,7 @@ func formatVolume(f *framework.Framework, pod *v1.Pod) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "while creating volume init pod")
-err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
+err = e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStart)
framework.ExpectNoError(err, "while waiting for volume init pod to succeed")
err = e2epod.DeletePodWithWait(f.ClientSet, pod)

View File

@@ -446,7 +446,7 @@ func TestVolumeUnmapsFromForceDeletedPod(c clientset.Interface, f *framework.Fra
}
// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
-func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
+func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns, claimName, command string) {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
@@ -489,7 +489,7 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
defer func() {
e2epod.DeletePodOrFail(c, ns, pod.Name)
}()
-framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
+framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow))
}
// StartExternalProvisioner create external provisioner pod

View File

@@ -168,7 +168,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
-volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
+volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-ssd")
@@ -186,7 +186,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
-volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
+volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-standard")
@@ -206,7 +206,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
-volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
+volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "gp2", false)
@@ -225,7 +225,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "3.5Gi",
ExpectedSize: "4Gi", // 4 GiB is minimum for io1
PvCheck: func(claim *v1.PersistentVolumeClaim) {
-volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
+volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "io1", false)
@@ -243,7 +243,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "500Gi", // minimum for sc1
ExpectedSize: "500Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
-volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
+volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "sc1", false)
@@ -261,7 +261,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "500Gi", // minimum for st1
ExpectedSize: "500Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
-volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
+volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "st1", false)
@@ -279,7 +279,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
-volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
+volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "gp2", true)
@@ -296,7 +296,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
-testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
+testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
},
},
{
@@ -311,7 +311,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
-testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
+testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
},
},
// vSphere generic test
@@ -324,7 +324,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi",
ExpectedSize: "1.5Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
-testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
+testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
},
},
// Azure
@@ -337,7 +337,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
-testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
+testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
},
},
}
@@ -404,7 +404,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
-volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, e2epod.NodeSelection{})
+volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-standard")

View File

@@ -173,7 +173,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimRetain)
framework.ExpectNoError(err)
-writeContentToVSpherePV(c, pvc, volumeFileContent)
+writeContentToVSpherePV(c, f.Timeouts, pvc, volumeFileContent)
ginkgo.By("Delete PVC")
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
@@ -197,7 +197,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
ginkgo.By("wait for the pv and pvc to bind")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))
-verifyContentOfVSpherePV(c, pvc, volumeFileContent)
+verifyContentOfVSpherePV(c, f.Timeouts, pvc, volumeFileContent)
})
})

View File

@@ -196,14 +196,14 @@ func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]str
}
// function to write content to the volume backed by given PVC
-func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
-utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
+func writeContentToVSpherePV(client clientset.Interface, timeouts *framework.TimeoutContext, pvc *v1.PersistentVolumeClaim, expectedContent string) {
+utils.RunInPodWithVolume(client, timeouts, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
framework.Logf("Done with writing content to volume")
}
// function to verify content is matching on the volume backed for given PVC
-func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
-utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data")
+func verifyContentOfVSpherePV(client clientset.Interface, timeouts *framework.TimeoutContext, pvc *v1.PersistentVolumeClaim, expectedContent string) {
+utils.RunInPodWithVolume(client, timeouts, pvc.Namespace, pvc.Name, "grep '"+expectedContent+"' /mnt/test/data")
framework.Logf("Successfully verified content of the volume")
}