From 79ce0632fdeef97d88aff137c80dd20e3d753390 Mon Sep 17 00:00:00 2001
From: Christian Huffman
Date: Fri, 14 May 2021 14:55:50 -0400
Subject: [PATCH] Corrects node wait to search for volumeHandle

---
 test/e2e/storage/testsuites/snapshottable.go | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go
index b4c19925c3d..af969b2ae45 100644
--- a/test/e2e/storage/testsuites/snapshottable.go
+++ b/test/e2e/storage/testsuites/snapshottable.go
@@ -184,20 +184,25 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
 			nodeName := pod.Spec.NodeName
 			gomega.Expect(nodeName).NotTo(gomega.BeEmpty(), "pod.Spec.NodeName must not be empty")
 
-			ginkgo.By(fmt.Sprintf("[init] waiting until the node=%s is not using the volume=%s", nodeName, pv.Name))
+			// Snapshot tests are only executed for CSI drivers. When CSI drivers
+			// are attached to the node they use VolumeHandle instead of the pv.Name.
+			volumeName := pv.Spec.PersistentVolumeSource.CSI.VolumeHandle
+
+			ginkgo.By(fmt.Sprintf("[init] waiting until the node=%s is not using the volume=%s", nodeName, volumeName))
 			success := storageutils.WaitUntil(framework.Poll, f.Timeouts.PVDelete, func() bool {
 				node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
 				framework.ExpectNoError(err)
 				volumesInUse := node.Status.VolumesInUse
 				framework.Logf("current volumes in use: %+v", volumesInUse)
 				for i := 0; i < len(volumesInUse); i++ {
-					if strings.HasSuffix(string(volumesInUse[i]), pv.Name) {
+					if strings.HasSuffix(string(volumesInUse[i]), volumeName) {
 						return false
 					}
 				}
 				return true
 			})
 			framework.ExpectEqual(success, true)
+		}
 
 		cleanup := func() {
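
Note (illustration only, not part of the patch): the reason the suffix match has to use the CSI VolumeHandle is that node.Status.VolumesInUse reports CSI volumes by a unique volume name built from the driver name and the volume handle rather than from the PV object name, so a suffix match on pv.Name never fires and the wait passes even while the node still lists the volume as in use. Below is a minimal Go sketch of that comparison, assuming an in-use entry of the form "kubernetes.io/csi/<driver>^<volumeHandle>"; the driver name, PV name, and handle are hypothetical.

    package main

    import (
    	"fmt"
    	"strings"
    )

    func main() {
    	// Hypothetical values, for illustration only.
    	pvName := "pvc-0d0b2c57-4c6e-4b4a-9a39-7c0e9a1d2f3b"
    	volumeHandle := "c2a1d9f0-4b7e-11ec-8d3d-0242ac130003"

    	// Assumed shape of a node.Status.VolumesInUse entry for a CSI volume:
    	// "kubernetes.io/csi/<driver>^<volumeHandle>".
    	inUse := "kubernetes.io/csi/hostpath.csi.k8s.io^" + volumeHandle

    	fmt.Println(strings.HasSuffix(inUse, pvName))       // false: pv.Name never matches, so the volume looks unused even while attached
    	fmt.Println(strings.HasSuffix(inUse, volumeHandle)) // true: matching on the handle detects that the volume is still in use
    }

Because pv.Spec.PersistentVolumeSource.CSI is only set for CSI-provisioned PVs, the patch's added comment notes that this dereference is safe here: the snapshot tests only run against CSI drivers.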