diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go
index dd03692b97f..ec76c1ac3bb 100644
--- a/test/e2e/storage/volumes.go
+++ b/test/e2e/storage/volumes.go
@@ -504,10 +504,7 @@ var _ = utils.SIGDescribe("Volumes", func() {
 		It("should be mountable", func() {
 			framework.SkipUnlessProviderIs("vsphere")
 			vspheretest.Bootstrap(f)
-			nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-			Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
-			nodeInfo := vspheretest.TestContext.NodeMapper.GetNodeInfo(nodeList.Items[0].Name)
-
+			nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo()
 			var volumePath string
 			config := framework.VolumeTestConfig{
 				Namespace: namespace.Name,
diff --git a/test/e2e/storage/vsphere/bootstrap.go b/test/e2e/storage/vsphere/bootstrap.go
index 8d1d93a7260..c556af5e6ac 100644
--- a/test/e2e/storage/vsphere/bootstrap.go
+++ b/test/e2e/storage/vsphere/bootstrap.go
@@ -17,6 +17,7 @@ limitations under the License.
 package vsphere
 
 import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"sync"
 )
@@ -43,10 +44,12 @@ func bootstrapOnce() {
 	if err != nil {
 		framework.Failf("Failed to bootstrap vSphere with error: %v", err)
 	}
-	// 2. Get all ready nodes
-	nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+	// 2. Get all nodes
+	nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
+	if err != nil {
+		framework.Failf("Failed to get nodes: %v", err)
+	}
 	TestContext = VSphereContext{NodeMapper: &NodeMapper{}, VSphereInstances: vsphereInstances}
-
 	// 3. Get Node to VSphere mapping
 	err = TestContext.NodeMapper.GenerateNodeMap(vsphereInstances, *nodeList)
 	if err != nil {
diff --git a/test/e2e/storage/vsphere/pv_reclaimpolicy.go b/test/e2e/storage/vsphere/pv_reclaimpolicy.go
index 0425a73e9da..3a1ca210152 100644
--- a/test/e2e/storage/vsphere/pv_reclaimpolicy.go
+++ b/test/e2e/storage/vsphere/pv_reclaimpolicy.go
@@ -51,11 +51,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
 	BeforeEach(func() {
 		framework.SkipUnlessProviderIs("vsphere")
 		Bootstrap(f)
-		nodes := framework.GetReadySchedulableNodesOrDie(c)
-		if len(nodes.Items) < 1 {
-			framework.Skipf("Requires at least %d node", 1)
-		}
-		nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
+		nodeInfo = GetReadySchedulableRandomNodeInfo()
 		pv = nil
 		pvc = nil
 		volumePath = ""
diff --git a/test/e2e/storage/vsphere/pvc_label_selector.go b/test/e2e/storage/vsphere/pvc_label_selector.go
index c7ee9bb2e2e..21eb3d5b1e4 100644
--- a/test/e2e/storage/vsphere/pvc_label_selector.go
+++ b/test/e2e/storage/vsphere/pvc_label_selector.go
@@ -63,11 +63,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
 		c = f.ClientSet
 		ns = f.Namespace.Name
 		Bootstrap(f)
-		nodes := framework.GetReadySchedulableNodesOrDie(c)
-		if len(nodes.Items) < 1 {
-			framework.Skipf("Requires at least %d node", 1)
-		}
-		nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
+		nodeInfo = GetReadySchedulableRandomNodeInfo()
 		framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
 		ssdlabels = make(map[string]string)
 		ssdlabels["volume-type"] = "ssd"
diff --git a/test/e2e/storage/vsphere/vsphere_utils.go b/test/e2e/storage/vsphere/vsphere_utils.go
index 500629dfe55..51af0ebad32 100644
--- a/test/e2e/storage/vsphere/vsphere_utils.go
+++ b/test/e2e/storage/vsphere/vsphere_utils.go
@@ -18,6 +18,7 @@ package vsphere
 
 import (
"fmt" + "math/rand" "path/filepath" "time" @@ -513,18 +514,6 @@ func removeStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string { return vDiskPath } -// isDiskAttached checks if disk is attached to the VM. -func isDiskAttached(ctx context.Context, vm *object.VirtualMachine, diskPath string) (bool, error) { - device, err := getVirtualDeviceByPath(ctx, vm, diskPath) - if err != nil { - return false, err - } - if device != nil { - return true, nil - } - return false, nil -} - // getVirtualDeviceByPath gets the virtual device by path func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, diskPath string) (vim25types.BaseVirtualDevice, error) { vmDevices, err := vm.Device(ctx) @@ -541,6 +530,8 @@ func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, disk if matchVirtualDiskAndVolPath(backing.FileName, diskPath) { framework.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath) return device, nil + } else { + framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath) } } } @@ -715,21 +706,34 @@ func diskIsAttached(volPath string, nodeName string) (bool, error) { // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() - nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName) Connect(ctx, nodeInfo.VSphere) vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef) volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath) - attached, err := isDiskAttached(ctx, vm, volPath) + device, err := getVirtualDeviceByPath(ctx, vm, volPath) if err != nil { framework.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q", volPath, nodeName) + return false, err } - framework.Logf("diskIsAttached result: %v and error: %v, for volume: %s", attached, err, volPath) - return attached, err + if device != nil { + framework.Logf("diskIsAttached found the disk %q attached on node %q", + volPath, + nodeName) + } + return true, nil } func getUUIDFromProviderID(providerID string) string { return strings.TrimPrefix(providerID, ProviderPrefix) } + +func GetReadySchedulableRandomNodeInfo() *NodeInfo { + nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) + Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") + rand.Seed(time.Now().Unix()) + nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeList.Items[rand.Int()%len(nodeList.Items)].Name) + Expect(nodeInfo).NotTo(BeNil()) + return nodeInfo +} diff --git a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go index afc9fcda795..1e0092cecf3 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go +++ b/test/e2e/storage/vsphere/vsphere_volume_cluster_ds.go @@ -18,9 +18,6 @@ package vsphere import ( "fmt" - "math/rand" - "time" - . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -55,11 +52,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") - rand.Seed(time.Now().Unix()) - nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodeList.Items[rand.Int()%len(nodeList.Items)].Name) - + nodeInfo = GetReadySchedulableRandomNodeInfo() scParameters = make(map[string]string) clusterDatastore = GetAndExpectStringEnvVar(VCPClusterDatastore) }) diff --git a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go index 093526de3ec..051089cfb83 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go +++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go @@ -17,9 +17,7 @@ limitations under the License. package vsphere import ( - "math/rand" "path/filepath" - "time" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -70,14 +68,8 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() { Bootstrap(f) client = f.ClientSet namespace = f.Namespace.Name - nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) - if len(nodeList.Items) != 0 { - rand.Seed(time.Now().Unix()) - nodeName = nodeList.Items[rand.Int()%len(nodeList.Items)].Name - } else { - framework.Failf("Unable to find ready and schedulable Node") - } if !isNodeLabeled { + nodeName = GetReadySchedulableRandomNodeInfo().Name nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID()) nodeKeyValueLabel = make(map[string]string) nodeKeyValueLabel[NodeLabelKey] = nodeLabelValue diff --git a/test/e2e/storage/vsphere/vsphere_volume_fstype.go b/test/e2e/storage/vsphere/vsphere_volume_fstype.go index 4d77db59988..7d5f5a93f88 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_fstype.go +++ b/test/e2e/storage/vsphere/vsphere_volume_fstype.go @@ -175,11 +175,14 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st } func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) { + pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{}) + Expect(err).To(BeNil()) + nodeName := pod.Spec.NodeName By("Deleting pod") framework.DeletePodWithWait(f, client, pod) By("Waiting for volumes to be detached from the node") - waitForVSphereDiskToDetach(volPath, pod.Spec.NodeName) + waitForVSphereDiskToDetach(volPath, nodeName) } func deleteVolume(client clientset.Interface, pvclaimName string, namespace string) { diff --git a/test/e2e/storage/vsphere/vsphere_volume_placement.go b/test/e2e/storage/vsphere/vsphere_volume_placement.go index 6ae1e1e34f3..e276591d3e6 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_placement.go +++ b/test/e2e/storage/vsphere/vsphere_volume_placement.go @@ -56,9 +56,9 @@ var _ = utils.SIGDescribe("Volume Placement", func() { if !isNodeLabeled { node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns) isNodeLabeled = true + nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name) + vsp = nodeInfo.VSphere } - nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name) - vsp = nodeInfo.VSphere By("creating vmdk") volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) Expect(err).NotTo(HaveOccurred())