Addressed jeffvance's review comments

Divyen Patel 2018-02-13 20:56:34 -08:00
parent 8823c229d5
commit 78b60e2c1c
6 changed files with 63 additions and 40 deletions

View File

@@ -98,6 +98,8 @@ func (vs *VSphere) GetFolderByPath(ctx context.Context, dc object.Reference, fol
return vmFolder.Reference(), nil
}
// CreateVolume creates a vSphere volume using the volume parameters specified in VolumeOptions.
// If the volume is created successfully, the canonical disk path is returned; otherwise an error is returned.
func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef types.ManagedObjectReference) (string, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -108,34 +110,14 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef type
directoryAlreadyPresent = false
)
if datacenter == nil {
err = fmt.Errorf("datacenter is nil")
return "", err
return "", fmt.Errorf("datacenter is nil")
}
if volumeOptions == nil {
volumeOptions = &VolumeOptions{}
}
if volumeOptions.Datastore == "" {
volumeOptions.Datastore = vs.Config.DefaultDatastore
}
if volumeOptions.CapacityKB == 0 {
volumeOptions.CapacityKB = DefaultDiskCapacityKB
}
if volumeOptions.Name == "" {
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
}
if volumeOptions.DiskFormat == "" {
volumeOptions.DiskFormat = DefaultDiskFormat
}
if volumeOptions.SCSIControllerType == "" {
volumeOptions.SCSIControllerType = DefaultSCSIControllerType
}
vs.initVolumeOptions(volumeOptions)
finder := find.NewFinder(datacenter.Client(), true)
finder.SetDatacenter(datacenter)
ds, err := finder.Datastore(ctx, volumeOptions.Datastore)
if err != nil {
err = fmt.Errorf("Failed while searching for datastore: %s. err: %+v", volumeOptions.Datastore, err)
return "", err
return "", fmt.Errorf("Failed while searching for datastore: %s. err: %+v", volumeOptions.Datastore, err)
}
directoryPath := filepath.Clean(ds.Path(VolDir)) + "/"
fileManager := object.NewFileManager(ds.Client())
@@ -185,6 +167,8 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef type
return canonicalDiskPath, nil
}
// DeleteVolume deletes the vmdk file specified by volumePath.
// If an error is encountered while deleting the volume, it is returned.
func (vs *VSphere) DeleteVolume(volumePath string, dataCenterRef types.ManagedObjectReference) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -207,6 +191,8 @@ func (vs *VSphere) DeleteVolume(volumePath string, dataCenterRef types.ManagedOb
return nil
}
// IsVMPresent checks whether a VM with the name specified by vmName is present in the vCenter inventory.
// It returns true if the VM is present, false otherwise.
func (vs *VSphere) IsVMPresent(vmName string, dataCenterRef types.ManagedObjectReference) (isVMPresent bool, err error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -230,3 +216,25 @@ func (vs *VSphere) IsVMPresent(vmName string, dataCenterRef types.ManagedObjectR
}
return
}
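
For context, a hedged usage sketch of the volume lifecycle helpers above, assuming it sits in the same package; `vs` (a bootstrapped *VSphere client) and `dcRef` are placeholders that the e2e framework would normally supply, and passing nil options relies on the defaults filled in via initVolumeOptions:

func createAndDeleteVolume(vs *VSphere, dcRef types.ManagedObjectReference) error {
	// nil options are replaced with an empty VolumeOptions and populated
	// with defaults (datastore, capacity, name, disk format, controller type).
	volumePath, err := vs.CreateVolume(nil, dcRef)
	if err != nil {
		return err
	}
	// Clean up the vmdk that was just created.
	return vs.DeleteVolume(volumePath, dcRef)
}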
// initVolumeOptions sets default values for any volumeOptions fields that are not already set.
func (vs *VSphere) initVolumeOptions(volumeOptions *VolumeOptions) {
if volumeOptions == nil {
volumeOptions = &VolumeOptions{}
}
if volumeOptions.Datastore == "" {
volumeOptions.Datastore = vs.Config.DefaultDatastore
}
if volumeOptions.CapacityKB == 0 {
volumeOptions.CapacityKB = DefaultDiskCapacityKB
}
if volumeOptions.Name == "" {
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
}
if volumeOptions.DiskFormat == "" {
volumeOptions.DiskFormat = DefaultDiskFormat
}
if volumeOptions.SCSIControllerType == "" {
volumeOptions.SCSIControllerType = DefaultSCSIControllerType
}
}
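
One thing worth noting about initVolumeOptions: its nil guard rebinds only the function's local copy of the pointer, so a nil caller is not protected, which is why CreateVolume performs its own nil check before calling it. A minimal, self-contained sketch of the pitfall, using a trimmed-down Options type rather than the real VolumeOptions:

package main

import "fmt"

type Options struct{ Datastore string }

func fillDefaults(opts *Options) {
	if opts == nil {
		opts = &Options{} // rebinds the local pointer only; the caller still sees nil
	}
	opts.Datastore = "default-ds"
}

func main() {
	var opts *Options
	fillDefaults(opts)
	fmt.Println(opts == nil) // prints "true": the defaults were lost
}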

View File

@@ -89,7 +89,6 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
if len(nodes.Items) < 2 {
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
}
//nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
// Verify volume count specified by the user can be satisfied
if volumeCount > volumesPerNode*len(nodes.Items) {
framework.Skipf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), volumesPerNode*len(nodes.Items))
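
The skip condition above is simple arithmetic: with N schedulable nodes and at most volumesPerNode disks attachable per node, the cluster can absorb N*volumesPerNode volumes in total. A standalone sketch with purely illustrative values (the real numbers come from the test's configuration and the cluster):

package main

import "fmt"

func main() {
	// Illustrative values only; the e2e test derives these at runtime.
	volumesPerNode, nodeCount, volumeCount := 55, 3, 200
	max := volumesPerNode * nodeCount
	if volumeCount > max {
		fmt.Printf("Cannot attach %d volumes to %d nodes; maximum on %d nodes is %d\n",
			volumeCount, nodeCount, nodeCount, max)
	}
}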

View File

@@ -565,6 +565,7 @@ func convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[string][]
return vmVolumes, nil
}
// convertVolPathToDevicePath takes a volume path and returns the canonical volume path.
func convertVolPathToDevicePath(ctx context.Context, dc *object.Datacenter, volPath string) (string, error) {
volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
// Get the canonical volume path for volPath.
@@ -673,6 +674,7 @@ func registerNodeVM(nodeName, workingDir, vmxFilePath string, rpool *object.Reso
poweronNodeVM(nodeName, vm)
}
// disksAreAttached takes a map of nodes to their volumes and returns a map of each node to its volumes and their attachment state.
func disksAreAttached(nodeVolumes map[string][]string) (nodeVolumesAttachMap map[string]map[string]bool, err error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -725,15 +727,32 @@ func diskIsAttached(volPath string, nodeName string) (bool, error) {
return true, nil
}
// getUUIDFromProviderID strips ProviderPrefix ("vsphere://") from the providerID.
// This yields the VM UUID, which can be used to find the node VM in vCenter.
func getUUIDFromProviderID(providerID string) string {
return strings.TrimPrefix(providerID, ProviderPrefix)
}
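
A quick self-contained illustration of what getUUIDFromProviderID does; the providerID value below is a made-up placeholder, and ProviderPrefix mirrors the "vsphere://" value named in the comment above:

package main

import (
	"fmt"
	"strings"
)

const ProviderPrefix = "vsphere://" // assumed value, per the comment above

func main() {
	providerID := "vsphere://example-vm-uuid" // hypothetical placeholder
	fmt.Println(strings.TrimPrefix(providerID, ProviderPrefix))
	// Output: example-vm-uuid
}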
func GetReadySchedulableRandomNodeInfo() *NodeInfo {
// GetReadySchedulableNodeInfos returns NodeInfo objects for all nodes that are Ready and schedulable.
func GetReadySchedulableNodeInfos() []*NodeInfo {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
rand.Seed(time.Now().Unix())
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeList.Items[rand.Int()%len(nodeList.Items)].Name)
Expect(nodeInfo).NotTo(BeNil())
return nodeInfo
var nodesInfo []*NodeInfo
for _, node := range nodeList.Items {
nodeInfo := TestContext.NodeMapper.GetNodeInfo(node.Name)
if nodeInfo != nil {
nodesInfo = append(nodesInfo, nodeInfo)
}
}
return nodesInfo
}
// GetReadySchedulableRandomNodeInfo returns the NodeInfo object for one Ready and schedulable node.
// If multiple nodes are Ready and schedulable, one of them is selected at random
// and its associated NodeInfo object is returned.
func GetReadySchedulableRandomNodeInfo() *NodeInfo {
nodesInfo := GetReadySchedulableNodeInfos()
rand.Seed(time.Now().Unix())
Expect(nodesInfo).NotTo(BeEmpty())
return nodesInfo[rand.Int()%len(nodesInfo)]
}
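
The selection logic in GetReadySchedulableRandomNodeInfo is the standard seed-then-index pattern. A minimal runnable sketch of the same pattern, with strings standing in for *NodeInfo values:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	nodes := []string{"node-a", "node-b", "node-c"} // stand-ins for NodeInfo objects
	rand.Seed(time.Now().Unix())              // same seeding as the helper above
	fmt.Println(nodes[rand.Int()%len(nodes)]) // uniform pick from the slice
}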

View File

@@ -73,7 +73,7 @@ var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() {
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
Expect(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty())
})
It("verify fstype - ext3 formatted volume", func() {
@@ -108,7 +108,8 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam
// Detach and delete volume
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
deleteVolume(client, pvclaim.Name, namespace)
err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
Expect(err).To(BeNil())
}
func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string) {
@@ -130,7 +131,8 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
// Detach and delete volume
detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
deleteVolume(client, pvclaim.Name, namespace)
err = framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
Expect(err).To(BeNil())
Expect(eventList.Items).NotTo(BeEmpty())
errorMsg := `MountVolume.MountDevice failed for volume "` + persistentvolumes[0].Name + `" : executable file not found`
@@ -174,6 +176,7 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st
return pod
}
// detachVolume deletes the pod and waits until the volume passed in the argument is detached from the node.
func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) {
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).To(BeNil())
@@ -184,7 +187,3 @@ func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Po
By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(volPath, nodeName)
}
func deleteVolume(client clientset.Interface, pvclaimName string, namespace string) {
framework.DeletePersistentVolumeClaim(client, pvclaimName, namespace)
}
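
The change above drops the deleteVolume wrapper, which discarded the error from framework.DeletePersistentVolumeClaim, in favor of calling the framework function directly and asserting on its error. A small self-contained sketch of the before/after pattern (deletePVC is a hypothetical stand-in for the framework call):

package main

import (
	"errors"
	"fmt"
)

// deletePVC is a hypothetical stand-in for framework.DeletePersistentVolumeClaim.
func deletePVC(name, namespace string) error {
	if name == "" {
		return errors.New("pvc name must not be empty")
	}
	return nil
}

// Before: a wrapper that silently swallowed the error.
func deleteVolume(name, namespace string) {
	deletePVC(name, namespace) // error discarded
}

func main() {
	// After: call directly and check the error, mirroring Expect(err).To(BeNil()).
	if err := deletePVC("", "e2e-ns"); err != nil {
		fmt.Println("delete failed:", err)
	}
}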

View File

@@ -173,6 +173,7 @@ func waitForPodToFailover(client clientset.Interface, deployment *extensions.Dep
return getNodeForDeployment(client, deployment)
}
// getNodeForDeployment returns the node name for the Deployment.
func getNodeForDeployment(client clientset.Interface, deployment *extensions.Deployment) (string, error) {
podList, err := framework.GetPodsForDeployment(client, deployment)
if err != nil {

View File

@@ -63,10 +63,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) == 0 {
framework.Failf("Unable to find ready and schedulable Node")
}
Expect(GetReadySchedulableNodeInfos()).NotTo(BeEmpty())
if os.Getenv("VOLUME_OPS_SCALE") != "" {
volume_ops_scale, err = strconv.Atoi(os.Getenv("VOLUME_OPS_SCALE"))
Expect(err).NotTo(HaveOccurred())
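
The VOLUME_OPS_SCALE handling above is the usual environment-override pattern: parse the variable when set, otherwise keep a default. A runnable sketch (the default of 30 here is a placeholder, not necessarily the test's actual default constant):

package main

import (
	"fmt"
	"os"
	"strconv"
)

func main() {
	scale := 30 // placeholder default; the test defines its own constant
	if v := os.Getenv("VOLUME_OPS_SCALE"); v != "" {
		n, err := strconv.Atoi(v)
		if err != nil {
			fmt.Fprintln(os.Stderr, "invalid VOLUME_OPS_SCALE:", err)
			os.Exit(1)
		}
		scale = n
	}
	fmt.Println("volume ops scale:", scale)
}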