mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 10:51:29 +00:00)

Fix golint errors in test/e2e/storage/vsphere

This commit is contained in:
parent 9ae36ba81a
commit 18f05ef5b9
@@ -515,5 +515,4 @@ staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1
staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/fischer
staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder
test/e2e/common
test/e2e/storage/vsphere
test/utils
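The hunk above removes test/e2e/storage/vsphere from the golint exception list, so every exported identifier in the package now has to pass golint; in the reformatted hunks below, each pair of nearly identical lines shows the old line followed by its lint-clean replacement. As a rough, self-contained illustration (a toy package, not code from this commit) of the two complaints that drive most of the renames:

// Package scratch is a hypothetical example used only to illustrate golint findings.
package scratch

// golint reports roughly: exported type VSphereHelper should have comment or be unexported.
type VSphereHelper struct{}

// golint reports roughly: don't use underscores in Go names; var pvc_name should be pvcName.
var pvc_name = "demo-claim"

// After the fix: unexported helpers need no doc comment, and names use camelCase.
type vsphereHelper struct{}

var pvcName = "demo-claim"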
@@ -49,7 +49,7 @@ func bootstrapOnce() {
if err != nil {
framework.Failf("Failed to get nodes: %v", err)
}
TestContext = VSphereContext{NodeMapper: &NodeMapper{}, VSphereInstances: vsphereInstances}
TestContext = Context{NodeMapper: &NodeMapper{}, VSphereInstances: vsphereInstances}
// 3. Get Node to VSphere mapping
err = TestContext.NodeMapper.GenerateNodeMap(vsphereInstances, *nodeList)
if err != nil {
@@ -99,7 +99,7 @@ func GetVSphereInstances() (map[string]*VSphere, error) {
func getConfig() (*ConfigFile, error) {
if confFileLocation == "" {
if framework.TestContext.CloudConfig.ConfigFile == "" {
return nil, fmt.Errorf("Env variable 'VSPHERE_CONF_FILE' is not set, and no config-file specified")
return nil, fmt.Errorf("env variable 'VSPHERE_CONF_FILE' is not set, and no config-file specified")
}
confFileLocation = framework.TestContext.CloudConfig.ConfigFile
}
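The only change in this hunk is the first letter of the error string: Go style, enforced here by golint, wants error strings to start lowercase and carry no trailing punctuation, because they are usually wrapped into longer messages. A minimal, self-contained sketch of that convention (toy code, not from the repo):

package main

import (
	"errors"
	"fmt"
)

func loadConfig(path string) error {
	if path == "" {
		// lint-clean: lowercase, no trailing period, reads well when wrapped
		return errors.New("config file path is empty")
	}
	return nil
}

func main() {
	if err := loadConfig(""); err != nil {
		// wrapping the error shows why the lowercase convention matters
		fmt.Println(fmt.Errorf("bootstrap failed: %w", err))
	}
}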
@@ -17,10 +17,10 @@ limitations under the License.
package vsphere

// Context holds common information for vSphere tests
type VSphereContext struct {
type Context struct {
NodeMapper *NodeMapper
VSphereInstances map[string]*VSphere
}

// TestContext should be used by all tests to access common context data. It should be initialized only once, during bootstrapping the tests.
var TestContext VSphereContext
var TestContext Context
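This hunk renames the exported type VSphereContext to Context. Inside package vsphere the old name forced callers to write vsphere.VSphereContext, the repetition ("stutter") golint warns about; after the rename the reference reads vsphere.Context. A compile-only sketch of the idea (an illustrative file, not the repo's):

// Package vsphere (sketch): the type is declared once...
package vsphere

// Context holds common information for vSphere tests.
type Context struct {
	// fields elided in this sketch
}

// ...and a caller in another package refers to it as vsphere.Context.
// With the old name the same reference would read vsphere.VSphereContext,
// which is the repetition golint flags as stutter.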
@@ -33,9 +33,11 @@ import (
neturl "net/url"
)

// NodeMapper contains information to generate nameToNodeInfo and vcToZoneDatastore maps
type NodeMapper struct {
}

// NodeInfo contains information about vcenter nodes
type NodeInfo struct {
Name string
DataCenterRef types.ManagedObjectReference
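The two comments introduced here satisfy golint's requirement that an exported identifier carries a doc comment beginning with the identifier's own name. A small illustrative example of the expected shape (a hypothetical type, not from the repo):

// Package scratch is a hypothetical package illustrating doc-comment style.
package scratch

// NodeTracker records which virtual machine backs each Kubernetes node.
// golint accepts this because the comment starts with the type name.
type NodeTracker struct {
	nodes map[string]string
}

// Lookup returns the VM name recorded for the given node, if any.
func (t *NodeTracker) Lookup(node string) (string, bool) {
	vm, ok := t.nodes[node]
	return vm, ok
}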
@@ -46,9 +48,9 @@ type NodeInfo struct {
}

const (
DatacenterType = "Datacenter"
ClusterComputeResourceType = "ClusterComputeResource"
HostSystemType = "HostSystem"
datacenterType = "Datacenter"
clusterComputeResourceType = "ClusterComputeResource"
hostSystemType = "HostSystem"
)

var (
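Here the managed-object type constants are unexported instead of documented: golint asks for a comment on every exported constant (or on its block), while unexported constants need none, which is the lighter fix for package-internal values. A sketch of both options (an illustrative block, not the repo's):

// Package scratch sketches the two ways to silence the exported-const warning.
package scratch

// Managed object types used when walking the vCenter inventory.
// Keeping the constants exported requires a block comment like this one...
const (
	DatacenterType = "Datacenter"
	HostSystemType = "HostSystem"
)

// ...while unexported constants, the option taken in this commit, need no comment.
const (
	clusterComputeResourceType = "ClusterComputeResource"
)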
@@ -58,13 +60,13 @@ var (

// GenerateNodeMap populates node name to node info map
func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, nodeList v1.NodeList) error {
type VmSearch struct {
type VMSearch struct {
vs *VSphere
datacenter *object.Datacenter
}

var wg sync.WaitGroup
var queueChannel []*VmSearch
var queueChannel []*VMSearch

var datacenters []*object.Datacenter
var err error
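VmSearch becomes VMSearch here, and instanceId becomes instanceID in a later hunk, because golint expects well-known initialisms such as VM, ID, and URL to keep a single consistent case. A toy sketch of the convention (not repo code):

// Package scratch illustrates golint's initialism rule.
package scratch

// vmSearch pairs a VM identifier with the URL it was discovered at.
// Initialisms stay uniformly cased: vmID and baseURL, never vmId or baseUrl.
type vmSearch struct {
	vmID    string
	baseURL string
}

// newVMSearch builds a search entry; the exported form would be NewVMSearch, not NewVmSearch.
func newVMSearch(vmID, baseURL string) vmSearch {
	return vmSearch{vmID: vmID, baseURL: baseURL}
}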
@@ -99,7 +101,7 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node

for _, dc := range datacenters {
framework.Logf("Search candidates vc=%s and datacenter=%s", vs.Config.Hostname, dc.Name())
queueChannel = append(queueChannel, &VmSearch{vs: vs, datacenter: dc})
queueChannel = append(queueChannel, &VMSearch{vs: vs, datacenter: dc})
}
}

@@ -170,7 +172,7 @@ func retrieveZoneInformationForNode(nodeName string, connection *VSphere, hostSy
// zone precedence will be received by the HostSystem type.
for _, ancestor := range ancestors {
moType := ancestor.ExtensibleManagedObject.Self.Type
if moType == DatacenterType || moType == ClusterComputeResourceType || moType == HostSystemType {
if moType == datacenterType || moType == clusterComputeResourceType || moType == hostSystemType {
validAncestors = append(validAncestors, ancestor)
}
}
@@ -208,7 +210,7 @@ func retrieveZoneInformationForNode(nodeName string, connection *VSphere, hostSy
return zones
}

// Generate zone to datastore mapping for easily verifying volume placement
// GenerateZoneToDatastoreMap generates a mapping of zone to datastore for easily verifying volume placement
func (nm *NodeMapper) GenerateZoneToDatastoreMap() error {
// 1. Create zone to hosts map for each VC
var vcToZoneHostsMap = make(map[string](map[string][]string))
@@ -254,7 +256,7 @@ func (nm *NodeMapper) GenerateZoneToDatastoreMap() error {
return nil
}

// Retrieves the common datastores from the specified hosts
// retrieveCommonDatastoresAmongHosts retrieves the common datastores from the specified hosts
func retrieveCommonDatastoresAmongHosts(hosts []string, hostToDatastoresMap map[string][]string) []string {
var datastoreCountMap = make(map[string]int)
for _, host := range hosts {
@@ -272,12 +274,12 @@ func retrieveCommonDatastoresAmongHosts(hosts []string, hostToDatastoresMap map[
return commonDatastores
}

// Get all the datastores in the specified zone
// GetDatastoresInZone returns all the datastores in the specified zone
func (nm *NodeMapper) GetDatastoresInZone(vc string, zone string) []string {
return vcToZoneDatastoresMap[vc][zone]
}

// GetNodeInfo return NodeInfo for given nodeName
// GetNodeInfo returns NodeInfo for given nodeName
func (nm *NodeMapper) GetNodeInfo(nodeName string) *NodeInfo {
return nameToNodeInfo[nodeName]
}
@@ -36,13 +36,13 @@ import (
----------
1. Create VMDK.
2. Create pv with label volume-type:ssd, volume path set to vmdk created in previous step, and PersistentVolumeReclaimPolicy is set to Delete.
3. Create PVC (pvc_vvol) with label selector to match with volume-type:vvol
4. Create PVC (pvc_ssd) with label selector to match with volume-type:ssd
5. Wait and verify pvc_ssd is bound with PV.
6. Verify Status of pvc_vvol is still pending.
7. Delete pvc_ssd.
3. Create PVC (pvcVvol) with label selector to match with volume-type:vvol
4. Create PVC (pvcSsd) with label selector to match with volume-type:ssd
5. Wait and verify pvSsd is bound with PV.
6. Verify Status of pvcVvol is still pending.
7. Delete pvcSsd.
8. verify associated pv is also deleted.
9. delete pvc_vvol
9. delete pvcVvol

*/
var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
@@ -50,9 +50,9 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
var (
c clientset.Interface
ns string
pv_ssd *v1.PersistentVolume
pvc_ssd *v1.PersistentVolumeClaim
pvc_vvol *v1.PersistentVolumeClaim
pvSsd *v1.PersistentVolume
pvcSsd *v1.PersistentVolumeClaim
pvcVvol *v1.PersistentVolumeClaim
volumePath string
ssdlabels map[string]string
vvollabels map[string]string
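pv_ssd, pvc_ssd, and pvc_vvol become pvSsd, pvcSsd, and pvcVvol throughout this file: golint rejects underscores in Go names, and likewise rejects ALL_CAPS constants such as DEFAULT_VOLUME_OPS_SCALE in the volume-ops test further down, in favour of mixedCaps. A toy sketch of the rule (not repo code):

// Package scratch illustrates golint's naming complaints about underscores.
package scratch

// golint: don't use underscores in Go names; these mixedCaps forms are the accepted style.
var (
	pvcSsd  = "pvc-ssd"  // was pvc_ssd
	pvcVvol = "pvc-vvol" // was pvc_vvol
)

// golint: don't use ALL_CAPS in Go names; use CamelCase.
const defaultVolumeOpsScale = 30 // was DEFAULT_VOLUME_OPS_SCALE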
@@ -77,35 +77,35 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
ginkgo.AfterEach(func() {
ginkgo.By("Running clean up actions")
if framework.ProviderIs("vsphere") {
testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pv_ssd, pvc_ssd, pvc_vvol)
testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pvSsd, pvcSsd, pvcVvol)
}
})
ginkgo.It("should bind volume with claim for given label", func() {
volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels)
volumePath, pvSsd, pvcSsd, pvcVvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels)
framework.ExpectNoError(err)

ginkgo.By("wait for the pvc_ssd to bind with pv_ssd")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv_ssd, pvc_ssd))
ginkgo.By("wait for the pvcSsd to bind with pvSsd")
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pvSsd, pvcSsd))

ginkgo.By("Verify status of pvc_vvol is pending")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvc_vvol.Name, 3*time.Second, 300*time.Second)
ginkgo.By("Verify status of pvcVvol is pending")
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvcVvol.Name, 3*time.Second, 300*time.Second)
framework.ExpectNoError(err)

ginkgo.By("delete pvc_ssd")
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
ginkgo.By("delete pvcSsd")
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvcSsd.Name, ns), "Failed to delete PVC ", pvcSsd.Name)

ginkgo.By("verify pv_ssd is deleted")
err = framework.WaitForPersistentVolumeDeleted(c, pv_ssd.Name, 3*time.Second, 300*time.Second)
ginkgo.By("verify pvSsd is deleted")
err = framework.WaitForPersistentVolumeDeleted(c, pvSsd.Name, 3*time.Second, 300*time.Second)
framework.ExpectNoError(err)
volumePath = ""

ginkgo.By("delete pvc_vvol")
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
ginkgo.By("delete pvcVvol")
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvcVvol.Name, ns), "Failed to delete PVC ", pvcVvol.Name)
})
})
})

func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) {
func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pvSsd *v1.PersistentVolume, pvcSsd *v1.PersistentVolumeClaim, pvcVvol *v1.PersistentVolumeClaim, err error) {
ginkgo.By("creating vmdk")
volumePath = ""
volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
@@ -114,37 +114,37 @@ func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo,
}

ginkgo.By("creating the pv with label volume-type:ssd")
pv_ssd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels)
pv_ssd, err = c.CoreV1().PersistentVolumes().Create(pv_ssd)
pvSsd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels)
pvSsd, err = c.CoreV1().PersistentVolumes().Create(pvSsd)
if err != nil {
return
}

ginkgo.By("creating pvc with label selector to match with volume-type:vvol")
pvc_vvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels)
pvc_vvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_vvol)
pvcVvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels)
pvcVvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvcVvol)
if err != nil {
return
}

ginkgo.By("creating pvc with label selector to match with volume-type:ssd")
pvc_ssd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels)
pvc_ssd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_ssd)
pvcSsd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels)
pvcSsd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvcSsd)
return
}

func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) {
func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pvSsd *v1.PersistentVolume, pvcSsd *v1.PersistentVolumeClaim, pvcVvol *v1.PersistentVolumeClaim) {
ginkgo.By("running testCleanupVSpherePVClabelselector")
if len(volumePath) > 0 {
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
if pvc_ssd != nil {
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
if pvcSsd != nil {
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvcSsd.Name, ns), "Failed to delete PVC ", pvcSsd.Name)
}
if pvc_vvol != nil {
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
if pvcVvol != nil {
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvcVvol.Name, ns), "Failed to delete PVC ", pvcVvol.Name)
}
if pv_ssd != nil {
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv_ssd.Name), "Failed to delete PV ", pv_ssd.Name)
if pvSsd != nil {
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pvSsd.Name), "Failed to delete PV ", pvSsd.Name)
}
}
@@ -34,14 +34,14 @@ import (
)

const (
VolDir = "kubevols"
DefaultDiskCapacityKB = 2097152
DefaultDiskFormat = "thin"
DefaultSCSIControllerType = "lsiLogic"
VirtualMachineType = "VirtualMachine"
volDir = "kubevols"
defaultDiskCapacityKB = 2097152
defaultDiskFormat = "thin"
defaultSCSIControllerType = "lsiLogic"
virtualMachineType = "VirtualMachine"
)

// Represents a vSphere instance where one or more kubernetes nodes are running.
// VSphere represents a vSphere instance where one or more kubernetes nodes are running.
type VSphere struct {
Config *Config
Client *govmomi.Client
@@ -63,7 +63,7 @@ func (vs *VSphere) GetDatacenter(ctx context.Context, datacenterPath string) (*o
return finder.Datacenter(ctx, datacenterPath)
}

// GetDatacenter returns the DataCenter Object for the given datacenterPath
// GetDatacenterFromObjectReference returns the DataCenter Object for the given datacenter reference
func (vs *VSphere) GetDatacenterFromObjectReference(ctx context.Context, dc object.Reference) *object.Datacenter {
Connect(ctx, vs)
return object.NewDatacenter(vs.Client.Client, dc.Reference())
@@ -76,7 +76,7 @@ func (vs *VSphere) GetAllDatacenter(ctx context.Context) ([]*object.Datacenter,
return finder.DatacenterList(ctx, "*")
}

// GetVMByUUID gets the VM object Reference from the given vmUUID
// GetVMByUUID returns the VM object Reference from the given vmUUID
func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Reference) (object.Reference, error) {
Connect(ctx, vs)
datacenter := vs.GetDatacenterFromObjectReference(ctx, dc)
@@ -85,7 +85,7 @@ func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Ref
return s.FindByUuid(ctx, datacenter, vmUUID, true, nil)
}

// Get host object reference of the host on which the specified VM resides
// GetHostFromVMReference returns host object reference of the host on which the specified VM resides
func (vs *VSphere) GetHostFromVMReference(ctx context.Context, vm types.ManagedObjectReference) types.ManagedObjectReference {
Connect(ctx, vs)
var vmMo mo.VirtualMachine
@@ -94,7 +94,7 @@ func (vs *VSphere) GetHostFromVMReference(ctx context.Context, vm types.ManagedO
return host
}

// Get the datastore references of all the datastores mounted on the specified host
// GetDatastoresMountedOnHost returns the datastore references of all the datastores mounted on the specified host
func (vs *VSphere) GetDatastoresMountedOnHost(ctx context.Context, host types.ManagedObjectReference) []types.ManagedObjectReference {
Connect(ctx, vs)
var hostMo mo.HostSystem
@@ -102,7 +102,7 @@ func (vs *VSphere) GetDatastoresMountedOnHost(ctx context.Context, host types.Ma
return hostMo.Datastore
}

// Get the datastore reference of the specified datastore
// GetDatastoreRefFromName returns the datastore reference of the specified datastore
func (vs *VSphere) GetDatastoreRefFromName(ctx context.Context, dc object.Reference, datastoreName string) (types.ManagedObjectReference, error) {
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dc.Reference())
@@ -148,7 +148,7 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef type
if err != nil {
return "", fmt.Errorf("Failed while searching for datastore: %s. err: %+v", volumeOptions.Datastore, err)
}
directoryPath := filepath.Clean(ds.Path(VolDir)) + "/"
directoryPath := filepath.Clean(ds.Path(volDir)) + "/"
fileManager := object.NewFileManager(ds.Client())
err = fileManager.MakeDirectory(ctx, directoryPath, datacenter, false)
if err != nil {
@@ -237,7 +237,7 @@ func (vs *VSphere) IsVMPresent(vmName string, dataCenterRef types.ManagedObjectR
return
}
for _, vmFoldersChild := range vmFoldersChildren {
if vmFoldersChild.Reference().Type == VirtualMachineType {
if vmFoldersChild.Reference().Type == virtualMachineType {
if object.NewVirtualMachine(vs.Client.Client, vmFoldersChild.Reference()).Name() == vmName {
return true, nil
}
@@ -255,15 +255,15 @@ func (vs *VSphere) initVolumeOptions(volumeOptions *VolumeOptions) {
volumeOptions.Datastore = vs.Config.DefaultDatastore
}
if volumeOptions.CapacityKB == 0 {
volumeOptions.CapacityKB = DefaultDiskCapacityKB
volumeOptions.CapacityKB = defaultDiskCapacityKB
}
if volumeOptions.Name == "" {
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
}
if volumeOptions.DiskFormat == "" {
volumeOptions.DiskFormat = DefaultDiskFormat
volumeOptions.DiskFormat = defaultDiskFormat
}
if volumeOptions.SCSIControllerType == "" {
volumeOptions.SCSIControllerType = DefaultSCSIControllerType
volumeOptions.SCSIControllerType = defaultSCSIControllerType
}
}
@@ -24,36 +24,38 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
)

// environment variables related to datastore parameters
const (
SPBMPolicyName = "VSPHERE_SPBM_POLICY_NAME"
StorageClassDatastoreName = "VSPHERE_DATASTORE"
SecondSharedDatastore = "VSPHERE_SECOND_SHARED_DATASTORE"
KubernetesClusterName = "VSPHERE_KUBERNETES_CLUSTER"
SPBMTagPolicy = "VSPHERE_SPBM_TAG_POLICY"
)

const (
VCPClusterDatastore = "CLUSTER_DATASTORE"
SPBMPolicyDataStoreCluster = "VSPHERE_SPBM_POLICY_DS_CLUSTER"
)

// environment variables used for scaling tests
const (
VCPScaleVolumeCount = "VCP_SCALE_VOLUME_COUNT"
VCPScaleVolumesPerPod = "VCP_SCALE_VOLUME_PER_POD"
VCPScaleInstances = "VCP_SCALE_INSTANCES"
)

// environment variables used for stress tests
const (
VCPStressInstances = "VCP_STRESS_INSTANCES"
VCPStressIterations = "VCP_STRESS_ITERATIONS"
)

// environment variables used for performance tests
const (
VCPPerfVolumeCount = "VCP_PERF_VOLUME_COUNT"
VCPPerfVolumesPerPod = "VCP_PERF_VOLUME_PER_POD"
VCPPerfIterations = "VCP_PERF_ITERATIONS"
)

// environment variables used for zone tests
const (
VCPZoneVsanDatastore1 = "VCP_ZONE_VSANDATASTORE1"
VCPZoneVsanDatastore2 = "VCP_ZONE_VSANDATASTORE2"
@@ -67,12 +69,42 @@ const (
VCPInvalidZone = "VCP_INVALID_ZONE"
)

// storage class parameters
const (
Datastore = "datastore"
PolicyDiskStripes = "diskStripes"
PolicyHostFailuresToTolerate = "hostFailuresToTolerate"
PolicyCacheReservation = "cacheReservation"
PolicyObjectSpaceReservation = "objectSpaceReservation"
PolicyIopsLimit = "iopsLimit"
DiskFormat = "diskformat"
SpbmStoragePolicy = "storagepolicyname"
)

// test values for storage class parameters
const (
ThinDisk = "thin"
BronzeStoragePolicy = "bronze"
HostFailuresToTolerateCapabilityVal = "0"
CacheReservationCapabilityVal = "20"
DiskStripesCapabilityVal = "1"
ObjectSpaceReservationCapabilityVal = "30"
IopsLimitCapabilityVal = "100"
StripeWidthCapabilityVal = "2"
DiskStripesCapabilityInvalidVal = "14"
HostFailuresToTolerateCapabilityInvalidVal = "4"
)

// GetAndExpectStringEnvVar returns the string value of an environment variable or fails if
// the variable is not set
func GetAndExpectStringEnvVar(varName string) string {
varValue := os.Getenv(varName)
gomega.Expect(varValue).NotTo(gomega.BeEmpty(), "ENV "+varName+" is not set")
return varValue
}

// GetAndExpectIntEnvVar returns the integer value of an environment variable or fails if
// the variable is not set
func GetAndExpectIntEnvVar(varName string) int {
varValue := GetAndExpectStringEnvVar(varName)
varIntValue, err := strconv.Atoi(varValue)
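These hunks group the environment-variable names and the shared storage-class parameters into commented const blocks next to GetAndExpectStringEnvVar and GetAndExpectIntEnvVar, the helpers the tests use to read them. A self-contained sketch of the same fail-fast pattern, with hypothetical helper names standing in for the gomega-based originals:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// mustGetenv is a hypothetical stand-in for GetAndExpectStringEnvVar: it fails
// fast when a required variable is missing instead of limping on with "".
func mustGetenv(name string) string {
	v := os.Getenv(name)
	if v == "" {
		panic(fmt.Sprintf("ENV %s is not set", name))
	}
	return v
}

// mustGetenvInt mirrors GetAndExpectIntEnvVar for numeric knobs.
func mustGetenvInt(name string) int {
	n, err := strconv.Atoi(mustGetenv(name))
	if err != nil {
		panic(fmt.Sprintf("ENV %s is not an integer: %v", name, err))
	}
	return n
}

func main() {
	datastore := mustGetenv("VSPHERE_DATASTORE")           // StorageClassDatastoreName
	volumeCount := mustGetenvInt("VCP_SCALE_VOLUME_COUNT") // VCPScaleVolumeCount
	fmt.Println("datastore:", datastore, "volumes:", volumeCount)
}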
@@ -129,7 +129,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
case storageclass1:
scParams = nil
case storageclass2:
scParams[Policy_HostFailuresToTolerate] = "1"
scParams[PolicyHostFailuresToTolerate] = "1"
case storageclass3:
scParams[SpbmStoragePolicy] = policyName
case storageclass4:
@@ -92,7 +92,7 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
case storageclass2:
var scVSanParameters map[string]string
scVSanParameters = make(map[string]string)
scVSanParameters[Policy_HostFailuresToTolerate] = "1"
scVSanParameters[PolicyHostFailuresToTolerate] = "1"
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""))
case storageclass3:
var scSPBMPolicyParameters map[string]string
@@ -115,21 +115,22 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
var wg sync.WaitGroup
wg.Add(instances)
for instanceCount := 0; instanceCount < instances; instanceCount++ {
instanceId := fmt.Sprintf("Thread:%v", instanceCount+1)
go PerformVolumeLifeCycleInParallel(f, client, namespace, instanceId, scArrays[instanceCount%len(scArrays)], iterations, &wg)
instanceID := fmt.Sprintf("Thread:%v", instanceCount+1)
go PerformVolumeLifeCycleInParallel(f, client, namespace, instanceID, scArrays[instanceCount%len(scArrays)], iterations, &wg)
}
wg.Wait()
})

})

// goroutine to perform volume lifecycle operations in parallel
func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceId string, sc *storagev1.StorageClass, iterations int, wg *sync.WaitGroup) {
// PerformVolumeLifeCycleInParallel performs volume lifecycle operations
// Called as a go routine to perform operations in parallel
func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceID string, sc *storagev1.StorageClass, iterations int, wg *sync.WaitGroup) {
defer wg.Done()
defer ginkgo.GinkgoRecover()

for iterationCount := 0; iterationCount < iterations; iterationCount++ {
logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1)
logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceID, iterationCount+1)
ginkgo.By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc))
framework.ExpectNoError(err)
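The stress test above fans out one goroutine per instance, registers them with sync.WaitGroup, and joins them with wg.Wait, while each worker signals completion through defer wg.Done. A self-contained sketch of that fan-out pattern (fixed instance count instead of the VCP_STRESS_INSTANCES environment variable):

package main

import (
	"fmt"
	"sync"
)

func main() {
	const instances = 4 // assumed value; the test reads it from VCP_STRESS_INSTANCES

	var wg sync.WaitGroup
	wg.Add(instances)
	for i := 0; i < instances; i++ {
		instanceID := fmt.Sprintf("Thread:%v", i+1)
		go func(id string) {
			defer wg.Done() // signal completion even if the worker exits early
			fmt.Println(id, "running volume lifecycle iterations")
		}(instanceID)
	}
	wg.Wait() // block until every worker has called Done
}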
@@ -54,8 +54,8 @@ const (
storageclass2 = "sc-vsan"
storageclass3 = "sc-spbm"
storageclass4 = "sc-user-specified-ds"
DummyDiskName = "kube-dummyDisk.vmdk"
ProviderPrefix = "vsphere://"
dummyDiskName = "kube-dummyDisk.vmdk"
providerPrefix = "vsphere://"
)

// volumeState represents the state of a volume.
@@ -446,7 +446,7 @@ func getCanonicalVolumePath(ctx context.Context, dc *object.Datacenter, volumePa
dsFolder := dsPath[0]
// Get the datastore folder ID if datastore or folder doesn't exist in datastoreFolderIDMap
if !isValidUUID(dsFolder) {
dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + DummyDiskName
dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + dummyDiskName
// Querying a non-existent dummy disk on the datastore folder.
// It would fail and return an folder ID in the error message.
_, err := getVirtualDiskPage83Data(ctx, dc, dummyDiskVolPath)
@@ -546,9 +546,8 @@ func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, disk
if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
framework.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
return device, nil
} else {
framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
}
framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
}
}
}
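The last hunk above applies golint's advice that when an if block ends in a return, the trailing else only adds indentation and should be dropped. A toy before/after with identical behavior (an illustrative function, not repo code):

// Package scratch shows the else-after-return cleanup golint suggests.
package scratch

import "strings"

// matchBefore keeps the redundant else branch.
func matchBefore(name, want string) string {
	if strings.EqualFold(name, want) {
		return "matched"
	} else { // golint: drop this else and outdent its block
		return "no match"
	}
}

// matchAfter is the flattened, lint-clean version with identical behavior.
func matchAfter(name, want string) string {
	if strings.EqualFold(name, want) {
		return "matched"
	}
	return "no match"
}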
@@ -745,10 +744,10 @@ func diskIsAttached(volPath string, nodeName string) (bool, error) {
// getUUIDFromProviderID strips ProviderPrefix - "vsphere://" from the providerID
// this gives the VM UUID which can be used to find Node VM from vCenter
func getUUIDFromProviderID(providerID string) string {
return strings.TrimPrefix(providerID, ProviderPrefix)
return strings.TrimPrefix(providerID, providerPrefix)
}

// GetAllReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state
// GetReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state
func GetReadySchedulableNodeInfos() []*NodeInfo {
nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
framework.ExpectNoError(err)
@@ -33,8 +33,8 @@ import (
)

const (
InvalidDatastore = "invalidDatastore"
DatastoreSCName = "datastoresc"
invalidDatastore = "invalidDatastore"
datastoreSCName = "datastoresc"
)

/*
@@ -66,11 +66,11 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",

ginkgo.It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func() {
ginkgo.By("Invoking Test for invalid datastore")
scParameters[Datastore] = InvalidDatastore
scParameters[Datastore] = invalidDatastore
scParameters[DiskFormat] = ThinDisk
err := invokeInvalidDatastoreTestNeg(client, namespace, scParameters)
framework.ExpectError(err)
errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": Datastore '` + InvalidDatastore + `' not found`
errorMsg := `Failed to provision volume with StorageClass \"` + datastoreSCName + `\": Datastore '` + invalidDatastore + `' not found`
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
}
@@ -79,7 +79,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",

func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
ginkgo.By("Creating Storage Class With Invalid Datastore")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters, nil, ""))
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(datastoreSCName, scParameters, nil, ""))
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)

@@ -31,7 +31,7 @@ import (
)

const (
DiskSizeSCName = "disksizesc"
diskSizeSCName = "disksizesc"
)

/*
@@ -68,7 +68,7 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
expectedDiskSize := "1Mi"

ginkgo.By("Creating Storage Class")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters, nil, ""))
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(diskSizeSCName, scParameters, nil, ""))
framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)

@@ -33,10 +33,10 @@ import (
)

const (
Ext4FSType = "ext4"
Ext3FSType = "ext3"
InvalidFSType = "ext10"
ExecCommand = "/bin/df -T /mnt/volume1 | /bin/awk 'FNR == 2 {print $2}' > /mnt/volume1/fstype && while true ; do sleep 2 ; done"
ext4FSType = "ext4"
ext3FSType = "ext3"
invalidFSType = "ext10"
execCommand = "/bin/df -T /mnt/volume1 | /bin/awk 'FNR == 2 {print $2}' > /mnt/volume1/fstype && while true ; do sleep 2 ; done"
)

/*
@@ -81,17 +81,17 @@ var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() {

ginkgo.It("verify fstype - ext3 formatted volume", func() {
ginkgo.By("Invoking Test for fstype: ext3")
invokeTestForFstype(f, client, namespace, Ext3FSType, Ext3FSType)
invokeTestForFstype(f, client, namespace, ext3FSType, ext3FSType)
})

ginkgo.It("verify fstype - default value should be ext4", func() {
ginkgo.By("Invoking Test for fstype: Default Value - ext4")
invokeTestForFstype(f, client, namespace, "", Ext4FSType)
invokeTestForFstype(f, client, namespace, "", ext4FSType)
})

ginkgo.It("verify invalid fstype", func() {
ginkgo.By("Invoking Test for fstype: invalid Value")
invokeTestForInvalidFstype(f, client, namespace, InvalidFSType)
invokeTestForInvalidFstype(f, client, namespace, invalidFSType)
})
})

@@ -127,7 +127,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
// Create pod to attach Volume to Node
pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, execCommand)
framework.ExpectError(err)

eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{})
@@ -171,7 +171,7 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node
pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, execCommand)
framework.ExpectNoError(err)

// Asserts: Right disk is attached to the pod
@@ -51,7 +51,7 @@ import (

var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("volume-ops-storm")
const DEFAULT_VOLUME_OPS_SCALE = 30
const defaultVolumeOpsScale = 30
var (
client clientset.Interface
namespace string
@@ -59,7 +59,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
pvclaims []*v1.PersistentVolumeClaim
persistentvolumes []*v1.PersistentVolume
err error
volume_ops_scale int
volumeOpsScale int
)
ginkgo.BeforeEach(func() {
e2eskipper.SkipUnlessProviderIs("vsphere")
@@ -68,12 +68,12 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
namespace = f.Namespace.Name
gomega.Expect(GetReadySchedulableNodeInfos()).NotTo(gomega.BeEmpty())
if scale := os.Getenv("VOLUME_OPS_SCALE"); scale != "" {
volume_ops_scale, err = strconv.Atoi(scale)
volumeOpsScale, err = strconv.Atoi(scale)
framework.ExpectNoError(err)
} else {
volume_ops_scale = DEFAULT_VOLUME_OPS_SCALE
volumeOpsScale = defaultVolumeOpsScale
}
pvclaims = make([]*v1.PersistentVolumeClaim, volume_ops_scale)
pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale)
})
ginkgo.AfterEach(func() {
ginkgo.By("Deleting PVCs")
@@ -86,7 +86,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
})

ginkgo.It("should create pod with many volumes and verify no attach call fails", func() {
ginkgo.By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volume_ops_scale))
ginkgo.By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volumeOpsScale))
ginkgo.By("Creating Storage Class")
scParameters := make(map[string]string)
scParameters["diskformat"] = "thin"
@@ -95,7 +95,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {

ginkgo.By("Creating PVCs using the Storage Class")
count := 0
for count < volume_ops_scale {
for count < volumeOpsScale {
pvclaims[count], err = e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
count++
@@ -137,7 +137,7 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName
case storageclass2:
var scVSanParameters map[string]string
scVSanParameters = make(map[string]string)
scVSanParameters[Policy_HostFailuresToTolerate] = "1"
scVSanParameters[PolicyHostFailuresToTolerate] = "1"
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""))
case storageclass3:
var scSPBMPolicyParameters map[string]string
@@ -37,28 +37,10 @@ import (
)

const (
VmfsDatastore = "sharedVmfs-0"
VsanDatastore = "vsanDatastore"
Datastore = "datastore"
Policy_DiskStripes = "diskStripes"
Policy_HostFailuresToTolerate = "hostFailuresToTolerate"
Policy_CacheReservation = "cacheReservation"
Policy_ObjectSpaceReservation = "objectSpaceReservation"
Policy_IopsLimit = "iopsLimit"
DiskFormat = "diskformat"
ThinDisk = "thin"
SpbmStoragePolicy = "storagepolicyname"
BronzeStoragePolicy = "bronze"
HostFailuresToTolerateCapabilityVal = "0"
CacheReservationCapabilityVal = "20"
DiskStripesCapabilityVal = "1"
ObjectSpaceReservationCapabilityVal = "30"
IopsLimitCapabilityVal = "100"
StripeWidthCapabilityVal = "2"
DiskStripesCapabilityInvalidVal = "14"
HostFailuresToTolerateCapabilityInvalidVal = "4"
DummyVMPrefixName = "vsphere-k8s"
DiskStripesCapabilityMaxVal = "11"
vmfsDatastore = "sharedVmfs-0"
vsanDatastore = "vsanDatastore"
dummyVMPrefixName = "vsphere-k8s"
diskStripesCapabilityMaxVal = "11"
)

/*
@@ -120,8 +102,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
// Valid policy.
ginkgo.It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func() {
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal))
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal
scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal
scParameters[PolicyCacheReservation] = CacheReservationCapabilityVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})
@@ -129,8 +111,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
// Valid policy.
ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func() {
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
scParameters[Policy_DiskStripes] = "1"
scParameters[Policy_ObjectSpaceReservation] = "30"
scParameters[PolicyDiskStripes] = "1"
scParameters[PolicyObjectSpaceReservation] = "30"
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})
@@ -138,9 +120,9 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
// Valid policy.
ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func() {
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VsanDatastore
scParameters[PolicyDiskStripes] = DiskStripesCapabilityVal
scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = vsanDatastore
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})
@@ -148,8 +130,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
// Valid policy.
ginkgo.It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func() {
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal))
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
invokeValidPolicyTest(f, client, namespace, scParameters)
})
@@ -158,7 +140,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
ginkgo.It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() {
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal))
scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal
scParameters[Policy_DiskStripes] = StripeWidthCapabilityVal
scParameters[PolicyDiskStripes] = StripeWidthCapabilityVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
framework.ExpectError(err)
@@ -172,12 +154,12 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
// diskStripes value has to be between 1 and 12.
ginkgo.It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func() {
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal))
scParameters[Policy_DiskStripes] = DiskStripesCapabilityInvalidVal
scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
scParameters[PolicyDiskStripes] = DiskStripesCapabilityInvalidVal
scParameters[PolicyCacheReservation] = CacheReservationCapabilityVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
framework.ExpectError(err)
errorMsg := "Invalid value for " + Policy_DiskStripes + "."
errorMsg := "Invalid value for " + PolicyDiskStripes + "."
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
}
@@ -187,11 +169,11 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
// hostFailuresToTolerate value has to be between 0 and 3 including.
ginkgo.It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func() {
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal))
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
framework.ExpectError(err)
errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
errorMsg := "Invalid value for " + PolicyHostFailuresToTolerate + "."
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
}
@@ -200,14 +182,14 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
// Specify a valid VSAN policy on a non-VSAN test bed.
// The test should fail.
ginkgo.It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func() {
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, VmfsDatastore))
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VmfsDatastore
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, vmfsDatastore))
scParameters[PolicyDiskStripes] = DiskStripesCapabilityVal
scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = vmfsDatastore
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
framework.ExpectError(err)
errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. " +
errorMsg := "The specified datastore: \\\"" + vmfsDatastore + "\\\" is not a VSAN datastore. " +
"The policy parameters will work only with VSAN Datastore."
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
@@ -223,18 +205,18 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
})

ginkgo.It("verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy", func() {
scParameters[Policy_DiskStripes] = DiskStripesCapabilityMaxVal
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = VsanDatastore
scParameters[PolicyDiskStripes] = diskStripesCapabilityMaxVal
scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Datastore] = vsanDatastore
framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
kubernetesClusterName := GetAndExpectStringEnvVar(KubernetesClusterName)
invokeStaleDummyVMTestWithStoragePolicy(client, masterNode, namespace, kubernetesClusterName, scParameters)
})

ginkgo.It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func() {
ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, VsanDatastore))
ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, vsanDatastore))
scParameters[SpbmStoragePolicy] = tagPolicy
scParameters[Datastore] = VsanDatastore
scParameters[Datastore] = vsanDatastore
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
@@ -262,7 +244,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", policyName))
scParameters[SpbmStoragePolicy] = policyName
gomega.Expect(scParameters[SpbmStoragePolicy]).NotTo(gomega.BeEmpty())
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
scParameters[PolicyDiskStripes] = DiskStripesCapabilityVal
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
@@ -351,7 +333,7 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterN

fnvHash := fnv.New32a()
fnvHash.Write([]byte(vmName))
dummyVMFullName := DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
dummyVMFullName := dummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
errorMsg := "Dummy VM - " + vmName + "is still present. Failing the test.."
nodeInfo := TestContext.NodeMapper.GetNodeInfo(masterNode)
isVMPresentFlag, _ := nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)
@@ -297,20 +297,20 @@ var _ = utils.SIGDescribe("Zone Support", func() {
})

ginkgo.It("Verify PVC creation with an invalid VSAN capability along with a compatible zone combination specified in storage class fails", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", Policy_HostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA))
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", PolicyHostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA))
scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
zones = append(zones, zoneA)
err := verifyPVCCreationFails(client, namespace, scParameters, zones, "")
errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
errorMsg := "Invalid value for " + PolicyHostFailuresToTolerate + "."
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
}
})

ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on a VSAN capability, datastore and compatible zone specified in storage class", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s, %s :%s, datastore :%s and zone :%s", Policy_ObjectSpaceReservation, ObjectSpaceReservationCapabilityVal, Policy_IopsLimit, IopsLimitCapabilityVal, vsanDatastore1, zoneA))
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s, %s :%s, datastore :%s and zone :%s", PolicyObjectSpaceReservation, ObjectSpaceReservationCapabilityVal, PolicyIopsLimit, IopsLimitCapabilityVal, vsanDatastore1, zoneA))
scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")