Fix golint errors in test/e2e/storage/vsphere

parent 9ae36ba81a
commit 18f05ef5b9
@@ -515,5 +515,4 @@ staging/src/k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1
 staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/fischer
 staging/src/k8s.io/sample-apiserver/pkg/registry/wardle/flunder
 test/e2e/common
-test/e2e/storage/vsphere
 test/utils
@@ -49,7 +49,7 @@ func bootstrapOnce() {
 	if err != nil {
 		framework.Failf("Failed to get nodes: %v", err)
 	}
-	TestContext = VSphereContext{NodeMapper: &NodeMapper{}, VSphereInstances: vsphereInstances}
+	TestContext = Context{NodeMapper: &NodeMapper{}, VSphereInstances: vsphereInstances}
 	// 3. Get Node to VSphere mapping
 	err = TestContext.NodeMapper.GenerateNodeMap(vsphereInstances, *nodeList)
 	if err != nil {
@@ -99,7 +99,7 @@ func GetVSphereInstances() (map[string]*VSphere, error) {
 func getConfig() (*ConfigFile, error) {
 	if confFileLocation == "" {
 		if framework.TestContext.CloudConfig.ConfigFile == "" {
-			return nil, fmt.Errorf("Env variable 'VSPHERE_CONF_FILE' is not set, and no config-file specified")
+			return nil, fmt.Errorf("env variable 'VSPHERE_CONF_FILE' is not set, and no config-file specified")
 		}
 		confFileLocation = framework.TestContext.CloudConfig.ConfigFile
 	}
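[Editor's note] The fmt.Errorf change above addresses golint's "error strings should not be capitalized or end with punctuation or a newline" check: error strings are routinely wrapped into longer messages, where a leading capital reads oddly. A minimal sketch of the rule (the names here are illustrative, not from the patch):

package main

import (
	"errors"
	"fmt"
)

func loadConf() error {
	// golint-clean: lowercase, no trailing punctuation.
	return errors.New("env variable 'VSPHERE_CONF_FILE' is not set")
}

func main() {
	if err := loadConf(); err != nil {
		// The lowercase message reads naturally when wrapped mid-sentence.
		fmt.Printf("bootstrapping vSphere tests failed: %v\n", err)
	}
}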
@@ -17,10 +17,10 @@ limitations under the License.
 package vsphere
 
 // Context holds common information for vSphere tests
-type VSphereContext struct {
+type Context struct {
 	NodeMapper       *NodeMapper
 	VSphereInstances map[string]*VSphere
 }
 
 // TestContext should be used by all tests to access common context data. It should be initialized only once, during bootstrapping the tests.
-var TestContext VSphereContext
+var TestContext Context
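[Editor's note] Renaming VSphereContext to Context fixes golint's package-name stutter warning: qualified from another package, the old name read as vsphere.VSphereContext ("type name will be used as vsphere.VSphereContext by other packages, and that stutters; consider calling this Context"). A sketch of the resulting shape (fields elided):

package vsphere

// Context holds common information for vSphere tests.
// Callers outside the package now write vsphere.Context instead of
// the stuttering vsphere.VSphereContext.
type Context struct {
	// fields elided in this sketch
}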
@@ -33,9 +33,11 @@ import (
 	neturl "net/url"
 )
 
+// NodeMapper contains information to generate nameToNodeInfo and vcToZoneDatastore maps
 type NodeMapper struct {
 }
 
+// NodeInfo contains information about vcenter nodes
 type NodeInfo struct {
 	Name          string
 	DataCenterRef types.ManagedObjectReference
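[Editor's note] The two added comments satisfy golint's documentation check: every exported identifier needs a doc comment that begins with the identifier's own name, otherwise golint reports, for example, "exported type NodeMapper should have comment or be unexported". A minimal sketch of the convention:

package vsphere

// NodeMapper contains information to generate nameToNodeInfo and vcToZoneDatastore maps
type NodeMapper struct {
}

// NodeInfo contains information about vcenter nodes
type NodeInfo struct {
	Name string
}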
@@ -46,9 +48,9 @@ type NodeInfo struct {
 }
 
 const (
-	DatacenterType             = "Datacenter"
-	ClusterComputeResourceType = "ClusterComputeResource"
-	HostSystemType             = "HostSystem"
+	datacenterType             = "Datacenter"
+	clusterComputeResourceType = "ClusterComputeResource"
+	hostSystemType             = "HostSystem"
 )
 
 var (
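[Editor's note] The other way to satisfy the documentation check is the one taken here: unexport identifiers that are only used within the package, since golint does not require doc comments on unexported names. Sketch:

package vsphere

// No doc comments required: these constants are package-private,
// so golint's "exported const ... should have comment (or a comment
// on this block) or be unexported" no longer applies.
const (
	datacenterType             = "Datacenter"
	clusterComputeResourceType = "ClusterComputeResource"
	hostSystemType             = "HostSystem"
)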
@@ -58,13 +60,13 @@ var (
 
 // GenerateNodeMap populates node name to node info map
 func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, nodeList v1.NodeList) error {
-	type VmSearch struct {
+	type VMSearch struct {
 		vs         *VSphere
 		datacenter *object.Datacenter
 	}
 
 	var wg sync.WaitGroup
-	var queueChannel []*VmSearch
+	var queueChannel []*VMSearch
 
 	var datacenters []*object.Datacenter
 	var err error
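[Editor's note] VmSearch becomes VMSearch because golint expects initialisms (VM, ID, UUID, URL, ...) to keep a uniform case within a name ("type VmSearch should be VMSearch"); the instanceId → instanceID rename later in this commit follows the same rule. A self-contained sketch with hypothetical string fields standing in for the patch's concrete types:

package main

import "fmt"

// VMSearch pairs a vSphere instance with a datacenter; the concrete
// field types from the patch are elided in this sketch.
type VMSearch struct {
	vmUUID     string // not vmUuid: initialisms keep one case
	datacenter string
}

func main() {
	s := VMSearch{vmUUID: "example-uuid", datacenter: "dc-1"}
	fmt.Println(s.vmUUID, s.datacenter)
}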
@@ -99,7 +101,7 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
 
 		for _, dc := range datacenters {
 			framework.Logf("Search candidates vc=%s and datacenter=%s", vs.Config.Hostname, dc.Name())
-			queueChannel = append(queueChannel, &VmSearch{vs: vs, datacenter: dc})
+			queueChannel = append(queueChannel, &VMSearch{vs: vs, datacenter: dc})
 		}
 	}
 
@@ -170,7 +172,7 @@ func retrieveZoneInformationForNode(nodeName string, connection *VSphere, hostSy
 	// zone precedence will be received by the HostSystem type.
 	for _, ancestor := range ancestors {
 		moType := ancestor.ExtensibleManagedObject.Self.Type
-		if moType == DatacenterType || moType == ClusterComputeResourceType || moType == HostSystemType {
+		if moType == datacenterType || moType == clusterComputeResourceType || moType == hostSystemType {
 			validAncestors = append(validAncestors, ancestor)
 		}
 	}
@@ -208,7 +210,7 @@ func retrieveZoneInformationForNode(nodeName string, connection *VSphere, hostSy
 	return zones
 }
 
-// Generate zone to datastore mapping for easily verifying volume placement
+// GenerateZoneToDatastoreMap generates a mapping of zone to datastore for easily verifying volume placement
 func (nm *NodeMapper) GenerateZoneToDatastoreMap() error {
 	// 1. Create zone to hosts map for each VC
 	var vcToZoneHostsMap = make(map[string](map[string][]string))
@@ -254,7 +256,7 @@ func (nm *NodeMapper) GenerateZoneToDatastoreMap() error {
 	return nil
 }
 
-// Retrieves the common datastores from the specified hosts
+// retrieveCommonDatastoresAmongHosts retrieves the common datastores from the specified hosts
 func retrieveCommonDatastoresAmongHosts(hosts []string, hostToDatastoresMap map[string][]string) []string {
 	var datastoreCountMap = make(map[string]int)
 	for _, host := range hosts {
@@ -272,12 +274,12 @@ func retrieveCommonDatastoresAmongHosts(hosts []string, hostToDatastoresMap map[
 	return commonDatastores
 }
 
-// Get all the datastores in the specified zone
+// GetDatastoresInZone returns all the datastores in the specified zone
 func (nm *NodeMapper) GetDatastoresInZone(vc string, zone string) []string {
 	return vcToZoneDatastoresMap[vc][zone]
 }
 
-// GetNodeInfo return NodeInfo for given nodeName
+// GetNodeInfo returns NodeInfo for given nodeName
 func (nm *NodeMapper) GetNodeInfo(nodeName string) *NodeInfo {
 	return nameToNodeInfo[nodeName]
 }
@@ -36,13 +36,13 @@ import (
 	----------
 	1. Create VMDK.
 	2. Create pv with label volume-type:ssd, volume path set to vmdk created in previous step, and PersistentVolumeReclaimPolicy is set to Delete.
-	3. Create PVC (pvc_vvol) with label selector to match with volume-type:vvol
-	4. Create PVC (pvc_ssd) with label selector to match with volume-type:ssd
-	5. Wait and verify pvc_ssd is bound with PV.
-	6. Verify Status of pvc_vvol is still pending.
-	7. Delete pvc_ssd.
+	3. Create PVC (pvcVvol) with label selector to match with volume-type:vvol
+	4. Create PVC (pvcSsd) with label selector to match with volume-type:ssd
+	5. Wait and verify pvSsd is bound with PV.
+	6. Verify Status of pvcVvol is still pending.
+	7. Delete pvcSsd.
 	8. verify associated pv is also deleted.
-	9. delete pvc_vvol
+	9. delete pvcVvol
 
 */
 var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
@@ -50,9 +50,9 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
 	var (
 		c          clientset.Interface
 		ns         string
-		pv_ssd     *v1.PersistentVolume
-		pvc_ssd    *v1.PersistentVolumeClaim
-		pvc_vvol   *v1.PersistentVolumeClaim
+		pvSsd      *v1.PersistentVolume
+		pvcSsd     *v1.PersistentVolumeClaim
+		pvcVvol    *v1.PersistentVolumeClaim
 		volumePath string
 		ssdlabels  map[string]string
 		vvollabels map[string]string
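[Editor's note] The pv_ssd/pvc_ssd/pvc_vvol renames fix golint's naming check "don't use underscores in Go names": Go identifiers use mixedCaps rather than snake_case. Sketch:

package main

import "fmt"

func main() {
	// Before: pvc_ssd, pvc_vvol — golint: "don't use underscores in Go names"
	pvcSsd := "pvc-ssd"
	pvcVvol := "pvc-vvol"
	fmt.Println(pvcSsd, pvcVvol)
}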
@@ -77,35 +77,35 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
 		ginkgo.AfterEach(func() {
 			ginkgo.By("Running clean up actions")
 			if framework.ProviderIs("vsphere") {
-				testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pv_ssd, pvc_ssd, pvc_vvol)
+				testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pvSsd, pvcSsd, pvcVvol)
 			}
 		})
 		ginkgo.It("should bind volume with claim for given label", func() {
-			volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels)
+			volumePath, pvSsd, pvcSsd, pvcVvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels)
 			framework.ExpectNoError(err)
 
-			ginkgo.By("wait for the pvc_ssd to bind with pv_ssd")
-			framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pv_ssd, pvc_ssd))
+			ginkgo.By("wait for the pvcSsd to bind with pvSsd")
+			framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, ns, pvSsd, pvcSsd))
 
-			ginkgo.By("Verify status of pvc_vvol is pending")
-			err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvc_vvol.Name, 3*time.Second, 300*time.Second)
+			ginkgo.By("Verify status of pvcVvol is pending")
+			err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimPending, c, ns, pvcVvol.Name, 3*time.Second, 300*time.Second)
 			framework.ExpectNoError(err)
 
-			ginkgo.By("delete pvc_ssd")
-			framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
+			ginkgo.By("delete pvcSsd")
+			framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvcSsd.Name, ns), "Failed to delete PVC ", pvcSsd.Name)
 
-			ginkgo.By("verify pv_ssd is deleted")
-			err = framework.WaitForPersistentVolumeDeleted(c, pv_ssd.Name, 3*time.Second, 300*time.Second)
+			ginkgo.By("verify pvSsd is deleted")
+			err = framework.WaitForPersistentVolumeDeleted(c, pvSsd.Name, 3*time.Second, 300*time.Second)
 			framework.ExpectNoError(err)
 			volumePath = ""
 
-			ginkgo.By("delete pvc_vvol")
-			framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
+			ginkgo.By("delete pvcVvol")
+			framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvcVvol.Name, ns), "Failed to delete PVC ", pvcVvol.Name)
 		})
 	})
 })
 
-func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) {
+func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pvSsd *v1.PersistentVolume, pvcSsd *v1.PersistentVolumeClaim, pvcVvol *v1.PersistentVolumeClaim, err error) {
 	ginkgo.By("creating vmdk")
 	volumePath = ""
 	volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
@@ -114,37 +114,37 @@ func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo,
 	}
 
 	ginkgo.By("creating the pv with label volume-type:ssd")
-	pv_ssd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels)
-	pv_ssd, err = c.CoreV1().PersistentVolumes().Create(pv_ssd)
+	pvSsd = getVSpherePersistentVolumeSpec(volumePath, v1.PersistentVolumeReclaimDelete, ssdlabels)
+	pvSsd, err = c.CoreV1().PersistentVolumes().Create(pvSsd)
 	if err != nil {
 		return
 	}
 
 	ginkgo.By("creating pvc with label selector to match with volume-type:vvol")
-	pvc_vvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels)
-	pvc_vvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_vvol)
+	pvcVvol = getVSpherePersistentVolumeClaimSpec(ns, vvollabels)
+	pvcVvol, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvcVvol)
 	if err != nil {
 		return
 	}
 
 	ginkgo.By("creating pvc with label selector to match with volume-type:ssd")
-	pvc_ssd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels)
-	pvc_ssd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc_ssd)
+	pvcSsd = getVSpherePersistentVolumeClaimSpec(ns, ssdlabels)
+	pvcSsd, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvcSsd)
 	return
 }
 
-func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) {
+func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pvSsd *v1.PersistentVolume, pvcSsd *v1.PersistentVolumeClaim, pvcVvol *v1.PersistentVolumeClaim) {
 	ginkgo.By("running testCleanupVSpherePVClabelselector")
 	if len(volumePath) > 0 {
 		nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
 	}
-	if pvc_ssd != nil {
-		framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)
+	if pvcSsd != nil {
+		framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvcSsd.Name, ns), "Failed to delete PVC ", pvcSsd.Name)
 	}
-	if pvc_vvol != nil {
-		framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc_vvol.Name, ns), "Failed to delete PVC ", pvc_vvol.Name)
+	if pvcVvol != nil {
+		framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvcVvol.Name, ns), "Failed to delete PVC ", pvcVvol.Name)
 	}
-	if pv_ssd != nil {
-		framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv_ssd.Name), "Failed to delete PV ", pv_ssd.Name)
+	if pvSsd != nil {
+		framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pvSsd.Name), "Failed to delete PV ", pvSsd.Name)
 	}
 }
@@ -34,14 +34,14 @@ import (
 )
 
 const (
-	VolDir                    = "kubevols"
-	DefaultDiskCapacityKB     = 2097152
-	DefaultDiskFormat         = "thin"
-	DefaultSCSIControllerType = "lsiLogic"
-	VirtualMachineType        = "VirtualMachine"
+	volDir                    = "kubevols"
+	defaultDiskCapacityKB     = 2097152
+	defaultDiskFormat         = "thin"
+	defaultSCSIControllerType = "lsiLogic"
+	virtualMachineType        = "VirtualMachine"
 )
 
-// Represents a vSphere instance where one or more kubernetes nodes are running.
+// VSphere represents a vSphere instance where one or more kubernetes nodes are running.
 type VSphere struct {
 	Config *Config
 	Client *govmomi.Client
@@ -63,7 +63,7 @@ func (vs *VSphere) GetDatacenter(ctx context.Context, datacenterPath string) (*o
 	return finder.Datacenter(ctx, datacenterPath)
 }
 
-// GetDatacenter returns the DataCenter Object for the given datacenterPath
+// GetDatacenterFromObjectReference returns the DataCenter Object for the given datacenter reference
 func (vs *VSphere) GetDatacenterFromObjectReference(ctx context.Context, dc object.Reference) *object.Datacenter {
 	Connect(ctx, vs)
 	return object.NewDatacenter(vs.Client.Client, dc.Reference())
@@ -76,7 +76,7 @@ func (vs *VSphere) GetAllDatacenter(ctx context.Context) ([]*object.Datacenter,
 	return finder.DatacenterList(ctx, "*")
 }
 
-// GetVMByUUID gets the VM object Reference from the given vmUUID
+// GetVMByUUID returns the VM object Reference from the given vmUUID
 func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Reference) (object.Reference, error) {
 	Connect(ctx, vs)
 	datacenter := vs.GetDatacenterFromObjectReference(ctx, dc)
@@ -85,7 +85,7 @@ func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Ref
 	return s.FindByUuid(ctx, datacenter, vmUUID, true, nil)
 }
 
-// Get host object reference of the host on which the specified VM resides
+// GetHostFromVMReference returns host object reference of the host on which the specified VM resides
 func (vs *VSphere) GetHostFromVMReference(ctx context.Context, vm types.ManagedObjectReference) types.ManagedObjectReference {
 	Connect(ctx, vs)
 	var vmMo mo.VirtualMachine
@@ -94,7 +94,7 @@ func (vs *VSphere) GetHostFromVMReference(ctx context.Context, vm types.ManagedO
 	return host
 }
 
-// Get the datastore references of all the datastores mounted on the specified host
+// GetDatastoresMountedOnHost returns the datastore references of all the datastores mounted on the specified host
 func (vs *VSphere) GetDatastoresMountedOnHost(ctx context.Context, host types.ManagedObjectReference) []types.ManagedObjectReference {
 	Connect(ctx, vs)
 	var hostMo mo.HostSystem
@@ -102,7 +102,7 @@ func (vs *VSphere) GetDatastoresMountedOnHost(ctx context.Context, host types.Ma
 	return hostMo.Datastore
 }
 
-// Get the datastore reference of the specified datastore
+// GetDatastoreRefFromName returns the datastore reference of the specified datastore
 func (vs *VSphere) GetDatastoreRefFromName(ctx context.Context, dc object.Reference, datastoreName string) (types.ManagedObjectReference, error) {
 	Connect(ctx, vs)
 	datacenter := object.NewDatacenter(vs.Client.Client, dc.Reference())
@@ -148,7 +148,7 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef type
 	if err != nil {
 		return "", fmt.Errorf("Failed while searching for datastore: %s. err: %+v", volumeOptions.Datastore, err)
 	}
-	directoryPath := filepath.Clean(ds.Path(VolDir)) + "/"
+	directoryPath := filepath.Clean(ds.Path(volDir)) + "/"
 	fileManager := object.NewFileManager(ds.Client())
 	err = fileManager.MakeDirectory(ctx, directoryPath, datacenter, false)
 	if err != nil {
@@ -237,7 +237,7 @@ func (vs *VSphere) IsVMPresent(vmName string, dataCenterRef types.ManagedObjectR
 		return
 	}
 	for _, vmFoldersChild := range vmFoldersChildren {
-		if vmFoldersChild.Reference().Type == VirtualMachineType {
+		if vmFoldersChild.Reference().Type == virtualMachineType {
 			if object.NewVirtualMachine(vs.Client.Client, vmFoldersChild.Reference()).Name() == vmName {
 				return true, nil
 			}
@@ -255,15 +255,15 @@ func (vs *VSphere) initVolumeOptions(volumeOptions *VolumeOptions) {
 		volumeOptions.Datastore = vs.Config.DefaultDatastore
 	}
 	if volumeOptions.CapacityKB == 0 {
-		volumeOptions.CapacityKB = DefaultDiskCapacityKB
+		volumeOptions.CapacityKB = defaultDiskCapacityKB
 	}
 	if volumeOptions.Name == "" {
 		volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
 	}
 	if volumeOptions.DiskFormat == "" {
-		volumeOptions.DiskFormat = DefaultDiskFormat
+		volumeOptions.DiskFormat = defaultDiskFormat
 	}
 	if volumeOptions.SCSIControllerType == "" {
-		volumeOptions.SCSIControllerType = DefaultSCSIControllerType
+		volumeOptions.SCSIControllerType = defaultSCSIControllerType
 	}
 }
@@ -24,36 +24,38 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 )
 
+// environment variables related to datastore parameters
 const (
 	SPBMPolicyName            = "VSPHERE_SPBM_POLICY_NAME"
 	StorageClassDatastoreName = "VSPHERE_DATASTORE"
 	SecondSharedDatastore     = "VSPHERE_SECOND_SHARED_DATASTORE"
 	KubernetesClusterName     = "VSPHERE_KUBERNETES_CLUSTER"
 	SPBMTagPolicy             = "VSPHERE_SPBM_TAG_POLICY"
-)
 
-const (
 	VCPClusterDatastore        = "CLUSTER_DATASTORE"
 	SPBMPolicyDataStoreCluster = "VSPHERE_SPBM_POLICY_DS_CLUSTER"
 )
 
+// environment variables used for scaling tests
 const (
 	VCPScaleVolumeCount   = "VCP_SCALE_VOLUME_COUNT"
 	VCPScaleVolumesPerPod = "VCP_SCALE_VOLUME_PER_POD"
 	VCPScaleInstances     = "VCP_SCALE_INSTANCES"
 )
 
+// environment variables used for stress tests
 const (
 	VCPStressInstances  = "VCP_STRESS_INSTANCES"
 	VCPStressIterations = "VCP_STRESS_ITERATIONS"
 )
 
+// environment variables used for performance tests
 const (
 	VCPPerfVolumeCount   = "VCP_PERF_VOLUME_COUNT"
 	VCPPerfVolumesPerPod = "VCP_PERF_VOLUME_PER_POD"
 	VCPPerfIterations    = "VCP_PERF_ITERATIONS"
 )
 
+// environment variables used for zone tests
 const (
 	VCPZoneVsanDatastore1 = "VCP_ZONE_VSANDATASTORE1"
 	VCPZoneVsanDatastore2 = "VCP_ZONE_VSANDATASTORE2"
|
|||||||
VCPInvalidZone = "VCP_INVALID_ZONE"
|
VCPInvalidZone = "VCP_INVALID_ZONE"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// storage class parameters
|
||||||
|
const (
|
||||||
|
Datastore = "datastore"
|
||||||
|
PolicyDiskStripes = "diskStripes"
|
||||||
|
PolicyHostFailuresToTolerate = "hostFailuresToTolerate"
|
||||||
|
PolicyCacheReservation = "cacheReservation"
|
||||||
|
PolicyObjectSpaceReservation = "objectSpaceReservation"
|
||||||
|
PolicyIopsLimit = "iopsLimit"
|
||||||
|
DiskFormat = "diskformat"
|
||||||
|
SpbmStoragePolicy = "storagepolicyname"
|
||||||
|
)
|
||||||
|
|
||||||
|
// test values for storage class parameters
|
||||||
|
const (
|
||||||
|
ThinDisk = "thin"
|
||||||
|
BronzeStoragePolicy = "bronze"
|
||||||
|
HostFailuresToTolerateCapabilityVal = "0"
|
||||||
|
CacheReservationCapabilityVal = "20"
|
||||||
|
DiskStripesCapabilityVal = "1"
|
||||||
|
ObjectSpaceReservationCapabilityVal = "30"
|
||||||
|
IopsLimitCapabilityVal = "100"
|
||||||
|
StripeWidthCapabilityVal = "2"
|
||||||
|
DiskStripesCapabilityInvalidVal = "14"
|
||||||
|
HostFailuresToTolerateCapabilityInvalidVal = "4"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetAndExpectStringEnvVar returns the string value of an environment variable or fails if
|
||||||
|
// the variable is not set
|
||||||
func GetAndExpectStringEnvVar(varName string) string {
|
func GetAndExpectStringEnvVar(varName string) string {
|
||||||
varValue := os.Getenv(varName)
|
varValue := os.Getenv(varName)
|
||||||
gomega.Expect(varValue).NotTo(gomega.BeEmpty(), "ENV "+varName+" is not set")
|
gomega.Expect(varValue).NotTo(gomega.BeEmpty(), "ENV "+varName+" is not set")
|
||||||
return varValue
|
return varValue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetAndExpectIntEnvVar returns the integer value of an environment variable or fails if
|
||||||
|
// the variable is not set
|
||||||
func GetAndExpectIntEnvVar(varName string) int {
|
func GetAndExpectIntEnvVar(varName string) int {
|
||||||
varValue := GetAndExpectStringEnvVar(varName)
|
varValue := GetAndExpectStringEnvVar(varName)
|
||||||
varIntValue, err := strconv.Atoi(varValue)
|
varIntValue, err := strconv.Atoi(varValue)
|
||||||
|
@@ -129,7 +129,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
 		case storageclass1:
 			scParams = nil
 		case storageclass2:
-			scParams[Policy_HostFailuresToTolerate] = "1"
+			scParams[PolicyHostFailuresToTolerate] = "1"
 		case storageclass3:
 			scParams[SpbmStoragePolicy] = policyName
 		case storageclass4:
@@ -92,7 +92,7 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
 		case storageclass2:
 			var scVSanParameters map[string]string
 			scVSanParameters = make(map[string]string)
-			scVSanParameters[Policy_HostFailuresToTolerate] = "1"
+			scVSanParameters[PolicyHostFailuresToTolerate] = "1"
 			sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""))
 		case storageclass3:
 			var scSPBMPolicyParameters map[string]string
|
|||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
wg.Add(instances)
|
wg.Add(instances)
|
||||||
for instanceCount := 0; instanceCount < instances; instanceCount++ {
|
for instanceCount := 0; instanceCount < instances; instanceCount++ {
|
||||||
instanceId := fmt.Sprintf("Thread:%v", instanceCount+1)
|
instanceID := fmt.Sprintf("Thread:%v", instanceCount+1)
|
||||||
go PerformVolumeLifeCycleInParallel(f, client, namespace, instanceId, scArrays[instanceCount%len(scArrays)], iterations, &wg)
|
go PerformVolumeLifeCycleInParallel(f, client, namespace, instanceID, scArrays[instanceCount%len(scArrays)], iterations, &wg)
|
||||||
}
|
}
|
||||||
wg.Wait()
|
wg.Wait()
|
||||||
})
|
})
|
||||||
|
|
||||||
})
|
})
|
||||||
|
|
||||||
// goroutine to perform volume lifecycle operations in parallel
|
// PerformVolumeLifeCycleInParallel performs volume lifecycle operations
|
||||||
func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceId string, sc *storagev1.StorageClass, iterations int, wg *sync.WaitGroup) {
|
// Called as a go routine to perform operations in parallel
|
||||||
|
func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceID string, sc *storagev1.StorageClass, iterations int, wg *sync.WaitGroup) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
defer ginkgo.GinkgoRecover()
|
defer ginkgo.GinkgoRecover()
|
||||||
|
|
||||||
for iterationCount := 0; iterationCount < iterations; iterationCount++ {
|
for iterationCount := 0; iterationCount < iterations; iterationCount++ {
|
||||||
logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1)
|
logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceID, iterationCount+1)
|
||||||
ginkgo.By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
|
ginkgo.By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
|
||||||
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc))
|
pvclaim, err := e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "1Gi", sc))
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
|
@@ -54,8 +54,8 @@ const (
 	storageclass2 = "sc-vsan"
 	storageclass3 = "sc-spbm"
 	storageclass4 = "sc-user-specified-ds"
-	DummyDiskName  = "kube-dummyDisk.vmdk"
-	ProviderPrefix = "vsphere://"
+	dummyDiskName  = "kube-dummyDisk.vmdk"
+	providerPrefix = "vsphere://"
 )
 
 // volumeState represents the state of a volume.
@@ -446,7 +446,7 @@ func getCanonicalVolumePath(ctx context.Context, dc *object.Datacenter, volumePa
 	dsFolder := dsPath[0]
 	// Get the datastore folder ID if datastore or folder doesn't exist in datastoreFolderIDMap
 	if !isValidUUID(dsFolder) {
-		dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + DummyDiskName
+		dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + dummyDiskName
 		// Querying a non-existent dummy disk on the datastore folder.
 		// It would fail and return an folder ID in the error message.
 		_, err := getVirtualDiskPage83Data(ctx, dc, dummyDiskVolPath)
@@ -546,9 +546,8 @@ func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, disk
 			if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
 				framework.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
 				return device, nil
-			} else {
-				framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
 			}
+			framework.Logf("VirtualDisk backing filename %q does not match with diskPath %q", backing.FileName, diskPath)
 		}
 	}
 }
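[Editor's note] This hunk resolves golint's "if block ends with a return statement, so drop this else and outdent its block": because the matching branch always returns, the else wrapper was redundant and its body can be outdented with identical behavior. Sketch:

package main

import "fmt"

func classify(match bool) string {
	if match {
		return "found"
	}
	// Formerly an else branch; outdenting changes nothing because
	// the if branch above always returns.
	return "no match"
}

func main() {
	fmt.Println(classify(true), classify(false))
}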
@@ -745,10 +744,10 @@ func diskIsAttached(volPath string, nodeName string) (bool, error) {
 // getUUIDFromProviderID strips ProviderPrefix - "vsphere://" from the providerID
 // this gives the VM UUID which can be used to find Node VM from vCenter
 func getUUIDFromProviderID(providerID string) string {
-	return strings.TrimPrefix(providerID, ProviderPrefix)
+	return strings.TrimPrefix(providerID, providerPrefix)
 }
 
-// GetAllReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state
+// GetReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state
 func GetReadySchedulableNodeInfos() []*NodeInfo {
 	nodeList, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
 	framework.ExpectNoError(err)
@@ -33,8 +33,8 @@ import (
 )
 
 const (
-	InvalidDatastore = "invalidDatastore"
-	DatastoreSCName  = "datastoresc"
+	invalidDatastore = "invalidDatastore"
+	datastoreSCName  = "datastoresc"
 )
 
 /*
@@ -66,11 +66,11 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",
 
 	ginkgo.It("verify dynamically provisioned pv using storageclass fails on an invalid datastore", func() {
 		ginkgo.By("Invoking Test for invalid datastore")
-		scParameters[Datastore] = InvalidDatastore
+		scParameters[Datastore] = invalidDatastore
 		scParameters[DiskFormat] = ThinDisk
 		err := invokeInvalidDatastoreTestNeg(client, namespace, scParameters)
 		framework.ExpectError(err)
-		errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": Datastore '` + InvalidDatastore + `' not found`
+		errorMsg := `Failed to provision volume with StorageClass \"` + datastoreSCName + `\": Datastore '` + invalidDatastore + `' not found`
 		if !strings.Contains(err.Error(), errorMsg) {
 			framework.ExpectNoError(err, errorMsg)
 		}
@@ -79,7 +79,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",
 
 func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
 	ginkgo.By("Creating Storage Class With Invalid Datastore")
-	storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters, nil, ""))
+	storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(datastoreSCName, scParameters, nil, ""))
 	framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
 	defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
 
@@ -31,7 +31,7 @@ import (
 )
 
 const (
-	DiskSizeSCName = "disksizesc"
+	diskSizeSCName = "disksizesc"
 )
 
 /*
@@ -68,7 +68,7 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
 		expectedDiskSize := "1Mi"
 
 		ginkgo.By("Creating Storage Class")
-		storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters, nil, ""))
+		storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(diskSizeSCName, scParameters, nil, ""))
 		framework.ExpectNoError(err)
 		defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
 
@@ -33,10 +33,10 @@ import (
 )
 
 const (
-	Ext4FSType    = "ext4"
-	Ext3FSType    = "ext3"
-	InvalidFSType = "ext10"
-	ExecCommand   = "/bin/df -T /mnt/volume1 | /bin/awk 'FNR == 2 {print $2}' > /mnt/volume1/fstype && while true ; do sleep 2 ; done"
+	ext4FSType    = "ext4"
+	ext3FSType    = "ext3"
+	invalidFSType = "ext10"
+	execCommand   = "/bin/df -T /mnt/volume1 | /bin/awk 'FNR == 2 {print $2}' > /mnt/volume1/fstype && while true ; do sleep 2 ; done"
 )
 
 /*
@@ -81,17 +81,17 @@ var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() {
 
 	ginkgo.It("verify fstype - ext3 formatted volume", func() {
 		ginkgo.By("Invoking Test for fstype: ext3")
-		invokeTestForFstype(f, client, namespace, Ext3FSType, Ext3FSType)
+		invokeTestForFstype(f, client, namespace, ext3FSType, ext3FSType)
 	})
 
 	ginkgo.It("verify fstype - default value should be ext4", func() {
 		ginkgo.By("Invoking Test for fstype: Default Value - ext4")
-		invokeTestForFstype(f, client, namespace, "", Ext4FSType)
+		invokeTestForFstype(f, client, namespace, "", ext4FSType)
 	})
 
 	ginkgo.It("verify invalid fstype", func() {
 		ginkgo.By("Invoking Test for fstype: invalid Value")
-		invokeTestForInvalidFstype(f, client, namespace, InvalidFSType)
+		invokeTestForInvalidFstype(f, client, namespace, invalidFSType)
 	})
 })
 
@@ -127,7 +127,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
 	var pvclaims []*v1.PersistentVolumeClaim
 	pvclaims = append(pvclaims, pvclaim)
 	// Create pod to attach Volume to Node
-	pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
+	pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, execCommand)
 	framework.ExpectError(err)
 
 	eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{})
@@ -171,7 +171,7 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st
 	pvclaims = append(pvclaims, pvclaim)
 	ginkgo.By("Creating pod to attach PV to the node")
 	// Create pod to attach Volume to Node
-	pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
+	pod, err := e2epod.CreatePod(client, namespace, nil, pvclaims, false, execCommand)
 	framework.ExpectNoError(err)
 
 	// Asserts: Right disk is attached to the pod
@@ -51,7 +51,7 @@ import (
 
 var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
 	f := framework.NewDefaultFramework("volume-ops-storm")
-	const DEFAULT_VOLUME_OPS_SCALE = 30
+	const defaultVolumeOpsScale = 30
 	var (
 		client    clientset.Interface
 		namespace string
|
|||||||
pvclaims []*v1.PersistentVolumeClaim
|
pvclaims []*v1.PersistentVolumeClaim
|
||||||
persistentvolumes []*v1.PersistentVolume
|
persistentvolumes []*v1.PersistentVolume
|
||||||
err error
|
err error
|
||||||
volume_ops_scale int
|
volumeOpsScale int
|
||||||
)
|
)
|
||||||
ginkgo.BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
e2eskipper.SkipUnlessProviderIs("vsphere")
|
e2eskipper.SkipUnlessProviderIs("vsphere")
|
||||||
@ -68,12 +68,12 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
|
|||||||
namespace = f.Namespace.Name
|
namespace = f.Namespace.Name
|
||||||
gomega.Expect(GetReadySchedulableNodeInfos()).NotTo(gomega.BeEmpty())
|
gomega.Expect(GetReadySchedulableNodeInfos()).NotTo(gomega.BeEmpty())
|
||||||
if scale := os.Getenv("VOLUME_OPS_SCALE"); scale != "" {
|
if scale := os.Getenv("VOLUME_OPS_SCALE"); scale != "" {
|
||||||
volume_ops_scale, err = strconv.Atoi(scale)
|
volumeOpsScale, err = strconv.Atoi(scale)
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
} else {
|
} else {
|
||||||
volume_ops_scale = DEFAULT_VOLUME_OPS_SCALE
|
volumeOpsScale = defaultVolumeOpsScale
|
||||||
}
|
}
|
||||||
pvclaims = make([]*v1.PersistentVolumeClaim, volume_ops_scale)
|
pvclaims = make([]*v1.PersistentVolumeClaim, volumeOpsScale)
|
||||||
})
|
})
|
||||||
ginkgo.AfterEach(func() {
|
ginkgo.AfterEach(func() {
|
||||||
ginkgo.By("Deleting PVCs")
|
ginkgo.By("Deleting PVCs")
|
||||||
@ -86,7 +86,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
|
|||||||
})
|
})
|
||||||
|
|
||||||
ginkgo.It("should create pod with many volumes and verify no attach call fails", func() {
|
ginkgo.It("should create pod with many volumes and verify no attach call fails", func() {
|
||||||
ginkgo.By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volume_ops_scale))
|
ginkgo.By(fmt.Sprintf("Running test with VOLUME_OPS_SCALE: %v", volumeOpsScale))
|
||||||
ginkgo.By("Creating Storage Class")
|
ginkgo.By("Creating Storage Class")
|
||||||
scParameters := make(map[string]string)
|
scParameters := make(map[string]string)
|
||||||
scParameters["diskformat"] = "thin"
|
scParameters["diskformat"] = "thin"
|
||||||
@ -95,7 +95,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
|
|||||||
|
|
||||||
ginkgo.By("Creating PVCs using the Storage Class")
|
ginkgo.By("Creating PVCs using the Storage Class")
|
||||||
count := 0
|
count := 0
|
||||||
for count < volume_ops_scale {
|
for count < volumeOpsScale {
|
||||||
pvclaims[count], err = e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
|
pvclaims[count], err = e2epv.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
count++
|
count++
|
||||||
|
@ -137,7 +137,7 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName
|
|||||||
case storageclass2:
|
case storageclass2:
|
||||||
var scVSanParameters map[string]string
|
var scVSanParameters map[string]string
|
||||||
scVSanParameters = make(map[string]string)
|
scVSanParameters = make(map[string]string)
|
||||||
scVSanParameters[Policy_HostFailuresToTolerate] = "1"
|
scVSanParameters[PolicyHostFailuresToTolerate] = "1"
|
||||||
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""))
|
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""))
|
||||||
case storageclass3:
|
case storageclass3:
|
||||||
var scSPBMPolicyParameters map[string]string
|
var scSPBMPolicyParameters map[string]string
|
||||||
|
@@ -37,28 +37,10 @@ import (
 )
 
 const (
-	VmfsDatastore                              = "sharedVmfs-0"
-	VsanDatastore                              = "vsanDatastore"
-	Datastore                                  = "datastore"
-	Policy_DiskStripes                         = "diskStripes"
-	Policy_HostFailuresToTolerate              = "hostFailuresToTolerate"
-	Policy_CacheReservation                    = "cacheReservation"
-	Policy_ObjectSpaceReservation              = "objectSpaceReservation"
-	Policy_IopsLimit                           = "iopsLimit"
-	DiskFormat                                 = "diskformat"
-	ThinDisk                                   = "thin"
-	SpbmStoragePolicy                          = "storagepolicyname"
-	BronzeStoragePolicy                        = "bronze"
-	HostFailuresToTolerateCapabilityVal        = "0"
-	CacheReservationCapabilityVal              = "20"
-	DiskStripesCapabilityVal                   = "1"
-	ObjectSpaceReservationCapabilityVal        = "30"
-	IopsLimitCapabilityVal                     = "100"
-	StripeWidthCapabilityVal                   = "2"
-	DiskStripesCapabilityInvalidVal            = "14"
-	HostFailuresToTolerateCapabilityInvalidVal = "4"
-	DummyVMPrefixName                          = "vsphere-k8s"
-	DiskStripesCapabilityMaxVal                = "11"
+	vmfsDatastore               = "sharedVmfs-0"
+	vsanDatastore               = "vsanDatastore"
+	dummyVMPrefixName           = "vsphere-k8s"
+	diskStripesCapabilityMaxVal = "11"
 )
 
 /*
@ -120,8 +102,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
|
|||||||
// Valid policy.
|
// Valid policy.
|
||||||
ginkgo.It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func() {
|
ginkgo.It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func() {
|
||||||
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal))
|
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal))
|
||||||
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal
|
scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal
|
||||||
scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
|
scParameters[PolicyCacheReservation] = CacheReservationCapabilityVal
|
||||||
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||||
invokeValidPolicyTest(f, client, namespace, scParameters)
|
invokeValidPolicyTest(f, client, namespace, scParameters)
|
||||||
})
|
})
|
||||||
@ -129,8 +111,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
|
|||||||
// Valid policy.
|
// Valid policy.
|
||||||
ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func() {
|
ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func() {
|
||||||
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
|
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
|
||||||
scParameters[Policy_DiskStripes] = "1"
|
scParameters[PolicyDiskStripes] = "1"
|
||||||
scParameters[Policy_ObjectSpaceReservation] = "30"
|
scParameters[PolicyObjectSpaceReservation] = "30"
|
||||||
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||||
invokeValidPolicyTest(f, client, namespace, scParameters)
|
invokeValidPolicyTest(f, client, namespace, scParameters)
|
||||||
})
|
})
|
||||||
@ -138,9 +120,9 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
|
|||||||
// Valid policy.
|
// Valid policy.
|
||||||
ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func() {
|
ginkgo.It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func() {
|
||||||
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
|
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
|
||||||
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
|
scParameters[PolicyDiskStripes] = DiskStripesCapabilityVal
|
||||||
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
|
scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
|
||||||
scParameters[Datastore] = VsanDatastore
|
scParameters[Datastore] = vsanDatastore
|
||||||
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||||
invokeValidPolicyTest(f, client, namespace, scParameters)
|
invokeValidPolicyTest(f, client, namespace, scParameters)
|
||||||
})
|
})
|
||||||
@ -148,8 +130,8 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
|
|||||||
// Valid policy.
|
// Valid policy.
|
||||||
ginkgo.It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func() {
|
ginkgo.It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func() {
|
||||||
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal))
|
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal))
|
||||||
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
|
scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
|
||||||
scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
|
scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal
|
||||||
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||||
invokeValidPolicyTest(f, client, namespace, scParameters)
|
invokeValidPolicyTest(f, client, namespace, scParameters)
|
||||||
})
|
})
|
||||||
@ -158,7 +140,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
|
|||||||
ginkgo.It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() {
|
ginkgo.It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() {
|
||||||
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal))
|
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal))
|
||||||
scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal
|
scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal
|
||||||
scParameters[Policy_DiskStripes] = StripeWidthCapabilityVal
|
scParameters[PolicyDiskStripes] = StripeWidthCapabilityVal
|
||||||
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||||
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
|
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
|
||||||
framework.ExpectError(err)
|
framework.ExpectError(err)
|
||||||
@@ -172,12 +154,12 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
// diskStripes value has to be between 1 and 12.
ginkgo.It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func() {
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal))
-scParameters[Policy_DiskStripes] = DiskStripesCapabilityInvalidVal
+scParameters[PolicyDiskStripes] = DiskStripesCapabilityInvalidVal
-scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
+scParameters[PolicyCacheReservation] = CacheReservationCapabilityVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
framework.ExpectError(err)
-errorMsg := "Invalid value for " + Policy_DiskStripes + "."
+errorMsg := "Invalid value for " + PolicyDiskStripes + "."
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
}
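All of the invalid-capability cases above share one shape: provision with bad parameters, require an error, and require that the error text names the offending key. A self-contained sketch of that check, with a stubbed provisioner standing in for invokeInvalidPolicyTestNeg (the helper body and message here are illustrative):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// provisionPVC stands in for invokeInvalidPolicyTestNeg: it returns the
// provisioning failure that the real test scrapes from the PVC's events.
func provisionPVC(params map[string]string) error {
	return errors.New("Invalid value for diskStripes.")
}

func main() {
	err := provisionPVC(map[string]string{"diskStripes": "14"})
	errorMsg := "Invalid value for diskStripes."
	// The case passes only when provisioning failed AND the failure names
	// the offending capability; any other outcome fails the test.
	if err == nil || !strings.Contains(err.Error(), errorMsg) {
		fmt.Println("unexpected result:", err)
		return
	}
	fmt.Println("got expected provisioning failure:", err)
}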
@@ -187,11 +169,11 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
// hostFailuresToTolerate value has to be between 0 and 3 including.
ginkgo.It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func() {
ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal))
-scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
+scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
framework.ExpectError(err)
-errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
+errorMsg := "Invalid value for " + PolicyHostFailuresToTolerate + "."
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
}
@@ -200,14 +182,14 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
// Specify a valid VSAN policy on a non-VSAN test bed.
// The test should fail.
ginkgo.It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func() {
-ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, VmfsDatastore))
+ginkgo.By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, vmfsDatastore))
-scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
+scParameters[PolicyDiskStripes] = DiskStripesCapabilityVal
-scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
+scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
-scParameters[Datastore] = VmfsDatastore
+scParameters[Datastore] = vmfsDatastore
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
framework.ExpectError(err)
-errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. " +
+errorMsg := "The specified datastore: \\\"" + vmfsDatastore + "\\\" is not a VSAN datastore. " +
"The policy parameters will work only with VSAN Datastore."
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
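One detail worth noting in the expected message above: the Go literal \\\" compiles to the two characters \", so the test matches a fault string in which the datastore name apparently arrives from vCenter wrapped in escaped quotes. A quick sketch of what the literal actually produces (the datastore name is hypothetical):

package main

import "fmt"

func main() {
	vmfsDatastore := "sharedVmfs-0" // hypothetical VMFS datastore name
	// In Go source, \\\" is a backslash plus a double quote, so the expected
	// message contains the datastore name wrapped as \"sharedVmfs-0\".
	errorMsg := "The specified datastore: \\\"" + vmfsDatastore + "\\\" is not a VSAN datastore. " +
		"The policy parameters will work only with VSAN Datastore."
	fmt.Println(errorMsg)
}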
@@ -223,18 +205,18 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
})

ginkgo.It("verify clean up of stale dummy VM for dynamically provisioned pvc using SPBM policy", func() {
-scParameters[Policy_DiskStripes] = DiskStripesCapabilityMaxVal
+scParameters[PolicyDiskStripes] = diskStripesCapabilityMaxVal
-scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
+scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
-scParameters[Datastore] = VsanDatastore
+scParameters[Datastore] = vsanDatastore
framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
kubernetesClusterName := GetAndExpectStringEnvVar(KubernetesClusterName)
invokeStaleDummyVMTestWithStoragePolicy(client, masterNode, namespace, kubernetesClusterName, scParameters)
})

ginkgo.It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func() {
-ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, VsanDatastore))
+ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, vsanDatastore))
scParameters[SpbmStoragePolicy] = tagPolicy
-scParameters[Datastore] = VsanDatastore
+scParameters[Datastore] = vsanDatastore
scParameters[DiskFormat] = ThinDisk
framework.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
@@ -262,7 +244,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", policyName))
|
ginkgo.By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", policyName))
|
||||||
scParameters[SpbmStoragePolicy] = policyName
|
scParameters[SpbmStoragePolicy] = policyName
|
||||||
gomega.Expect(scParameters[SpbmStoragePolicy]).NotTo(gomega.BeEmpty())
|
gomega.Expect(scParameters[SpbmStoragePolicy]).NotTo(gomega.BeEmpty())
|
||||||
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
|
scParameters[PolicyDiskStripes] = DiskStripesCapabilityVal
|
||||||
scParameters[DiskFormat] = ThinDisk
|
scParameters[DiskFormat] = ThinDisk
|
||||||
framework.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
|
framework.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
|
||||||
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
|
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
|
||||||
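This negative case presumably exists because a storage class may either name a pre-defined SPBM policy or spell out VSAN capabilities inline, not both. A toy sketch of that mutual-exclusion rule (the parameter keys follow the in-tree vSphere provisioner, but the validation function itself is a stand-in):

package main

import (
	"errors"
	"fmt"
)

// checkParams mirrors the constraint this test exercises: a pre-defined SPBM
// policy and inline VSAN capability keys are mutually exclusive. The real
// validation lives in the vSphere volume plugin; this only sketches it.
func checkParams(params map[string]string) error {
	_, hasPolicy := params["storagePolicyName"] // key behind SpbmStoragePolicy
	_, hasCapability := params["diskStripes"]   // one of the VSAN capability keys
	if hasPolicy && hasCapability {
		return errors.New("cannot use both an SPBM policy and raw VSAN capabilities")
	}
	return nil
}

func main() {
	params := map[string]string{
		"storagePolicyName": "gold", // hypothetical policy name
		"diskStripes":       "1",
	}
	fmt.Println(checkParams(params)) // prints the mutual-exclusion error
}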
@@ -351,7 +333,7 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterN

fnvHash := fnv.New32a()
fnvHash.Write([]byte(vmName))
-dummyVMFullName := DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
+dummyVMFullName := dummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
errorMsg := "Dummy VM - " + vmName + "is still present. Failing the test.."
nodeInfo := TestContext.NodeMapper.GetNodeInfo(masterNode)
isVMPresentFlag, _ := nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)
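The stale-VM check reconstructs the dummy VM's name the same way the provisioner builds it: a 32-bit FNV-1a hash of the volume's VM name appended to a fixed prefix. A standalone sketch (the prefix value is a guess at the package's dummyVMPrefixName constant):

package main

import (
	"fmt"
	"hash/fnv"
)

func main() {
	vmName := "kubernetes-dynamic-pvc-1234" // hypothetical volume VM name
	fnvHash := fnv.New32a()                 // 32-bit FNV-1a, as in the test above
	fnvHash.Write([]byte(vmName))
	// <prefix>-<hash32>; "vsphere-k8s" is a guess at dummyVMPrefixName.
	dummyVMFullName := "vsphere-k8s" + "-" + fmt.Sprint(fnvHash.Sum32())
	fmt.Println(dummyVMFullName) // the derived dummy VM name
}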
@@ -297,20 +297,20 @@ var _ = utils.SIGDescribe("Zone Support", func() {
})

ginkgo.It("Verify PVC creation with an invalid VSAN capability along with a compatible zone combination specified in storage class fails", func() {
-ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", Policy_HostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA))
+ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", PolicyHostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA))
-scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
+scParameters[PolicyHostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
zones = append(zones, zoneA)
err := verifyPVCCreationFails(client, namespace, scParameters, zones, "")
-errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
+errorMsg := "Invalid value for " + PolicyHostFailuresToTolerate + "."
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
}
})

ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on a VSAN capability, datastore and compatible zone specified in storage class", func() {
-ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s, %s :%s, datastore :%s and zone :%s", Policy_ObjectSpaceReservation, ObjectSpaceReservationCapabilityVal, Policy_IopsLimit, IopsLimitCapabilityVal, vsanDatastore1, zoneA))
+ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s, %s :%s, datastore :%s and zone :%s", PolicyObjectSpaceReservation, ObjectSpaceReservationCapabilityVal, PolicyIopsLimit, IopsLimitCapabilityVal, vsanDatastore1, zoneA))
-scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
+scParameters[PolicyObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
-scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
+scParameters[PolicyIopsLimit] = IopsLimitCapabilityVal
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
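For reference, the zone tests ultimately hand these maps to a StorageClass; a sketch of the kind of object the helpers plausibly build, with the zone carried as an allowed topology (the name, values, and zone label are illustrative for the release this code targets):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	sc := storagev1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "vsan-zoned-sc"}, // hypothetical name
		Provisioner: "kubernetes.io/vsphere-volume",           // in-tree vSphere provisioner
		Parameters: map[string]string{ // same keys the scParameters map carries
			"objectSpaceReservation": "20",
			"iopsLimit":              "100",
			"datastore":              "vsanDatastore",
		},
		AllowedTopologies: []corev1.TopologySelectorTerm{{
			MatchLabelExpressions: []corev1.TopologySelectorLabelRequirement{{
				Key:    "failure-domain.beta.kubernetes.io/zone", // zone label of that era
				Values: []string{"zone-a"},                       // hypothetical zone
			}},
		}},
	}
	fmt.Printf("%s provisions via %s with %v\n", sc.Name, sc.Provisioner, sc.Parameters)
}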