Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 19:56:01 +00:00)

Add multi-vc configuration for e2e tests

commit 4683a9471f (parent 7dadeee5e8)
@@ -23,3 +23,4 @@ retry_time
 file_content_in_loop
 break_on_expected_content
 Premium_LRS
+VCP_STRESS_ITERATIONS
@@ -21,7 +21,6 @@ go_library(
         "//pkg/cloudprovider/providers/vsphere/vclib/diskmanagers:go_default_library",
         "//pkg/controller:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/github.com/vmware/govmomi:go_default_library",
         "//vendor/github.com/vmware/govmomi/vim25:go_default_library",
         "//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
         "//vendor/golang.org/x/net/context:go_default_library",
@@ -354,3 +354,10 @@ func (nm *NodeManager) renewNodeInfo(nodeInfo *NodeInfo, reconnect bool) (*NodeI
     vm := nodeInfo.vm.RenewVM(vsphereInstance.conn.GoVmomiClient)
     return &NodeInfo{vm: &vm, dataCenter: vm.Datacenter, vcServer: nodeInfo.vcServer}, nil
 }
+
+func (nodeInfo *NodeInfo) VM() *vclib.VirtualMachine {
+    if nodeInfo == nil {
+        return nil
+    }
+    return nodeInfo.vm
+}
@@ -1159,3 +1159,10 @@ func (vs *VSphere) NodeDeleted(obj interface{}) {
     glog.V(4).Infof("Node deleted: %+v", node)
     vs.nodeManager.UnRegisterNode(node)
 }
+
+func (vs *VSphere) NodeManager() (nodeManager *NodeManager) {
+    if vs == nil {
+        return nil
+    }
+    return vs.nodeManager
+}
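The NodeInfo.VM() and VSphere.NodeManager() accessors added in the two hunks above give the e2e tests a nil-safe path from the cloud provider to a node's vclib.VirtualMachine, instead of building a separate govmomi client from environment variables. A minimal test-side sketch of that chain, assuming the e2e package's existing getVSphere(client) helper and gomega dot-import (both visible in later hunks of this diff); the function name is hypothetical:

package vsphere

import (
    "golang.org/x/net/context"

    . "github.com/onsi/gomega"
    k8stype "k8s.io/apimachinery/pkg/types"
    clientset "k8s.io/client-go/kubernetes"
)

// exampleVMDeviceLookup (hypothetical name) walks the accessor chain introduced
// above: *VSphere -> NodeManager() -> GetNodeInfo -> NodeInfo.VM().
func exampleVMDeviceLookup(client clientset.Interface, nodeName string) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    // getVSphere is the existing e2e helper used elsewhere in this commit.
    vsp, err := getVSphere(client)
    Expect(err).NotTo(HaveOccurred())

    nodeInfo, err := vsp.NodeManager().GetNodeInfo(k8stype.NodeName(nodeName))
    Expect(err).NotTo(HaveOccurred())

    // VM() is nil-safe; here nodeInfo is non-nil because GetNodeInfo succeeded.
    vmDevices, err := nodeInfo.VM().Device(ctx)
    Expect(err).NotTo(HaveOccurred())
    _ = vmDevices
}

The verifyDiskFormat and Node Poweroff hunks further down follow essentially this pattern.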
@@ -21,12 +21,10 @@ import (
     "errors"
     "os"
     "regexp"
-    "runtime"
     "strings"
     "time"

     "github.com/golang/glog"
-    "github.com/vmware/govmomi"
     "github.com/vmware/govmomi/vim25"

     "fmt"
@@ -34,7 +32,6 @@ import (
     "path/filepath"

     "github.com/vmware/govmomi/vim25/mo"
-    "k8s.io/api/core/v1"
     k8stypes "k8s.io/apimachinery/pkg/types"
     "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
     "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers"
@@ -46,63 +43,37 @@ const (
     Folder         = "Folder"
     VirtualMachine = "VirtualMachine"
     DummyDiskName  = "kube-dummyDisk.vmdk"
+    vSphereConfFileEnvVar = "VSPHERE_CONF_FILE"
 )

 // GetVSphere reads vSphere configuration from system environment and construct vSphere object
 func GetVSphere() (*VSphere, error) {
-    cfg := getVSphereConfig()
-    vSphereConn := getVSphereConn(cfg)
-    client, err := GetgovmomiClient(vSphereConn)
+    cfg, err := getVSphereConfig()
     if err != nil {
         return nil, err
     }
-    vSphereConn.GoVmomiClient = client
-    vsphereIns := &VSphereInstance{
-        conn: vSphereConn,
-        cfg: &VirtualCenterConfig{
-            User:              cfg.Global.User,
-            Password:          cfg.Global.Password,
-            VCenterPort:       cfg.Global.VCenterPort,
-            Datacenters:       cfg.Global.Datacenters,
-            RoundTripperCount: cfg.Global.RoundTripperCount,
-        },
+    vs, err := newControllerNode(*cfg)
+    if err != nil {
+        return nil, err
     }
-    vsphereInsMap := make(map[string]*VSphereInstance)
-    vsphereInsMap[cfg.Global.VCenterIP] = vsphereIns
-    // TODO: Initialize nodeManager and set it in VSphere.
-    vs := &VSphere{
-        vsphereInstanceMap: vsphereInsMap,
-        hostName:           "",
-        cfg:                cfg,
-        nodeManager: &NodeManager{
-            vsphereInstanceMap: vsphereInsMap,
-            nodeInfoMap:        make(map[string]*NodeInfo),
-            registeredNodes:    make(map[string]*v1.Node),
-        },
-    }
-    runtime.SetFinalizer(vs, logout)
     return vs, nil
 }

-func getVSphereConfig() *VSphereConfig {
-    var cfg VSphereConfig
-    cfg.Global.VCenterIP = os.Getenv("VSPHERE_VCENTER")
-    cfg.Global.VCenterPort = os.Getenv("VSPHERE_VCENTER_PORT")
-    cfg.Global.User = os.Getenv("VSPHERE_USER")
-    cfg.Global.Password = os.Getenv("VSPHERE_PASSWORD")
-    cfg.Global.Datacenters = os.Getenv("VSPHERE_DATACENTER")
-    cfg.Global.DefaultDatastore = os.Getenv("VSPHERE_DATASTORE")
-    cfg.Global.WorkingDir = os.Getenv("VSPHERE_WORKING_DIR")
-    cfg.Global.VMName = os.Getenv("VSPHERE_VM_NAME")
-    cfg.Global.InsecureFlag = false
-    if strings.ToLower(os.Getenv("VSPHERE_INSECURE")) == "true" {
-        cfg.Global.InsecureFlag = true
+func getVSphereConfig() (*VSphereConfig, error) {
+    confFileLocation := os.Getenv(vSphereConfFileEnvVar)
+    if confFileLocation == "" {
+        return nil, fmt.Errorf("Env variable 'VSPHERE_CONF_FILE' is not set.")
     }
-    cfg.Workspace.VCenterIP = cfg.Global.VCenterIP
-    cfg.Workspace.Datacenter = cfg.Global.Datacenters
-    cfg.Workspace.DefaultDatastore = cfg.Global.DefaultDatastore
-    cfg.Workspace.Folder = cfg.Global.WorkingDir
-    return &cfg
+    confFile, err := os.Open(confFileLocation)
+    if err != nil {
+        return nil, err
+    }
+    defer confFile.Close()
+    cfg, err := readConfig(confFile)
+    if err != nil {
+        return nil, err
+    }
+    return &cfg, nil
 }

 func getVSphereConn(cfg *VSphereConfig) *vclib.VSphereConnection {
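With this change the test-facing GetVSphere no longer assembles its configuration from a dozen VSPHERE_* variables; it expects VSPHERE_CONF_FILE to name a vSphere cloud-provider config file and delegates to readConfig and newControllerNode, presumably so that multi-vCenter setups can be described in one file, per the commit title. A minimal sketch of the new entry point from a test helper's perspective, assuming the gomega dot-import; the function name and file path are hypothetical:

package vsphere

import (
    "os"

    . "github.com/onsi/gomega"

    vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
)

// exampleGetVSphere (hypothetical name) shows the only environment contract the
// refactored GetVSphere still has: VSPHERE_CONF_FILE must name a readable
// vSphere cloud-provider config file.
func exampleGetVSphere() *vsphere.VSphere {
    if os.Getenv("VSPHERE_CONF_FILE") == "" {
        // Hypothetical location; a real CI job would export the actual path.
        os.Setenv("VSPHERE_CONF_FILE", "/tmp/vsphere.conf")
    }
    vs, err := vsphere.GetVSphere()
    Expect(err).NotTo(HaveOccurred())
    return vs
}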
@@ -117,16 +88,6 @@ func getVSphereConn(cfg *VSphereConfig) *vclib.VSphereConnection {
     return vSphereConn
 }

-// GetgovmomiClient gets the goVMOMI client for the vsphere connection object
-func GetgovmomiClient(conn *vclib.VSphereConnection) (*govmomi.Client, error) {
-    if conn == nil {
-        cfg := getVSphereConfig()
-        conn = getVSphereConn(cfg)
-    }
-    client, err := conn.NewClient(context.TODO())
-    return client, err
-}
-
 // Returns the accessible datastores for the given node VM.
 func getAccessibleDatastores(ctx context.Context, nodeVmDetail *NodeDetails, nodeManager *NodeManager) ([]*vclib.DatastoreInfo, error) {
     accessibleDatastores, err := nodeVmDetail.vm.GetAllAccessibleDatastores(ctx)
@@ -512,3 +473,40 @@ func (vs *VSphere) checkDiskAttached(ctx context.Context, nodes []k8stypes.NodeN
     }
     return nodesToRetry, nil
 }
+
+func (vs *VSphere) IsDummyVMPresent(vmName string) (bool, error) {
+    isDummyVMPresent := false
+
+    // Create context
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
+
+    vsi, err := vs.getVSphereInstanceForServer(vs.cfg.Workspace.VCenterIP, ctx)
+    if err != nil {
+        return isDummyVMPresent, err
+    }
+
+    dc, err := vclib.GetDatacenter(ctx, vsi.conn, vs.cfg.Workspace.Datacenter)
+    if err != nil {
+        return isDummyVMPresent, err
+    }
+
+    vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Workspace.Folder)
+    if err != nil {
+        return isDummyVMPresent, err
+    }
+
+    vms, err := vmFolder.GetVirtualMachines(ctx)
+    if err != nil {
+        return isDummyVMPresent, err
+    }
+
+    for _, vm := range vms {
+        if vm.Name() == vmName {
+            isDummyVMPresent = true
+            break
+        }
+    }
+
+    return isDummyVMPresent, nil
+}
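IsDummyVMPresent looks for the stale dummy VM inside the configured Workspace folder of the workspace vCenter, replacing the test-local isDummyVMPresent helper that built its own govmomi client (removed in the last hunk of this diff). A short sketch of the test-side call, assuming the same e2e getVSphere helper and gomega dot-import as above; the function name is hypothetical and the variable names mirror invokeStaleDummyVMTestWithStoragePolicy:

package vsphere

import (
    . "github.com/onsi/gomega"
    clientset "k8s.io/client-go/kubernetes"
)

// exampleDummyVMCheck (hypothetical name) asserts that the provider's clean-up
// routine removed the dummy VM, using the method added above.
func exampleDummyVMCheck(client clientset.Interface, dummyVMFullName string) {
    vsp, err := getVSphere(client)
    Expect(err).NotTo(HaveOccurred())

    isPresent, err := vsp.IsDummyVMPresent(dummyVMFullName)
    Expect(err).NotTo(HaveOccurred())
    Expect(isPresent).To(BeFalse(), "dummy VM "+dummyVMFullName+" is still present")
}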
@@ -11,6 +11,7 @@ go_library(
         "persistent_volumes-vsphere.go",
         "pv_reclaimpolicy.go",
         "pvc_label_selector.go",
+        "vsphere_common.go",
         "vsphere_scale.go",
         "vsphere_statefulsets.go",
         "vsphere_stress.go",
@@ -36,7 +37,6 @@ go_library(
         "//test/e2e/storage/utils:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
         "//vendor/github.com/onsi/gomega:go_default_library",
-        "//vendor/github.com/vmware/govmomi/find:go_default_library",
         "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
         "//vendor/golang.org/x/net/context:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
test/e2e/storage/vsphere/vsphere_common.go (new file, 66 lines)
@@ -0,0 +1,66 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package vsphere
+
+import (
+    . "github.com/onsi/gomega"
+    "os"
+    "strconv"
+)
+
+const (
+    SPBMPolicyName            = "VSPHERE_SPBM_POLICY_NAME"
+    StorageClassDatastoreName = "VSPHERE_DATASTORE"
+    SecondSharedDatastore     = "VSPHERE_SECOND_SHARED_DATASTORE"
+    KubernetesClusterName     = "VSPHERE_KUBERNETES_CLUSTER"
+    SPBMTagPolicy             = "VSPHERE_SPBM_TAG_POLICY"
+)
+
+const (
+    VCPClusterDatastore        = "CLUSTER_DATASTORE"
+    SPBMPolicyDataStoreCluster = "VSPHERE_SPBM_POLICY_DS_CLUSTER"
+)
+
+const (
+    VCPScaleVolumeCount   = "VCP_SCALE_VOLUME_COUNT"
+    VCPScaleVolumesPerPod = "VCP_SCALE_VOLUME_PER_POD"
+    VCPScaleInstances     = "VCP_SCALE_INSTANCES"
+)
+
+const (
+    VCPStressInstances  = "VCP_STRESS_INSTANCES"
+    VCPStressIterations = "VCP_STRESS_ITERATIONS"
+)
+
+const (
+    VCPPerfVolumeCount   = "VCP_PERF_VOLUME_COUNT"
+    VCPPerfVolumesPerPod = "VCP_PERF_VOLUME_PER_POD"
+    VCPPerfIterations    = "VCP_PERF_ITERATIONS"
+)
+
+func GetAndExpectStringEnvVar(varName string) string {
+    varValue := os.Getenv(varName)
+    Expect(varValue).NotTo(BeEmpty(), "ENV "+varName+" is not set")
+    return varValue
+}
+
+func GetAndExpectIntEnvVar(varName string) int {
+    varValue := GetAndExpectStringEnvVar(varName)
+    varIntValue, err := strconv.Atoi(varValue)
+    Expect(err).NotTo(HaveOccurred(), "Error Parsing "+varName)
+    return varIntValue
+}
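The two helpers at the bottom of this new file centralize the repeated pattern of reading an environment variable, failing the spec if it is unset, and (for integers) parsing it with strconv.Atoi; the remaining hunks switch the vSphere e2e specs over to them. A minimal sketch of typical usage, assuming the ginkgo/gomega dot-imports used throughout these tests; the spec text is hypothetical:

package vsphere

import (
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

// Hypothetical spec sketching how the helpers are consumed; the real call sites
// are in the hunks that follow.
var _ = Describe("env helper usage [sketch]", func() {
    var (
        volumeCount   int
        datastoreName string
    )

    BeforeEach(func() {
        volumeCount = GetAndExpectIntEnvVar(VCPScaleVolumeCount)            // VCP_SCALE_VOLUME_COUNT
        datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName) // VSPHERE_DATASTORE
    })

    It("has the parsed configuration available", func() {
        Expect(volumeCount).To(BeNumerically(">", 0))
        Expect(datastoreName).NotTo(BeEmpty())
    })
})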
@@ -18,7 +18,6 @@ package vsphere

 import (
     "fmt"
-    "os"
     "strconv"

     . "github.com/onsi/ginkgo"
@@ -63,12 +62,11 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
         volumeCount       int
         numberOfInstances int
         volumesPerPod     int
-        nodeVolumeMapChan chan map[string][]string
-        nodes             *v1.NodeList
         policyName        string
         datastoreName     string
+        nodeVolumeMapChan chan map[string][]string
+        nodes             *v1.NodeList
         scNames           = []string{storageclass1, storageclass2, storageclass3, storageclass4}
-        err               error
     )

     BeforeEach(func() {
@@ -78,27 +76,15 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
         nodeVolumeMapChan = make(chan map[string][]string)

         // Read the environment variables
-        volumeCountStr := os.Getenv("VCP_SCALE_VOLUME_COUNT")
-        Expect(volumeCountStr).NotTo(BeEmpty(), "ENV VCP_SCALE_VOLUME_COUNT is not set")
-        volumeCount, err = strconv.Atoi(volumeCountStr)
-        Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_SCALE_VOLUME_COUNT")
+        volumeCount = GetAndExpectIntEnvVar(VCPScaleVolumeCount)
+        volumesPerPod = GetAndExpectIntEnvVar(VCPScaleVolumesPerPod)

-        volumesPerPodStr := os.Getenv("VCP_SCALE_VOLUME_PER_POD")
-        Expect(volumesPerPodStr).NotTo(BeEmpty(), "ENV VCP_SCALE_VOLUME_PER_POD is not set")
-        volumesPerPod, err = strconv.Atoi(volumesPerPodStr)
-        Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_SCALE_VOLUME_PER_POD")
-
-        numberOfInstancesStr := os.Getenv("VCP_SCALE_INSTANCES")
-        Expect(numberOfInstancesStr).NotTo(BeEmpty(), "ENV VCP_SCALE_INSTANCES is not set")
-        numberOfInstances, err = strconv.Atoi(numberOfInstancesStr)
-        Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_SCALE_INSTANCES")
+        numberOfInstances = GetAndExpectIntEnvVar(VCPScaleInstances)
         Expect(numberOfInstances > 5).NotTo(BeTrue(), "Maximum allowed instances are 5")
         Expect(numberOfInstances > volumeCount).NotTo(BeTrue(), "Number of instances should be less than the total volume count")

-        policyName = os.Getenv("VSPHERE_SPBM_POLICY_NAME")
-        datastoreName = os.Getenv("VSPHERE_DATASTORE")
-        Expect(policyName).NotTo(BeEmpty(), "ENV VSPHERE_SPBM_POLICY_NAME is not set")
-        Expect(datastoreName).NotTo(BeEmpty(), "ENV VSPHERE_DATASTORE is not set")
+        policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
+        datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)

         nodes = framework.GetReadySchedulableNodesOrDie(client)
         if len(nodes.Items) < 2 {
@@ -18,8 +18,6 @@ package vsphere

 import (
     "fmt"
-    "os"
-    "strconv"
     "sync"

     . "github.com/onsi/ginkgo"
@@ -50,10 +48,10 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
         namespace     string
         instances     int
         iterations    int
-        err           error
-        scNames       = []string{storageclass1, storageclass2, storageclass3, storageclass4}
         policyName    string
         datastoreName string
+        err           error
+        scNames       = []string{storageclass1, storageclass2, storageclass3, storageclass4}
     )

     BeforeEach(func() {
@@ -67,23 +65,16 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
         // if VCP_STRESS_INSTANCES = 12 and VCP_STRESS_ITERATIONS is 10. 12 threads will run in parallel for 10 times.
         // Resulting 120 Volumes and POD Creation. Volumes will be provisioned with each different types of Storage Class,
         // Each iteration creates PVC, verify PV is provisioned, then creates a pod, verify volume is attached to the node, and then delete the pod and delete pvc.
-        instancesStr := os.Getenv("VCP_STRESS_INSTANCES")
-        Expect(instancesStr).NotTo(BeEmpty(), "ENV VCP_STRESS_INSTANCES is not set")
-        instances, err = strconv.Atoi(instancesStr)
-        Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP-STRESS-INSTANCES")
+        instances = GetAndExpectIntEnvVar(VCPStressInstances)
         Expect(instances <= volumesPerNode*len(nodeList.Items)).To(BeTrue(), fmt.Sprintf("Number of Instances should be less or equal: %v", volumesPerNode*len(nodeList.Items)))
         Expect(instances > len(scNames)).To(BeTrue(), "VCP_STRESS_INSTANCES should be greater than 3 to utilize all 4 types of storage classes")

-        iterationStr := os.Getenv("VCP_STRESS_ITERATIONS")
-        Expect(instancesStr).NotTo(BeEmpty(), "ENV VCP_STRESS_ITERATIONS is not set")
-        iterations, err = strconv.Atoi(iterationStr)
+        iterations = GetAndExpectIntEnvVar(VCPStressIterations)
         Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_STRESS_ITERATIONS")
         Expect(iterations > 0).To(BeTrue(), "VCP_STRESS_ITERATIONS should be greater than 0")

-        policyName = os.Getenv("VSPHERE_SPBM_POLICY_NAME")
-        datastoreName = os.Getenv("VSPHERE_DATASTORE")
-        Expect(policyName).NotTo(BeEmpty(), "ENV VSPHERE_SPBM_POLICY_NAME is not set")
-        Expect(datastoreName).NotTo(BeEmpty(), "ENV VSPHERE_DATASTORE is not set")
+        policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
+        datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)
     })

     It("vsphere stress tests", func() {
@@ -18,7 +18,6 @@ package vsphere

 import (
     "fmt"
-    "os"

     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
@@ -43,18 +42,19 @@ import (
 var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:vsphere]", func() {
     f := framework.NewDefaultFramework("volume-provision")

-    var client clientset.Interface
-    var namespace string
-    var scParameters map[string]string
-    var clusterDatastore string
+    var (
+        client           clientset.Interface
+        namespace        string
+        scParameters     map[string]string
+        clusterDatastore string
+    )

     BeforeEach(func() {
         framework.SkipUnlessProviderIs("vsphere")
         client = f.ClientSet
         namespace = f.Namespace.Name
         scParameters = make(map[string]string)
-        clusterDatastore = os.Getenv("CLUSTER_DATASTORE")
-        Expect(clusterDatastore).NotTo(BeEmpty(), "Please set CLUSTER_DATASTORE system environment. eg: export CLUSTER_DATASTORE=<cluster_name>/<datastore_name")
+        clusterDatastore = GetAndExpectStringEnvVar(VCPClusterDatastore)
     })

     /*
@@ -129,9 +129,8 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
      2. invokeValidPolicyTest - util to do e2e dynamic provision test
     */
     It("verify dynamic provision with spbm policy on clustered datastore", func() {
-        storagePolicy := os.Getenv("VSPHERE_SPBM_POLICY_DS_CLUSTER")
-        Expect(storagePolicy).NotTo(BeEmpty(), "Please set VSPHERE_SPBM_POLICY_DS_CLUSTER system environment")
-        scParameters[SpbmStoragePolicy] = storagePolicy
+        policyDatastoreCluster := GetAndExpectStringEnvVar(SPBMPolicyDataStoreCluster)
+        scParameters[SpbmStoragePolicy] = policyDatastoreCluster
         invokeValidPolicyTest(f, client, namespace, scParameters)
     })
 })
@@ -17,12 +17,10 @@ limitations under the License.
 package vsphere

 import (
-    "os"
     "path/filepath"

     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
-    "github.com/vmware/govmomi/find"
     "github.com/vmware/govmomi/vim25/types"
     "golang.org/x/net/context"
     "k8s.io/api/core/v1"
@@ -30,7 +28,6 @@ import (
     k8stype "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/uuid"
     clientset "k8s.io/client-go/kubernetes"
-    vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -152,7 +149,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st

     By("Waiting for pod to be running")
     Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
-    Expect(verifyDiskFormat(nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed")
+    Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed")

     var volumePaths []string
     volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)
@@ -162,22 +159,22 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st

 }

-func verifyDiskFormat(nodeName string, pvVolumePath string, diskFormat string) bool {
+func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath string, diskFormat string) bool {
     By("Verifing disk format")
     eagerlyScrub := false
     thinProvisioned := false
     diskFound := false
     pvvmdkfileName := filepath.Base(pvVolumePath) + filepath.Ext(pvVolumePath)

-    govMoMiClient, err := vsphere.GetgovmomiClient(nil)
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
+
+    vsp, err := getVSphere(client)
+    Expect(err).NotTo(HaveOccurred())
+    nodeInfo, err := vsp.NodeManager().GetNodeInfo(k8stype.NodeName(nodeName))
     Expect(err).NotTo(HaveOccurred())

-    f := find.NewFinder(govMoMiClient.Client, true)
-    ctx, _ := context.WithCancel(context.Background())
-    vm, err := f.VirtualMachine(ctx, os.Getenv("VSPHERE_WORKING_DIR")+nodeName)
-    Expect(err).NotTo(HaveOccurred())
-
-    vmDevices, err := vm.Device(ctx)
+    vmDevices, err := nodeInfo.VM().Device(ctx)
     Expect(err).NotTo(HaveOccurred())

     disks := vmDevices.SelectByType((*types.VirtualDisk)(nil))
@@ -18,7 +18,6 @@ package vsphere

 import (
     "fmt"
-    "os"
     "strings"
     "time"

@@ -58,8 +57,7 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
         client = f.ClientSet
         namespace = f.Namespace.Name
         scParameters = make(map[string]string)
-        datastore = os.Getenv("VSPHERE_DATASTORE")
-        Expect(datastore).NotTo(BeEmpty())
+        datastore = GetAndExpectStringEnvVar(StorageClassDatastoreName)
         nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
         if !(len(nodeList.Items) > 0) {
             framework.Failf("Unable to find ready and schedulable Node")
@@ -18,13 +18,10 @@ package vsphere

 import (
     "fmt"
-    "os"
-    "path/filepath"
     "time"

     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
-    "github.com/vmware/govmomi/find"
     "golang.org/x/net/context"

     vimtypes "github.com/vmware/govmomi/vim25/types"
@@ -50,7 +47,6 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
         client     clientset.Interface
         namespace  string
         vsp        *vsphere.VSphere
-        workingDir string
         err        error
     )

@@ -64,8 +60,6 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
         Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test")
         vsp, err = getVSphere(client)
         Expect(err).NotTo(HaveOccurred())
-        workingDir = os.Getenv("VSPHERE_WORKING_DIR")
-        Expect(workingDir).NotTo(BeEmpty())
     })

     /*
@@ -118,16 +112,10 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
         Expect(isAttached).To(BeTrue(), "Disk is not attached to the node")

         By(fmt.Sprintf("Power off the node: %v", node1))
-        govMoMiClient, err := vsphere.GetgovmomiClient(nil)
+        nodeInfo, err := vsp.NodeManager().GetNodeInfo(node1)
         Expect(err).NotTo(HaveOccurred())
-
-        f := find.NewFinder(govMoMiClient.Client, true)
+        vm := nodeInfo.VM()
         ctx, _ := context.WithCancel(context.Background())
-
-        vmPath := filepath.Join(workingDir, string(node1))
-        vm, err := f.VirtualMachine(ctx, vmPath)
-        Expect(err).NotTo(HaveOccurred())
-
         _, err = vm.PowerOff(ctx)
         Expect(err).NotTo(HaveOccurred())
         defer vm.PowerOn(ctx)
@@ -18,8 +18,6 @@ package vsphere

 import (
     "fmt"
-    "os"
-    "strconv"
     "time"

     . "github.com/onsi/ginkgo"
@@ -56,39 +54,25 @@ var _ = utils.SIGDescribe("vcp-performance [Feature:vsphere]", func() {
         client           clientset.Interface
         namespace        string
         nodeSelectorList []*NodeSelector
+        policyName       string
+        datastoreName    string
         volumeCount      int
         volumesPerPod    int
         iterations       int
-        policyName       string
-        datastoreName    string
     )

     BeforeEach(func() {
-        var err error
         framework.SkipUnlessProviderIs("vsphere")
         client = f.ClientSet
         namespace = f.Namespace.Name

         // Read the environment variables
-        volumeCountStr := os.Getenv("VCP_PERF_VOLUME_COUNT")
-        Expect(volumeCountStr).NotTo(BeEmpty(), "ENV VCP_PERF_VOLUME_COUNT is not set")
-        volumeCount, err = strconv.Atoi(volumeCountStr)
-        Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_PERF_VOLUME_COUNT")
+        volumeCount = GetAndExpectIntEnvVar(VCPPerfVolumeCount)
+        volumesPerPod = GetAndExpectIntEnvVar(VCPPerfVolumesPerPod)
+        iterations = GetAndExpectIntEnvVar(VCPPerfIterations)

-        volumesPerPodStr := os.Getenv("VCP_PERF_VOLUME_PER_POD")
-        Expect(volumesPerPodStr).NotTo(BeEmpty(), "ENV VCP_PERF_VOLUME_PER_POD is not set")
-        volumesPerPod, err = strconv.Atoi(volumesPerPodStr)
-        Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_PERF_VOLUME_PER_POD")
-
-        iterationsStr := os.Getenv("VCP_PERF_ITERATIONS")
-        Expect(iterationsStr).NotTo(BeEmpty(), "ENV VCP_PERF_ITERATIONS is not set")
-        iterations, err = strconv.Atoi(iterationsStr)
-        Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_PERF_ITERATIONS")
-
-        policyName = os.Getenv("VSPHERE_SPBM_GOLD_POLICY")
-        datastoreName = os.Getenv("VSPHERE_DATASTORE")
-        Expect(policyName).NotTo(BeEmpty(), "ENV VSPHERE_SPBM_GOLD_POLICY is not set")
-        Expect(datastoreName).NotTo(BeEmpty(), "ENV VSPHERE_DATASTORE is not set")
+        policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
+        datastoreName = GetAndExpectStringEnvVar(StorageClassDatastoreName)

         nodes := framework.GetReadySchedulableNodesOrDie(client)
         Expect(len(nodes.Items)).To(BeNumerically(">=", 1), "Requires at least %d nodes (not %d)", 2, len(nodes.Items))
@@ -18,7 +18,6 @@ package vsphere

 import (
     "fmt"
-    "os"
     "strconv"
     "time"

@@ -225,7 +224,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
         volumeOptions = new(vclib.VolumeOptions)
         volumeOptions.CapacityKB = 2097152
         volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
-        volumeOptions.Datastore = os.Getenv("VSPHERE_SECOND_SHARED_DATASTORE")
+        volumeOptions.Datastore = GetAndExpectStringEnvVar(SecondSharedDatastore)
         volumePath, err := createVSphereVolume(vsp, volumeOptions)
         Expect(err).NotTo(HaveOccurred())
         volumePaths = append(volumePaths, volumePath)
@@ -19,21 +19,16 @@ package vsphere
 import (
     "fmt"
     "hash/fnv"
-    "os"
     "time"

     "strings"

     . "github.com/onsi/ginkgo"
     . "github.com/onsi/gomega"
-
-    "github.com/vmware/govmomi/find"
-    "golang.org/x/net/context"
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     k8stype "k8s.io/apimachinery/pkg/types"
     clientset "k8s.io/client-go/kubernetes"
-    vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
     "k8s.io/kubernetes/test/e2e/framework"
     "k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -98,11 +93,15 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
         client       clientset.Interface
         namespace    string
         scParameters map[string]string
+        policyName   string
+        tagPolicy    string
     )
     BeforeEach(func() {
         framework.SkipUnlessProviderIs("vsphere")
         client = f.ClientSet
         namespace = f.Namespace.Name
+        policyName = GetAndExpectStringEnvVar(SPBMPolicyName)
+        tagPolicy = GetAndExpectStringEnvVar(SPBMTagPolicy)
         framework.Logf("framework: %+v", f)
         scParameters = make(map[string]string)
         nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
|||||||
|
|
||||||
// Valid policy.
|
// Valid policy.
|
||||||
It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func() {
|
It("verify VSAN storage capability with valid hostFailuresToTolerate and cacheReservation values is honored for dynamically provisioned pvc using storageclass", func() {
|
||||||
By(fmt.Sprintf("Invoking Test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal))
|
By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s, cacheReservation: %s", HostFailuresToTolerateCapabilityVal, CacheReservationCapabilityVal))
|
||||||
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal
|
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityVal
|
||||||
scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
|
scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
|
||||||
framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
|
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||||
invokeValidPolicyTest(f, client, namespace, scParameters)
|
invokeValidPolicyTest(f, client, namespace, scParameters)
|
||||||
})
|
})
|
||||||
|
|
||||||
// Valid policy.
|
// Valid policy.
|
||||||
It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func() {
|
It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values is honored for dynamically provisioned pvc using storageclass", func() {
|
||||||
By(fmt.Sprintf("Invoking Test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
|
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
|
||||||
scParameters[Policy_DiskStripes] = "1"
|
scParameters[Policy_DiskStripes] = "1"
|
||||||
scParameters[Policy_ObjectSpaceReservation] = "30"
|
scParameters[Policy_ObjectSpaceReservation] = "30"
|
||||||
framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
|
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||||
invokeValidPolicyTest(f, client, namespace, scParameters)
|
invokeValidPolicyTest(f, client, namespace, scParameters)
|
||||||
})
|
})
|
||||||
|
|
||||||
// Valid policy.
|
// Valid policy.
|
||||||
It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func() {
|
It("verify VSAN storage capability with valid diskStripes and objectSpaceReservation values and a VSAN datastore is honored for dynamically provisioned pvc using storageclass", func() {
|
||||||
By(fmt.Sprintf("Invoking Test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
|
By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal))
|
||||||
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
|
scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
|
||||||
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
|
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
|
||||||
scParameters[Datastore] = VsanDatastore
|
scParameters[Datastore] = VsanDatastore
|
||||||
framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
|
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||||
invokeValidPolicyTest(f, client, namespace, scParameters)
|
invokeValidPolicyTest(f, client, namespace, scParameters)
|
||||||
})
|
})
|
||||||
|
|
||||||
// Valid policy.
|
// Valid policy.
|
||||||
It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func() {
|
It("verify VSAN storage capability with valid objectSpaceReservation and iopsLimit values is honored for dynamically provisioned pvc using storageclass", func() {
|
||||||
By(fmt.Sprintf("Invoking Test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal))
|
By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReservation: %s, iopsLimit: %s", ObjectSpaceReservationCapabilityVal, IopsLimitCapabilityVal))
|
||||||
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
|
scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
|
||||||
scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
|
scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
|
||||||
framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
|
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||||
invokeValidPolicyTest(f, client, namespace, scParameters)
|
invokeValidPolicyTest(f, client, namespace, scParameters)
|
||||||
})
|
})
|
||||||
|
|
||||||
// Invalid VSAN storage capabilties parameters.
|
// Invalid VSAN storage capabilties parameters.
|
||||||
It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() {
|
It("verify VSAN storage capability with invalid capability name objectSpaceReserve is not honored for dynamically provisioned pvc using storageclass", func() {
|
||||||
By(fmt.Sprintf("Invoking Test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal))
|
By(fmt.Sprintf("Invoking test for VSAN policy objectSpaceReserve: %s, stripeWidth: %s", ObjectSpaceReservationCapabilityVal, StripeWidthCapabilityVal))
|
||||||
scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal
|
scParameters["objectSpaceReserve"] = ObjectSpaceReservationCapabilityVal
|
||||||
scParameters[Policy_DiskStripes] = StripeWidthCapabilityVal
|
scParameters[Policy_DiskStripes] = StripeWidthCapabilityVal
|
||||||
framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
|
framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
|
||||||
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
|
err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
|
||||||
Expect(err).To(HaveOccurred())
|
Expect(err).To(HaveOccurred())
|
||||||
errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume"
|
errorMsg := "invalid option \\\"objectSpaceReserve\\\" for volume plugin kubernetes.io/vsphere-volume"
|
||||||
@@ -165,10 +164,10 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
     // Invalid policy on a VSAN test bed.
     // diskStripes value has to be between 1 and 12.
     It("verify VSAN storage capability with invalid diskStripes value is not honored for dynamically provisioned pvc using storageclass", func() {
-        By(fmt.Sprintf("Invoking Test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal))
+        By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, cacheReservation: %s", DiskStripesCapabilityInvalidVal, CacheReservationCapabilityVal))
         scParameters[Policy_DiskStripes] = DiskStripesCapabilityInvalidVal
         scParameters[Policy_CacheReservation] = CacheReservationCapabilityVal
-        framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
+        framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
         err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
         Expect(err).To(HaveOccurred())
         errorMsg := "Invalid value for " + Policy_DiskStripes + "."
@@ -180,9 +179,9 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
     // Invalid policy on a VSAN test bed.
     // hostFailuresToTolerate value has to be between 0 and 3 including.
     It("verify VSAN storage capability with invalid hostFailuresToTolerate value is not honored for dynamically provisioned pvc using storageclass", func() {
-        By(fmt.Sprintf("Invoking Test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal))
+        By(fmt.Sprintf("Invoking test for VSAN policy hostFailuresToTolerate: %s", HostFailuresToTolerateCapabilityInvalidVal))
         scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
-        framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
+        framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
         err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
         Expect(err).To(HaveOccurred())
         errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
@@ -194,11 +193,11 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
     // Specify a valid VSAN policy on a non-VSAN test bed.
     // The test should fail.
     It("verify VSAN storage capability with non-vsan datastore is not honored for dynamically provisioned pvc using storageclass", func() {
-        By(fmt.Sprintf("Invoking Test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, VmfsDatastore))
+        By(fmt.Sprintf("Invoking test for VSAN policy diskStripes: %s, objectSpaceReservation: %s and a non-VSAN datastore: %s", DiskStripesCapabilityVal, ObjectSpaceReservationCapabilityVal, VmfsDatastore))
         scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
         scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
         scParameters[Datastore] = VmfsDatastore
-        framework.Logf("Invoking Test for VSAN storage capabilities: %+v", scParameters)
+        framework.Logf("Invoking test for VSAN storage capabilities: %+v", scParameters)
         err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
         Expect(err).To(HaveOccurred())
         errorMsg := "The specified datastore: \\\"" + VmfsDatastore + "\\\" is not a VSAN datastore. " +
@@ -209,12 +208,10 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
     })

     It("verify an existing and compatible SPBM policy is honored for dynamically provisioned pvc using storageclass", func() {
-        By(fmt.Sprintf("Invoking Test for SPBM policy: %s", os.Getenv("VSPHERE_SPBM_GOLD_POLICY")))
-        goldPolicy := os.Getenv("VSPHERE_SPBM_GOLD_POLICY")
-        Expect(goldPolicy).NotTo(BeEmpty())
-        scParameters[SpbmStoragePolicy] = goldPolicy
+        By(fmt.Sprintf("Invoking test for SPBM policy: %s", policyName))
+        scParameters[SpbmStoragePolicy] = policyName
         scParameters[DiskFormat] = ThinDisk
-        framework.Logf("Invoking Test for SPBM storage policy: %+v", scParameters)
+        framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
         invokeValidPolicyTest(f, client, namespace, scParameters)
     })

@@ -222,33 +219,30 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
         scParameters[Policy_DiskStripes] = DiskStripesCapabilityMaxVal
         scParameters[Policy_ObjectSpaceReservation] = ObjectSpaceReservationCapabilityVal
         scParameters[Datastore] = VsanDatastore
-        framework.Logf("Invoking Test for SPBM storage policy: %+v", scParameters)
-        clusterName := os.Getenv("VSPHERE_KUBERNETES_CLUSTER")
-        Expect(clusterName).NotTo(BeEmpty())
-        invokeStaleDummyVMTestWithStoragePolicy(client, namespace, clusterName, scParameters)
+        framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
+        kubernetesClusterName := GetAndExpectStringEnvVar(KubernetesClusterName)
+        invokeStaleDummyVMTestWithStoragePolicy(client, namespace, kubernetesClusterName, scParameters)
     })

     It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func() {
-        By(fmt.Sprintf("Invoking Test for SPBM policy: %s and datastore: %s", os.Getenv("VSPHERE_SPBM_TAG_POLICY"), VsanDatastore))
-        tagPolicy := os.Getenv("VSPHERE_SPBM_TAG_POLICY")
-        Expect(tagPolicy).NotTo(BeEmpty())
+        By(fmt.Sprintf("Invoking test for SPBM policy: %s and datastore: %s", tagPolicy, VsanDatastore))
         scParameters[SpbmStoragePolicy] = tagPolicy
         scParameters[Datastore] = VsanDatastore
         scParameters[DiskFormat] = ThinDisk
-        framework.Logf("Invoking Test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
+        framework.Logf("Invoking test for SPBM storage policy on a non-compatible datastore: %+v", scParameters)
         err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
         Expect(err).To(HaveOccurred())
-        errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + os.Getenv("VSPHERE_SPBM_TAG_POLICY") + "\\\""
+        errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + tagPolicy + "\\\""
         if !strings.Contains(err.Error(), errorMsg) {
             Expect(err).NotTo(HaveOccurred(), errorMsg)
         }
     })

     It("verify if a non-existing SPBM policy is not honored for dynamically provisioned pvc using storageclass", func() {
-        By(fmt.Sprintf("Invoking Test for SPBM policy: %s", BronzeStoragePolicy))
+        By(fmt.Sprintf("Invoking test for SPBM policy: %s", BronzeStoragePolicy))
         scParameters[SpbmStoragePolicy] = BronzeStoragePolicy
         scParameters[DiskFormat] = ThinDisk
-        framework.Logf("Invoking Test for non-existing SPBM storage policy: %+v", scParameters)
+        framework.Logf("Invoking test for non-existing SPBM storage policy: %+v", scParameters)
         err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
         Expect(err).To(HaveOccurred())
         errorMsg := "no pbm profile found with name: \\\"" + BronzeStoragePolicy + "\\"
@@ -258,14 +252,12 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
     })

     It("verify an if a SPBM policy and VSAN capabilities cannot be honored for dynamically provisioned pvc using storageclass", func() {
-        By(fmt.Sprintf("Invoking Test for SPBM policy: %s with VSAN storage capabilities", os.Getenv("VSPHERE_SPBM_GOLD_POLICY")))
-        goldPolicy := os.Getenv("VSPHERE_SPBM_GOLD_POLICY")
-        Expect(goldPolicy).NotTo(BeEmpty())
-        scParameters[SpbmStoragePolicy] = goldPolicy
+        By(fmt.Sprintf("Invoking test for SPBM policy: %s with VSAN storage capabilities", policyName))
+        scParameters[SpbmStoragePolicy] = policyName
         Expect(scParameters[SpbmStoragePolicy]).NotTo(BeEmpty())
         scParameters[Policy_DiskStripes] = DiskStripesCapabilityVal
         scParameters[DiskFormat] = ThinDisk
-        framework.Logf("Invoking Test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
+        framework.Logf("Invoking test for SPBM storage policy and VSAN capabilities together: %+v", scParameters)
         err := invokeInvalidPolicyTestNeg(client, namespace, scParameters)
         Expect(err).To(HaveOccurred())
         errorMsg := "Cannot specify storage policy capabilities along with storage policy name. Please specify only one"
@@ -355,23 +347,7 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, namespa
     fnvHash.Write([]byte(vmName))
     dummyVMFullName := DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
     errorMsg := "Dummy VM - " + vmName + "is still present. Failing the test.."
-    Expect(isDummyVMPresent(dummyVMFullName)).NotTo(BeTrue(), errorMsg)
-}
-
-func isDummyVMPresent(vmName string) bool {
-    By("Verifing if the dummy VM is deleted by the vSphere Cloud Provider clean up routine")
-    govMoMiClient, err := vsphere.GetgovmomiClient(nil)
+    vsp, err := getVSphere(client)
     Expect(err).NotTo(HaveOccurred())
-
-    f := find.NewFinder(govMoMiClient.Client, true)
-    ctx, _ := context.WithCancel(context.Background())
-
-    workingDir := os.Getenv("VSPHERE_WORKING_DIR")
-    Expect(workingDir).NotTo(BeEmpty())
-    vmPath := workingDir + vmName
-    _, err = f.VirtualMachine(ctx, vmPath)
-    if err != nil {
-        return false
-    }
-    return true
+    Expect(vsp.IsDummyVMPresent(dummyVMFullName)).NotTo(BeTrue(), errorMsg)
 }