Merge pull request #79931 from skarthiksrinivas/vsphere_volume_selectednode

Provision vSphere volume as per selectedNode
With VolumeBindingWaitForFirstConsumer, the provisioner now passes the scheduler's selectedNode down to the cloud provider so the volume is created in that node's zone. GetHostsInZone also deduplicates hosts, and getDatastoresForZone fails fast when the selected zones share no datastore.
Kubernetes Prow Robot 2019-08-15 11:06:35 -07:00 committed by GitHub
commit d8c8c8eded
22 changed files with 248 additions and 87 deletions
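A minimal sketch of the zone-selection rule this change adds to VSphere.CreateVolume, assuming the node's failure domain has already been looked up via nodeManager.GetNodeInfoWithNodeObject; the helper name pickZonesToSearch and its flattened signature are illustrative, not part of the change:

package sketch

import v1 "k8s.io/api/core/v1"

// pickZonesToSearch condenses the zonesToSearch branches in CreateVolume.
func pickZonesToSearch(selectedNode *v1.Node, allowedZones []string, nodeZone string) []string {
	if selectedNode == nil {
		// Immediate binding: fall back to allowedTopologies, if provided.
		return allowedZones
	}
	if len(allowedZones) > 1 {
		// WaitForFirstConsumer with several allowed topologies: the volume
		// must satisfy all of them, so search across every listed zone.
		return allowedZones
	}
	if nodeZone != "" {
		// Otherwise prefer the zone of the scheduler-selected node.
		return []string{nodeZone}
	}
	// No zone constraint: search shared datastores cluster-wide.
	return nil
}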

View File

@@ -171,7 +171,7 @@ func (plugin *vsphereVolumePlugin) ConstructVolumeSpec(volumeName, mountPath str
// Abstract interface to disk operations.
type vdManager interface {
// Creates a volume
CreateVolume(provisioner *vsphereVolumeProvisioner, selectedZone []string) (volSpec *VolumeSpec, err error)
CreateVolume(provisioner *vsphereVolumeProvisioner, selectedNode *v1.Node, selectedZone []string) (volSpec *VolumeSpec, err error)
// Deletes a volume
DeleteVolume(deleter *vsphereVolumeDeleter) error
}
@@ -368,14 +368,14 @@ func (v *vsphereVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopol
if !util.AccessModesContainedInAll(v.plugin.GetAccessModes(), v.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", v.options.PVC.Spec.AccessModes, v.plugin.GetAccessModes())
}
klog.V(1).Infof("Provision with allowedTopologies : %s", allowedTopologies)
klog.V(1).Infof("Provision with selectedNode: %s and allowedTopologies : %s", getNodeName(selectedNode), allowedTopologies)
selectedZones, err := volumehelpers.ZonesFromAllowedTopologies(allowedTopologies)
if err != nil {
return nil, err
}
klog.V(4).Infof("Selected zones for volume : %s", selectedZones)
volSpec, err := v.manager.CreateVolume(v, selectedZones.List())
volSpec, err := v.manager.CreateVolume(v, selectedNode, selectedZones.List())
if err != nil {
return nil, err
}
@@ -465,3 +465,10 @@ func getVolumeSource(
return nil, false, fmt.Errorf("Spec does not reference a VSphere volume type")
}
func getNodeName(node *v1.Node) string {
if node == nil {
return ""
}
return node.Name
}
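getNodeName tolerates a nil node because selectedNode is only set under WaitForFirstConsumer binding. A hypothetical test for that nil-safety (not part of this PR; assumes the test file's existing v1 and metav1 imports):

func TestGetNodeName(t *testing.T) {
	// Provisioning without a selectedNode is legal, so nil must not panic.
	if got := getNodeName(nil); got != "" {
		t.Errorf("getNodeName(nil) = %q, want \"\"", got)
	}
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}
	if got := getNodeName(node); got != "node-1" {
		t.Errorf("getNodeName(node) = %q, want %q", got, "node-1")
	}
}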

View File

@@ -62,7 +62,7 @@ func TestCanSupport(t *testing.T) {
type fakePDManager struct {
}
func (fake *fakePDManager) CreateVolume(v *vsphereVolumeProvisioner, selectedZone []string) (volSpec *VolumeSpec, err error) {
func (fake *fakePDManager) CreateVolume(v *vsphereVolumeProvisioner, selectedNode *v1.Node, selectedZone []string) (volSpec *VolumeSpec, err error) {
volSpec = &VolumeSpec{
Path: "[local] test-volume-name.vmdk",
Size: 100,

View File

@@ -86,7 +86,7 @@ func verifyDevicePath(path string) (string, error) {
}
// CreateVolume creates a vSphere volume.
func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner, selectedZone []string) (volSpec *VolumeSpec, err error) {
func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner, selectedNode *v1.Node, selectedZone []string) (volSpec *VolumeSpec, err error) {
var fstype string
cloud, err := getCloudProvider(v.plugin.host.GetCloudProvider())
if err != nil {
@@ -108,6 +108,7 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner, selectedZ
}
volumeOptions.Zone = selectedZone
volumeOptions.SelectedNode = selectedNode
// Apply Parameters (case-insensitive). We leave validation of
// the values to the cloud provider.
for parameter, value := range v.options.Parameters {

View File

@@ -464,8 +464,8 @@ func (nm *NodeManager) GetHostsInZone(ctx context.Context, zoneFailureDomain str
return nil, err
}
klog.V(4).Infof("Node Details: %v", nodeDetails)
// Return those hosts that are in the given zone.
hosts := make([]*object.HostSystem, 0)
// Build a map of Host moRef to HostSystem
hostMap := make(map[string]*object.HostSystem)
for _, n := range nodeDetails {
// Match the provided zone failure domain with the node.
klog.V(9).Infof("Matching provided zone %s with node %s zone %s", zoneFailureDomain, n.NodeName, n.Zone.FailureDomain)
@@ -475,9 +475,14 @@
klog.Errorf("Failed to get host system for VM %s. err: %+v", n.vm, err)
continue
}
hosts = append(hosts, host)
hostMap[host.Reference().Value] = host
}
}
// Build the unique list of hosts.
hosts := make([]*object.HostSystem, 0)
for _, value := range hostMap {
hosts = append(hosts, value)
}
klog.V(4).Infof("GetHostsInZone %v returning: %v", zoneFailureDomain, hosts)
return hosts, nil
}
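Several node VMs can run on the same ESX host, so appending hosts directly (as the old code did) produced duplicates; keying by the host's managed object reference keeps each HostSystem once. The deduplication idiom in isolation, as a self-contained sketch:

package sketch

import "github.com/vmware/govmomi/object"

// dedupeHosts collapses duplicate HostSystems by managed object reference.
func dedupeHosts(in []*object.HostSystem) []*object.HostSystem {
	seen := make(map[string]*object.HostSystem, len(in))
	for _, h := range in {
		seen[h.Reference().Value] = h
	}
	out := make([]*object.HostSystem, 0, len(seen))
	for _, h := range seen {
		out = append(out, h)
	}
	return out
}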

View File

@@ -25,6 +25,7 @@ go_library(
importmap = "k8s.io/kubernetes/vendor/k8s.io/legacy-cloud-providers/vsphere/vclib",
importpath = "k8s.io/legacy-cloud-providers/vsphere/vclib",
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/pkg/version:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/github.com/vmware/govmomi/find:go_default_library",

View File

@@ -19,6 +19,7 @@ package vclib
import (
"strings"
"k8s.io/api/core/v1"
"k8s.io/klog"
)
@@ -34,6 +35,7 @@ type VolumeOptions struct {
StoragePolicyID string
SCSIControllerType string
Zone []string
SelectedNode *v1.Node
}
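For context, a hedged sketch of how the provisioner populates the two topology fields before calling into the cloud provider; the surrounding variable names are illustrative (see CreateVolume in vsphere_util.go for the real call site):

volumeOptions := &vclib.VolumeOptions{
	CapacityKB:   volSizeKB,    // assumed: derived from the PVC request
	Name:         name,
	Zone:         selectedZone, // from allowedTopologies; may be empty
	SelectedNode: selectedNode, // nil under immediate binding
}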
var (

View File

@@ -1198,12 +1198,37 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
}
var vmOptions *vclib.VMOptions
var zonesToSearch []string
if volumeOptions.SelectedNode != nil {
if len(volumeOptions.Zone) > 1 {
// In WaitForFirstConsumer mode, if more than one allowed topology is specified, the volume must satisfy all of them.
zonesToSearch = volumeOptions.Zone
} else {
// Pick the selectedNode's zone, if available.
nodeInfo, err := vs.nodeManager.GetNodeInfoWithNodeObject(volumeOptions.SelectedNode)
if err != nil {
klog.Errorf("Unable to get node information for %s. err: %+v", volumeOptions.SelectedNode.Name, err)
return "", err
}
klog.V(4).Infof("selectedNode info : %s", nodeInfo)
if nodeInfo.zone != nil && nodeInfo.zone.FailureDomain != "" {
zonesToSearch = append(zonesToSearch, nodeInfo.zone.FailureDomain)
}
}
} else {
// If there is no selectedNode, fall back to allowedTopologies, if provided.
zonesToSearch = volumeOptions.Zone
}
klog.V(1).Infof("Volume topology : %s", zonesToSearch)
if volumeOptions.VSANStorageProfileData != "" || volumeOptions.StoragePolicyName != "" {
// If datastore and zone are specified, first validate if the datastore is in the provided zone.
if len(volumeOptions.Zone) != 0 && volumeOptions.Datastore != "" {
klog.V(4).Infof("Specified zone : %s, datastore : %s", volumeOptions.Zone, volumeOptions.Datastore)
dsList, err = getDatastoresForZone(ctx, vs.nodeManager, volumeOptions.Zone)
if len(zonesToSearch) != 0 && volumeOptions.Datastore != "" {
klog.V(4).Infof("Specified zone : %s, datastore : %s", zonesToSearch, volumeOptions.Datastore)
dsList, err = getDatastoresForZone(ctx, vs.nodeManager, zonesToSearch)
if err != nil {
klog.Errorf("Failed to find a shared datastore matching zone %s. err: %+v", zonesToSearch, err)
return "", err
}
@@ -1215,7 +1240,7 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
}
}
if !found {
err := fmt.Errorf("The specified datastore %s does not match the provided zones : %s", volumeOptions.Datastore, volumeOptions.Zone)
err := fmt.Errorf("The specified datastore %s does not match the provided zones : %s", volumeOptions.Datastore, zonesToSearch)
klog.Error(err)
return "", err
}
@@ -1234,25 +1259,19 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
cleanUpRoutineInitLock.Unlock()
}
if volumeOptions.StoragePolicyName != "" && volumeOptions.Datastore == "" {
if len(volumeOptions.Zone) == 0 {
if len(zonesToSearch) == 0 {
klog.V(4).Infof("Selecting a shared datastore as per the storage policy %s", volumeOptions.StoragePolicyName)
datastoreInfo, err = getPbmCompatibleDatastore(ctx, vsi.conn.Client, volumeOptions.StoragePolicyName, vs.nodeManager)
} else {
// If zone is specified, first get the datastores in the zone.
dsList, err = getDatastoresForZone(ctx, vs.nodeManager, volumeOptions.Zone)
dsList, err = getDatastoresForZone(ctx, vs.nodeManager, zonesToSearch)
if err != nil {
klog.Errorf("Failed to find a shared datastore matching zone %s. err: %+v", volumeOptions.Zone, err)
return "", err
}
// If unable to get any datastore, fail the operation.
if len(dsList) == 0 {
err := fmt.Errorf("Failed to find a shared datastore matching zone %s", volumeOptions.Zone)
klog.Error(err)
klog.Errorf("Failed to find a shared datastore matching zone %s. err: %+v", zonesToSearch, err)
return "", err
}
klog.V(4).Infof("Specified zone : %s. Picking a datastore as per the storage policy %s among the zoned datastores : %s", volumeOptions.Zone,
klog.V(4).Infof("Specified zone : %s. Picking a datastore as per the storage policy %s among the zoned datastores : %s", zonesToSearch,
volumeOptions.StoragePolicyName, dsList)
// Among the compatible datastores, select the one based on the maximum free space.
datastoreInfo, err = getPbmCompatibleZonedDatastore(ctx, vsi.conn.Client, volumeOptions.StoragePolicyName, dsList)
@@ -1264,17 +1283,17 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
klog.V(1).Infof("Datastore selected as per policy : %s", datastoreInfo.Info.Name)
} else {
// If zone is specified, pick the datastore in the zone with maximum free space within the zone.
if volumeOptions.Datastore == "" && len(volumeOptions.Zone) != 0 {
klog.V(4).Infof("Specified zone : %s", volumeOptions.Zone)
dsList, err = getDatastoresForZone(ctx, vs.nodeManager, volumeOptions.Zone)
if volumeOptions.Datastore == "" && len(zonesToSearch) != 0 {
klog.V(4).Infof("Specified zone : %s", zonesToSearch)
dsList, err = getDatastoresForZone(ctx, vs.nodeManager, zonesToSearch)
if err != nil {
klog.Errorf("Failed to find a shared datastore matching zone %s. err: %+v", volumeOptions.Zone, err)
klog.Errorf("Failed to find a shared datastore matching zone %s. err: %+v", zonesToSearch, err)
return "", err
}
// If unable to get any datastore, fail the operation
if len(dsList) == 0 {
err := fmt.Errorf("Failed to find a shared datastore matching zone %s", volumeOptions.Zone)
err := fmt.Errorf("Failed to find a shared datastore matching zone %s", zonesToSearch)
klog.Error(err)
return "", err
}
@@ -1284,11 +1303,11 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
klog.Errorf("Failed to get shared datastore: %+v", err)
return "", err
}
klog.V(1).Infof("Specified zone : %s. Selected datastore : %s", volumeOptions.Zone, datastoreInfo.Info.Name)
klog.V(1).Infof("Specified zone : %s. Selected datastore : %s", zonesToSearch, datastoreInfo.Info.Name)
} else {
var sharedDsList []*vclib.DatastoreInfo
var err error
if len(volumeOptions.Zone) == 0 {
if len(zonesToSearch) == 0 {
// If zone is not provided, get the shared datastore across all node VMs.
klog.V(4).Infof("Validating if datastore %s is shared across all node VMs", datastoreName)
sharedDsList, err = getSharedDatastoresInK8SCluster(ctx, vs.nodeManager)
@@ -1300,14 +1319,14 @@ func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (canonicalVo
err = fmt.Errorf("The specified datastore %s is not a shared datastore across node VMs", datastoreName)
} else {
// If zone is provided, get the shared datastores in that zone.
klog.V(4).Infof("Validating if datastore %s is in zone %s ", datastoreName, volumeOptions.Zone)
sharedDsList, err = getDatastoresForZone(ctx, vs.nodeManager, volumeOptions.Zone)
klog.V(4).Infof("Validating if datastore %s is in zone %s ", datastoreName, zonesToSearch)
sharedDsList, err = getDatastoresForZone(ctx, vs.nodeManager, zonesToSearch)
if err != nil {
klog.Errorf("Failed to find a shared datastore matching zone %s. err: %+v", volumeOptions.Zone, err)
klog.Errorf("Failed to find a shared datastore matching zone %s. err: %+v", zonesToSearch, err)
return "", err
}
// Prepare error msg to be used later, if required.
err = fmt.Errorf("The specified datastore %s does not match the provided zones : %s", datastoreName, volumeOptions.Zone)
err = fmt.Errorf("The specified datastore %s does not match the provided zones : %s", datastoreName, zonesToSearch)
}
found := false
// Check if the selected datastore belongs to the list of shared datastores computed.

View File

@@ -306,6 +306,10 @@ func getDatastoresForZone(ctx context.Context, nodeManager *NodeManager, selecte
sharedDatastoresPerZone = dsObjList
} else {
sharedDatastoresPerZone = intersect(sharedDatastoresPerZone, dsObjList)
if len(sharedDatastoresPerZone) == 0 {
klog.V(4).Infof("No shared datastores found among hosts %s", hosts)
return nil, fmt.Errorf("No matching datastores found in the kubernetes cluster for zone %s", zone)
}
}
klog.V(9).Infof("Shared datastore list after processing host %s : %s", host, sharedDatastoresPerZone)
}
@@ -314,6 +318,9 @@ func getDatastoresForZone(ctx context.Context, nodeManager *NodeManager, selecte
sharedDatastores = sharedDatastoresPerZone
} else {
sharedDatastores = intersect(sharedDatastores, sharedDatastoresPerZone)
if len(sharedDatastores) == 0 {
return nil, fmt.Errorf("No matching datastores found in the kubernetes cluster across zones %s", selectedZones)
}
}
}
klog.V(1).Infof("Returning selected datastores : %s", sharedDatastores)

View File

@@ -39,6 +39,7 @@ go_library(
],
importpath = "k8s.io/kubernetes/test/e2e/storage/vsphere",
deps = [
"//pkg/controller/volume/events:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",

View File

@@ -129,7 +129,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
case storageclass4:
scParams[Datastore] = datastoreName
}
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams, nil))
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams, nil, ""))
gomega.Expect(sc).NotTo(gomega.BeNil(), "Storage class is empty")
framework.ExpectNoError(err, "Failed to create storage class")
defer client.StorageV1().StorageClasses().Delete(scname, nil)

View File

@@ -74,7 +74,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
ginkgo.By("Creating StorageClass for Statefulset")
scParameters := make(map[string]string)
scParameters["diskformat"] = "thin"
scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil)
scSpec := getVSphereStorageClassSpec(storageclassname, scParameters, nil, "")
sc, err := client.StorageV1().StorageClasses().Create(scSpec)
framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(sc.Name, nil)

View File

@@ -85,22 +85,22 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
var err error
switch scname {
case storageclass1:
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil, nil))
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil, nil, ""))
case storageclass2:
var scVSanParameters map[string]string
scVSanParameters = make(map[string]string)
scVSanParameters[Policy_HostFailuresToTolerate] = "1"
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil))
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""))
case storageclass3:
var scSPBMPolicyParameters map[string]string
scSPBMPolicyParameters = make(map[string]string)
scSPBMPolicyParameters[SpbmStoragePolicy] = policyName
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil))
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, ""))
case storageclass4:
var scWithDSParameters map[string]string
scWithDSParameters = make(map[string]string)
scWithDSParameters[Datastore] = datastoreName
scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil)
scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil, "")
sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec)
}
gomega.Expect(sc).NotTo(gomega.BeNil())

View File

@@ -212,7 +212,7 @@ func verifyContentOfVSpherePV(client clientset.Interface, pvc *v1.PersistentVolu
e2elog.Logf("Successfully verified content of the volume")
}
func getVSphereStorageClassSpec(name string, scParameters map[string]string, zones []string) *storagev1.StorageClass {
func getVSphereStorageClassSpec(name string, scParameters map[string]string, zones []string, volumeBindingMode storagev1.VolumeBindingMode) *storagev1.StorageClass {
var sc *storagev1.StorageClass
sc = &storagev1.StorageClass{
@@ -238,6 +238,10 @@ func getVSphereStorageClassSpec(name string, scParameters map[string]string, zon
}
sc.AllowedTopologies = append(sc.AllowedTopologies, term)
}
if volumeBindingMode != "" {
sc.VolumeBindingMode = &volumeBindingMode
}
return sc
}
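Existing call sites that don't care about binding mode pass the empty string, leaving VolumeBindingMode unset so the API server applies its Immediate default; opting into delayed binding looks like:

sc := getVSphereStorageClassSpec("wffc-sc", scParameters, zones,
	storagev1.VolumeBindingWaitForFirstConsumer)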

View File

@@ -70,7 +70,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",
scParameters[DiskFormat] = ThinDisk
err := invokeInvalidDatastoreTestNeg(client, namespace, scParameters)
framework.ExpectError(err)
errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": Datastore ` + InvalidDatastore + ` not found`
errorMsg := `Failed to provision volume with StorageClass \"` + DatastoreSCName + `\": Datastore '` + InvalidDatastore + `' not found`
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
}
@@ -79,7 +79,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]",
func invokeInvalidDatastoreTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
ginkgo.By("Creating Storage Class With Invalid Datastore")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters, nil))
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DatastoreSCName, scParameters, nil, ""))
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)

View File

@@ -108,7 +108,7 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
scParameters["diskformat"] = diskFormat
ginkgo.By("Creating Storage Class With DiskFormat")
storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters, nil)
storageClassSpec := getVSphereStorageClassSpec("thinsc", scParameters, nil, "")
storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec)
framework.ExpectNoError(err)

View File

@@ -66,7 +66,7 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
expectedDiskSize := "1Mi"
ginkgo.By("Creating Storage Class")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters, nil))
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(DiskSizeSCName, scParameters, nil, ""))
framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)

View File

@@ -147,7 +147,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
}
func createVolume(client clientset.Interface, namespace string, scParameters map[string]string) (*v1.PersistentVolumeClaim, []*v1.PersistentVolume) {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("fstype", scParameters, nil))
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("fstype", scParameters, nil, ""))
framework.ExpectNoError(err)
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)

View File

@@ -77,7 +77,7 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
*/
ginkgo.It("verify volume status after node power off", func() {
ginkgo.By("Creating a Storage Class")
storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil)
storageClassSpec := getVSphereStorageClassSpec("test-sc", nil, nil, "")
storageclass, err := client.StorageV1().StorageClasses().Create(storageClassSpec)
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)

View File

@@ -87,7 +87,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
ginkgo.By("Creating Storage Class")
scParameters := make(map[string]string)
scParameters["diskformat"] = "thin"
storageclass, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters, nil))
storageclass, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("thinsc", scParameters, nil, ""))
framework.ExpectNoError(err)
ginkgo.By("Creating PVCs using the Storage Class")

View File

@@ -129,22 +129,22 @@ func getTestStorageClasses(client clientset.Interface, policyName, datastoreName
var err error
switch scname {
case storageclass1:
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil, nil))
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass1, nil, nil, ""))
case storageclass2:
var scVSanParameters map[string]string
scVSanParameters = make(map[string]string)
scVSanParameters[Policy_HostFailuresToTolerate] = "1"
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil))
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass2, scVSanParameters, nil, ""))
case storageclass3:
var scSPBMPolicyParameters map[string]string
scSPBMPolicyParameters = make(map[string]string)
scSPBMPolicyParameters[SpbmStoragePolicy] = policyName
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil))
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(storageclass3, scSPBMPolicyParameters, nil, ""))
case storageclass4:
var scWithDSParameters map[string]string
scWithDSParameters = make(map[string]string)
scWithDSParameters[Datastore] = datastoreName
scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil)
scWithDatastoreSpec := getVSphereStorageClassSpec(storageclass4, scWithDSParameters, nil, "")
sc, err = client.StorageV1().StorageClasses().Create(scWithDatastoreSpec)
}
gomega.Expect(sc).NotTo(gomega.BeNil())

View File

@@ -280,7 +280,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, namespace string, scParameters map[string]string) {
ginkgo.By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil))
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""))
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
@@ -312,7 +312,7 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
ginkgo.By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil))
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""))
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
@@ -331,7 +331,7 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc
func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) {
ginkgo.By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil))
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters, nil, ""))
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)

View File

@@ -25,8 +25,10 @@ import (
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
volumeevents "k8s.io/kubernetes/pkg/controller/volume/events"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -131,7 +133,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
ginkgo.It("Verify PVC creation with invalid zone specified in storage class fails", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with unknown zone : %s", zoneD))
zones = append(zones, zoneD)
err := verifyPVCCreationFails(client, namespace, nil, zones)
err := verifyPVCCreationFails(client, namespace, nil, zones, "")
framework.ExpectError(err)
errorMsg := "Failed to find a shared datastore matching zone [" + zoneD + "]"
if !strings.Contains(err.Error(), errorMsg) {
@@ -142,29 +144,29 @@ var _ = utils.SIGDescribe("Zone Support", func() {
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on allowed zones specified in storage class ", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s", zoneA))
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones, "")
})
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on multiple zones specified in storage class ", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zones :%s, %s", zoneA, zoneB))
zones = append(zones, zoneA)
zones = append(zones, zoneB)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones, "")
})
ginkgo.It("Verify a pod is created and attached to a dynamically created PV, based on the allowed zones and datastore specified in storage class", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneA, vsanDatastore1))
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
})
ginkgo.It("Verify PVC creation with incompatible datastore and zone combination specified in storage class fails", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and datastore :%s", zoneC, vsanDatastore1))
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneC)
err := verifyPVCCreationFails(client, namespace, scParameters, zones)
errorMsg := "The specified datastore " + scParameters[Datastore] + " does not match the provided zones : [" + zoneC + "]"
err := verifyPVCCreationFails(client, namespace, scParameters, zones, "")
errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
}
@@ -174,21 +176,21 @@ var _ = utils.SIGDescribe("Zone Support", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
})
ginkgo.It("Verify a pod is created on a non-Workspace zone and attached to a dynamically created PV, based on the allowed zones and storage policy specified in storage class", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneB, compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy
zones = append(zones, zoneB)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
})
ginkgo.It("Verify PVC creation with incompatible storagePolicy and zone combination specified in storage class fails", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s and storage policy :%s", zoneA, nonCompatPolicy))
scParameters[SpbmStoragePolicy] = nonCompatPolicy
zones = append(zones, zoneA)
err := verifyPVCCreationFails(client, namespace, scParameters, zones)
err := verifyPVCCreationFails(client, namespace, scParameters, zones, "")
errorMsg := "No compatible datastores found that satisfy the storage policy requirements"
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
@@ -200,7 +202,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
scParameters[SpbmStoragePolicy] = compatPolicy
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
})
ginkgo.It("Verify PVC creation with incompatible storage policy along with compatible zone and datastore combination specified in storage class fails", func() {
@@ -208,7 +210,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
scParameters[SpbmStoragePolicy] = nonCompatPolicy
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
err := verifyPVCCreationFails(client, namespace, scParameters, zones)
err := verifyPVCCreationFails(client, namespace, scParameters, zones, "")
errorMsg := "User specified datastore is not compatible with the storagePolicy: \\\"" + nonCompatPolicy + "\\\"."
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
@@ -220,8 +222,8 @@ var _ = utils.SIGDescribe("Zone Support", func() {
scParameters[SpbmStoragePolicy] = compatPolicy
scParameters[Datastore] = vsanDatastore2
zones = append(zones, zoneC)
err := verifyPVCCreationFails(client, namespace, scParameters, zones)
errorMsg := "The specified datastore " + scParameters[Datastore] + " does not match the provided zones : [" + zoneC + "]"
err := verifyPVCCreationFails(client, namespace, scParameters, zones, "")
errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
}
@@ -229,7 +231,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
ginkgo.It("Verify PVC creation fails if no zones are specified in the storage class (No shared datastores exist among all the nodes)", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with no zones"))
err := verifyPVCCreationFails(client, namespace, nil, nil)
err := verifyPVCCreationFails(client, namespace, nil, nil, "")
errorMsg := "No shared datastores found in the Kubernetes cluster"
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
@@ -239,7 +241,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
ginkgo.It("Verify PVC creation fails if only datastore is specified in the storage class (No shared datastores exist among all the nodes)", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with datastore :%s", vsanDatastore1))
scParameters[Datastore] = vsanDatastore1
err := verifyPVCCreationFails(client, namespace, scParameters, nil)
err := verifyPVCCreationFails(client, namespace, scParameters, nil, "")
errorMsg := "No shared datastores found in the Kubernetes cluster"
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
@@ -249,7 +251,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
ginkgo.It("Verify PVC creation fails if only storage policy is specified in the storage class (No shared datastores exist among all the nodes)", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s", compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy
err := verifyPVCCreationFails(client, namespace, scParameters, nil)
err := verifyPVCCreationFails(client, namespace, scParameters, nil, "")
errorMsg := "No shared datastores found in the Kubernetes cluster"
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
@@ -260,7 +262,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with storage policy :%s and datastore :%s", compatPolicy, vsanDatastore1))
scParameters[SpbmStoragePolicy] = compatPolicy
scParameters[Datastore] = vsanDatastore1
err := verifyPVCCreationFails(client, namespace, scParameters, nil)
err := verifyPVCCreationFails(client, namespace, scParameters, nil, "")
errorMsg := "No shared datastores found in the Kubernetes cluster"
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
@@ -270,8 +272,8 @@ var _ = utils.SIGDescribe("Zone Support", func() {
ginkgo.It("Verify PVC creation fails if the availability zone specified in the storage class have no shared datastores under it.", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with zone :%s", zoneC))
zones = append(zones, zoneC)
err := verifyPVCCreationFails(client, namespace, nil, zones)
errorMsg := "Failed to find a shared datastore matching zone [" + zoneC + "]"
err := verifyPVCCreationFails(client, namespace, nil, zones, "")
errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
}
@@ -281,8 +283,8 @@ var _ = utils.SIGDescribe("Zone Support", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with the following zones :%s and %s", zoneA, zoneC))
zones = append(zones, zoneA)
zones = append(zones, zoneC)
err := verifyPVCCreationFails(client, namespace, nil, zones)
errorMsg := "Failed to find a shared datastore matching zone [" + zoneA + " " + zoneC + "]"
err := verifyPVCCreationFails(client, namespace, nil, zones, "")
errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
}
@@ -292,7 +294,7 @@ var _ = utils.SIGDescribe("Zone Support", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with %s :%s and zone :%s", Policy_HostFailuresToTolerate, HostFailuresToTolerateCapabilityInvalidVal, zoneA))
scParameters[Policy_HostFailuresToTolerate] = HostFailuresToTolerateCapabilityInvalidVal
zones = append(zones, zoneA)
err := verifyPVCCreationFails(client, namespace, scParameters, zones)
err := verifyPVCCreationFails(client, namespace, scParameters, zones, "")
errorMsg := "Invalid value for " + Policy_HostFailuresToTolerate + "."
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
@@ -305,12 +307,56 @@ var _ = utils.SIGDescribe("Zone Support", func() {
scParameters[Policy_IopsLimit] = IopsLimitCapabilityVal
scParameters[Datastore] = vsanDatastore1
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, "")
})
ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and storage policy :%s", compatPolicy))
scParameters[SpbmStoragePolicy] = compatPolicy
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, nil, storage.VolumeBindingWaitForFirstConsumer)
})
ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with allowedTopologies", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode, storage policy :%s and zone :%s", compatPolicy, zoneA))
scParameters[SpbmStoragePolicy] = compatPolicy
zones = append(zones, zoneA)
verifyPVCAndPodCreationSucceeds(client, namespace, scParameters, zones, storage.VolumeBindingWaitForFirstConsumer)
})
ginkgo.It("Verify a pod is created and attached to a dynamically created PV with storage policy specified in storage class in waitForFirstConsumer binding mode with multiple allowedTopologies", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and zones : %s, %s", zoneA, zoneB))
zones = append(zones, zoneA)
zones = append(zones, zoneB)
verifyPVCAndPodCreationSucceeds(client, namespace, nil, zones, storage.VolumeBindingWaitForFirstConsumer)
})
ginkgo.It("Verify a PVC creation fails when multiple zones are specified in the storage class without shared datastores among the zones in waitForFirstConsumer binding mode", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumer mode and following zones :%s and %s", zoneA, zoneC))
zones = append(zones, zoneA)
zones = append(zones, zoneC)
err := verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client, namespace, nil, zones)
framework.ExpectError(err)
errorMsg := "No matching datastores found in the kubernetes cluster for zone " + zoneC
if !strings.Contains(err.Error(), errorMsg) {
framework.ExpectNoError(err, errorMsg)
}
})
ginkgo.It("Verify a pod fails to get scheduled when conflicting volume topology (allowedTopologies) and pod scheduling constraints(nodeSelector) are specified", func() {
ginkgo.By(fmt.Sprintf("Creating storage class with waitForFirstConsumerMode, storage policy :%s and zone :%s", compatPolicy, zoneA))
scParameters[SpbmStoragePolicy] = compatPolicy
// allowedTopologies set as zoneA
zones = append(zones, zoneA)
nodeSelectorMap := map[string]string{
// nodeSelector set as zoneB
v1.LabelZoneFailureDomain: zoneB,
}
verifyPodSchedulingFails(client, namespace, nodeSelectorMap, scParameters, zones, storage.VolumeBindingWaitForFirstConsumer)
})
})
func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones))
func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storage.VolumeBindingMode) {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode))
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
@@ -321,16 +367,25 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
framework.ExpectNoError(err)
var persistentvolumes []*v1.PersistentVolume
// In WaitForFirstConsumer mode, verify the PVC binding status after pod creation; in immediate mode, verify it now.
if volumeBindingMode != storage.VolumeBindingWaitForFirstConsumer {
persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
}
ginkgo.By("Creating pod to attach PV to the node")
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
framework.ExpectNoError(err)
ginkgo.By("Verify persistent volume was created on the right zone")
verifyVolumeCreationOnRightZone(persistentvolumes, pod.Spec.NodeName, zones)
if volumeBindingMode == storage.VolumeBindingWaitForFirstConsumer {
persistentvolumes = waitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
}
if zones != nil {
ginkgo.By("Verify persistent volume was created on the right zone")
verifyVolumeCreationOnRightZone(persistentvolumes, pod.Spec.NodeName, zones)
}
ginkgo.By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
@@ -342,8 +397,67 @@ func verifyPVCAndPodCreationSucceeds(client clientset.Interface, namespace strin
waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
}
func verifyPVCCreationFails(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) error {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones))
func verifyPodAndPvcCreationFailureOnWaitForFirstConsumerMode(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) error {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones, storage.VolumeBindingWaitForFirstConsumer))
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Creating a pod")
pod := framework.MakePod(namespace, nil, pvclaims, false, "")
pod, err = client.CoreV1().Pods(namespace).Create(pod)
framework.ExpectNoError(err)
defer framework.DeletePodWithWait(f, client, pod)
ginkgo.By("Waiting for claim to be in bound phase")
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, pvclaim.Namespace, pvclaim.Name, framework.Poll, 2*time.Minute)
framework.ExpectError(err)
eventList, err := client.CoreV1().Events(pvclaim.Namespace).List(metav1.ListOptions{})
// Look for PVC ProvisioningFailed event and return the message.
for _, event := range eventList.Items {
if event.Source.Component == "persistentvolume-controller" && event.Reason == volumeevents.ProvisioningFailed {
return fmt.Errorf("Failure message: %s", event.Message)
}
}
return nil
}
func waitForPVClaimBoundPhase(client clientset.Interface, pvclaims []*v1.PersistentVolumeClaim, timeout time.Duration) []*v1.PersistentVolume {
ginkgo.By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, timeout)
framework.ExpectNoError(err)
return persistentvolumes
}
func verifyPodSchedulingFails(client clientset.Interface, namespace string, nodeSelector map[string]string, scParameters map[string]string, zones []string, volumeBindingMode storage.VolumeBindingMode) {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode))
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
ginkgo.By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClass(namespace, "2Gi", storageclass))
framework.ExpectNoError(err)
defer framework.DeletePersistentVolumeClaim(client, pvclaim.Name, namespace)
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
ginkgo.By("Creating a pod")
pod, err := framework.CreateUnschedulablePod(client, namespace, nodeSelector, pvclaims, false, "")
framework.ExpectNoError(err)
defer framework.DeletePodWithWait(f, client, pod)
}
func verifyPVCCreationFails(client clientset.Interface, namespace string, scParameters map[string]string, zones []string, volumeBindingMode storage.VolumeBindingMode) error {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", scParameters, zones, volumeBindingMode))
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)
@@ -365,7 +479,7 @@ func verifyPVCCreationFails(client clientset.Interface, namespace string, scPara
}
func verifyPVZoneLabels(client clientset.Interface, namespace string, scParameters map[string]string, zones []string) {
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", nil, zones))
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("zone-sc", nil, zones, ""))
framework.ExpectNoError(err, fmt.Sprintf("Failed to create storage class with err: %v", err))
defer client.StorageV1().StorageClasses().Delete(storageclass.Name, nil)