SPBM policy ID support in vsphere cloud provider
This commit is contained in:
parent 668fa94ccb
commit eb3cf509e5

@@ -5,7 +5,9 @@
- [Volumes](#volumes)
- [Persistent Volumes](#persistent-volumes)
- [Storage Class](#storage-class)
- [Virtual SAN policy support inside Kubernetes](#virtual-san-policy-support-inside-kubernetes)
- [Storage Policy Management inside kubernetes](#storage-policy-management-inside-kubernetes)
- [Using existing vCenter SPBM policy](#using-existing-vcenter-spbm-policy)
- [Virtual SAN policy support](#virtual-san-policy-support)
- [Stateful Set](#stateful-set)

## Prerequisites

@@ -374,7 +376,47 @@
pvpod 1/1 Running 0 48m
```

### Virtual SAN policy support inside Kubernetes
### Storage Policy Management inside kubernetes

#### Using existing vCenter SPBM policy

Admins can use an existing vCenter Storage Policy Based Management (SPBM) policy to configure a persistent volume with that policy.

__Note: You do not need to create a persistent volume here; it is created for you.__

1. Create a Storage Class.

Example 1:

```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: fast
provisioner: kubernetes.io/vsphere-volume
parameters:
  diskformat: zeroedthick
  storagePolicyName: gold
```

[Download example](vsphere-volume-spbm-policy.yaml?raw=true)

The admin specifies the SPBM policy "gold" as part of the storage class definition for dynamic volume provisioning. When a PVC is created, the persistent volume is provisioned on a compatible datastore with the maximum free space that satisfies the "gold" storage policy requirements.

Example 2:

```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: fast
provisioner: kubernetes.io/vsphere-volume
parameters:
  diskformat: zeroedthick
  storagePolicyName: gold
  datastore: VSANDatastore
```

[Download example](vsphere-volume-spbm-policy-with-datastore.yaml?raw=true)

The admin can also specify a custom datastore, along with the SPBM policy name, where the volume should be provisioned. When a PVC is created, the vSphere Cloud Provider checks whether the user-specified datastore satisfies the "gold" storage policy requirements. If it does, the persistent volume is provisioned on that datastore; if not, an error is returned stating that the specified datastore is not compatible with the "gold" storage policy requirements.
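
To consume either storage class, a user only creates a PersistentVolumeClaim that references it; the provisioner creates the backing virtual disk and persistent volume. Below is a minimal sketch; the claim name `pvcsc-spbm` and the 2Gi request are illustrative and not part of this commit, and on older clusters the `volume.beta.kubernetes.io/storage-class` annotation may be needed instead of `storageClassName`.

```yaml
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: pvcsc-spbm          # illustrative name, not defined in this commit
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 2Gi          # illustrative size
  storageClassName: fast    # the SPBM-backed storage class defined above
```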

#### Virtual SAN policy support

vSphere Infrastructure (VI) admins can specify custom Virtual SAN storage capabilities during dynamic volume provisioning. You can define storage requirements, such as performance and availability, in the form of storage capabilities in the storage class parameters. The capability requirements are converted into a Virtual SAN policy, which is pushed down to the Virtual SAN layer when the persistent volume (virtual disk) is created. The virtual disk is distributed across the Virtual SAN datastore to meet the requirements.
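
For comparison, a hedged sketch of a storage class that requests a Virtual SAN capability directly, rather than referencing an SPBM policy, might look like the following. The `hostfailurestotolerate` key matches the capability constant added in the provider code in this commit; the class name and the value "2" are only illustrative, and capability parameters cannot be combined with `storagePolicyName` (the provisioner rejects that combination).

```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: vsan-capability-example     # illustrative name
provisioner: kubernetes.io/vsphere-volume
parameters:
  diskformat: zeroedthick
  hostfailurestotolerate: "2"       # Virtual SAN capability; value is illustrative
```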

9 examples/volumes/vsphere/vsphere-volume-spbm-policy-with-datastore.yaml Normal file
@@ -0,0 +1,9 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: fast
provisioner: kubernetes.io/vsphere-volume
parameters:
  diskformat: zeroedthick
  storagePolicyName: gold
  datastore: VSANDatastore

8 examples/volumes/vsphere/vsphere-volume-spbm-policy.yaml Normal file
@@ -0,0 +1,8 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: fast
provisioner: kubernetes.io/vsphere-volume
parameters:
  diskformat: zeroedthick
  storagePolicyName: gold

@@ -24,6 +24,8 @@ go_library(
"//vendor/github.com/vmware/govmomi:go_default_library",
"//vendor/github.com/vmware/govmomi/find:go_default_library",
"//vendor/github.com/vmware/govmomi/object:go_default_library",
"//vendor/github.com/vmware/govmomi/pbm:go_default_library",
"//vendor/github.com/vmware/govmomi/pbm/types:go_default_library",
"//vendor/github.com/vmware/govmomi/property:go_default_library",
"//vendor/github.com/vmware/govmomi/session:go_default_library",
"//vendor/github.com/vmware/govmomi/vim25:go_default_library",

@@ -45,6 +45,7 @@ import (
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"

pbm "github.com/vmware/govmomi/pbm"
k8stypes "k8s.io/apimachinery/pkg/types"
k8runtime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/v1"

@@ -83,6 +84,7 @@ const (
CleanUpDummyVMRoutine_Interval = 5
UUIDPath = "/sys/class/dmi/id/product_serial"
UUIDPrefix = "VMware-"
NameProperty = "name"
)

// Controller types that are currently supported for hot attach of disks

@@ -166,7 +168,7 @@ type VSphereConfig struct {
type Volumes interface {
	// AttachDisk attaches given disk to given node. Current node
	// is used when nodeName is empty string.
	AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error)
	AttachDisk(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error)

	// DetachDisk detaches given disk to given node. Current node
	// is used when nodeName is empty string.

@@ -190,12 +192,14 @@ type Volumes interface {

// VolumeOptions specifies capacity, tags, name and diskFormat for a volume.
type VolumeOptions struct {
	CapacityKB int
	Tags map[string]string
	Name string
	DiskFormat string
	Datastore string
	StorageProfileData string
	CapacityKB int
	Tags map[string]string
	Name string
	DiskFormat string
	Datastore string
	VSANStorageProfileData string
	StoragePolicyName string
	StoragePolicyID string
}

// Generates Valid Options for Diskformat
@ -741,7 +745,7 @@ func cleanUpController(ctx context.Context, newSCSIController types.BaseVirtualD
|
||||
}
|
||||
|
||||
// Attaches given virtual disk volume to the compute running kubelet.
|
||||
func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error) {
|
||||
func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error) {
|
||||
var newSCSIController types.BaseVirtualDevice
|
||||
|
||||
// Create context
|
||||
@ -792,14 +796,8 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (di
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// verify scsi controller in virtual machine
|
||||
vmDevices, err := vm.Device(ctx)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// Get VM device list
|
||||
_, vmDevices, _, err = getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance)
|
||||
_, vmDevices, _, err := getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance)
|
||||
if err != nil {
|
||||
glog.Errorf("cannot get vmDevices for VM err=%s", err)
|
||||
return "", "", fmt.Errorf("cannot get vmDevices for VM err=%s", err)
|
||||
@ -818,9 +816,9 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (di
|
||||
|
||||
// Create a new finder
|
||||
f := find.NewFinder(vs.client.Client, true)
|
||||
|
||||
// Set data center
|
||||
f.SetDatacenter(dc)
|
||||
|
||||
datastorePathObj := new(object.DatastorePath)
|
||||
isSuccess := datastorePathObj.FromString(vmDiskPath)
|
||||
if !isSuccess {
|
||||
@ -844,28 +842,54 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (di
|
||||
backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
|
||||
backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent)
|
||||
|
||||
// Attach disk to the VM
|
||||
err = vm.AddDevice(ctx, disk)
|
||||
virtualMachineConfigSpec := types.VirtualMachineConfigSpec{}
|
||||
deviceConfigSpec := &types.VirtualDeviceConfigSpec{
|
||||
Device: disk,
|
||||
Operation: types.VirtualDeviceConfigSpecOperationAdd,
|
||||
}
|
||||
// Configure the disk with the SPBM profile only if ProfileID is not empty.
|
||||
if storagePolicyID != "" {
|
||||
profileSpec := &types.VirtualMachineDefinedProfileSpec{
|
||||
ProfileId: storagePolicyID,
|
||||
}
|
||||
deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, profileSpec)
|
||||
}
|
||||
virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec)
|
||||
task, err := vm.Reconfigure(ctx, virtualMachineConfigSpec)
|
||||
if err != nil {
|
||||
glog.Errorf("cannot attach disk to the vm - %v", err)
|
||||
glog.Errorf("Failed to attach the disk with storagePolicy: %+q with err - %v", storagePolicyID, err)
|
||||
if newSCSICreated {
|
||||
cleanUpController(ctx, newSCSIController, vmDevices, vm)
|
||||
}
|
||||
return "", "", err
|
||||
}
|
||||
err = task.Wait(ctx)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to attach the disk with storagePolicy: %+q with err - %v", storagePolicyID, err)
|
||||
if newSCSICreated {
|
||||
cleanUpController(ctx, newSCSIController, vmDevices, vm)
|
||||
}
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
vmDevices, err = vm.Device(ctx)
|
||||
deviceName, diskUUID, err := getVMDiskInfo(ctx, vm, disk)
|
||||
if err != nil {
|
||||
if newSCSICreated {
|
||||
cleanUpController(ctx, newSCSIController, vmDevices, vm)
|
||||
}
|
||||
vs.DetachDisk(deviceName, nodeName)
|
||||
return "", "", err
|
||||
}
|
||||
return deviceName, diskUUID, nil
|
||||
}
|
||||
|
||||
func getVMDiskInfo(ctx context.Context, vm *object.VirtualMachine, disk *types.VirtualDisk) (string, string, error) {
|
||||
vmDevices, err := vm.Device(ctx)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
devices := vmDevices.SelectByType(disk)
|
||||
if len(devices) < 1 {
|
||||
if newSCSICreated {
|
||||
cleanUpController(ctx, newSCSIController, vmDevices, vm)
|
||||
}
|
||||
return "", "", ErrNoDevicesFound
|
||||
}
|
||||
|
||||
@ -874,18 +898,13 @@ func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName k8stypes.NodeName) (di
|
||||
deviceName := devices.Name(newDevice)
|
||||
|
||||
// get device uuid
|
||||
diskUUID, err = getVirtualDiskUUID(newDevice)
|
||||
diskUUID, err := getVirtualDiskUUID(newDevice)
|
||||
if err != nil {
|
||||
if newSCSICreated {
|
||||
cleanUpController(ctx, newSCSIController, vmDevices, vm)
|
||||
}
|
||||
vs.DetachDisk(deviceName, nodeName)
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
return deviceName, diskUUID, nil
|
||||
}
|
||||
|
||||
func getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualController) (int32, error) {
|
||||
// get next available SCSI controller unit number
|
||||
var takenUnitNumbers [SCSIDeviceSlots]bool
|
||||
@ -1275,19 +1294,47 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string
|
||||
dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter)
|
||||
f.SetDatacenter(dc)
|
||||
|
||||
if volumeOptions.StoragePolicyName != "" {
|
||||
// Get the pbm client
|
||||
pbmClient, err := pbm.NewClient(ctx, vs.client.Client)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, volumeOptions.StoragePolicyName)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
compatibilityResult, err := vs.GetPlacementCompatibilityResult(ctx, pbmClient, volumeOptions.StoragePolicyID)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(compatibilityResult) < 1 {
|
||||
return "", fmt.Errorf("There are no compatible datastores that satisfy the storage policy: %+q requirements", volumeOptions.StoragePolicyID)
|
||||
}
|
||||
|
||||
if volumeOptions.Datastore != "" {
|
||||
ok, nonCompatibleDsref := vs.IsUserSpecifiedDatastoreNonCompatible(ctx, compatibilityResult, volumeOptions.Datastore)
|
||||
if ok {
|
||||
faultMsg := GetNonCompatibleDatastoreFaultMsg(compatibilityResult, *nonCompatibleDsref)
|
||||
return "", fmt.Errorf("User specified datastore: %q is not compatible with the storagePolicy: %q. Failed with faults: %+q", volumeOptions.Datastore, volumeOptions.StoragePolicyName, faultMsg)
|
||||
}
|
||||
} else {
|
||||
dsMoList, err := vs.GetCompatibleDatastoresMo(ctx, compatibilityResult)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
dsMo := GetMostFreeDatastore(dsMoList)
|
||||
datastore = dsMo.Info.GetDatastoreInfo().Name
|
||||
}
|
||||
}
|
||||
ds, err := f.Datastore(ctx, datastore)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed while searching for datastore %+q. err %s", datastore, err)
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Create a disk with the VSAN storage capabilities specified in the volumeOptions.StorageProfileData.
|
||||
// This is achieved by following steps:
|
||||
// 1. Create dummy VM if not already present.
|
||||
// 2. Add a new disk to the VM by performing VM reconfigure.
|
||||
// 3. Detach the new disk from the dummy VM.
|
||||
// 4. Delete the dummy VM.
|
||||
if volumeOptions.StorageProfileData != "" {
|
||||
if volumeOptions.VSANStorageProfileData != "" {
|
||||
// Check if the datastore is VSAN if any capability requirements are specified.
|
||||
// VSphere cloud provider now only supports VSAN capabilities requirements
|
||||
ok, err := checkIfDatastoreTypeIsVSAN(vs.client, ds)
|
||||
@ -1300,7 +1347,14 @@ func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string
|
||||
" The policy parameters will work only with VSAN Datastore."+
|
||||
" So, please specify a valid VSAN datastore in Storage class definition.", datastore)
|
||||
}
|
||||
|
||||
}
|
||||
// Create a disk with the VSAN storage capabilities specified in the volumeOptions.VSANStorageProfileData.
|
||||
// This is achieved by following steps:
|
||||
// 1. Create dummy VM if not already present.
|
||||
// 2. Add a new disk to the VM by performing VM reconfigure.
|
||||
// 3. Detach the new disk from the dummy VM.
|
||||
// 4. Delete the dummy VM.
|
||||
if volumeOptions.VSANStorageProfileData != "" || volumeOptions.StoragePolicyName != "" {
|
||||
// Acquire a read lock to ensure multiple PVC requests can be processed simultaneously.
|
||||
cleanUpDummyVMLock.RLock()
|
||||
defer cleanUpDummyVMLock.RUnlock()
|
||||
@ -1487,7 +1541,7 @@ func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) {
|
||||
f.SetDatacenter(dc)
|
||||
|
||||
// Get the folder reference for global working directory where the dummy VM needs to be created.
|
||||
vmFolder, err := getFolder(ctx, vs.client, vs.cfg.Global.Datacenter, vs.cfg.Global.WorkingDir)
|
||||
vmFolder, err := f.Folder(ctx, strings.TrimSuffix(vs.cfg.Global.WorkingDir, "/"))
|
||||
if err != nil {
|
||||
glog.V(4).Infof("[cleanUpDummyVMs] Unable to get the kubernetes folder: %q reference with err: %+v", vs.cfg.Global.WorkingDir, err)
|
||||
continue
|
||||
@ -1495,12 +1549,19 @@ func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) {
|
||||
|
||||
// A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests.
|
||||
cleanUpDummyVMLock.Lock()
|
||||
dummyVMRefList, err := getDummyVMList(ctx, vs.client, vmFolder, dummyVMPrefix)
|
||||
vmMoList, err := vs.GetVMsInsideFolder(ctx, vmFolder, []string{NameProperty})
|
||||
if err != nil {
|
||||
glog.V(4).Infof("[cleanUpDummyVMs] Unable to get dummy VM list in the kubernetes cluster: %q reference with err: %+v", vs.cfg.Global.WorkingDir, err)
|
||||
glog.V(4).Infof("[cleanUpDummyVMs] Unable to get VM list in the kubernetes cluster: %q reference with err: %+v", vs.cfg.Global.WorkingDir, err)
|
||||
cleanUpDummyVMLock.Unlock()
|
||||
continue
|
||||
}
|
||||
var dummyVMRefList []*object.VirtualMachine
|
||||
for _, vmMo := range vmMoList {
|
||||
if strings.HasPrefix(vmMo.Name, dummyVMPrefix) {
|
||||
dummyVMRefList = append(dummyVMRefList, object.NewVirtualMachine(vs.client.Client, vmMo.Reference()))
|
||||
}
|
||||
}
|
||||
|
||||
for _, dummyVMRef := range dummyVMRefList {
|
||||
err = deleteVM(ctx, dummyVMRef)
|
||||
if err != nil {
|
||||
@ -1512,33 +1573,6 @@ func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) {
|
||||
}
|
||||
}
|
||||
|
||||
// Get the dummy VM list from the kubernetes working directory.
|
||||
func getDummyVMList(ctx context.Context, c *govmomi.Client, vmFolder *object.Folder, dummyVMPrefix string) ([]*object.VirtualMachine, error) {
|
||||
vmFolders, err := vmFolder.Children(ctx)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("Unable to retrieve the virtual machines from the kubernetes cluster: %+v", vmFolder)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var dummyVMRefList []*object.VirtualMachine
|
||||
pc := property.DefaultCollector(c.Client)
|
||||
for _, vmFolder := range vmFolders {
|
||||
if vmFolder.Reference().Type == "VirtualMachine" {
|
||||
var vmRefs []types.ManagedObjectReference
|
||||
var vmMorefs []mo.VirtualMachine
|
||||
vmRefs = append(vmRefs, vmFolder.Reference())
|
||||
err = pc.Retrieve(ctx, vmRefs, []string{"name"}, &vmMorefs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if strings.HasPrefix(vmMorefs[0].Name, dummyVMPrefix) {
|
||||
dummyVMRefList = append(dummyVMRefList, object.NewVirtualMachine(c.Client, vmRefs[0]))
|
||||
}
|
||||
}
|
||||
}
|
||||
return dummyVMRefList, nil
|
||||
}
|
||||
|
||||
func (vs *VSphere) createDummyVM(ctx context.Context, datacenter *object.Datacenter, datastore *object.Datastore, vmName string) (*object.VirtualMachine, error) {
|
||||
// Create a virtual machine config spec with 1 SCSI adapter.
|
||||
virtualMachineConfigSpec := types.VirtualMachineConfigSpec{
|
||||
@ -1571,13 +1605,14 @@ func (vs *VSphere) createDummyVM(ctx context.Context, datacenter *object.Datacen
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Get the folder reference for global working directory where the dummy VM needs to be created.
|
||||
vmFolder, err := getFolder(ctx, vs.client, vs.cfg.Global.Datacenter, vs.cfg.Global.WorkingDir)
|
||||
f := find.NewFinder(vs.client.Client, true)
|
||||
dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter)
|
||||
f.SetDatacenter(dc)
|
||||
vmFolder, err := f.Folder(ctx, strings.TrimSuffix(vs.cfg.Global.WorkingDir, "/"))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to get the folder reference for %q with err: %+v", vs.cfg.Global.WorkingDir, err)
|
||||
}
|
||||
|
||||
task, err := vmFolder.CreateVM(ctx, virtualMachineConfigSpec, resourcePool, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -1674,12 +1709,17 @@ func (vs *VSphere) createVirtualDiskWithPolicy(ctx context.Context, datacenter *
|
||||
FileOperation: types.VirtualDeviceConfigSpecFileOperationCreate,
|
||||
}
|
||||
|
||||
storageProfileSpec := &types.VirtualMachineDefinedProfileSpec{
|
||||
ProfileId: "",
|
||||
ProfileData: &types.VirtualMachineProfileRawData{
|
||||
storageProfileSpec := &types.VirtualMachineDefinedProfileSpec{}
|
||||
// If the PBM storage policy ID is present, set the storage spec profile ID;
// else, set the raw VSAN policy string.
|
||||
if volumeOptions.StoragePolicyID != "" {
|
||||
storageProfileSpec.ProfileId = volumeOptions.StoragePolicyID
|
||||
} else if volumeOptions.VSANStorageProfileData != "" {
|
||||
storageProfileSpec.ProfileId = ""
|
||||
storageProfileSpec.ProfileData = &types.VirtualMachineProfileRawData{
|
||||
ExtensionKey: "com.vmware.vim.sps",
|
||||
ObjectData: volumeOptions.StorageProfileData,
|
||||
},
|
||||
ObjectData: volumeOptions.VSANStorageProfileData,
|
||||
}
|
||||
}
|
||||
|
||||
deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, storageProfileSpec)
|
||||
@ -1818,52 +1858,6 @@ func makeDirectoryInDatastore(c *govmomi.Client, dc *object.Datacenter, path str
|
||||
return err
|
||||
}
|
||||
|
||||
// Get the folder for a given VM
|
||||
func getFolder(ctx context.Context, c *govmomi.Client, datacenterName string, folderName string) (*object.Folder, error) {
|
||||
f := find.NewFinder(c.Client, true)
|
||||
|
||||
// Fetch and set data center
|
||||
dc, err := f.Datacenter(ctx, datacenterName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.SetDatacenter(dc)
|
||||
|
||||
folderName = strings.TrimSuffix(folderName, "/")
|
||||
dcFolders, err := dc.Folders(ctx)
|
||||
vmFolders, _ := dcFolders.VmFolder.Children(ctx)
|
||||
|
||||
var vmFolderRefs []types.ManagedObjectReference
|
||||
for _, vmFolder := range vmFolders {
|
||||
vmFolderRefs = append(vmFolderRefs, vmFolder.Reference())
|
||||
}
|
||||
|
||||
// Get only references of type folder.
|
||||
var folderRefs []types.ManagedObjectReference
|
||||
for _, vmFolder := range vmFolderRefs {
|
||||
if vmFolder.Type == "Folder" {
|
||||
folderRefs = append(folderRefs, vmFolder)
|
||||
}
|
||||
}
|
||||
|
||||
// Find the specific folder reference matching the folder name.
|
||||
var resultFolder *object.Folder
|
||||
pc := property.DefaultCollector(c.Client)
|
||||
for _, folderRef := range folderRefs {
|
||||
var refs []types.ManagedObjectReference
|
||||
var folderMorefs []mo.Folder
|
||||
refs = append(refs, folderRef)
|
||||
err = pc.Retrieve(ctx, refs, []string{"name"}, &folderMorefs)
|
||||
for _, fref := range folderMorefs {
|
||||
if fref.Name == folderName {
|
||||
resultFolder = object.NewFolder(c.Client, folderRef)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return resultFolder, nil
|
||||
}
|
||||
|
||||
// Delete the VM.
|
||||
func deleteVM(ctx context.Context, vm *object.VirtualMachine) error {
|
||||
destroyTask, err := vm.Destroy(ctx)
|
||||
|
@ -232,7 +232,7 @@ func TestVolumes(t *testing.T) {
|
||||
t.Fatalf("Cannot create a new VMDK volume: %v", err)
|
||||
}
|
||||
|
||||
_, _, err = vs.AttachDisk(volPath, "")
|
||||
_, _, err = vs.AttachDisk(volPath, "", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot attach volume(%s) to VM(%s): %v", volPath, nodeName, err)
|
||||
}
|
||||
|
@ -18,10 +18,28 @@ package vsphere
|
||||
|
||||
import (
|
||||
"context"
|
||||
"github.com/vmware/govmomi"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
|
||||
"fmt"
|
||||
|
||||
"github.com/vmware/govmomi"
|
||||
"github.com/vmware/govmomi/find"
|
||||
"github.com/vmware/govmomi/object"
|
||||
"github.com/vmware/govmomi/pbm"
|
||||
"github.com/vmware/govmomi/property"
|
||||
"github.com/vmware/govmomi/vim25/mo"
|
||||
"github.com/vmware/govmomi/vim25/types"
|
||||
|
||||
pbmtypes "github.com/vmware/govmomi/pbm/types"
|
||||
)
|
||||
|
||||
const (
|
||||
DatastoreProperty = "datastore"
|
||||
DatastoreInfoProperty = "info"
|
||||
Folder = "Folder"
|
||||
VirtualMachine = "VirtualMachine"
|
||||
)
|
||||
|
||||
// Reads the vSphere configuration from the system environment and constructs the vSphere object
|
||||
@ -63,3 +81,226 @@ func GetgovmomiClient(cfg *VSphereConfig) (*govmomi.Client, error) {
|
||||
client, err := newClient(context.TODO(), cfg)
|
||||
return client, err
|
||||
}
|
||||
|
||||
// Get placement compatibility result based on storage policy requirements.
|
||||
func (vs *VSphere) GetPlacementCompatibilityResult(ctx context.Context, pbmClient *pbm.Client, storagePolicyID string) (pbm.PlacementCompatibilityResult, error) {
|
||||
datastores, err := vs.getSharedDatastoresInK8SCluster(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var hubs []pbmtypes.PbmPlacementHub
|
||||
for _, ds := range datastores {
|
||||
hubs = append(hubs, pbmtypes.PbmPlacementHub{
|
||||
HubType: ds.Type,
|
||||
HubId: ds.Value,
|
||||
})
|
||||
}
|
||||
req := []pbmtypes.BasePbmPlacementRequirement{
|
||||
&pbmtypes.PbmPlacementCapabilityProfileRequirement{
|
||||
ProfileId: pbmtypes.PbmProfileId{
|
||||
UniqueId: storagePolicyID,
|
||||
},
|
||||
},
|
||||
}
|
||||
res, err := pbmClient.CheckRequirements(ctx, hubs, nil, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Verify if the user specified datastore is in the list of non-compatible datastores.
|
||||
// If yes, return the non compatible datastore reference.
|
||||
func (vs *VSphere) IsUserSpecifiedDatastoreNonCompatible(ctx context.Context, compatibilityResult pbm.PlacementCompatibilityResult, dsName string) (bool, *types.ManagedObjectReference) {
|
||||
dsMoList := vs.GetNonCompatibleDatastoresMo(ctx, compatibilityResult)
|
||||
for _, ds := range dsMoList {
|
||||
if ds.Info.GetDatastoreInfo().Name == dsName {
|
||||
dsMoRef := ds.Reference()
|
||||
return true, &dsMoRef
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func GetNonCompatibleDatastoreFaultMsg(compatibilityResult pbm.PlacementCompatibilityResult, dsMoref types.ManagedObjectReference) string {
|
||||
var faultMsg string
|
||||
for _, res := range compatibilityResult {
|
||||
if res.Hub.HubId == dsMoref.Value {
|
||||
for _, err := range res.Error {
|
||||
faultMsg = faultMsg + err.LocalizedMessage
|
||||
}
|
||||
}
|
||||
}
|
||||
return faultMsg
|
||||
}
|
||||
|
||||
// Get the best fit compatible datastore by free space.
|
||||
func GetMostFreeDatastore(dsMo []mo.Datastore) mo.Datastore {
|
||||
var curMax int64
|
||||
curMax = -1
|
||||
var index int
|
||||
for i, ds := range dsMo {
|
||||
dsFreeSpace := ds.Info.GetDatastoreInfo().FreeSpace
|
||||
if dsFreeSpace > curMax {
|
||||
curMax = dsFreeSpace
|
||||
index = i
|
||||
}
|
||||
}
|
||||
return dsMo[index]
|
||||
}
|
||||
|
||||
func (vs *VSphere) GetCompatibleDatastoresMo(ctx context.Context, compatibilityResult pbm.PlacementCompatibilityResult) ([]mo.Datastore, error) {
|
||||
compatibleHubs := compatibilityResult.CompatibleDatastores()
|
||||
// Return an error if there are no compatible datastores.
|
||||
if len(compatibleHubs) < 1 {
|
||||
return nil, fmt.Errorf("There are no compatible datastores that satisfy the storage policy requirements")
|
||||
}
|
||||
dsMoList, err := vs.getDatastoreMo(ctx, compatibleHubs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dsMoList, nil
|
||||
}
|
||||
|
||||
func (vs *VSphere) GetNonCompatibleDatastoresMo(ctx context.Context, compatibilityResult pbm.PlacementCompatibilityResult) []mo.Datastore {
|
||||
nonCompatibleHubs := compatibilityResult.NonCompatibleDatastores()
|
||||
// Return nil if there are no non-compatible datastores.
|
||||
if len(nonCompatibleHubs) < 1 {
|
||||
return nil
|
||||
}
|
||||
dsMoList, err := vs.getDatastoreMo(ctx, nonCompatibleHubs)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return dsMoList
|
||||
}
|
||||
|
||||
// Get the datastore managed objects for the place hubs using property collector.
|
||||
func (vs *VSphere) getDatastoreMo(ctx context.Context, hubs []pbmtypes.PbmPlacementHub) ([]mo.Datastore, error) {
|
||||
var dsMoRefs []types.ManagedObjectReference
|
||||
for _, hub := range hubs {
|
||||
dsMoRefs = append(dsMoRefs, types.ManagedObjectReference{
|
||||
Type: hub.HubType,
|
||||
Value: hub.HubId,
|
||||
})
|
||||
}
|
||||
|
||||
pc := property.DefaultCollector(vs.client.Client)
|
||||
var dsMoList []mo.Datastore
|
||||
err := pc.Retrieve(ctx, dsMoRefs, []string{DatastoreInfoProperty}, &dsMoList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dsMoList, nil
|
||||
}
|
||||
|
||||
// Get all datastores accessible for the virtual machine object.
|
||||
func (vs *VSphere) getSharedDatastoresInK8SCluster(ctx context.Context) ([]types.ManagedObjectReference, error) {
|
||||
f := find.NewFinder(vs.client.Client, true)
|
||||
dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter)
|
||||
f.SetDatacenter(dc)
|
||||
vmFolder, err := f.Folder(ctx, strings.TrimSuffix(vs.cfg.Global.WorkingDir, "/"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
vmMoList, err := vs.GetVMsInsideFolder(ctx, vmFolder, []string{NameProperty})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
index := 0
|
||||
var sharedDs []string
|
||||
for _, vmMo := range vmMoList {
|
||||
if !strings.HasPrefix(vmMo.Name, DummyVMPrefixName) {
|
||||
accessibleDatastores, err := vs.getAllAccessibleDatastores(ctx, vmMo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if index == 0 {
|
||||
sharedDs = accessibleDatastores
|
||||
} else {
|
||||
sharedDs = intersect(sharedDs, accessibleDatastores)
|
||||
if len(sharedDs) == 0 {
|
||||
return nil, fmt.Errorf("No shared datastores found in the Kubernetes cluster")
|
||||
}
|
||||
}
|
||||
index++
|
||||
}
|
||||
}
|
||||
var sharedDSMorefs []types.ManagedObjectReference
|
||||
for _, ds := range sharedDs {
|
||||
sharedDSMorefs = append(sharedDSMorefs, types.ManagedObjectReference{
|
||||
Value: ds,
|
||||
Type: "Datastore",
|
||||
})
|
||||
}
|
||||
return sharedDSMorefs, nil
|
||||
}
|
||||
|
||||
func intersect(list1 []string, list2 []string) []string {
|
||||
var sharedList []string
|
||||
for _, val1 := range list1 {
|
||||
// Check if val1 is found in list2
|
||||
for _, val2 := range list2 {
|
||||
if val1 == val2 {
|
||||
sharedList = append(sharedList, val1)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return sharedList
|
||||
}
|
||||
|
||||
// Get the VM list inside a folder.
|
||||
func (vs *VSphere) GetVMsInsideFolder(ctx context.Context, vmFolder *object.Folder, properties []string) ([]mo.VirtualMachine, error) {
|
||||
vmFolders, err := vmFolder.Children(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pc := property.DefaultCollector(vs.client.Client)
|
||||
var vmRefs []types.ManagedObjectReference
|
||||
var vmMoList []mo.VirtualMachine
|
||||
for _, vmFolder := range vmFolders {
|
||||
if vmFolder.Reference().Type == VirtualMachine {
|
||||
vmRefs = append(vmRefs, vmFolder.Reference())
|
||||
}
|
||||
}
|
||||
err = pc.Retrieve(ctx, vmRefs, properties, &vmMoList)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return vmMoList, nil
|
||||
}
|
||||
|
||||
// Get the datastores accessible for the virtual machine object.
|
||||
func (vs *VSphere) getAllAccessibleDatastores(ctx context.Context, vmMo mo.VirtualMachine) ([]string, error) {
|
||||
f := find.NewFinder(vs.client.Client, true)
|
||||
dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
f.SetDatacenter(dc)
|
||||
vmRegex := vs.cfg.Global.WorkingDir + vmMo.Name
|
||||
vmObj, err := f.VirtualMachine(ctx, vmRegex)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
host, err := vmObj.HostSystem(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var hostSystemMo mo.HostSystem
|
||||
s := object.NewSearchIndex(vs.client.Client)
|
||||
err = s.Properties(ctx, host.Reference(), []string{DatastoreProperty}, &hostSystemMo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var dsRefValues []string
|
||||
for _, dsRef := range hostSystemMo.Datastore {
|
||||
dsRefValues = append(dsRefValues, dsRef.Value)
|
||||
}
|
||||
return dsRefValues, nil
|
||||
}
|
||||
|

@@ -75,7 +75,7 @@ func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.No

	// vsphereCloud.AttachDisk checks if disk is already attached to host and
	// succeeds in that case, so no need to do that separately.
	_, diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, nodeName)
	_, diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, volumeSource.StoragePolicyID, nodeName)
	if err != nil {
		glog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.VolumePath, nodeName, err)
		return "", err
@ -233,7 +233,7 @@ type diskIsAttachedCall struct {
|
||||
ret error
|
||||
}
|
||||
|
||||
func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName) (string, string, error) {
|
||||
func (testcase *testcase) AttachDisk(diskName string, storagePolicyName string, nodeName types.NodeName) (string, string, error) {
|
||||
expected := &testcase.attach
|
||||
|
||||
if expected.diskName == "" && expected.nodeName == "" {
|
||||
|
@ -152,7 +152,7 @@ func (plugin *vsphereVolumePlugin) ConstructVolumeSpec(volumeName, mountPath str
|
||||
// Abstract interface to disk operations.
|
||||
type vdManager interface {
|
||||
// Creates a volume
|
||||
CreateVolume(provisioner *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeGB int, fstype string, err error)
|
||||
CreateVolume(provisioner *vsphereVolumeProvisioner) (volSpec *VolumeSpec, err error)
|
||||
// Deletes a volume
|
||||
DeleteVolume(deleter *vsphereVolumeDeleter) error
|
||||
}
|
||||
@ -344,13 +344,13 @@ func (plugin *vsphereVolumePlugin) newProvisionerInternal(options volume.VolumeO
|
||||
}
|
||||
|
||||
func (v *vsphereVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
vmDiskPath, sizeKB, fstype, err := v.manager.CreateVolume(v)
|
||||
volSpec, err := v.manager.CreateVolume(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if fstype == "" {
|
||||
fstype = "ext4"
|
||||
if volSpec.Fstype == "" {
|
||||
volSpec.Fstype = "ext4"
|
||||
}
|
||||
|
||||
pv := &v1.PersistentVolume{
|
||||
@ -365,12 +365,14 @@ func (v *vsphereVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
|
||||
PersistentVolumeReclaimPolicy: v.options.PersistentVolumeReclaimPolicy,
|
||||
AccessModes: v.options.PVC.Spec.AccessModes,
|
||||
Capacity: v1.ResourceList{
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dKi", sizeKB)),
|
||||
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dKi", volSpec.Size)),
|
||||
},
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
|
||||
VolumePath: vmDiskPath,
|
||||
FSType: fstype,
|
||||
VolumePath: volSpec.Path,
|
||||
FSType: volSpec.Fstype,
|
||||
StoragePolicyName: volSpec.StoragePolicyName,
|
||||
StoragePolicyID: volSpec.StoragePolicyID,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@ -63,8 +63,15 @@ func getFakeDeviceName(host volume.VolumeHost, volPath string) string {
|
||||
return path.Join(host.GetPluginDir(vsphereVolumePluginName), "device", volPath)
|
||||
}
|
||||
|
||||
func (fake *fakePDManager) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeKB int, fstype string, err error) {
|
||||
return "[local] test-volume-name.vmdk", 100, "ext4", nil
|
||||
func (fake *fakePDManager) CreateVolume(v *vsphereVolumeProvisioner) (volSpec *VolumeSpec, err error) {
|
||||
volSpec = &VolumeSpec{
|
||||
Path: "[local] test-volume-name.vmdk",
|
||||
Size: 100,
|
||||
Fstype: "ext4",
|
||||
StoragePolicyName: "gold",
|
||||
StoragePolicyID: "1234",
|
||||
}
|
||||
return volSpec, nil
|
||||
}
|
||||
|
||||
func (fake *fakePDManager) DeleteVolume(vd *vsphereVolumeDeleter) error {
|
||||
@ -155,6 +162,10 @@ func TestPlugin(t *testing.T) {
|
||||
t.Errorf("Provision() returned unexpected path %s", persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.VolumePath)
|
||||
}
|
||||
|
||||
if persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.StoragePolicyName != "gold" {
|
||||
t.Errorf("Provision() returned unexpected storagepolicy name %s", persistentSpec.Spec.PersistentVolumeSource.VsphereVolume.StoragePolicyName)
|
||||
}
|
||||
|
||||
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
|
||||
size := cap.Value()
|
||||
if size != 100*1024 {
|
||||
|
@ -39,6 +39,7 @@ const (
|
||||
diskformat = "diskformat"
|
||||
datastore = "datastore"
|
||||
Fstype = "fstype"
|
||||
StoragePolicyName = "storagepolicyname"
|
||||
|
||||
HostFailuresToTolerateCapability = "hostfailurestotolerate"
|
||||
ForceProvisioningCapability = "forceprovisioning"
|
||||
@ -63,6 +64,14 @@ var ErrProbeVolume = errors.New("Error scanning attached volumes")
|
||||
|
||||
type VsphereDiskUtil struct{}
|
||||
|
||||
type VolumeSpec struct {
|
||||
Path string
|
||||
Size int
|
||||
Fstype string
|
||||
StoragePolicyID string
|
||||
StoragePolicyName string
|
||||
}
|
||||
|
||||
func verifyDevicePath(path string) (string, error) {
|
||||
if pathExists, err := volumeutil.PathExists(path); err != nil {
|
||||
return "", fmt.Errorf("Error checking if path exists: %v", err)
|
||||
@ -74,11 +83,11 @@ func verifyDevicePath(path string) (string, error) {
|
||||
}
|
||||
|
||||
// CreateVolume creates a vSphere volume.
|
||||
func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPath string, volumeSizeKB int, fstype string, err error) {
|
||||
|
||||
func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec *VolumeSpec, err error) {
|
||||
var fstype string
|
||||
cloud, err := getCloudProvider(v.plugin.host.GetCloudProvider())
|
||||
if err != nil {
|
||||
return "", 0, "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
capacity := v.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
|
||||
@ -103,37 +112,48 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (vmDiskPa
|
||||
case Fstype:
|
||||
fstype = value
|
||||
glog.V(4).Infof("Setting fstype as %q", fstype)
|
||||
case StoragePolicyName:
|
||||
volumeOptions.StoragePolicyName = value
|
||||
glog.V(4).Infof("Setting StoragePolicyName as %q", volumeOptions.StoragePolicyName)
|
||||
case HostFailuresToTolerateCapability, ForceProvisioningCapability,
|
||||
CacheReservationCapability, DiskStripesCapability,
|
||||
ObjectSpaceReservationCapability, IopsLimitCapability:
|
||||
capabilityData, err := validateVSANCapability(strings.ToLower(parameter), value)
|
||||
if err != nil {
|
||||
return "", 0, "", err
|
||||
} else {
|
||||
volumeOptions.StorageProfileData += capabilityData
|
||||
return nil, err
|
||||
}
|
||||
|
||||
volumeOptions.VSANStorageProfileData += capabilityData
|
||||
default:
|
||||
return "", 0, "", fmt.Errorf("invalid option %q for volume plugin %s", parameter, v.plugin.GetPluginName())
|
||||
return nil, fmt.Errorf("invalid option %q for volume plugin %s", parameter, v.plugin.GetPluginName())
|
||||
}
|
||||
}
|
||||
|
||||
if volumeOptions.StorageProfileData != "" {
|
||||
volumeOptions.StorageProfileData = "(" + volumeOptions.StorageProfileData + ")"
|
||||
if volumeOptions.VSANStorageProfileData != "" {
|
||||
if volumeOptions.StoragePolicyName != "" {
|
||||
return nil, fmt.Errorf("Cannot specify storage policy capabilities along with storage policy name. Please specify only one.")
|
||||
}
|
||||
volumeOptions.VSANStorageProfileData = "(" + volumeOptions.VSANStorageProfileData + ")"
|
||||
}
|
||||
glog.V(1).Infof("StorageProfileData in vsphere volume %q", volumeOptions.StorageProfileData)
|
||||
glog.V(4).Infof("VSANStorageProfileData in vsphere volume %q", volumeOptions.VSANStorageProfileData)
|
||||
// TODO: implement PVC.Selector parsing
|
||||
if v.options.PVC.Spec.Selector != nil {
|
||||
return "", 0, "", fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on vSphere")
|
||||
return nil, fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on vSphere")
|
||||
}
|
||||
|
||||
vmDiskPath, err = cloud.CreateVolume(volumeOptions)
|
||||
vmDiskPath, err := cloud.CreateVolume(volumeOptions)
|
||||
if err != nil {
|
||||
glog.V(2).Infof("Error creating vsphere volume: %v", err)
|
||||
return "", 0, "", err
|
||||
return nil, err
|
||||
}
|
||||
volSpec = &VolumeSpec{
|
||||
Path: vmDiskPath,
|
||||
Size: volSizeKB,
|
||||
Fstype: fstype,
|
||||
StoragePolicyName: volumeOptions.StoragePolicyName,
|
||||
StoragePolicyID: volumeOptions.StoragePolicyID,
|
||||
}
|
||||
glog.V(2).Infof("Successfully created vsphere volume %s", name)
|
||||
return vmDiskPath, volSizeKB, fstype, nil
|
||||
return volSpec, nil
|
||||
}
|
||||
|
||||
// DeleteVolume deletes a vSphere volume.
|
||||
|