diff --git a/pkg/cloudprovider/providers/vsphere/BUILD b/pkg/cloudprovider/providers/vsphere/BUILD index 6b0e720f385..41f2d4bf37d 100644 --- a/pkg/cloudprovider/providers/vsphere/BUILD +++ b/pkg/cloudprovider/providers/vsphere/BUILD @@ -12,32 +12,24 @@ go_library( name = "go_default_library", srcs = [ "vsphere.go", - "vsphere_metrics.go", "vsphere_util.go", ], tags = ["automanaged"], deps = [ "//pkg/api/v1/helper:go_default_library", "//pkg/cloudprovider:go_default_library", + "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", + "//pkg/cloudprovider/providers/vsphere/vclib/diskmanagers:go_default_library", "//pkg/controller:go_default_library", "//vendor/github.com/golang/glog:go_default_library", - "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library", "//vendor/github.com/vmware/govmomi:go_default_library", - "//vendor/github.com/vmware/govmomi/find:go_default_library", "//vendor/github.com/vmware/govmomi/object:go_default_library", - "//vendor/github.com/vmware/govmomi/pbm:go_default_library", - "//vendor/github.com/vmware/govmomi/pbm/types:go_default_library", - "//vendor/github.com/vmware/govmomi/property:go_default_library", - "//vendor/github.com/vmware/govmomi/session:go_default_library", "//vendor/github.com/vmware/govmomi/vim25:go_default_library", "//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library", - "//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library", - "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/gopkg.in/gcfg.v1:go_default_library", "//vendor/k8s.io/api/core/v1:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", - "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library", ], ) @@ -48,6 +40,7 @@ go_test( tags = ["automanaged"], deps = [ "//pkg/cloudprovider:go_default_library", + "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", "//vendor/golang.org/x/net/context:go_default_library", "//vendor/k8s.io/apimachinery/pkg/types:go_default_library", "//vendor/k8s.io/apimachinery/pkg/util/rand:go_default_library", @@ -63,6 +56,9 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//pkg/cloudprovider/providers/vsphere/vclib:all-srcs", + ], tags = ["automanaged"], ) diff --git a/pkg/cloudprovider/providers/vsphere/vsphere.go b/pkg/cloudprovider/providers/vsphere/vsphere.go index 91f8e3734b7..b1409ea3b28 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere.go @@ -20,12 +20,9 @@ import ( "errors" "fmt" "io" - "io/ioutil" "net" - "net/url" "path" "path/filepath" - "regexp" "runtime" "strings" "sync" @@ -34,96 +31,45 @@ import ( "gopkg.in/gcfg.v1" "github.com/golang/glog" - "github.com/vmware/govmomi" - "github.com/vmware/govmomi/find" - "github.com/vmware/govmomi/object" - "github.com/vmware/govmomi/property" - "github.com/vmware/govmomi/session" - "github.com/vmware/govmomi/vim25" - "github.com/vmware/govmomi/vim25/mo" - "github.com/vmware/govmomi/vim25/soap" - "github.com/vmware/govmomi/vim25/types" "golang.org/x/net/context" - - pbm "github.com/vmware/govmomi/pbm" "k8s.io/api/core/v1" k8stypes "k8s.io/apimachinery/pkg/types" - k8runtime "k8s.io/apimachinery/pkg/util/runtime" v1helper "k8s.io/kubernetes/pkg/api/v1/helper" "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" + 
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers" "k8s.io/kubernetes/pkg/controller" ) +// VSphere Cloud Provider constants const ( - ProviderName = "vsphere" - ActivePowerState = "poweredOn" - SCSIControllerType = "scsi" - LSILogicControllerType = "lsiLogic" - BusLogicControllerType = "busLogic" - PVSCSIControllerType = "pvscsi" - LSILogicSASControllerType = "lsiLogic-sas" - SCSIControllerLimit = 4 - SCSIControllerDeviceLimit = 15 - SCSIDeviceSlots = 16 - SCSIReservedSlot = 7 - ThinDiskType = "thin" - PreallocatedDiskType = "preallocated" - EagerZeroedThickDiskType = "eagerZeroedThick" - ZeroedThickDiskType = "zeroedThick" - VolDir = "kubevols" - RoundTripperDefaultCount = 3 - DummyVMPrefixName = "vsphere-k8s" - VSANDatastoreType = "vsan" - MAC_OUI_VC = "00:50:56" - MAC_OUI_ESX = "00:0c:29" - DiskNotFoundErrMsg = "No vSphere disk ID found" - NoDiskUUIDFoundErrMsg = "No disk UUID found" - NoDevicesFoundErrMsg = "No devices found" - NonSupportedControllerTypeErrMsg = "Disk is attached to non-supported controller type" - FileAlreadyExistErrMsg = "File requested already exist" - CleanUpDummyVMRoutine_Interval = 5 - UUIDPath = "/sys/class/dmi/id/product_serial" - UUIDPrefix = "VMware-" - NameProperty = "name" + ProviderName = "vsphere" + VolDir = "kubevols" + RoundTripperDefaultCount = 3 + DummyVMPrefixName = "vsphere-k8s" + VSANDatastoreType = "vsan" + MacOuiVC = "00:50:56" + MacOuiEsx = "00:0c:29" + CleanUpDummyVMRoutineInterval = 5 + UUIDPath = "/sys/class/dmi/id/product_serial" + UUIDPrefix = "VMware-" ) -// Controller types that are currently supported for hot attach of disks -// lsilogic driver type is currently not supported because,when a device gets detached -// it fails to remove the device from the /dev path (which should be manually done) -// making the subsequent attaches to the node to fail. -// TODO: Add support for lsilogic driver type -var supportedSCSIControllerType = []string{strings.ToLower(LSILogicSASControllerType), PVSCSIControllerType} - -// Maps user options to API parameters. -// Keeping user options consistent with docker volume plugin for vSphere. -// API: http://pubs.vmware.com/vsphere-60/index.jsp#com.vmware.wssdk.apiref.doc/vim.VirtualDiskManager.VirtualDiskType.html -var diskFormatValidType = map[string]string{ - ThinDiskType: ThinDiskType, - strings.ToLower(EagerZeroedThickDiskType): EagerZeroedThickDiskType, - strings.ToLower(ZeroedThickDiskType): PreallocatedDiskType, -} - -var DiskformatValidOptions = generateDiskFormatValidOptions() var cleanUpRoutineInitialized = false -var ErrNoDiskUUIDFound = errors.New(NoDiskUUIDFoundErrMsg) -var ErrNoDiskIDFound = errors.New(DiskNotFoundErrMsg) -var ErrNoDevicesFound = errors.New(NoDevicesFoundErrMsg) -var ErrNonSupportedControllerType = errors.New(NonSupportedControllerTypeErrMsg) -var ErrFileAlreadyExist = errors.New(FileAlreadyExistErrMsg) - var clientLock sync.Mutex var cleanUpRoutineInitLock sync.Mutex var cleanUpDummyVMLock sync.RWMutex // VSphere is an implementation of cloud provider Interface for VSphere. type VSphere struct { - client *govmomi.Client - cfg *VSphereConfig + conn *vclib.VSphereConnection + cfg *VSphereConfig // InstanceID of the server where this VSphere object is instantiated. localInstanceID string } +// VSphereConfig information that is used by vSphere Cloud Provider to connect to VC type VSphereConfig struct { Global struct { // vCenter username. @@ -168,7 +114,7 @@ type VSphereConfig struct { type Volumes interface { // AttachDisk attaches given disk to given node. 
Current node // is used when nodeName is empty string. - AttachDisk(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error) + AttachDisk(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskUUID string, err error) // DetachDisk detaches given disk to given node. Current node // is used when nodeName is empty string. @@ -184,34 +130,12 @@ type Volumes interface { DisksAreAttached(volPath []string, nodeName k8stypes.NodeName) (map[string]bool, error) // CreateVolume creates a new vmdk with specified parameters. - CreateVolume(volumeOptions *VolumeOptions) (volumePath string, err error) + CreateVolume(volumeOptions *vclib.VolumeOptions) (volumePath string, err error) // DeleteVolume deletes vmdk. DeleteVolume(vmDiskPath string) error } -// VolumeOptions specifies capacity, tags, name and diskFormat for a volume. -type VolumeOptions struct { - CapacityKB int - Tags map[string]string - Name string - DiskFormat string - Datastore string - VSANStorageProfileData string - StoragePolicyName string - StoragePolicyID string -} - -// Generates Valid Options for Diskformat -func generateDiskFormatValidOptions() string { - validopts := "" - for diskformat := range diskFormatValidType { - validopts += (diskformat + ", ") - } - validopts = strings.TrimSuffix(validopts, ", ") - return validopts -} - // Parses vSphere cloud config file and stores it into VSphereConfig. func readConfig(config io.Reader) (VSphereConfig, error) { if config == nil { @@ -225,7 +149,7 @@ func readConfig(config io.Reader) (VSphereConfig, error) { } func init() { - registerMetrics() + vclib.RegisterMetrics() cloudprovider.RegisterCloudProvider(ProviderName, func(config io.Reader) (cloudprovider.Interface, error) { cfg, err := readConfig(config) if err != nil { @@ -238,96 +162,16 @@ func init() { // Initialize passes a Kubernetes clientBuilder interface to the cloud provider func (vs *VSphere) Initialize(clientBuilder controller.ControllerClientBuilder) {} -// UUID gets the BIOS UUID via the sys interface. This UUID is known by vsphere -func getvmUUID() (string, error) { - id, err := ioutil.ReadFile(UUIDPath) - if err != nil { - return "", fmt.Errorf("error retrieving vm uuid: %s", err) - } - uuidFromFile := string(id[:]) - //strip leading and trailing white space and new line char - uuid := strings.TrimSpace(uuidFromFile) - // check the uuid starts with "VMware-" - if !strings.HasPrefix(uuid, UUIDPrefix) { - return "", fmt.Errorf("Failed to match Prefix, UUID read from the file is %v", uuidFromFile) - } - // Strip the prefix and while spaces and - - uuid = strings.Replace(uuid[len(UUIDPrefix):(len(uuid))], " ", "", -1) - uuid = strings.Replace(uuid, "-", "", -1) - if len(uuid) != 32 { - return "", fmt.Errorf("Length check failed, UUID read from the file is %v", uuidFromFile) - } - // need to add dashes, e.g. "564d395e-d807-e18a-cb25-b79f65eb2b9f" - uuid = fmt.Sprintf("%s-%s-%s-%s-%s", uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32]) - return uuid, nil -} - -// Returns the name of the VM on which this code is running. -// Will attempt to determine the machine's name via it's UUID in this precedence order, failing if neither have a UUID: -// * cloud config value VMUUID -// * sysfs entry -func getVMName(client *govmomi.Client, cfg *VSphereConfig) (string, error) { - var vmUUID string - var err error - - if cfg.Global.VMUUID != "" { - vmUUID = cfg.Global.VMUUID - } else { - // This needs root privileges on the host, and will fail otherwise. 
- vmUUID, err = getvmUUID() - if err != nil { - return "", err - } - cfg.Global.VMUUID = vmUUID - } - - if vmUUID == "" { - return "", fmt.Errorf("unable to determine machine ID from cloud configuration or sysfs") - } - - // Create context - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Create a new finder - f := find.NewFinder(client.Client, true) - - // Fetch and set data center - dc, err := f.Datacenter(ctx, cfg.Global.Datacenter) - if err != nil { - return "", err - } - f.SetDatacenter(dc) - - s := object.NewSearchIndex(client.Client) - - svm, err := s.FindByUuid(ctx, dc, strings.ToLower(strings.TrimSpace(vmUUID)), true, nil) - if err != nil { - return "", err - } - - if svm == nil { - return "", fmt.Errorf("unable to find machine reference by UUID") - } - - var vm mo.VirtualMachine - err = s.Properties(ctx, svm.Reference(), []string{"name"}, &vm) - if err != nil { - return "", err - } - - return vm.Name, nil -} - func newVSphere(cfg VSphereConfig) (*VSphere, error) { + var err error if cfg.Disk.SCSIControllerType == "" { - cfg.Disk.SCSIControllerType = PVSCSIControllerType - } else if !checkControllerSupported(cfg.Disk.SCSIControllerType) { + cfg.Disk.SCSIControllerType = vclib.PVSCSIControllerType + } else if !vclib.CheckControllerSupported(cfg.Disk.SCSIControllerType) { glog.Errorf("%v is not a supported SCSI Controller type. Please configure 'lsilogic-sas' OR 'pvscsi'", cfg.Disk.SCSIControllerType) return nil, errors.New("Controller type not supported. Please configure 'lsilogic-sas' OR 'pvscsi'") } if cfg.Global.WorkingDir != "" { - cfg.Global.WorkingDir = path.Clean(cfg.Global.WorkingDir) + "/" + cfg.Global.WorkingDir = path.Clean(cfg.Global.WorkingDir) } if cfg.Global.RoundTripperCount == 0 { cfg.Global.RoundTripperCount = RoundTripperDefaultCount @@ -335,133 +179,66 @@ func newVSphere(cfg VSphereConfig) (*VSphere, error) { if cfg.Global.VCenterPort == "" { cfg.Global.VCenterPort = "443" } + if cfg.Global.VMUUID == "" { + // This needs root privileges on the host, and will fail otherwise. + cfg.Global.VMUUID, err = getvmUUID() + if err != nil { + glog.Errorf("Failed to get VM UUID. 
err: %+v", err) + return nil, err + } + } + vSphereConn := vclib.VSphereConnection{ + Username: cfg.Global.User, + Password: cfg.Global.Password, + Hostname: cfg.Global.VCenterIP, + Insecure: cfg.Global.InsecureFlag, + RoundTripperCount: cfg.Global.RoundTripperCount, + Port: cfg.Global.VCenterPort, + } + var instanceID string - var c *govmomi.Client - var id string if cfg.Global.VMName == "" { // if VMName is not set in the cloud config file, each nodes (including worker nodes) need credentials to obtain VMName from vCenter glog.V(4).Infof("Cannot find VMName from cloud config file, start obtaining it from vCenter") - c, err := newClient(context.TODO(), &cfg) + // Create context + ctx, cancel := context.WithCancel(context.TODO()) + defer cancel() + err = vSphereConn.Connect(ctx) + if err != nil { + glog.Errorf("Failed to connect to vSphere") + return nil, err + } + dc, err := vclib.GetDatacenter(ctx, &vSphereConn, cfg.Global.Datacenter) if err != nil { return nil, err } - - id, err = getVMName(c, &cfg) + vm, err := dc.GetVMByUUID(ctx, cfg.Global.VMUUID) if err != nil { return nil, err } + vmName, err := vm.ObjectName(ctx) + if err != nil { + return nil, err + } + instanceID = vmName } else { - id = cfg.Global.VMName + instanceID = cfg.Global.VMName } - vs := VSphere{ - client: c, + conn: &vSphereConn, cfg: &cfg, - localInstanceID: id, + localInstanceID: instanceID, } runtime.SetFinalizer(&vs, logout) - return &vs, nil } -// Returns if the given controller type is supported by the plugin -func checkControllerSupported(ctrlType string) bool { - for _, c := range supportedSCSIControllerType { - if ctrlType == c { - return true - } - } - return false -} - func logout(vs *VSphere) { - if vs.client != nil { - vs.client.Logout(context.TODO()) + if vs.conn.GoVmomiClient != nil { + vs.conn.GoVmomiClient.Logout(context.TODO()) } } -func newClient(ctx context.Context, cfg *VSphereConfig) (*govmomi.Client, error) { - // Parse URL from string - u, err := url.Parse(fmt.Sprintf("https://%s:%s/sdk", cfg.Global.VCenterIP, cfg.Global.VCenterPort)) - if err != nil { - return nil, err - } - // set username and password for the URL - u.User = url.UserPassword(cfg.Global.User, cfg.Global.Password) - - // Connect and log in to ESX or vCenter - c, err := govmomi.NewClient(ctx, u, cfg.Global.InsecureFlag) - if err != nil { - return nil, err - } - - // Add retry functionality - c.RoundTripper = vim25.Retry(c.RoundTripper, vim25.TemporaryNetworkError(int(cfg.Global.RoundTripperCount))) - - return c, nil -} - -// Returns a client which communicates with vCenter. -// This client can used to perform further vCenter operations. -func vSphereLogin(ctx context.Context, vs *VSphere) error { - var err error - clientLock.Lock() - defer clientLock.Unlock() - if vs.client == nil { - vs.client, err = newClient(ctx, vs.cfg) - if err != nil { - return err - } - return nil - } - - m := session.NewManager(vs.client.Client) - // retrieve client's current session - u, err := m.UserSession(ctx) - if err != nil { - glog.Errorf("Error while obtaining user session. err: %q", err) - return err - } - if u != nil { - return nil - } - - glog.Warningf("Creating new client session since the existing session is not valid or not authenticated") - vs.client.Logout(ctx) - vs.client, err = newClient(ctx, vs.cfg) - if err != nil { - return err - } - - return nil -} - -// Returns vSphere object `virtual machine` by its name. 
-func getVirtualMachineByName(ctx context.Context, cfg *VSphereConfig, c *govmomi.Client, nodeName k8stypes.NodeName) (*object.VirtualMachine, error) { - name := nodeNameToVMName(nodeName) - - // Create a new finder - f := find.NewFinder(c.Client, true) - - // Fetch and set data center - dc, err := f.Datacenter(ctx, cfg.Global.Datacenter) - if err != nil { - return nil, err - } - f.SetDatacenter(dc) - - vmRegex := cfg.Global.WorkingDir + name - - // Retrieve vm by name - //TODO: also look for vm inside subfolders - vm, err := f.VirtualMachine(ctx, vmRegex) - if err != nil { - return nil, err - } - - return vm, nil -} - // Instances returns an implementation of Instances for vSphere. func (vs *VSphere) Instances() (cloudprovider.Instances, bool) { return vs, true @@ -469,13 +246,11 @@ func (vs *VSphere) Instances() (cloudprovider.Instances, bool) { func getLocalIP() ([]v1.NodeAddress, error) { addrs := []v1.NodeAddress{} - ifaces, err := net.Interfaces() if err != nil { glog.Errorf("net.Interfaces() failed for NodeAddresses - %v", err) return nil, err } - for _, i := range ifaces { localAddrs, err := i.Addrs() if err != nil { @@ -486,8 +261,8 @@ func getLocalIP() ([]v1.NodeAddress, error) { if ipnet.IP.To4() != nil { // Filter external IP by MAC address OUIs from vCenter and from ESX var addressType v1.NodeAddressType - if strings.HasPrefix(i.HardwareAddr.String(), MAC_OUI_VC) || - strings.HasPrefix(i.HardwareAddr.String(), MAC_OUI_ESX) { + if strings.HasPrefix(i.HardwareAddr.String(), MacOuiVC) || + strings.HasPrefix(i.HardwareAddr.String(), MacOuiEsx) { v1helper.AddToNodeAddresses(&addrs, v1.NodeAddress{ Type: v1.NodeExternalIP, @@ -508,55 +283,47 @@ func getLocalIP() ([]v1.NodeAddress, error) { return addrs, nil } -// getVMandMO returns the VM object and required field from the VM object -func (vs *VSphere) getVMandMO(ctx context.Context, nodeName k8stypes.NodeName, field string) (vm *object.VirtualMachine, mvm *mo.VirtualMachine, err error) { - // Ensure client is logged in and session is valid - err = vSphereLogin(ctx, vs) +// Get the VM Managed Object instance by from the node +func (vs *VSphere) getVMByName(ctx context.Context, nodeName k8stypes.NodeName) (*vclib.VirtualMachine, error) { + dc, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter) if err != nil { - glog.Errorf("Failed to login into vCenter - %v", err) - return nil, nil, err + return nil, err } - - vm, err = getVirtualMachineByName(ctx, vs.cfg, vs.client, nodeName) + vmPath := vs.cfg.Global.WorkingDir + "/" + nodeNameToVMName(nodeName) + vm, err := dc.GetVMByPath(ctx, vmPath) if err != nil { - if _, ok := err.(*find.NotFoundError); ok { - return nil, nil, cloudprovider.InstanceNotFound - } - return nil, nil, err + return nil, err } - - // Retrieve required field from VM object - var movm mo.VirtualMachine - collector := property.DefaultCollector(vs.client.Client) - err = collector.RetrieveOne(ctx, vm.Reference(), []string{field}, &movm) - if err != nil { - return nil, nil, err - } - - return vm, &movm, nil + return vm, nil } // NodeAddresses is an implementation of Instances.NodeAddresses. 
func (vs *VSphere) NodeAddresses(nodeName k8stypes.NodeName) ([]v1.NodeAddress, error) { + // Get local IP addresses if node is local node if vs.localInstanceID == nodeNameToVMName(nodeName) { - /* Get local IP addresses if node is local node */ return getLocalIP() } - addrs := []v1.NodeAddress{} - // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() - - _, mvm, err := vs.getVMandMO(ctx, nodeName, "guest.net") + // Ensure client is logged in and session is valid + err := vs.conn.Connect(ctx) if err != nil { - glog.Errorf("Failed to getVMandMO for NodeAddresses: err %v", err) - return addrs, err + return nil, err + } + vm, err := vs.getVMByName(ctx, nodeName) + if err != nil { + glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err) + return nil, err + } + vmMoList, err := vm.Datacenter.GetVMMoList(ctx, []*vclib.VirtualMachine{vm}, []string{"guest.net"}) + if err != nil { + glog.Errorf("Failed to get VM Managed object with property guest.net for node: %q. err: +%v", nodeNameToVMName(nodeName), err) + return nil, err } - // retrieve VM's ip(s) - for _, v := range mvm.Guest.Net { + for _, v := range vmMoList[0].Guest.Net { if vs.cfg.Network.PublicNetwork == v.Network { for _, ip := range v.IpAddress { if net.ParseIP(ip).To4() != nil { @@ -584,10 +351,12 @@ func (vs *VSphere) NodeAddressesByProviderID(providerID string) ([]v1.NodeAddres return vs.NodeAddresses(vmNameToNodeName(vmName)) } +// AddSSHKeyToAllInstances add SSH key to all instances func (vs *VSphere) AddSSHKeyToAllInstances(user string, keyData []byte) error { return errors.New("unimplemented") } +// CurrentNodeName gives the current node name func (vs *VSphere) CurrentNodeName(hostname string) (k8stypes.NodeName, error) { return vmNameToNodeName(vs.localInstanceID), nil } @@ -604,59 +373,35 @@ func vmNameToNodeName(vmName string) k8stypes.NodeName { // ExternalID returns the cloud provider ID of the node with the specified Name (deprecated). func (vs *VSphere) ExternalID(nodeName k8stypes.NodeName) (string, error) { - if vs.localInstanceID == nodeNameToVMName(nodeName) { - return vs.cfg.Global.WorkingDir + vs.localInstanceID, nil - } - - // Create context - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - vm, mvm, err := vs.getVMandMO(ctx, nodeName, "summary") - if err != nil { - glog.Errorf("Failed to getVMandMO for ExternalID: err %v", err) - return "", err - } - - if mvm.Summary.Runtime.PowerState == ActivePowerState { - return vm.InventoryPath, nil - } - - if mvm.Summary.Config.Template == false { - glog.Warningf("VM %s, is not in %s state", nodeName, ActivePowerState) - } else { - glog.Warningf("VM %s, is a template", nodeName) - } - - return "", cloudprovider.InstanceNotFound + return vs.InstanceID(nodeName) } // InstanceID returns the cloud provider ID of the node with the specified Name. 
func (vs *VSphere) InstanceID(nodeName k8stypes.NodeName) (string, error) { if vs.localInstanceID == nodeNameToVMName(nodeName) { - return vs.cfg.Global.WorkingDir + vs.localInstanceID, nil + return vs.cfg.Global.WorkingDir + "/" + vs.localInstanceID, nil } - // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() - - vm, mvm, err := vs.getVMandMO(ctx, nodeName, "summary") + // Ensure client is logged in and session is valid + err := vs.conn.Connect(ctx) if err != nil { - glog.Errorf("Failed to getVMandMO for InstanceID: err %v", err) return "", err } - - if mvm.Summary.Runtime.PowerState == ActivePowerState { + vm, err := vs.getVMByName(ctx, nodeName) + if err != nil { + glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err) + return "", err + } + nodeExist, err := vm.Exists(ctx) + if err != nil { + glog.Errorf("Failed to check whether node %q exist. err: %+v.", nodeNameToVMName(nodeName), err) + return "", err + } + if nodeExist { return "/" + vm.InventoryPath, nil } - - if mvm.Summary.Config.Template == false { - glog.Warningf("VM %s, is not in %s state", nodeName, ActivePowerState) - } else { - glog.Warningf("VM %s, is a template", nodeName) - } - return "", cloudprovider.InstanceNotFound } @@ -688,7 +433,6 @@ func (vs *VSphere) LoadBalancer() (cloudprovider.LoadBalancer, bool) { // Zones returns an implementation of Zones for Google vSphere. func (vs *VSphere) Zones() (cloudprovider.Zones, bool) { glog.V(1).Info("The vSphere cloud provider does not support zones") - return nil, false } @@ -702,308 +446,73 @@ func (vs *VSphere) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []st return nameservers, searches } -// Returns vSphere objects virtual machine, virtual device list, datastore and datacenter. -func getVirtualMachineDevices(ctx context.Context, cfg *VSphereConfig, c *govmomi.Client, name string) (*object.VirtualMachine, object.VirtualDeviceList, *object.Datacenter, error) { - // Create a new finder - f := find.NewFinder(c.Client, true) - - // Fetch and set data center - dc, err := f.Datacenter(ctx, cfg.Global.Datacenter) - if err != nil { - return nil, nil, nil, err - } - f.SetDatacenter(dc) - - vmRegex := cfg.Global.WorkingDir + name - - vm, err := f.VirtualMachine(ctx, vmRegex) - if err != nil { - return nil, nil, nil, err - } - - // Get devices from VM - vmDevices, err := vm.Device(ctx) - if err != nil { - return nil, nil, nil, err - } - return vm, vmDevices, dc, nil -} - -// Removes SCSI controller which is latest attached to VM. -func cleanUpController(ctx context.Context, newSCSIController types.BaseVirtualDevice, vmDevices object.VirtualDeviceList, vm *object.VirtualMachine) error { - if newSCSIController == nil || vmDevices == nil || vm == nil { - return nil - } - ctls := vmDevices.SelectByType(newSCSIController) - if len(ctls) < 1 { - return ErrNoDevicesFound - } - newScsi := ctls[len(ctls)-1] - err := vm.RemoveDevice(ctx, true, newScsi) - if err != nil { - return err - } - return nil -} - -// Attaches given virtual disk volume to the compute running kubelet. -func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error) { - attachDiskInternal := func(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskID string, diskUUID string, err error) { - var newSCSIController types.BaseVirtualDevice - +// AttachDisk attaches given virtual disk volume to the compute running kubelet. 
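Illustrative only, a minimal sketch of how a caller consumes the narrowed AttachDisk signature now that diskID is no longer returned; vs is any implementation of the Volumes interface above, and the volume path, policy ID and node name are placeholder values.

func attachDiskExample(vs Volumes) (string, error) {
	// Placeholder inputs; real callers get these from the attach/detach controller.
	vmDiskPath := "[datastore1] kubevols/example-pv.vmdk"
	storagePolicyID := "" // empty means no SPBM profile is applied on attach
	nodeName := k8stypes.NodeName("node-1")

	// The refactored method returns only the disk UUID; the device name that the
	// old three-value signature exposed is now handled inside vclib.
	diskUUID, err := vs.AttachDisk(vmDiskPath, storagePolicyID, nodeName)
	if err != nil {
		return "", err
	}
	return diskUUID, nil
}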
+func (vs *VSphere) AttachDisk(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskUUID string, err error) { + attachDiskInternal := func(vmDiskPath string, storagePolicyID string, nodeName k8stypes.NodeName) (diskUUID string, err error) { + if nodeName == "" { + nodeName = vmNameToNodeName(vs.localInstanceID) + } // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() - // Ensure client is logged in and session is valid - err = vSphereLogin(ctx, vs) + err = vs.conn.Connect(ctx) if err != nil { - glog.Errorf("Failed to login into vCenter - %v", err) - return "", "", err + return "", err } - - // Find virtual machine to attach disk to - var vSphereInstance string - if nodeName == "" { - vSphereInstance = vs.localInstanceID - nodeName = vmNameToNodeName(vSphereInstance) - } else { - vSphereInstance = nodeNameToVMName(nodeName) - } - - // Get VM device list - vm, vmDevices, dc, err := getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance) + vm, err := vs.getVMByName(ctx, nodeName) if err != nil { - return "", "", err + glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err) + return "", err } - - attached, err := checkDiskAttached(vmDiskPath, vmDevices, dc, vs.client) + diskUUID, err = vm.AttachDisk(ctx, vmDiskPath, &vclib.VolumeOptions{SCSIControllerType: vclib.PVSCSIControllerType, StoragePolicyID: storagePolicyID}) if err != nil { - return "", "", err + glog.Errorf("Failed to attach disk: %s for node: %s. err: +%v", vmDiskPath, nodeNameToVMName(nodeName), err) + return "", err } - if attached { - diskID, _ = getVirtualDiskID(vmDiskPath, vmDevices, dc, vs.client) - diskUUID, _ = getVirtualDiskUUIDByPath(vmDiskPath, dc, vs.client) - return diskID, diskUUID, nil - } - - var diskControllerType = vs.cfg.Disk.SCSIControllerType - // find SCSI controller of particular type from VM devices - scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, diskControllerType) - scsiController := getAvailableSCSIController(scsiControllersOfRequiredType) - newSCSICreated := false - if scsiController == nil { - newSCSIController, err = createAndAttachSCSIControllerToVM(ctx, vm, diskControllerType) - if err != nil { - glog.Errorf("Failed to create SCSI controller for VM :%q with err: %+v", vm.Name(), err) - return "", "", err - } - - // Get VM device list - _, vmDevices, _, err := getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance) - if err != nil { - glog.Errorf("cannot get vmDevices for VM err=%s", err) - return "", "", fmt.Errorf("cannot get vmDevices for VM err=%s", err) - } - - scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, diskControllerType) - scsiController := getAvailableSCSIController(scsiControllersOfRequiredType) - if scsiController == nil { - glog.Errorf("cannot find SCSI controller in VM") - // attempt clean up of scsi controller - cleanUpController(ctx, newSCSIController, vmDevices, vm) - return "", "", fmt.Errorf("cannot find SCSI controller in VM") - } - newSCSICreated = true - } - - // Create a new finder - f := find.NewFinder(vs.client.Client, true) - // Set data center - f.SetDatacenter(dc) - - datastorePathObj := new(object.DatastorePath) - isSuccess := datastorePathObj.FromString(vmDiskPath) - if !isSuccess { - glog.Errorf("Failed to parse vmDiskPath: %+q", vmDiskPath) - return "", "", errors.New("Failed to parse vmDiskPath") - } - ds, err := f.Datastore(ctx, datastorePathObj.Datastore) - if err != nil { - glog.Errorf("Failed while 
searching for datastore %+q. err %s", datastorePathObj.Datastore, err) - return "", "", err - } - vmDiskPath = removeClusterFromVDiskPath(vmDiskPath) - disk := vmDevices.CreateDisk(scsiController, ds.Reference(), vmDiskPath) - unitNumber, err := getNextUnitNumber(vmDevices, scsiController) - if err != nil { - glog.Errorf("cannot attach disk to VM, limit reached - %v.", err) - return "", "", err - } - *disk.UnitNumber = unitNumber - - backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) - backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent) - - virtualMachineConfigSpec := types.VirtualMachineConfigSpec{} - deviceConfigSpec := &types.VirtualDeviceConfigSpec{ - Device: disk, - Operation: types.VirtualDeviceConfigSpecOperationAdd, - } - // Configure the disk with the SPBM profile only if ProfileID is not empty. - if storagePolicyID != "" { - profileSpec := &types.VirtualMachineDefinedProfileSpec{ - ProfileId: storagePolicyID, - } - deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, profileSpec) - } - virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec) - requestTime := time.Now() - task, err := vm.Reconfigure(ctx, virtualMachineConfigSpec) - if err != nil { - recordvSphereMetric(api_attachvolume, requestTime, err) - glog.Errorf("Failed to attach the disk with storagePolicy: %+q with err - %v", storagePolicyID, err) - if newSCSICreated { - cleanUpController(ctx, newSCSIController, vmDevices, vm) - } - return "", "", err - } - err = task.Wait(ctx) - recordvSphereMetric(api_attachvolume, requestTime, err) - if err != nil { - glog.Errorf("Failed to attach the disk with storagePolicy: %+q with err - %v", storagePolicyID, err) - if newSCSICreated { - cleanUpController(ctx, newSCSIController, vmDevices, vm) - } - return "", "", err - } - - deviceName, diskUUID, err := getVMDiskInfo(ctx, vm, disk) - if err != nil { - if newSCSICreated { - cleanUpController(ctx, newSCSIController, vmDevices, vm) - } - vs.DetachDisk(deviceName, nodeName) - return "", "", err - } - return deviceName, diskUUID, nil + return diskUUID, nil } requestTime := time.Now() - diskID, diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyID, nodeName) - recordvSphereMetric(operation_attachvolume, requestTime, err) - return diskID, diskUUID, err + diskUUID, err = attachDiskInternal(vmDiskPath, storagePolicyID, nodeName) + vclib.RecordvSphereMetric(vclib.OperationAttachVolume, requestTime, err) + return diskUUID, err } -func getVMDiskInfo(ctx context.Context, vm *object.VirtualMachine, disk *types.VirtualDisk) (string, string, error) { - vmDevices, err := vm.Device(ctx) - if err != nil { - return "", "", err - } - devices := vmDevices.SelectByType(disk) - if len(devices) < 1 { - return "", "", ErrNoDevicesFound - } - - // get new disk id - newDevice := devices[len(devices)-1] - deviceName := devices.Name(newDevice) - - // get device uuid - diskUUID, err := getVirtualDiskUUID(newDevice) - if err != nil { - return "", "", err - } - - return deviceName, diskUUID, nil -} -func getNextUnitNumber(devices object.VirtualDeviceList, c types.BaseVirtualController) (int32, error) { - // get next available SCSI controller unit number - var takenUnitNumbers [SCSIDeviceSlots]bool - takenUnitNumbers[SCSIReservedSlot] = true - key := c.GetVirtualController().Key - - for _, device := range devices { - d := device.GetVirtualDevice() - if d.ControllerKey == key { - if d.UnitNumber != nil { - takenUnitNumbers[*d.UnitNumber] = true - } +// DetachDisk detaches 
given virtual disk volume from the compute running kubelet. +func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error { + detachDiskInternal := func(volPath string, nodeName k8stypes.NodeName) error { + if nodeName == "" { + nodeName = vmNameToNodeName(vs.localInstanceID) } - } - for unitNumber, takenUnitNumber := range takenUnitNumbers { - if !takenUnitNumber { - return int32(unitNumber), nil + // Create context + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Ensure client is logged in and session is valid + err := vs.conn.Connect(ctx) + if err != nil { + return err } - } - return -1, fmt.Errorf("SCSI Controller with key=%d does not have any available slots (LUN).", key) -} - -func getSCSIController(vmDevices object.VirtualDeviceList, scsiType string) *types.VirtualController { - // get virtual scsi controller of passed argument type - for _, device := range vmDevices { - devType := vmDevices.Type(device) - if devType == scsiType { - if c, ok := device.(types.BaseVirtualController); ok { - return c.GetVirtualController() - } + vm, err := vs.getVMByName(ctx, nodeName) + if err != nil { + glog.Errorf("Failed to get VM object for node: %q. err: +%v", nodeNameToVMName(nodeName), err) + return err } - } - return nil -} - -func getSCSIControllersOfType(vmDevices object.VirtualDeviceList, scsiType string) []*types.VirtualController { - // get virtual scsi controllers of passed argument type - var scsiControllers []*types.VirtualController - for _, device := range vmDevices { - devType := vmDevices.Type(device) - if devType == scsiType { - if c, ok := device.(types.BaseVirtualController); ok { - scsiControllers = append(scsiControllers, c.GetVirtualController()) - } + err = vm.DetachDisk(ctx, volPath) + if err != nil { + glog.Errorf("Failed to detach disk: %s for node: %s. err: +%v", volPath, nodeNameToVMName(nodeName), err) + return err } + return nil } - return scsiControllers -} - -func getSCSIControllers(vmDevices object.VirtualDeviceList) []*types.VirtualController { - // get all virtual scsi controllers - var scsiControllers []*types.VirtualController - for _, device := range vmDevices { - devType := vmDevices.Type(device) - switch devType { - case SCSIControllerType, strings.ToLower(LSILogicControllerType), strings.ToLower(BusLogicControllerType), PVSCSIControllerType, strings.ToLower(LSILogicSASControllerType): - if c, ok := device.(types.BaseVirtualController); ok { - scsiControllers = append(scsiControllers, c.GetVirtualController()) - } - } - } - return scsiControllers -} - -func getAvailableSCSIController(scsiControllers []*types.VirtualController) *types.VirtualController { - // get SCSI controller which has space for adding more devices - for _, controller := range scsiControllers { - if len(controller.Device) < SCSIControllerDeviceLimit { - return controller - } - } - return nil + requestTime := time.Now() + err := detachDiskInternal(volPath, nodeName) + vclib.RecordvSphereMetric(vclib.OperationDetachVolume, requestTime, nil) + return err } // DiskIsAttached returns if disk is attached to the VM using controllers supported by the plugin. 
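A hedged sketch of the detach path for review purposes: detach a volume, then confirm via DiskIsAttached that it is gone. volPath and nodeName are whatever the caller already tracks; nothing here is specific to this change beyond the two method signatures.

func detachAndVerifyExample(vs Volumes, volPath string, nodeName k8stypes.NodeName) error {
	// Detach first; an empty nodeName would mean the local instance.
	if err := vs.DetachDisk(volPath, nodeName); err != nil {
		return err
	}
	// Double-check that the disk really is no longer attached.
	stillAttached, err := vs.DiskIsAttached(volPath, nodeName)
	if err != nil {
		return err
	}
	if stillAttached {
		return fmt.Errorf("volume %q is still attached to node %q after detach", volPath, nodeName)
	}
	return nil
}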
func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (bool, error) { diskIsAttachedInternal := func(volPath string, nodeName k8stypes.NodeName) (bool, error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Ensure client is logged in and session is valid - err := vSphereLogin(ctx, vs) - if err != nil { - glog.Errorf("Failed to login into vCenter - %v", err) - return false, err - } - - // Find VM to detach disk from var vSphereInstance string if nodeName == "" { vSphereInstance = vs.localInstanceID @@ -1011,52 +520,53 @@ func (vs *VSphere) DiskIsAttached(volPath string, nodeName k8stypes.NodeName) (b } else { vSphereInstance = nodeNameToVMName(nodeName) } - - nodeExist, err := vs.NodeExists(nodeName) + // Create context + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Ensure client is logged in and session is valid + err := vs.conn.Connect(ctx) if err != nil { - glog.Errorf("Failed to check whether node exist. err: %s.", err) return false, err } - + vm, err := vs.getVMByName(ctx, nodeName) + if err != nil { + glog.Errorf("Failed to get VM object for node: %q. err: +%v", vSphereInstance, err) + return false, err + } + nodeExist, err := vm.Exists(ctx) + if err != nil { + glog.Errorf("Failed to check whether node %q exist. err: %+v", vSphereInstance, err) + return false, err + } if !nodeExist { - glog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached: node %q does not exist", + glog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached: node %q is powered off", volPath, vSphereInstance) - return false, fmt.Errorf("DiskIsAttached failed to determine whether disk %q is still attached: node %q does not exist", + return false, fmt.Errorf("DiskIsAttached failed to determine whether disk %q is still attached: node %q is powered off", volPath, vSphereInstance) } - - // Get VM device list - _, vmDevices, dc, err := getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance) + attached, err := vm.IsDiskAttached(ctx, volPath) if err != nil { - glog.Errorf("Failed to get VM devices for VM %#q. err: %s", vSphereInstance, err) - return false, err + glog.Errorf("DiskIsAttached failed to determine whether disk %q is still attached on node %q", + volPath, + vSphereInstance) } - - attached, err := checkDiskAttached(volPath, vmDevices, dc, vs.client) return attached, err } requestTime := time.Now() isAttached, err := diskIsAttachedInternal(volPath, nodeName) - recordvSphereMetric(operation_diskIsAttached, requestTime, err) + vclib.RecordvSphereMetric(vclib.OperationDiskIsAttached, requestTime, err) return isAttached, err } // DisksAreAttached returns if disks are attached to the VM using controllers supported by the plugin. 
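Similarly, a minimal usage sketch for the batch variant; the volume paths are assumed placeholders and the logging mirrors the style already used in this file.

func verifyAttachedExample(vs Volumes, volPaths []string, nodeName k8stypes.NodeName) error {
	attached, err := vs.DisksAreAttached(volPaths, nodeName)
	if err != nil {
		return err
	}
	for path, ok := range attached {
		if !ok {
			// A real caller (the attach/detach controller) would mark this volume
			// as detached and let the reconciler re-attach it.
			glog.V(4).Infof("volume %q is not attached to node %q", path, nodeName)
		}
	}
	return nil
}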
func (vs *VSphere) DisksAreAttached(volPaths []string, nodeName k8stypes.NodeName) (map[string]bool, error) { disksAreAttachedInternal := func(volPaths []string, nodeName k8stypes.NodeName) (map[string]bool, error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Create vSphere client - err := vSphereLogin(ctx, vs) - if err != nil { - glog.Errorf("Failed to login into vCenter, err: %v", err) - return nil, err + attached := make(map[string]bool) + if len(volPaths) == 0 { + return attached, nil } - - // Find VM to detach disk from var vSphereInstance string if nodeName == "" { vSphereInstance = vs.localInstanceID @@ -1064,14 +574,24 @@ func (vs *VSphere) DisksAreAttached(volPaths []string, nodeName k8stypes.NodeNam } else { vSphereInstance = nodeNameToVMName(nodeName) } - - nodeExist, err := vs.NodeExists(nodeName) - + // Create context + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + // Ensure client is logged in and session is valid + err := vs.conn.Connect(ctx) if err != nil { - glog.Errorf("Failed to check whether node exist. err: %s.", err) return nil, err } - + vm, err := vs.getVMByName(ctx, nodeName) + if err != nil { + glog.Errorf("Failed to get VM object for node: %q. err: +%v", vSphereInstance, err) + return nil, err + } + nodeExist, err := vm.Exists(ctx) + if err != nil { + glog.Errorf("Failed to check whether node %q exist. err: %+v", vSphereInstance, err) + return nil, err + } if !nodeExist { glog.Errorf("DisksAreAttached failed to determine whether disks %v are still attached: node %q does not exist", volPaths, @@ -1080,17 +600,8 @@ func (vs *VSphere) DisksAreAttached(volPaths []string, nodeName k8stypes.NodeNam volPaths, vSphereInstance) } - - // Get VM device list - _, vmDevices, dc, err := getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance) - if err != nil { - glog.Errorf("Failed to get VM devices for VM %#q. err: %s", vSphereInstance, err) - return nil, err - } - - attached := make(map[string]bool) for _, volPath := range volPaths { - result, err := checkDiskAttached(volPath, vmDevices, dc, vs.client) + result, err := vm.IsDiskAttached(ctx, volPath) if err == nil { if result { attached[volPath] = true @@ -1098,6 +609,10 @@ func (vs *VSphere) DisksAreAttached(volPaths []string, nodeName k8stypes.NodeNam attached[volPath] = false } } else { + glog.Errorf("DisksAreAttached failed to determine whether disk %q from volPaths %+v is still attached on node %q", + volPath, + volPaths, + vSphereInstance) return nil, err } } @@ -1105,829 +620,130 @@ func (vs *VSphere) DisksAreAttached(volPaths []string, nodeName k8stypes.NodeNam } requestTime := time.Now() attached, err := disksAreAttachedInternal(volPaths, nodeName) - recordvSphereMetric(operation_disksAreAttached, requestTime, err) + vclib.RecordvSphereMetric(vclib.OperationDisksAreAttached, requestTime, err) return attached, err } -func checkDiskAttached(volPath string, vmdevices object.VirtualDeviceList, dc *object.Datacenter, client *govmomi.Client) (bool, error) { - _, err := getVirtualDiskControllerKey(volPath, vmdevices, dc, client) - if err != nil { - if err == ErrNoDevicesFound { - return false, nil - } - glog.Errorf("Failed to check whether disk is attached. err: %s", err) - return false, err - } - return true, nil -} - -// Returns the object key that denotes the controller object to which vmdk is attached. 
-func getVirtualDiskControllerKey(volPath string, vmDevices object.VirtualDeviceList, dc *object.Datacenter, client *govmomi.Client) (int32, error) { - volPath = removeClusterFromVDiskPath(volPath) - volumeUUID, err := getVirtualDiskUUIDByPath(volPath, dc, client) - - if err != nil { - glog.Errorf("disk uuid not found for %v. err: %s", volPath, err) - return -1, err - } - - // filter vm devices to retrieve disk ID for the given vmdk file - for _, device := range vmDevices { - if vmDevices.TypeName(device) == "VirtualDisk" { - diskUUID, _ := getVirtualDiskUUID(device) - if diskUUID == volumeUUID { - return device.GetVirtualDevice().ControllerKey, nil - } - } - } - return -1, ErrNoDevicesFound -} - -// Returns key of the controller. -// Key is unique id that distinguishes one device from other devices in the same virtual machine. -func getControllerKey(scsiType string, vmDevices object.VirtualDeviceList) (int32, error) { - for _, device := range vmDevices { - devType := vmDevices.Type(device) - if devType == scsiType { - if c, ok := device.(types.BaseVirtualController); ok { - return c.GetVirtualController().Key, nil - } - } - } - return -1, ErrNoDevicesFound -} - -// Returns formatted UUID for a virtual disk device. -func getVirtualDiskUUID(newDevice types.BaseVirtualDevice) (string, error) { - vd := newDevice.GetVirtualDevice() - - if b, ok := vd.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok { - uuid := formatVirtualDiskUUID(b.Uuid) - return uuid, nil - } - return "", ErrNoDiskUUIDFound -} - -func formatVirtualDiskUUID(uuid string) string { - uuidwithNoSpace := strings.Replace(uuid, " ", "", -1) - uuidWithNoHypens := strings.Replace(uuidwithNoSpace, "-", "", -1) - return strings.ToLower(uuidWithNoHypens) -} - -// Gets virtual disk UUID by datastore (namespace) path -// -// volPath can be namespace path (e.g. "[vsanDatastore] volumes/test.vmdk") or -// uuid path (e.g. "[vsanDatastore] 59427457-6c5a-a917-7997-0200103eedbc/test.vmdk"). -// `volumes` in this case would be a symlink to -// `59427457-6c5a-a917-7997-0200103eedbc`. -// -// We want users to use namespace path. It is good for attaching the disk, -// but for detaching the API requires uuid path. Hence, to detach the right -// device we have to convert the namespace path to uuid path. -func getVirtualDiskUUIDByPath(volPath string, dc *object.Datacenter, client *govmomi.Client) (string, error) { - if len(volPath) > 0 && filepath.Ext(volPath) != ".vmdk" { - volPath += ".vmdk" - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // VirtualDiskManager provides a way to manage and manipulate virtual disks on vmware datastores. - vdm := object.NewVirtualDiskManager(client.Client) - // Returns uuid of vmdk virtual disk - diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, volPath, dc) - - if err != nil { - return "", ErrNoDiskUUIDFound - } - - diskUUID = formatVirtualDiskUUID(diskUUID) - - return diskUUID, nil -} - -// Returns a device id which is internal vSphere API identifier for the attached virtual disk. 
-func getVirtualDiskID(volPath string, vmDevices object.VirtualDeviceList, dc *object.Datacenter, client *govmomi.Client) (string, error) { - volumeUUID, err := getVirtualDiskUUIDByPath(volPath, dc, client) - - if err != nil { - glog.Warningf("disk uuid not found for %v ", volPath) - return "", err - } - - // filter vm devices to retrieve disk ID for the given vmdk file - for _, device := range vmDevices { - if vmDevices.TypeName(device) == "VirtualDisk" { - diskUUID, _ := getVirtualDiskUUID(device) - if diskUUID == volumeUUID { - return vmDevices.Name(device), nil - } - } - } - return "", ErrNoDiskIDFound -} - -// DetachDisk detaches given virtual disk volume from the compute running kubelet. -func (vs *VSphere) DetachDisk(volPath string, nodeName k8stypes.NodeName) error { - detachDiskInternal := func(volPath string, nodeName k8stypes.NodeName) error { - // Create context - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Ensure client is logged in and session is valid - err := vSphereLogin(ctx, vs) - if err != nil { - glog.Errorf("Failed to login into vCenter - %v", err) - return err - } - - // Find virtual machine to attach disk to - var vSphereInstance string - if nodeName == "" { - vSphereInstance = vs.localInstanceID - nodeName = vmNameToNodeName(vSphereInstance) - } else { - vSphereInstance = nodeNameToVMName(nodeName) - } - - vm, vmDevices, dc, err := getVirtualMachineDevices(ctx, vs.cfg, vs.client, vSphereInstance) - - if err != nil { - return err - } - volPath = removeClusterFromVDiskPath(volPath) - diskID, err := getVirtualDiskID(volPath, vmDevices, dc, vs.client) - if err != nil { - glog.Warningf("disk ID not found for %v ", volPath) - return err - } - - // Gets virtual disk device - device := vmDevices.Find(diskID) - if device == nil { - return fmt.Errorf("device '%s' not found", diskID) - } - - // Detach disk from VM - requestTime := time.Now() - err = vm.RemoveDevice(ctx, true, device) - recordvSphereMetric(api_detachvolume, requestTime, err) - if err != nil { - return err - } - return nil - } - requestTime := time.Now() - err := detachDiskInternal(volPath, nodeName) - recordvSphereMetric(operation_detachvolume, requestTime, nil) - return err -} - -// CreateVolume creates a volume of given size (in KiB). -func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions) (volumePath string, err error) { - createVolumeInternal := func(volumeOptions *VolumeOptions) (volumePath string, err error) { +// CreateVolume creates a volume of given size (in KiB) and return the volume path. +// If the volumeOptions.Datastore is part of datastore cluster for example - [DatastoreCluster/sharedVmfs-0] then +// return value will be [DatastoreCluster/sharedVmfs-0] kubevols/.vmdk +// else return value will be [sharedVmfs-0] kubevols/.vmdk +func (vs *VSphere) CreateVolume(volumeOptions *vclib.VolumeOptions) (volumePath string, err error) { + glog.V(1).Infof("Starting to create a vSphere volume with volumeOptions: %+v", volumeOptions) + createVolumeInternal := func(volumeOptions *vclib.VolumeOptions) (volumePath string, err error) { var datastore string - var destVolPath string - - // Default datastore is the datastore in the vSphere config file that is used initialize vSphere cloud provider. + // Default datastore is the datastore in the vSphere config file that is used to initialize vSphere cloud provider. 
if volumeOptions.Datastore == "" { datastore = vs.cfg.Global.Datastore } else { datastore = volumeOptions.Datastore } - - // Default diskformat as 'thin' - if volumeOptions.DiskFormat == "" { - volumeOptions.DiskFormat = ThinDiskType - } - - if _, ok := diskFormatValidType[volumeOptions.DiskFormat]; !ok { - return "", fmt.Errorf("Cannot create disk. Error diskformat %+q."+ - " Valid options are %s.", volumeOptions.DiskFormat, DiskformatValidOptions) - } - // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() - // Ensure client is logged in and session is valid - err = vSphereLogin(ctx, vs) + err = vs.conn.Connect(ctx) if err != nil { - glog.Errorf("Failed to login into vCenter - %v", err) return "", err } - - // Create a new finder - f := find.NewFinder(vs.client.Client, true) - - // Fetch and set data center - dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter) - f.SetDatacenter(dc) - - if volumeOptions.StoragePolicyName != "" { - // Get the pbm client - pbmClient, err := pbm.NewClient(ctx, vs.client.Client) - if err != nil { - return "", err - } - volumeOptions.StoragePolicyID, err = pbmClient.ProfileIDByName(ctx, volumeOptions.StoragePolicyName) - if err != nil { - recordvSphereMetric(operation_createvolume_with_policy, time.Time{}, err) - return "", err - } - - compatibilityResult, err := vs.GetPlacementCompatibilityResult(ctx, pbmClient, volumeOptions.StoragePolicyID) - if err != nil { - return "", err - } - if len(compatibilityResult) < 1 { - return "", fmt.Errorf("There are no compatible datastores that satisfy the storage policy: %+q requirements", volumeOptions.StoragePolicyID) - } - - if volumeOptions.Datastore != "" { - ok, nonCompatibleDsref := vs.IsUserSpecifiedDatastoreNonCompatible(ctx, compatibilityResult, volumeOptions.Datastore) - if ok { - faultMsg := GetNonCompatibleDatastoreFaultMsg(compatibilityResult, *nonCompatibleDsref) - return "", fmt.Errorf("User specified datastore: %q is not compatible with the storagePolicy: %q. Failed with faults: %+q", volumeOptions.Datastore, volumeOptions.StoragePolicyName, faultMsg) - } - } else { - dsMoList, err := vs.GetCompatibleDatastoresMo(ctx, compatibilityResult) - if err != nil { - recordvSphereMetric(operation_createvolume_with_raw_vsan_policy, time.Time{}, err) - return "", err - } - dsMo := GetMostFreeDatastore(dsMoList) - datastore = dsMo.Info.GetDatastoreInfo().Name - } - } - ds, err := f.Datastore(ctx, datastore) + dc, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter) if err != nil { - glog.Errorf("Failed while searching for datastore %+q. err %s", datastore, err) return "", err } - - if volumeOptions.VSANStorageProfileData != "" { - // Check if the datastore is VSAN if any capability requirements are specified. - // VSphere cloud provider now only supports VSAN capabilities requirements - ok, err := checkIfDatastoreTypeIsVSAN(vs.client, ds) - if err != nil { - return "", fmt.Errorf("Failed while determining whether the datastore: %q"+ - " is VSAN or not.", datastore) - } - if !ok { - return "", fmt.Errorf("The specified datastore: %q is not a VSAN datastore."+ - " The policy parameters will work only with VSAN Datastore."+ - " So, please specify a valid VSAN datastore in Storage class definition.", datastore) - } - } - // Create a disk with the VSAN storage capabilities specified in the volumeOptions.VSANStorageProfileData. - // This is achieved by following steps: - // 1. Create dummy VM if not already present. - // 2. 
Add a new disk to the VM by performing VM reconfigure. - // 3. Detach the new disk from the dummy VM. - // 4. Delete the dummy VM. + var vmOptions *vclib.VMOptions if volumeOptions.VSANStorageProfileData != "" || volumeOptions.StoragePolicyName != "" { // Acquire a read lock to ensure multiple PVC requests can be processed simultaneously. cleanUpDummyVMLock.RLock() defer cleanUpDummyVMLock.RUnlock() - // Create a new background routine that will delete any dummy VM's that are left stale. // This routine will get executed for every 5 minutes and gets initiated only once in its entire lifetime. cleanUpRoutineInitLock.Lock() if !cleanUpRoutineInitialized { + glog.V(1).Infof("Starting a clean up routine to remove stale dummy VM's") go vs.cleanUpDummyVMs(DummyVMPrefixName) cleanUpRoutineInitialized = true } cleanUpRoutineInitLock.Unlock() - - // Check if the VM exists in kubernetes cluster folder. - // The kubernetes cluster folder - vs.cfg.Global.WorkingDir is where all the nodes in the kubernetes cluster are created. - dummyVMFullName := DummyVMPrefixName + "-" + volumeOptions.Name - vmRegex := vs.cfg.Global.WorkingDir + dummyVMFullName - dummyVM, err := f.VirtualMachine(ctx, vmRegex) + vmOptions, err = vs.setVMOptions(ctx, dc) if err != nil { - // 1. Create a dummy VM and return the VM reference. - dummyVM, err = vs.createDummyVM(ctx, dc, ds, dummyVMFullName) - if err != nil { - return "", err - } - } - - // 2. Reconfigure the VM to attach the disk with the VSAN policy configured. - vmDiskPath, err := vs.createVirtualDiskWithPolicy(ctx, dc, ds, dummyVM, volumeOptions) - fileAlreadyExist := false - if err != nil { - vmDiskPath = filepath.Clean(ds.Path(VolDir)) + "/" + volumeOptions.Name + ".vmdk" - errorMessage := fmt.Sprintf("Cannot complete the operation because the file or folder %s already exists", vmDiskPath) - if errorMessage == err.Error() { - //Skip error and continue to detach the disk as the disk was already created on the datastore. - fileAlreadyExist = true - glog.V(1).Infof("File: %v already exists", vmDiskPath) - } else { - glog.Errorf("Failed to attach the disk to VM: %q with err: %+v", dummyVMFullName, err) - return "", err - } - } - - dummyVMNodeName := vmNameToNodeName(dummyVMFullName) - // 3. Detach the disk from the dummy VM. - err = vs.DetachDisk(vmDiskPath, dummyVMNodeName) - if err != nil { - if DiskNotFoundErrMsg == err.Error() && fileAlreadyExist { - // Skip error if disk was already detached from the dummy VM but still present on the datastore. - glog.V(1).Infof("File: %v is already detached", vmDiskPath) - } else { - glog.Errorf("Failed to detach the disk: %q from VM: %q with err: %+v", vmDiskPath, dummyVMFullName, err) - return "", fmt.Errorf("Failed to create the volume: %q with err: %+v", volumeOptions.Name, err) - } - } - - // 4. Delete the dummy VM - err = deleteVM(ctx, dummyVM) - if err != nil { - return "", fmt.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err) - } - destVolPath = vmDiskPath - } else { - // Create a virtual disk directly if no VSAN storage capabilities are specified by the user. - destVolPath, err = createVirtualDisk(ctx, vs.client, dc, ds, volumeOptions) - if err != nil { - return "", fmt.Errorf("Failed to create the virtual disk having name: %+q with err: %+v", destVolPath, err) + glog.Errorf("Failed to set VM options requires to create a vsphere volume. 
err: %+v", err) + return "", err } } - + if volumeOptions.StoragePolicyName != "" && volumeOptions.Datastore == "" { + datastore, err = getPbmCompatibleDatastore(ctx, dc.Client(), volumeOptions.StoragePolicyName, vmOptions.VMFolder) + if err != nil { + glog.Errorf("Failed to get pbm compatible datastore with storagePolicy: %s. err: %+v", volumeOptions.StoragePolicyName, err) + return "", err + } + } + ds, err := dc.GetDatastoreByName(ctx, datastore) + if err != nil { + return "", err + } + volumeOptions.Datastore = datastore + kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/" + err = ds.CreateDirectory(ctx, kubeVolsPath, false) + if err != nil && err != vclib.ErrFileAlreadyExist { + glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err) + return "", err + } + volumePath = kubeVolsPath + volumeOptions.Name + ".vmdk" + disk := diskmanagers.VirtualDisk{ + DiskPath: volumePath, + VolumeOptions: volumeOptions, + VMOptions: vmOptions, + } + err = disk.Create(ctx, ds) + if err != nil { + glog.Errorf("Failed to create a vsphere volume with volumeOptions: %+v on datastore: %s. err: %+v", volumeOptions, datastore, err) + return "", err + } if filepath.Base(datastore) != datastore { - // If Datastore is within cluster, add cluster path to the destVolPath - destVolPath = strings.Replace(destVolPath, filepath.Base(datastore), datastore, 1) + // If datastore is within cluster, add cluster path to the volumePath + volumePath = strings.Replace(volumePath, filepath.Base(datastore), datastore, 1) } - glog.V(1).Infof("VM Disk path is %+q", destVolPath) - return destVolPath, nil + return volumePath, nil } requestTime := time.Now() volumePath, err = createVolumeInternal(volumeOptions) - recordCreateVolumeMetric(volumeOptions, requestTime, err) - if err != nil { - return "", err - } - return volumePath, nil + vclib.RecordCreateVolumeMetric(volumeOptions, requestTime, err) + return volumePath, err } // DeleteVolume deletes a volume given volume name. -// Also, deletes the folder where the volume resides. func (vs *VSphere) DeleteVolume(vmDiskPath string) error { + glog.V(1).Infof("Starting to delete vSphere volume with vmDiskPath: %s", vmDiskPath) deleteVolumeInternal := func(vmDiskPath string) error { // Create context ctx, cancel := context.WithCancel(context.Background()) defer cancel() - // Ensure client is logged in and session is valid - err := vSphereLogin(ctx, vs) + err := vs.conn.Connect(ctx) if err != nil { - glog.Errorf("Failed to login into vCenter - %v", err) return err } - - // Create a new finder - f := find.NewFinder(vs.client.Client, true) - - // Fetch and set data center - dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter) - f.SetDatacenter(dc) - - // Create a virtual disk manager - virtualDiskManager := object.NewVirtualDiskManager(vs.client.Client) - - if filepath.Ext(vmDiskPath) != ".vmdk" { - vmDiskPath += ".vmdk" - } - - // Get the vmDisk Name - diskNameWithExt := path.Base(vmDiskPath) - diskName := strings.TrimSuffix(diskNameWithExt, filepath.Ext(diskNameWithExt)) - - // Search for the dummyVM if present and delete it. 
- dummyVMFullName := DummyVMPrefixName + "-" + diskName - vmRegex := vs.cfg.Global.WorkingDir + dummyVMFullName - dummyVM, err := f.VirtualMachine(ctx, vmRegex) - if err == nil { - err = deleteVM(ctx, dummyVM) - if err != nil { - return fmt.Errorf("Failed to destroy the vm: %q with err: %+v", dummyVMFullName, err) - } - } - - // Delete virtual disk - vmDiskPath = removeClusterFromVDiskPath(vmDiskPath) - requestTime := time.Now() - task, err := virtualDiskManager.DeleteVirtualDisk(ctx, vmDiskPath, dc) + dc, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter) if err != nil { - recordvSphereMetric(api_deletevolume, requestTime, err) return err } - err = task.Wait(ctx) - recordvSphereMetric(api_deletevolume, requestTime, err) + ds, err := dc.GetDatastoreByName(ctx, vs.cfg.Global.Datastore) + if err != nil { + return err + } + disk := diskmanagers.VirtualDisk{ + DiskPath: vmDiskPath, + VolumeOptions: &vclib.VolumeOptions{}, + VMOptions: &vclib.VMOptions{}, + } + err = disk.Delete(ctx, ds) + if err != nil { + glog.Errorf("Failed to delete vsphere volume with vmDiskPath: %s. err: %+v", vmDiskPath, err) + } return err } requestTime := time.Now() err := deleteVolumeInternal(vmDiskPath) - recordvSphereMetric(operation_deletevolume, requestTime, err) + vclib.RecordvSphereMetric(vclib.OperationDeleteVolume, requestTime, err) return err } - -// NodeExists checks if the node with given nodeName exist. -// Returns false if VM doesn't exist or VM is in powerOff state. -func (vs *VSphere) NodeExists(nodeName k8stypes.NodeName) (bool, error) { - if nodeName == "" { - return false, nil - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, mvm, err := vs.getVMandMO(ctx, nodeName, "summary") - if err != nil { - glog.Errorf("Failed to getVMandMO for NodeExists: err %v", err) - return false, err - } - - if mvm.Summary.Runtime.PowerState == ActivePowerState { - return true, nil - } - - if mvm.Summary.Config.Template == false { - glog.Warningf("VM %s, is not in %s state", nodeName, ActivePowerState) - } else { - glog.Warningf("VM %s, is a template", nodeName) - } - - return false, nil -} - -// A background routine which will be responsible for deleting stale dummy VM's. -func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) { - // Create context - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - for { - time.Sleep(CleanUpDummyVMRoutine_Interval * time.Minute) - // Ensure client is logged in and session is valid - err := vSphereLogin(ctx, vs) - if err != nil { - glog.V(4).Infof("[cleanUpDummyVMs] Unable to login to vSphere with err: %+v", err) - continue - } - - // Create a new finder - f := find.NewFinder(vs.client.Client, true) - - // Fetch and set data center - dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter) - if err != nil { - glog.V(4).Infof("[cleanUpDummyVMs] Unable to fetch the datacenter: %q with err: %+v", vs.cfg.Global.Datacenter, err) - continue - } - f.SetDatacenter(dc) - - // Get the folder reference for global working directory where the dummy VM needs to be created. - vmFolder, err := f.Folder(ctx, strings.TrimSuffix(vs.cfg.Global.WorkingDir, "/")) - if err != nil { - glog.V(4).Infof("[cleanUpDummyVMs] Unable to get the kubernetes folder: %q reference with err: %+v", vs.cfg.Global.WorkingDir, err) - continue - } - - // A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests. 
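The comment above describes the contract between the provisioning path and the clean-up routine: CreateVolume holds cleanUpDummyVMLock for reading while its dummy VM may exist, and the janitor takes the same lock for writing before destroying anything carrying the DummyVMPrefixName prefix. A minimal, self-contained sketch of that sync.RWMutex pattern, with hypothetical stand-ins for the two sides:

package main

import (
	"fmt"
	"sync"
	"time"
)

var dummyVMLock sync.RWMutex

// createWithDummyVM stands in for the volume-create path: the read lock is held
// for as long as the request's temporary VM may exist, so many requests can run
// concurrently while the clean-up routine is kept out.
func createWithDummyVM(name string) {
	dummyVMLock.RLock()
	defer dummyVMLock.RUnlock()
	fmt.Printf("creating %s via a dummy VM\n", name)
	time.Sleep(10 * time.Millisecond) // create the disk, detach it, delete the dummy VM
}

// cleanUpStaleDummyVMs stands in for the background routine: the write lock
// guarantees no in-flight create still owns a dummy VM when stale ones are removed.
func cleanUpStaleDummyVMs() {
	dummyVMLock.Lock()
	defer dummyVMLock.Unlock()
	fmt.Println("removing stale dummy VMs")
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			createWithDummyVM(fmt.Sprintf("pvc-%d", i))
		}(i)
	}
	wg.Wait()
	cleanUpStaleDummyVMs()
}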
- cleanUpDummyVMLock.Lock() - vmMoList, err := vs.GetVMsInsideFolder(ctx, vmFolder, []string{NameProperty}) - if err != nil { - glog.V(4).Infof("[cleanUpDummyVMs] Unable to get VM list in the kubernetes cluster: %q reference with err: %+v", vs.cfg.Global.WorkingDir, err) - cleanUpDummyVMLock.Unlock() - continue - } - var dummyVMRefList []*object.VirtualMachine - for _, vmMo := range vmMoList { - if strings.HasPrefix(vmMo.Name, dummyVMPrefix) { - dummyVMRefList = append(dummyVMRefList, object.NewVirtualMachine(vs.client.Client, vmMo.Reference())) - } - } - - for _, dummyVMRef := range dummyVMRefList { - err = deleteVM(ctx, dummyVMRef) - if err != nil { - glog.V(4).Infof("[cleanUpDummyVMs] Unable to delete dummy VM: %q with err: %+v", dummyVMRef.Name(), err) - continue - } - } - cleanUpDummyVMLock.Unlock() - } -} - -func (vs *VSphere) createDummyVM(ctx context.Context, datacenter *object.Datacenter, datastore *object.Datastore, vmName string) (*object.VirtualMachine, error) { - // Create a virtual machine config spec with 1 SCSI adapter. - virtualMachineConfigSpec := types.VirtualMachineConfigSpec{ - Name: vmName, - Files: &types.VirtualMachineFileInfo{ - VmPathName: "[" + datastore.Name() + "]", - }, - NumCPUs: 1, - MemoryMB: 4, - DeviceChange: []types.BaseVirtualDeviceConfigSpec{ - &types.VirtualDeviceConfigSpec{ - Operation: types.VirtualDeviceConfigSpecOperationAdd, - Device: &types.ParaVirtualSCSIController{ - VirtualSCSIController: types.VirtualSCSIController{ - SharedBus: types.VirtualSCSISharingNoSharing, - VirtualController: types.VirtualController{ - BusNumber: 0, - VirtualDevice: types.VirtualDevice{ - Key: 1000, - }, - }, - }, - }, - }, - }, - } - - // Get the resource pool for current node. This is where dummy VM will be created. - resourcePool, err := vs.getCurrentNodeResourcePool(ctx, datacenter) - if err != nil { - return nil, err - } - // Get the folder reference for global working directory where the dummy VM needs to be created. - f := find.NewFinder(vs.client.Client, true) - dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter) - f.SetDatacenter(dc) - vmFolder, err := f.Folder(ctx, strings.TrimSuffix(vs.cfg.Global.WorkingDir, "/")) - if err != nil { - return nil, fmt.Errorf("Failed to get the folder reference for %q with err: %+v", vs.cfg.Global.WorkingDir, err) - } - task, err := vmFolder.CreateVM(ctx, virtualMachineConfigSpec, resourcePool, nil) - if err != nil { - return nil, err - } - - dummyVMTaskInfo, err := task.WaitForResult(ctx, nil) - if err != nil { - return nil, err - } - - vmRef := dummyVMTaskInfo.Result.(object.Reference) - dummyVM := object.NewVirtualMachine(vs.client.Client, vmRef.Reference()) - return dummyVM, nil -} - -func (vs *VSphere) getCurrentNodeResourcePool(ctx context.Context, datacenter *object.Datacenter) (*object.ResourcePool, error) { - // Create a new finder - f := find.NewFinder(vs.client.Client, true) - f.SetDatacenter(datacenter) - - vmRegex := vs.cfg.Global.WorkingDir + vs.localInstanceID - currentVM, err := f.VirtualMachine(ctx, vmRegex) - if err != nil { - return nil, err - } - - currentVMHost, err := currentVM.HostSystem(ctx) - if err != nil { - return nil, err - } - - // Get the resource pool for the current node. - // We create the dummy VM in the same resource pool as current node. - resourcePool, err := currentVMHost.ResourcePool(ctx) - if err != nil { - return nil, err - } - - return resourcePool, nil -} - -// Creates a virtual disk with the policy configured to the disk. 
-// A call to this function is made only when a user specifies VSAN storage capabilties in the storage class definition. -func (vs *VSphere) createVirtualDiskWithPolicy(ctx context.Context, datacenter *object.Datacenter, datastore *object.Datastore, virtualMachine *object.VirtualMachine, volumeOptions *VolumeOptions) (string, error) { - var diskFormat string - diskFormat = diskFormatValidType[volumeOptions.DiskFormat] - - vmDevices, err := virtualMachine.Device(ctx) - if err != nil { - return "", err - } - var diskControllerType = vs.cfg.Disk.SCSIControllerType - // find SCSI controller of particular type from VM devices - scsiControllersOfRequiredType := getSCSIControllersOfType(vmDevices, diskControllerType) - scsiController := scsiControllersOfRequiredType[0] - - kubeVolsPath := filepath.Clean(datastore.Path(VolDir)) + "/" - // Create a kubevols directory in the datastore if one doesn't exist. - err = makeDirectoryInDatastore(vs.client, datacenter, kubeVolsPath, false) - if err != nil && err != ErrFileAlreadyExist { - glog.Errorf("Cannot create dir %#v. err %s", kubeVolsPath, err) - return "", err - } - - glog.V(4).Infof("Created dir with path as %+q", kubeVolsPath) - - vmDiskPath := kubeVolsPath + volumeOptions.Name + ".vmdk" - disk := vmDevices.CreateDisk(scsiController, datastore.Reference(), vmDiskPath) - unitNumber, err := getNextUnitNumber(vmDevices, scsiController) - if err != nil { - glog.Errorf("cannot attach disk to VM, limit reached - %v.", err) - return "", err - } - *disk.UnitNumber = unitNumber - disk.CapacityInKB = int64(volumeOptions.CapacityKB) - - backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) - backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent) - - switch diskFormat { - case ThinDiskType: - backing.ThinProvisioned = types.NewBool(true) - case EagerZeroedThickDiskType: - backing.EagerlyScrub = types.NewBool(true) - default: - backing.ThinProvisioned = types.NewBool(false) - } - - // Reconfigure VM - virtualMachineConfigSpec := types.VirtualMachineConfigSpec{} - deviceConfigSpec := &types.VirtualDeviceConfigSpec{ - Device: disk, - Operation: types.VirtualDeviceConfigSpecOperationAdd, - FileOperation: types.VirtualDeviceConfigSpecFileOperationCreate, - } - - storageProfileSpec := &types.VirtualMachineDefinedProfileSpec{} - // Is PBM storage policy ID is present, set the storage spec profile ID, - // else, set raw the VSAN policy string. - if volumeOptions.StoragePolicyID != "" { - storageProfileSpec.ProfileId = volumeOptions.StoragePolicyID - } else if volumeOptions.VSANStorageProfileData != "" { - storageProfileSpec.ProfileId = "" - storageProfileSpec.ProfileData = &types.VirtualMachineProfileRawData{ - ExtensionKey: "com.vmware.vim.sps", - ObjectData: volumeOptions.VSANStorageProfileData, - } - } - - deviceConfigSpec.Profile = append(deviceConfigSpec.Profile, storageProfileSpec) - virtualMachineConfigSpec.DeviceChange = append(virtualMachineConfigSpec.DeviceChange, deviceConfigSpec) - task, err := virtualMachine.Reconfigure(ctx, virtualMachineConfigSpec) - if err != nil { - glog.Errorf("Failed to reconfigure the VM with the disk with err - %v.", err) - return "", err - } - - err = task.Wait(ctx) - if err != nil { - glog.Errorf("Failed to reconfigure the VM with the disk with err - %v.", err) - return "", err - } - - return vmDiskPath, nil -} - -// creating a scsi controller as there is none found. 
-func createAndAttachSCSIControllerToVM(ctx context.Context, vm *object.VirtualMachine, diskControllerType string) (types.BaseVirtualDevice, error) { - // Get VM device list - vmDevices, err := vm.Device(ctx) - if err != nil { - return nil, err - } - allSCSIControllers := getSCSIControllers(vmDevices) - if len(allSCSIControllers) >= SCSIControllerLimit { - // we reached the maximum number of controllers we can attach - return nil, fmt.Errorf("SCSI Controller Limit of %d has been reached, cannot create another SCSI controller", SCSIControllerLimit) - } - newSCSIController, err := vmDevices.CreateSCSIController(diskControllerType) - if err != nil { - k8runtime.HandleError(fmt.Errorf("error creating new SCSI controller: %v", err)) - return nil, err - } - configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController() - hotAndRemove := true - configNewSCSIController.HotAddRemove = &hotAndRemove - configNewSCSIController.SharedBus = types.VirtualSCSISharing(types.VirtualSCSISharingNoSharing) - - // add the scsi controller to virtual machine - err = vm.AddDevice(context.TODO(), newSCSIController) - if err != nil { - glog.V(1).Infof("cannot add SCSI controller to vm - %v", err) - // attempt clean up of scsi controller - if vmDevices, err := vm.Device(ctx); err == nil { - cleanUpController(ctx, newSCSIController, vmDevices, vm) - } - return nil, err - } - return newSCSIController, nil -} - -// Create a virtual disk. -func createVirtualDisk(ctx context.Context, c *govmomi.Client, dc *object.Datacenter, ds *object.Datastore, volumeOptions *VolumeOptions) (string, error) { - kubeVolsPath := filepath.Clean(ds.Path(VolDir)) + "/" - // Create a kubevols directory in the datastore if one doesn't exist. - err := makeDirectoryInDatastore(c, dc, kubeVolsPath, false) - if err != nil && err != ErrFileAlreadyExist { - glog.Errorf("Cannot create dir %#v. 
err %s", kubeVolsPath, err) - return "", err - } - - glog.V(4).Infof("Created dir with path as %+q", kubeVolsPath) - vmDiskPath := kubeVolsPath + volumeOptions.Name + ".vmdk" - - diskFormat := diskFormatValidType[volumeOptions.DiskFormat] - - // Create a virtual disk manager - virtualDiskManager := object.NewVirtualDiskManager(c.Client) - - // Create specification for new virtual disk - vmDiskSpec := &types.FileBackedVirtualDiskSpec{ - VirtualDiskSpec: types.VirtualDiskSpec{ - AdapterType: LSILogicControllerType, - DiskType: diskFormat, - }, - CapacityKb: int64(volumeOptions.CapacityKB), - } - - // Create virtual disk - requestTime := time.Now() - task, err := virtualDiskManager.CreateVirtualDisk(ctx, vmDiskPath, dc, vmDiskSpec) - if err != nil { - recordvSphereMetric(api_createvolume, requestTime, err) - return "", err - } - err = task.Wait(ctx) - recordvSphereMetric(api_createvolume, requestTime, err) - return vmDiskPath, err -} - -// Check if the provided datastore is VSAN -func checkIfDatastoreTypeIsVSAN(c *govmomi.Client, datastore *object.Datastore) (bool, error) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - pc := property.DefaultCollector(c.Client) - - // Convert datastores into list of references - var dsRefs []types.ManagedObjectReference - dsRefs = append(dsRefs, datastore.Reference()) - - // Retrieve summary property for the given datastore - var dsMorefs []mo.Datastore - err := pc.Retrieve(ctx, dsRefs, []string{"summary"}, &dsMorefs) - if err != nil { - return false, err - } - - for _, ds := range dsMorefs { - if ds.Summary.Type == VSANDatastoreType { - return true, nil - } - } - return false, nil -} - -// Creates a folder using the specified name. -// If the intermediate level folders do not exist, -// and the parameter createParents is true, -// all the non-existent folders are created. -func makeDirectoryInDatastore(c *govmomi.Client, dc *object.Datacenter, path string, createParents bool) error { - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - fileManager := object.NewFileManager(c.Client) - err := fileManager.MakeDirectory(ctx, path, dc, createParents) - if err != nil { - if soap.IsSoapFault(err) { - soapFault := soap.ToSoapFault(err) - if _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok { - return ErrFileAlreadyExist - } - } - } - - return err -} - -// Delete the VM. -func deleteVM(ctx context.Context, vm *object.VirtualMachine) error { - destroyTask, err := vm.Destroy(ctx) - if err != nil { - return err - } - return destroyTask.Wait(ctx) -} - -// Remove the cluster or folder path from the vDiskPath -// for vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk -// for vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value remains same [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk - -func removeClusterFromVDiskPath(vDiskPath string) string { - datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1] - if filepath.Base(datastore) != datastore { - vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1) - } - return vDiskPath -} diff --git a/pkg/cloudprovider/providers/vsphere/vsphere_metrics.go b/pkg/cloudprovider/providers/vsphere/vsphere_metrics.go deleted file mode 100644 index 81beb0b4a8d..00000000000 --- a/pkg/cloudprovider/providers/vsphere/vsphere_metrics.go +++ /dev/null @@ -1,127 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package vsphere - -import ( - "github.com/prometheus/client_golang/prometheus" - "time" -) - -const ( - api_createvolume = "CreateVolume" - api_deletevolume = "DeleteVolume" - api_attachvolume = "AttachVolume" - api_detachvolume = "DetachVolume" -) - -const ( - operation_deletevolume = "DeleteVolumeOperation" - operation_attachvolume = "AttachVolumeOperation" - operation_detachvolume = "DetachVolumeOperation" - operation_diskIsAttached = "DiskIsAttachedOperation" - operation_disksAreAttached = "DisksAreAttachedOperation" - operation_createvolume = "CreateVolumeOperation" - operation_createvolume_with_policy = "CreateVolumeWithPolicyOperation" - operation_createvolume_with_raw_vsan_policy = "CreateVolumeWithRawVSANPolicyOperation" -) - -// vsphereApiMetric is for recording latency of Single API Call. -var vsphereApiMetric = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "cloudprovider_vsphere_api_request_duration_seconds", - Help: "Latency of vsphere api call", - }, - []string{"request"}, -) - -var vsphereApiErrorMetric = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "cloudprovider_vsphere_api_request_errors", - Help: "vsphere Api errors", - }, - []string{"request"}, -) - -// vsphereOperationMetric is for recording latency of vSphere Operation which invokes multiple APIs to get the task done. 
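Callers of these metric helpers, whether the local ones deleted here or their vclib replacements, all follow the same shape: capture a start time, run the work in an internal closure, and hand the start time plus the resulting error to the recorder, which either observes a latency histogram or increments an error counter. A minimal sketch of that caller-side pattern, using a hypothetical record function in place of vclib.RecordvSphereMetric:

package main

import (
	"errors"
	"fmt"
	"time"
)

// record stands in for the real recorder: on failure it would bump an error
// counter, on success it would observe the elapsed seconds in a histogram.
func record(operation string, start time.Time, err error) {
	if err != nil {
		fmt.Printf("%s: error counter +1 (%v)\n", operation, err)
		return
	}
	fmt.Printf("%s: observed %.3fs\n", operation, time.Since(start).Seconds())
}

// deleteVolume mirrors the shape of the provider's DeleteVolume: the real work
// lives in an internal closure and the metric is recorded around it.
func deleteVolume(vmDiskPath string) error {
	deleteVolumeInternal := func(vmDiskPath string) error {
		if vmDiskPath == "" {
			return errors.New("empty disk path")
		}
		return nil // the detach/delete work would go here
	}
	start := time.Now()
	err := deleteVolumeInternal(vmDiskPath)
	record("DeleteVolumeOperation", start, err)
	return err
}

func main() {
	_ = deleteVolume("[datastore1] kubevols/example.vmdk")
	_ = deleteVolume("")
}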
-var vsphereOperationMetric = prometheus.NewHistogramVec( - prometheus.HistogramOpts{ - Name: "cloudprovider_vsphere_operation_duration_seconds", - Help: "Latency of vsphere operation call", - }, - []string{"operation"}, -) - -var vsphereOperationErrorMetric = prometheus.NewCounterVec( - prometheus.CounterOpts{ - Name: "cloudprovider_vsphere_operation_errors", - Help: "vsphere operation errors", - }, - []string{"operation"}, -) - -func registerMetrics() { - prometheus.MustRegister(vsphereApiMetric) - prometheus.MustRegister(vsphereApiErrorMetric) - prometheus.MustRegister(vsphereOperationMetric) - prometheus.MustRegister(vsphereOperationErrorMetric) -} - -func recordvSphereMetric(actionName string, requestTime time.Time, err error) { - switch actionName { - case api_createvolume, api_deletevolume, api_attachvolume, api_detachvolume: - recordvSphereAPIMetric(actionName, requestTime, err) - default: - recordvSphereOperationMetric(actionName, requestTime, err) - } -} - -func recordvSphereAPIMetric(actionName string, requestTime time.Time, err error) { - if err != nil { - vsphereApiErrorMetric.With(prometheus.Labels{"request": actionName}).Inc() - } else { - vsphereApiMetric.With(prometheus.Labels{"request": actionName}).Observe(calculateTimeTaken(requestTime)) - } -} - -func recordvSphereOperationMetric(actionName string, requestTime time.Time, err error) { - if err != nil { - vsphereOperationErrorMetric.With(prometheus.Labels{"operation": actionName}).Inc() - } else { - vsphereOperationMetric.With(prometheus.Labels{"operation": actionName}).Observe(calculateTimeTaken(requestTime)) - } -} - -func recordCreateVolumeMetric(volumeOptions *VolumeOptions, requestTime time.Time, err error) { - var actionName string - if volumeOptions.StoragePolicyName != "" { - actionName = operation_createvolume_with_policy - } else if volumeOptions.VSANStorageProfileData != "" { - actionName = operation_createvolume_with_raw_vsan_policy - } else { - actionName = operation_createvolume - } - recordvSphereMetric(actionName, requestTime, err) -} - -func calculateTimeTaken(requestBeginTime time.Time) (timeTaken float64) { - if !requestBeginTime.IsZero() { - timeTaken = time.Since(requestBeginTime).Seconds() - } else { - timeTaken = 0 - } - return timeTaken -} diff --git a/pkg/cloudprovider/providers/vsphere/vsphere_test.go b/pkg/cloudprovider/providers/vsphere/vsphere_test.go index 4dd44a6e7b2..b8b54e99aef 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere_test.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere_test.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/kubernetes/pkg/cloudprovider" + "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" ) func configFromEnv() (cfg VSphereConfig, ok bool) { @@ -125,11 +126,11 @@ func TestVSphereLogin(t *testing.T) { defer cancel() // Create vSphere client - err = vSphereLogin(ctx, vs) + err = vs.conn.Connect(ctx) if err != nil { - t.Errorf("Failed to create vSpere client: %s", err) + t.Errorf("Failed to connect to vSphere: %s", err) } - defer vs.client.Logout(ctx) + defer vs.conn.GoVmomiClient.Logout(ctx) } func TestZones(t *testing.T) { @@ -168,14 +169,14 @@ func TestInstances(t *testing.T) { t.Fatalf("CurrentNodeName() failed: %s", err) } - externalId, err := i.ExternalID(nodeName) + externalID, err := i.ExternalID(nodeName) if err != nil { t.Fatalf("Instances.ExternalID(%s) failed: %s", nodeName, err) } - t.Logf("Found ExternalID(%s) = %s\n", nodeName, externalId) + t.Logf("Found ExternalID(%s) = 
%s\n", nodeName, externalID) nonExistingVM := types.NodeName(rand.String(15)) - externalId, err = i.ExternalID(nonExistingVM) + externalID, err = i.ExternalID(nonExistingVM) if err == cloudprovider.InstanceNotFound { t.Logf("VM %s was not found as expected\n", nonExistingVM) } else if err == nil { @@ -184,13 +185,13 @@ func TestInstances(t *testing.T) { t.Fatalf("Instances.ExternalID did not fail as expected, err: %v", err) } - instanceId, err := i.InstanceID(nodeName) + instanceID, err := i.InstanceID(nodeName) if err != nil { t.Fatalf("Instances.InstanceID(%s) failed: %s", nodeName, err) } - t.Logf("Found InstanceID(%s) = %s\n", nodeName, instanceId) + t.Logf("Found InstanceID(%s) = %s\n", nodeName, instanceID) - instanceId, err = i.InstanceID(nonExistingVM) + instanceID, err = i.InstanceID(nonExistingVM) if err == cloudprovider.InstanceNotFound { t.Logf("VM %s was not found as expected\n", nonExistingVM) } else if err == nil { @@ -222,7 +223,7 @@ func TestVolumes(t *testing.T) { t.Fatalf("CurrentNodeName() failed: %s", err) } - volumeOptions := &VolumeOptions{ + volumeOptions := &vclib.VolumeOptions{ CapacityKB: 1 * 1024 * 1024, Tags: nil, Name: "kubernetes-test-volume-" + rand.String(10), @@ -233,7 +234,7 @@ func TestVolumes(t *testing.T) { t.Fatalf("Cannot create a new VMDK volume: %v", err) } - _, _, err = vs.AttachDisk(volPath, "", "") + _, err = vs.AttachDisk(volPath, "", "") if err != nil { t.Fatalf("Cannot attach volume(%s) to VM(%s): %v", volPath, nodeName, err) } @@ -249,36 +250,3 @@ func TestVolumes(t *testing.T) { // t.Fatalf("Cannot delete VMDK volume %s: %v", volPath, err) // } } - -func TestGetVMName(t *testing.T) { - cfg, ok := configFromEnv() - if !ok { - t.Skipf("No config found in environment") - } - - // Create vSphere configuration object - vs, err := newVSphere(cfg) - if err != nil { - t.Fatalf("Failed to construct/authenticate vSphere: %s", err) - } - - // Create context - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Create vSphere client - err = vSphereLogin(ctx, vs) - if err != nil { - t.Errorf("Failed to create vSpere client: %s", err) - } - defer vs.client.Logout(ctx) - - // Get VM name - vmName, err := getVMName(vs.client, &cfg) - if err != nil { - t.Fatalf("Failed to get VM name: %s", err) - } - if vmName != "vmname" { - t.Errorf("Expect VM name 'vmname', got: %s", vmName) - } -} diff --git a/pkg/cloudprovider/providers/vsphere/vsphere_util.go b/pkg/cloudprovider/providers/vsphere/vsphere_util.go index a5bd63b15c5..54e09cd7d56 100644 --- a/pkg/cloudprovider/providers/vsphere/vsphere_util.go +++ b/pkg/cloudprovider/providers/vsphere/vsphere_util.go @@ -18,21 +18,23 @@ package vsphere import ( "context" + "errors" + "io/ioutil" "os" "runtime" "strings" + "time" + + "github.com/golang/glog" + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/mo" "fmt" - "github.com/vmware/govmomi" - "github.com/vmware/govmomi/find" - "github.com/vmware/govmomi/object" - "github.com/vmware/govmomi/pbm" - "github.com/vmware/govmomi/property" - "github.com/vmware/govmomi/vim25/mo" - "github.com/vmware/govmomi/vim25/types" - - pbmtypes "github.com/vmware/govmomi/pbm/types" + "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" + "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib/diskmanagers" ) const ( @@ -42,15 +44,17 @@ const ( VirtualMachine = "VirtualMachine" ) -// Reads vSphere configuration from system environment and construct vSphere 
object +// GetVSphere reads vSphere configuration from system environment and construct vSphere object func GetVSphere() (*VSphere, error) { cfg := getVSphereConfig() - client, err := GetgovmomiClient(cfg) + vSphereConn := getVSphereConn(cfg) + client, err := GetgovmomiClient(vSphereConn) if err != nil { return nil, err } + vSphereConn.GoVmomiClient = client vs := &VSphere{ - client: client, + conn: vSphereConn, cfg: cfg, localInstanceID: "", } @@ -75,233 +79,217 @@ func getVSphereConfig() *VSphereConfig { return &cfg } -func GetgovmomiClient(cfg *VSphereConfig) (*govmomi.Client, error) { - if cfg == nil { - cfg = getVSphereConfig() +func getVSphereConn(cfg *VSphereConfig) *vclib.VSphereConnection { + vSphereConn := &vclib.VSphereConnection{ + Username: cfg.Global.User, + Password: cfg.Global.Password, + Hostname: cfg.Global.VCenterIP, + Insecure: cfg.Global.InsecureFlag, + RoundTripperCount: cfg.Global.RoundTripperCount, + Port: cfg.Global.VCenterPort, } - client, err := newClient(context.TODO(), cfg) + return vSphereConn +} + +// GetgovmomiClient gets the goVMOMI client for the vsphere connection object +func GetgovmomiClient(conn *vclib.VSphereConnection) (*govmomi.Client, error) { + if conn == nil { + cfg := getVSphereConfig() + conn = getVSphereConn(cfg) + } + client, err := conn.NewClient(context.TODO()) return client, err } -// Get placement compatibility result based on storage policy requirements. -func (vs *VSphere) GetPlacementCompatibilityResult(ctx context.Context, pbmClient *pbm.Client, storagePolicyID string) (pbm.PlacementCompatibilityResult, error) { - datastores, err := vs.getSharedDatastoresInK8SCluster(ctx) +// getvmUUID gets the BIOS UUID via the sys interface. This UUID is known by vsphere +func getvmUUID() (string, error) { + id, err := ioutil.ReadFile(UUIDPath) if err != nil { - return nil, err + return "", fmt.Errorf("error retrieving vm uuid: %s", err) } - var hubs []pbmtypes.PbmPlacementHub - for _, ds := range datastores { - hubs = append(hubs, pbmtypes.PbmPlacementHub{ - HubType: ds.Type, - HubId: ds.Value, - }) + uuidFromFile := string(id[:]) + //strip leading and trailing white space and new line char + uuid := strings.TrimSpace(uuidFromFile) + // check the uuid starts with "VMware-" + if !strings.HasPrefix(uuid, UUIDPrefix) { + return "", fmt.Errorf("Failed to match Prefix, UUID read from the file is %v", uuidFromFile) } - req := []pbmtypes.BasePbmPlacementRequirement{ - &pbmtypes.PbmPlacementCapabilityProfileRequirement{ - ProfileId: pbmtypes.PbmProfileId{ - UniqueId: storagePolicyID, - }, - }, + // Strip the prefix and while spaces and - + uuid = strings.Replace(uuid[len(UUIDPrefix):(len(uuid))], " ", "", -1) + uuid = strings.Replace(uuid, "-", "", -1) + if len(uuid) != 32 { + return "", fmt.Errorf("Length check failed, UUID read from the file is %v", uuidFromFile) } - res, err := pbmClient.CheckRequirements(ctx, hubs, nil, req) - if err != nil { - return nil, err - } - return res, nil -} - -// Verify if the user specified datastore is in the list of non-compatible datastores. -// If yes, return the non compatible datastore reference. 
-func (vs *VSphere) IsUserSpecifiedDatastoreNonCompatible(ctx context.Context, compatibilityResult pbm.PlacementCompatibilityResult, dsName string) (bool, *types.ManagedObjectReference) { - dsMoList := vs.GetNonCompatibleDatastoresMo(ctx, compatibilityResult) - for _, ds := range dsMoList { - if ds.Info.GetDatastoreInfo().Name == dsName { - dsMoRef := ds.Reference() - return true, &dsMoRef - } - } - return false, nil -} - -func GetNonCompatibleDatastoreFaultMsg(compatibilityResult pbm.PlacementCompatibilityResult, dsMoref types.ManagedObjectReference) string { - var faultMsg string - for _, res := range compatibilityResult { - if res.Hub.HubId == dsMoref.Value { - for _, err := range res.Error { - faultMsg = faultMsg + err.LocalizedMessage - } - } - } - return faultMsg -} - -// Get the best fit compatible datastore by free space. -func GetMostFreeDatastore(dsMo []mo.Datastore) mo.Datastore { - var curMax int64 - curMax = -1 - var index int - for i, ds := range dsMo { - dsFreeSpace := ds.Info.GetDatastoreInfo().FreeSpace - if dsFreeSpace > curMax { - curMax = dsFreeSpace - index = i - } - } - return dsMo[index] -} - -func (vs *VSphere) GetCompatibleDatastoresMo(ctx context.Context, compatibilityResult pbm.PlacementCompatibilityResult) ([]mo.Datastore, error) { - compatibleHubs := compatibilityResult.CompatibleDatastores() - // Return an error if there are no compatible datastores. - if len(compatibleHubs) < 1 { - return nil, fmt.Errorf("There are no compatible datastores that satisfy the storage policy requirements") - } - dsMoList, err := vs.getDatastoreMo(ctx, compatibleHubs) - if err != nil { - return nil, err - } - return dsMoList, nil -} - -func (vs *VSphere) GetNonCompatibleDatastoresMo(ctx context.Context, compatibilityResult pbm.PlacementCompatibilityResult) []mo.Datastore { - nonCompatibleHubs := compatibilityResult.NonCompatibleDatastores() - // Return an error if there are no compatible datastores. - if len(nonCompatibleHubs) < 1 { - return nil - } - dsMoList, err := vs.getDatastoreMo(ctx, nonCompatibleHubs) - if err != nil { - return nil - } - return dsMoList -} - -// Get the datastore managed objects for the place hubs using property collector. -func (vs *VSphere) getDatastoreMo(ctx context.Context, hubs []pbmtypes.PbmPlacementHub) ([]mo.Datastore, error) { - var dsMoRefs []types.ManagedObjectReference - for _, hub := range hubs { - dsMoRefs = append(dsMoRefs, types.ManagedObjectReference{ - Type: hub.HubType, - Value: hub.HubId, - }) - } - - pc := property.DefaultCollector(vs.client.Client) - var dsMoList []mo.Datastore - err := pc.Retrieve(ctx, dsMoRefs, []string{DatastoreInfoProperty}, &dsMoList) - if err != nil { - return nil, err - } - return dsMoList, nil + // need to add dashes, e.g. "564d395e-d807-e18a-cb25-b79f65eb2b9f" + uuid = fmt.Sprintf("%s-%s-%s-%s-%s", uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32]) + return uuid, nil } // Get all datastores accessible for the virtual machine object. 
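The getvmUUID helper added in vsphere_util.go normalizes the VM's BIOS serial string: trim whitespace, require the VMware- prefix, strip spaces and dashes, expect 32 remaining characters, then re-insert dashes in 8-4-4-4-12 form. A standalone sketch of just that string handling (the file read is omitted, and the sample serial below is made up, chosen so the output matches the example UUID in the code's comment):

package main

import (
	"fmt"
	"strings"
)

// normalizeVMwareSerial mirrors the string handling in getvmUUID for an
// already-read serial value.
func normalizeVMwareSerial(raw string) (string, error) {
	uuid := strings.TrimSpace(raw)
	if !strings.HasPrefix(uuid, "VMware-") {
		return "", fmt.Errorf("unexpected prefix in %q", raw)
	}
	uuid = strings.Replace(uuid[len("VMware-"):], " ", "", -1)
	uuid = strings.Replace(uuid, "-", "", -1)
	if len(uuid) != 32 {
		return "", fmt.Errorf("expected 32 characters, got %d", len(uuid))
	}
	return fmt.Sprintf("%s-%s-%s-%s-%s", uuid[0:8], uuid[8:12], uuid[12:16], uuid[16:20], uuid[20:32]), nil
}

func main() {
	// Hypothetical serial in the format VMware guests expose.
	serial := "VMware-56 4d 39 5e d8 07 e1 8a-cb 25 b7 9f 65 eb 2b 9f\n"
	uuid, err := normalizeVMwareSerial(serial)
	if err != nil {
		panic(err)
	}
	fmt.Println(uuid) // 564d395e-d807-e18a-cb25-b79f65eb2b9f
}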
-func (vs *VSphere) getSharedDatastoresInK8SCluster(ctx context.Context) ([]types.ManagedObjectReference, error) { - f := find.NewFinder(vs.client.Client, true) - dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter) - f.SetDatacenter(dc) - vmFolder, err := f.Folder(ctx, strings.TrimSuffix(vs.cfg.Global.WorkingDir, "/")) +func getSharedDatastoresInK8SCluster(ctx context.Context, folder *vclib.Folder) ([]*vclib.Datastore, error) { + vmList, err := folder.GetVirtualMachines(ctx) if err != nil { + glog.Errorf("Failed to get virtual machines in the kubernetes cluster: %s, err: %+v", folder.InventoryPath, err) return nil, err } - vmMoList, err := vs.GetVMsInsideFolder(ctx, vmFolder, []string{NameProperty}) - if err != nil { - return nil, err + if vmList == nil || len(vmList) == 0 { + glog.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath) + return nil, fmt.Errorf("No virtual machines found in the kubernetes cluster: %s", folder.InventoryPath) } index := 0 - var sharedDs []string - for _, vmMo := range vmMoList { - if !strings.HasPrefix(vmMo.Name, DummyVMPrefixName) { - accessibleDatastores, err := vs.getAllAccessibleDatastores(ctx, vmMo) + var sharedDatastores []*vclib.Datastore + for _, vm := range vmList { + vmName, err := vm.ObjectName(ctx) + if err != nil { + return nil, err + } + if !strings.HasPrefix(vmName, DummyVMPrefixName) { + accessibleDatastores, err := vm.GetAllAccessibleDatastores(ctx) if err != nil { return nil, err } if index == 0 { - sharedDs = accessibleDatastores + sharedDatastores = accessibleDatastores } else { - sharedDs = intersect(sharedDs, accessibleDatastores) - if len(sharedDs) == 0 { - return nil, fmt.Errorf("No shared datastores found in the Kubernetes cluster") + sharedDatastores = intersect(sharedDatastores, accessibleDatastores) + if len(sharedDatastores) == 0 { + return nil, fmt.Errorf("No shared datastores found in the Kubernetes cluster: %s", folder.InventoryPath) } } index++ } } - var sharedDSMorefs []types.ManagedObjectReference - for _, ds := range sharedDs { - sharedDSMorefs = append(sharedDSMorefs, types.ManagedObjectReference{ - Value: ds, - Type: "Datastore", - }) - } - return sharedDSMorefs, nil + return sharedDatastores, nil } -func intersect(list1 []string, list2 []string) []string { - var sharedList []string +func intersect(list1 []*vclib.Datastore, list2 []*vclib.Datastore) []*vclib.Datastore { + var sharedDs []*vclib.Datastore for _, val1 := range list1 { // Check if val1 is found in list2 for _, val2 := range list2 { - if val1 == val2 { - sharedList = append(sharedList, val1) + if val1.Reference().Value == val2.Reference().Value { + sharedDs = append(sharedDs, val1) break } } } - return sharedList -} - -// Get the VM list inside a folder. -func (vs *VSphere) GetVMsInsideFolder(ctx context.Context, vmFolder *object.Folder, properties []string) ([]mo.VirtualMachine, error) { - vmFolders, err := vmFolder.Children(ctx) - if err != nil { - return nil, err - } - - pc := property.DefaultCollector(vs.client.Client) - var vmRefs []types.ManagedObjectReference - var vmMoList []mo.VirtualMachine - for _, vmFolder := range vmFolders { - if vmFolder.Reference().Type == VirtualMachine { - vmRefs = append(vmRefs, vmFolder.Reference()) - } - } - err = pc.Retrieve(ctx, vmRefs, properties, &vmMoList) - if err != nil { - return nil, err - } - return vmMoList, nil + return sharedDs } // Get the datastores accessible for the virtual machine object. 
-func (vs *VSphere) getAllAccessibleDatastores(ctx context.Context, vmMo mo.VirtualMachine) ([]string, error) { - f := find.NewFinder(vs.client.Client, true) - dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter) - if err != nil { - return nil, err +func getAllAccessibleDatastores(ctx context.Context, client *vim25.Client, vmMo mo.VirtualMachine) ([]string, error) { + host := vmMo.Summary.Runtime.Host + if host == nil { + return nil, errors.New("VM doesn't have a HostSystem") } - f.SetDatacenter(dc) - vmRegex := vs.cfg.Global.WorkingDir + vmMo.Name - vmObj, err := f.VirtualMachine(ctx, vmRegex) - if err != nil { - return nil, err - } - - host, err := vmObj.HostSystem(ctx) - if err != nil { - return nil, err - } - var hostSystemMo mo.HostSystem - s := object.NewSearchIndex(vs.client.Client) - err = s.Properties(ctx, host.Reference(), []string{DatastoreProperty}, &hostSystemMo) + s := object.NewSearchIndex(client) + err := s.Properties(ctx, host.Reference(), []string{DatastoreProperty}, &hostSystemMo) if err != nil { return nil, err } - var dsRefValues []string for _, dsRef := range hostSystemMo.Datastore { dsRefValues = append(dsRefValues, dsRef.Value) } return dsRefValues, nil } + +// getMostFreeDatastore gets the best fit compatible datastore by free space. +func getMostFreeDatastoreName(ctx context.Context, client *vim25.Client, dsObjList []*vclib.Datastore) (string, error) { + dsMoList, err := dsObjList[0].Datacenter.GetDatastoreMoList(ctx, dsObjList, []string{DatastoreInfoProperty}) + if err != nil { + return "", err + } + var curMax int64 + curMax = -1 + var index int + for i, dsMo := range dsMoList { + dsFreeSpace := dsMo.Info.GetDatastoreInfo().FreeSpace + if dsFreeSpace > curMax { + curMax = dsFreeSpace + index = i + } + } + return dsMoList[index].Info.GetDatastoreInfo().Name, nil +} + +func getPbmCompatibleDatastore(ctx context.Context, client *vim25.Client, storagePolicyName string, folder *vclib.Folder) (string, error) { + pbmClient, err := vclib.NewPbmClient(ctx, client) + if err != nil { + return "", err + } + storagePolicyID, err := pbmClient.ProfileIDByName(ctx, storagePolicyName) + if err != nil { + glog.Errorf("Failed to get Profile ID by name: %s. err: %+v", storagePolicyName, err) + return "", err + } + sharedDsList, err := getSharedDatastoresInK8SCluster(ctx, folder) + if err != nil { + glog.Errorf("Failed to get shared datastores from kubernetes cluster: %s. err: %+v", folder.InventoryPath, err) + return "", err + } + compatibleDatastores, _, err := pbmClient.GetCompatibleDatastores(ctx, storagePolicyID, sharedDsList) + if err != nil { + glog.Errorf("Failed to get compatible datastores from datastores : %+v with storagePolicy: %s. err: %+v", sharedDsList, storagePolicyID, err) + return "", err + } + datastore, err := getMostFreeDatastoreName(ctx, client, compatibleDatastores) + if err != nil { + glog.Errorf("Failed to get most free datastore from compatible datastores: %+v. 
err: %+v", compatibleDatastores, err) + return "", err + } + return datastore, err +} + +func (vs *VSphere) setVMOptions(ctx context.Context, dc *vclib.Datacenter) (*vclib.VMOptions, error) { + var vmOptions vclib.VMOptions + vm, err := dc.GetVMByPath(ctx, vs.cfg.Global.WorkingDir+"/"+vs.localInstanceID) + if err != nil { + return nil, err + } + resourcePool, err := vm.GetResourcePool(ctx) + if err != nil { + return nil, err + } + folder, err := dc.GetFolderByPath(ctx, vs.cfg.Global.WorkingDir) + if err != nil { + return nil, err + } + vmOptions.VMFolder = folder + vmOptions.VMResourcePool = resourcePool + return &vmOptions, nil +} + +// A background routine which will be responsible for deleting stale dummy VM's. +func (vs *VSphere) cleanUpDummyVMs(dummyVMPrefix string) { + // Create context + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for { + time.Sleep(CleanUpDummyVMRoutineInterval * time.Minute) + // Ensure client is logged in and session is valid + err := vs.conn.Connect(ctx) + if err != nil { + glog.V(4).Infof("Failed to connect to VC with err: %+v. Retrying again...", err) + continue + } + dc, err := vclib.GetDatacenter(ctx, vs.conn, vs.cfg.Global.Datacenter) + if err != nil { + glog.V(4).Infof("Failed to get the datacenter: %s from VC. err: %+v", vs.cfg.Global.Datacenter, err) + continue + } + // Get the folder reference for global working directory where the dummy VM needs to be created. + vmFolder, err := dc.GetFolderByPath(ctx, vs.cfg.Global.WorkingDir) + if err != nil { + glog.V(4).Infof("Unable to get the kubernetes folder: %q reference. err: %+v", vs.cfg.Global.WorkingDir, err) + continue + } + // A write lock is acquired to make sure the cleanUp routine doesn't delete any VM's created by ongoing PVC requests. + defer cleanUpDummyVMLock.Lock() + err = diskmanagers.CleanUpDummyVMs(ctx, vmFolder, dc) + if err != nil { + glog.V(4).Infof("Unable to clean up dummy VM's in the kubernetes cluster: %q. err: %+v", vs.cfg.Global.WorkingDir, err) + } + } +} diff --git a/pkg/volume/vsphere_volume/BUILD b/pkg/volume/vsphere_volume/BUILD index 452fe1ae8bb..426a7efeaf2 100644 --- a/pkg/volume/vsphere_volume/BUILD +++ b/pkg/volume/vsphere_volume/BUILD @@ -19,6 +19,7 @@ go_library( deps = [ "//pkg/cloudprovider:go_default_library", "//pkg/cloudprovider/providers/vsphere:go_default_library", + "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", "//pkg/util/keymutex:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/util/strings:go_default_library", @@ -43,7 +44,7 @@ go_test( library = ":go_default_library", tags = ["automanaged"], deps = [ - "//pkg/cloudprovider/providers/vsphere:go_default_library", + "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", diff --git a/pkg/volume/vsphere_volume/attacher.go b/pkg/volume/vsphere_volume/attacher.go index 9155d84adaf..f7a58c80d4e 100644 --- a/pkg/volume/vsphere_volume/attacher.go +++ b/pkg/volume/vsphere_volume/attacher.go @@ -75,7 +75,7 @@ func (attacher *vsphereVMDKAttacher) Attach(spec *volume.Spec, nodeName types.No // vsphereCloud.AttachDisk checks if disk is already attached to host and // succeeds in that case, so no need to do that separately. 
- _, diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, volumeSource.StoragePolicyID, nodeName) + diskUUID, err := attacher.vsphereVolumes.AttachDisk(volumeSource.VolumePath, volumeSource.StoragePolicyID, nodeName) if err != nil { glog.Errorf("Error attaching volume %q to node %q: %+v", volumeSource.VolumePath, nodeName, err) return "", err diff --git a/pkg/volume/vsphere_volume/attacher_test.go b/pkg/volume/vsphere_volume/attacher_test.go index a6f07625334..dbd007dfb06 100644 --- a/pkg/volume/vsphere_volume/attacher_test.go +++ b/pkg/volume/vsphere_volume/attacher_test.go @@ -21,7 +21,7 @@ import ( "testing" "k8s.io/api/core/v1" - "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" + "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" @@ -233,29 +233,29 @@ type diskIsAttachedCall struct { ret error } -func (testcase *testcase) AttachDisk(diskName string, storagePolicyName string, nodeName types.NodeName) (string, string, error) { +func (testcase *testcase) AttachDisk(diskName string, storagePolicyName string, nodeName types.NodeName) (string, error) { expected := &testcase.attach if expected.diskName == "" && expected.nodeName == "" { // testcase.attach looks uninitialized, test did not expect to call // AttachDisk testcase.t.Errorf("Unexpected AttachDisk call!") - return "", "", errors.New("Unexpected AttachDisk call!") + return "", errors.New("Unexpected AttachDisk call!") } if expected.diskName != diskName { testcase.t.Errorf("Unexpected AttachDisk call: expected diskName %s, got %s", expected.diskName, diskName) - return "", "", errors.New("Unexpected AttachDisk call: wrong diskName") + return "", errors.New("Unexpected AttachDisk call: wrong diskName") } if expected.nodeName != nodeName { testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName) - return "", "", errors.New("Unexpected AttachDisk call: wrong nodeName") + return "", errors.New("Unexpected AttachDisk call: wrong nodeName") } glog.V(4).Infof("AttachDisk call: %s, %s, returning %q, %v", diskName, nodeName, expected.retDeviceUUID, expected.ret) - return "", expected.retDeviceUUID, expected.ret + return expected.retDeviceUUID, expected.ret } func (testcase *testcase) DetachDisk(diskName string, nodeName types.NodeName) error { @@ -312,7 +312,7 @@ func (testcase *testcase) DisksAreAttached(diskNames []string, nodeName types.No return nil, errors.New("Not implemented") } -func (testcase *testcase) CreateVolume(volumeOptions *vsphere.VolumeOptions) (volumePath string, err error) { +func (testcase *testcase) CreateVolume(volumeOptions *vclib.VolumeOptions) (volumePath string, err error) { return "", errors.New("Not implemented") } diff --git a/pkg/volume/vsphere_volume/vsphere_volume_util.go b/pkg/volume/vsphere_volume/vsphere_volume_util.go index 4a43402a61c..62ce1f7620b 100644 --- a/pkg/volume/vsphere_volume/vsphere_volume_util.go +++ b/pkg/volume/vsphere_volume/vsphere_volume_util.go @@ -27,6 +27,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere" + "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib" "k8s.io/kubernetes/pkg/volume" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) @@ -94,7 +95,7 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec // vSphere works with kilobytes, convert to KiB with rounding up volSizeKB := 
int(volume.RoundUpSize(volSizeBytes, 1024)) name := volume.GenerateVolumeName(v.options.ClusterName, v.options.PVName, 255) - volumeOptions := &vsphere.VolumeOptions{ + volumeOptions := &vclib.VolumeOptions{ CapacityKB: volSizeKB, Tags: *v.options.CloudTags, Name: name, @@ -129,7 +130,7 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec if volumeOptions.VSANStorageProfileData != "" { if volumeOptions.StoragePolicyName != "" { - return nil, fmt.Errorf("Cannot specify storage policy capabilities along with storage policy name. Please specify only one.") + return nil, fmt.Errorf("Cannot specify storage policy capabilities along with storage policy name. Please specify only one") } volumeOptions.VSANStorageProfileData = "(" + volumeOptions.VSANStorageProfileData + ")" } @@ -141,7 +142,6 @@ func (util *VsphereDiskUtil) CreateVolume(v *vsphereVolumeProvisioner) (volSpec vmDiskPath, err := cloud.CreateVolume(volumeOptions) if err != nil { - glog.V(2).Infof("Error creating vsphere volume: %v", err) return nil, err } volSpec = &VolumeSpec{