Adding vSphere Volume support for vSphere Cloud Provider
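The diff below adds volume operations (CreateVolume, AttachDisk, DetachDisk, DeleteVolume) to the vSphere cloud provider, along with disk-related configuration defaults and a TestVolumes integration test. As orientation, here is a minimal sketch of how the new methods chain together; it mirrors the TestVolumes flow at the end of the diff, and the function name exampleVolumeLifecycle and the volume name are illustrative only:

// Illustrative sketch, not part of the commit; mirrors TestVolumes below.
// cfg is assumed to be a populated VSphereConfig (e.g. from configFromEnv).
func exampleVolumeLifecycle(cfg VSphereConfig) error {
	vs, err := newVSphere(cfg)
	if err != nil {
		return err
	}

	// CreateVolume takes the size in KiB, so 1*1024*1024 is a 1 GiB VMDK.
	tags := map[string]string{"adapterType": "lsiLogic", "diskType": "thin"}
	volPath, err := vs.CreateVolume("example-volume", 1*1024*1024, &tags)
	if err != nil {
		return err
	}

	// An empty node name means "attach to the node this kubelet runs on".
	diskID, _, err := vs.AttachDisk(volPath, "")
	if err != nil {
		return err
	}
	return vs.DetachDisk(diskID, "")
}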
@@ -41,6 +41,11 @@ import (

const ProviderName = "vsphere"
const ActivePowerState = "poweredOn"
const DefaultDiskController = "scsi"
const DefaultSCSIControllerType = "lsilogic"

var ErrNoDiskUUIDFound = errors.New("no disk UUID found")
var ErrNoDevicesFound = errors.New("No devices found")

// VSphere is an implementation of cloud provider Interface for VSphere.
type VSphere struct {
@@ -63,6 +68,10 @@ type VSphereConfig struct {
	Network struct {
		PublicNetwork string `gcfg:"public-network"`
	}
	Disk struct {
		DiskController     string `dcfg:"diskcontroller"`
		SCSIControllerType string `dcfg:"scsicontrollertype"`
	}
}

func readConfig(config io.Reader) (VSphereConfig, error) {
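The new Disk block above maps onto the cloud config file parsed by readConfig. A minimal sketch, assuming gcfg's usual case-insensitive section/key matching for the fields shown above; the function name exampleReadDiskConfig is illustrative and the existing [Global] and [Network] settings are omitted:

// Sketch only; the [Disk] section and key names are an assumption based on
// the struct fields above. Uses the package's existing "strings" import.
func exampleReadDiskConfig() (VSphereConfig, error) {
	sampleConf := "[Disk]\ndiskcontroller = scsi\nscsicontrollertype = lsilogic\n"
	return readConfig(strings.NewReader(sampleConf))
}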
@@ -136,6 +145,12 @@ func newVSphere(cfg VSphereConfig) (*VSphere, error) {
		return nil, err
	}

	if cfg.Disk.DiskController == "" {
		cfg.Disk.DiskController = DefaultDiskController
	}
	if cfg.Disk.SCSIControllerType == "" {
		cfg.Disk.SCSIControllerType = DefaultSCSIControllerType
	}
	vs := VSphere{
		cfg:             &cfg,
		localInstanceID: id,
@@ -429,3 +444,292 @@ func (vs *VSphere) Routes() (cloudprovider.Routes, bool) {
func (vs *VSphere) ScrubDNS(nameservers, searches []string) (nsOut, srchOut []string) {
	return nameservers, searches
}

func getVirtualMachineDevices(cfg *VSphereConfig, ctx context.Context, c *govmomi.Client, name string) (*object.VirtualMachine, object.VirtualDeviceList, *object.Datastore, error) {

	// Create a new finder
	f := find.NewFinder(c.Client, true)

	// Fetch and set data center
	dc, err := f.Datacenter(ctx, cfg.Global.Datacenter)
	if err != nil {
		return nil, nil, nil, err
	}
	f.SetDatacenter(dc)

	// Find datastores
	ds, err := f.Datastore(ctx, cfg.Global.Datastore)
	if err != nil {
		return nil, nil, nil, err
	}

	vm, err := f.VirtualMachine(ctx, name)
	if err != nil {
		return nil, nil, nil, err
	}

	// Get devices from VM
	vmDevices, err := vm.Device(ctx)
	if err != nil {
		return nil, nil, nil, err
	}
	return vm, vmDevices, ds, nil
}

// cleaning up the controller
func cleanUpController(newSCSIController types.BaseVirtualDevice, vmDevices object.VirtualDeviceList, vm *object.VirtualMachine, ctx context.Context) error {
	ctls := vmDevices.SelectByType(newSCSIController)
	if len(ctls) < 1 {
		return ErrNoDevicesFound
	}
	newScsi := ctls[len(ctls)-1]
	err := vm.RemoveDevice(ctx, true, newScsi)
	if err != nil {
		return err
	}
	return nil
}

// Attaches given virtual disk volume to the compute running kubelet.
func (vs *VSphere) AttachDisk(vmDiskPath string, nodeName string) (diskID string, diskUUID string, err error) {
	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create vSphere client
	c, err := vsphereLogin(vs.cfg, ctx)
	if err != nil {
		return "", "", err
	}
	defer c.Logout(ctx)

	// Find virtual machine to attach disk to
	var vSphereInstance string
	if nodeName == "" {
		vSphereInstance = vs.localInstanceID
	} else {
		vSphereInstance = nodeName
	}

	// Get VM device list
	vm, vmDevices, ds, err := getVirtualMachineDevices(vs.cfg, ctx, c, vSphereInstance)
	if err != nil {
		return "", "", err
	}

	// find SCSI controller to attach the disk
	var newSCSICreated bool = false
	var newSCSIController types.BaseVirtualDevice
	diskController, err := vmDevices.FindDiskController(vs.cfg.Disk.DiskController)
	if err != nil {
		// create a scsi controller if there is not one
		newSCSIController, err := vmDevices.CreateSCSIController(vs.cfg.Disk.SCSIControllerType)
		if err != nil {
			glog.V(3).Infof("Cannot create new SCSI controller - %v", err)
			return "", "", err
		}
		configNewSCSIController := newSCSIController.(types.BaseVirtualSCSIController).GetVirtualSCSIController()
		hotAndRemove := true
		configNewSCSIController.HotAddRemove = &hotAndRemove
		configNewSCSIController.SharedBus = types.VirtualSCSISharing(types.VirtualSCSISharingNoSharing)

		// add the scsi controller to virtual machine
		err = vm.AddDevice(context.TODO(), newSCSIController)
		if err != nil {
			glog.V(3).Infof("Cannot add SCSI controller to vm - %v", err)
			// attempt clean up of scsi controller
			if vmDevices, err := vm.Device(ctx); err == nil {
				cleanUpController(newSCSIController, vmDevices, vm, ctx)
			}
			return "", "", err
		}

		// verify scsi controller in virtual machine
		vmDevices, err = vm.Device(ctx)
		if err != nil {
			// cannot cleanup if there is no device list
			return "", "", err
		}
		if diskController, err = vmDevices.FindDiskController(vs.cfg.Disk.DiskController); err != nil {
			glog.V(3).Infof("Cannot find disk controller - %v", err)
			// attempt clean up of scsi controller
			cleanUpController(newSCSIController, vmDevices, vm, ctx)
			return "", "", err
		}
		newSCSICreated = true
	}

	disk := vmDevices.CreateDisk(diskController, ds.Reference(), vmDiskPath)
	backing := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo)
	backing.DiskMode = string(types.VirtualDiskModeIndependent_persistent)
	disk = vmDevices.ChildDisk(disk)

	// Attach disk to the VM
	err = vm.AddDevice(ctx, disk)
	if err != nil {
		glog.V(3).Infof("Cannot add disk to the vm - %v", err)
		if newSCSICreated {
			cleanUpController(newSCSIController, vmDevices, vm, ctx)
		}
		return "", "", err
	}

	vmDevices, err = vm.Device(ctx)
	if err != nil {
		if newSCSICreated {
			cleanUpController(newSCSIController, vmDevices, vm, ctx)
		}
		return "", "", err
	}
	devices := vmDevices.SelectByType(disk)
	if len(devices) < 1 {
		if newSCSICreated {
			cleanUpController(newSCSIController, vmDevices, vm, ctx)
		}
		return "", "", ErrNoDevicesFound
	}

	// get new disk id
	newDevice := devices[len(devices)-1]
	deviceName := devices.Name(newDevice)

	// get device uuid
	diskUUID, err = getVirtualDiskUUID(newDevice)
	if err != nil {
		if newSCSICreated {
			cleanUpController(newSCSIController, vmDevices, vm, ctx)
		}
		vs.DetachDisk(deviceName, vSphereInstance)
		return "", "", err
	}

	return deviceName, diskUUID, nil
}

func getVirtualDiskUUID(newDevice types.BaseVirtualDevice) (string, error) {
	vd := newDevice.GetVirtualDevice()

	if b, ok := vd.Backing.(*types.VirtualDiskFlatVer2BackingInfo); ok {
		uuidWithNoHypens := strings.Replace(b.Uuid, "-", "", -1)
		return uuidWithNoHypens, nil
	}
	return "", ErrNoDiskUUIDFound
}

// Detaches given virtual disk volume from the compute running kubelet.
func (vs *VSphere) DetachDisk(diskID string, nodeName string) error {
	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create vSphere client
	c, err := vsphereLogin(vs.cfg, ctx)
	if err != nil {
		return err
	}
	defer c.Logout(ctx)

	// Find VM to detach disk from
	var vSphereInstance string
	if nodeName == "" {
		vSphereInstance = vs.localInstanceID
	} else {
		vSphereInstance = nodeName
	}

	vm, vmDevices, _, err := getVirtualMachineDevices(vs.cfg, ctx, c, vSphereInstance)
	if err != nil {
		return err
	}

	// Remove disk from VM
	device := vmDevices.Find(diskID)
	if device == nil {
		return fmt.Errorf("device '%s' not found", diskID)
	}

	err = vm.RemoveDevice(ctx, false, device)
	if err != nil {
		return err
	}

	return nil
}

// Create a volume of given size (in KiB).
func (vs *VSphere) CreateVolume(name string, size int, tags *map[string]string) (volumePath string, err error) {
	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create vSphere client
	c, err := vsphereLogin(vs.cfg, ctx)
	if err != nil {
		return "", err
	}
	defer c.Logout(ctx)

	// Create a new finder
	f := find.NewFinder(c.Client, true)

	// Fetch and set data center
	dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter)
	f.SetDatacenter(dc)

	// Create a virtual disk manager
	vmDiskPath := "[" + vs.cfg.Global.Datastore + "] " + name + ".vmdk"
	virtualDiskManager := object.NewVirtualDiskManager(c.Client)

	// Create specification for new virtual disk
	vmDiskSpec := &types.FileBackedVirtualDiskSpec{
		VirtualDiskSpec: types.VirtualDiskSpec{
			AdapterType: (*tags)["adapterType"],
			DiskType:    (*tags)["diskType"],
		},
		CapacityKb: int64(size),
	}

	// Create virtual disk
	task, err := virtualDiskManager.CreateVirtualDisk(ctx, vmDiskPath, dc, vmDiskSpec)
	if err != nil {
		return "", err
	}
	err = task.Wait(ctx)
	if err != nil {
		return "", err
	}

	return vmDiskPath, nil
}

// Deletes a volume given volume name.
func (vs *VSphere) DeleteVolume(vmDiskPath string) error {
	// Create context
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Create vSphere client
	c, err := vsphereLogin(vs.cfg, ctx)
	if err != nil {
		return err
	}
	defer c.Logout(ctx)

	// Create a new finder
	f := find.NewFinder(c.Client, true)

	// Fetch and set data center
	dc, err := f.Datacenter(ctx, vs.cfg.Global.Datacenter)
	f.SetDatacenter(dc)

	// Create a virtual disk manager
	virtualDiskManager := object.NewVirtualDiskManager(c.Client)

	// Delete virtual disk
	task, err := virtualDiskManager.DeleteVirtualDisk(ctx, vmDiskPath, dc)
	if err != nil {
		return err
	}

	return task.Wait(ctx)
}

@@ -24,6 +24,7 @@ import (
	"testing"

	"golang.org/x/net/context"
	"k8s.io/kubernetes/pkg/util/rand"
)

func configFromEnv() (cfg VSphereConfig, ok bool) {
@@ -35,6 +36,7 @@ func configFromEnv() (cfg VSphereConfig, ok bool) {
	cfg.Global.Password = os.Getenv("VSPHERE_PASSWORD")
	cfg.Global.Datacenter = os.Getenv("VSPHERE_DATACENTER")
	cfg.Network.PublicNetwork = os.Getenv("VSPHERE_PUBLIC_NETWORK")
	cfg.Global.Datastore = os.Getenv("VSPHERE_DATASTORE")
	if os.Getenv("VSPHERE_INSECURE") != "" {
		InsecureFlag, err = strconv.ParseBool(os.Getenv("VSPHERE_INSECURE"))
	} else {
@@ -187,3 +189,54 @@ func TestInstances(t *testing.T) {
	}
	t.Logf("Found NodeAddresses(%s) = %s\n", srvs[0], addrs)
}

func TestVolumes(t *testing.T) {
	cfg, ok := configFromEnv()
	if !ok {
		t.Skipf("No config found in environment")
	}

	vs, err := newVSphere(cfg)
	if err != nil {
		t.Fatalf("Failed to construct/authenticate vSphere: %s", err)
	}

	i, ok := vs.Instances()
	if !ok {
		t.Fatalf("Instances() returned false")
	}

	srvs, err := i.List("*")
	if err != nil {
		t.Fatalf("Instances.List() failed: %s", err)
	}
	if len(srvs) == 0 {
		t.Fatalf("Instances.List() returned zero servers")
	}

	tags := map[string]string{
		"adapterType": "lsiLogic",
		"diskType":    "thin",
	}

	volPath, err := vs.CreateVolume("kubernetes-test-volume-"+rand.String(10), 1*1024*1024, &tags)
	if err != nil {
		t.Fatalf("Cannot create a new VMDK volume: %v", err)
	}

	diskID, _, err := vs.AttachDisk(volPath, "")
	if err != nil {
		t.Fatalf("Cannot attach volume(%s) to VM(%s): %v", volPath, srvs[0], err)
	}

	err = vs.DetachDisk(diskID, "")
	if err != nil {
		t.Fatalf("Cannot detach disk(%s) from VM(%s): %v", diskID, srvs[0], err)
	}

	// todo: Deleting a volume after detach currently not working through API or UI (vSphere)
	// err = vs.DeleteVolume(volPath)
	// if err != nil {
	// 	t.Fatalf("Cannot delete VMDK volume %s: %v", volPath, err)
	// }
}