Remove production code usage from e2e test code

Divyen Patel 2018-02-07 19:41:23 -08:00
parent 66ccfcb4c7
commit c0490fa623
22 changed files with 606 additions and 304 deletions

View File

@@ -503,24 +503,21 @@ var _ = utils.SIGDescribe("Volumes", func() {
 	Describe("vsphere [Feature:Volumes]", func() {
 		It("should be mountable", func() {
 			framework.SkipUnlessProviderIs("vsphere")
+			vspheretest.Bootstrap(f)
+			nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
+			Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
+			nodeInfo := vspheretest.TestContext.NodeMapper.GetNodeInfo(nodeList.Items[0].Name)
 			var volumePath string
 			config := framework.VolumeTestConfig{
 				Namespace: namespace.Name,
 				Prefix:    "vsphere",
 			}
-			By("creating a test vsphere volume")
-			c, err := framework.LoadClientset()
-			if err != nil {
-				return
-			}
-			vsp, err := vspheretest.GetVSphere(c)
-			Expect(err).NotTo(HaveOccurred())
-			volumePath, err = vspheretest.CreateVSphereVolume(vsp, nil)
+			volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
 			Expect(err).NotTo(HaveOccurred())
 			defer func() {
-				vsp.DeleteVolume(volumePath)
+				nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
 			}()
 			defer func() {

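The hunk above is the template this commit applies across the suite: bootstrap the vsphere test context, resolve the chosen node's placement through the node mapper, and drive the volume lifecycle through the test-owned client instead of the cloud provider. A minimal sketch of that flow, using only helpers visible in this diff; the package name and wrapper function are illustrative, not part of the commit:

// Sketch of the new test-side volume lifecycle; assumes the vspheretest helpers
// shown in the hunk above (Bootstrap, TestContext.NodeMapper,
// NodeInfo.VSphere.CreateVolume/DeleteVolume, VolumeOptions).
package example

import (
	. "github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
	vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
)

func createAndCleanupTestVolume(f *framework.Framework) {
	// Populate vSphere clients and the node-to-VM map from the test config.
	vspheretest.Bootstrap(f)

	// Pick any ready, schedulable node and look up where its VM lives.
	nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
	Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
	nodeInfo := vspheretest.TestContext.NodeMapper.GetNodeInfo(nodeList.Items[0].Name)

	// Create a VMDK with default options in that node's datacenter and make
	// sure it is deleted when the test returns.
	volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
	Expect(err).NotTo(HaveOccurred())
	defer nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)

	// volumePath can now be fed to a framework.VolumeTestConfig, PV spec, etc.
}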
View File

@@ -37,8 +37,6 @@ go_library(
     ],
     importpath = "k8s.io/kubernetes/test/e2e/storage/vsphere",
     deps = [
-        "//pkg/cloudprovider/providers/vsphere:go_default_library",
-        "//pkg/cloudprovider/providers/vsphere/vclib:go_default_library",
         "//pkg/volume/util/volumehelper:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/storage/utils:go_default_library",
@@ -51,6 +49,7 @@ go_library(
         "//vendor/github.com/vmware/govmomi/session:go_default_library",
         "//vendor/github.com/vmware/govmomi/vim25:go_default_library",
         "//vendor/github.com/vmware/govmomi/vim25/mo:go_default_library",
+        "//vendor/github.com/vmware/govmomi/vim25/soap:go_default_library",
         "//vendor/github.com/vmware/govmomi/vim25/types:go_default_library",
         "//vendor/golang.org/x/net/context:go_default_library",
         "//vendor/gopkg.in/gcfg.v1:go_default_library",
@@ -61,7 +60,6 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",

View File

@@ -42,6 +42,7 @@ type Config struct {
 	Datacenters       string
 	RoundTripperCount uint
 	DefaultDatastore  string
+	Folder            string
 }
 // ConfigFile represents the content of vsphere.conf file.
@@ -166,6 +167,7 @@ func populateInstanceMap(cfg *ConfigFile) (map[string]*VSphere, error) {
 		}
 		vcConfig.DefaultDatastore = cfg.Workspace.DefaultDatastore
+		vcConfig.Folder = cfg.Workspace.Folder
 		vsphereIns := VSphere{
 			Config: vcConfig,

View File

@@ -90,9 +90,8 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
 	for _, node := range nodeList.Items {
 		n := node
 		go func() {
-			nodeUUID := n.Status.NodeInfo.SystemUUID
+			nodeUUID := getUUIDFromProviderID(n.Spec.ProviderID)
 			framework.Logf("Searching for node with UUID: %s", nodeUUID)
 			for _, res := range queueChannel {
 				ctx, cancel := context.WithCancel(context.Background())
@@ -107,7 +106,7 @@ func (nm *NodeMapper) GenerateNodeMap(vSphereInstances map[string]*VSphere, node
 				framework.Logf("Found node %s as vm=%+v in vc=%s and datacenter=%s",
 					n.Name, vm, res.vs.Config.Hostname, res.datacenter.Name())
 				nodeInfo := &NodeInfo{Name: n.Name, DataCenterRef: res.datacenter.Reference(), VirtualMachineRef: vm.Reference(), VSphere: res.vs}
-				nameToNodeInfo[n.Name] = nodeInfo
+				nm.SetNodeInfo(n.Name, nodeInfo)
 				break
 			}
 		}

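GenerateNodeMap now keys the VM search on the UUID embedded in the node's providerID rather than the node's reported SystemUUID. The getUUIDFromProviderID helper is not shown in this excerpt; a plausible sketch, assuming providerIDs of the form "vsphere://<vm-uuid>" as implied by the ProviderPrefix constant added elsewhere in this commit, is simply stripping that scheme prefix:

// Hypothetical sketch of getUUIDFromProviderID, referenced above but not
// included in this excerpt. It assumes providerIDs shaped like
// "vsphere://<vm-uuid>", matching the ProviderPrefix constant in this commit.
package example

import "strings"

const ProviderPrefix = "vsphere://"

func getUUIDFromProviderID(providerID string) string {
	// "vsphere://4237abcd-..." -> "4237abcd-..."
	return strings.TrimPrefix(providerID, ProviderPrefix)
}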
View File

@@ -24,9 +24,7 @@ import (
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/types"
 	clientset "k8s.io/client-go/kubernetes"
-	vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 )
@@ -42,11 +40,11 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 		clientPod *v1.Pod
 		pvConfig  framework.PersistentVolumeConfig
 		pvcConfig framework.PersistentVolumeClaimConfig
-		vsp       *vsphere.VSphere
 		err       error
-		node      types.NodeName
+		node      string
 		volLabel  labels.Set
 		selector  *metav1.LabelSelector
+		nodeInfo  *NodeInfo
 	)
 	f := framework.NewDefaultFramework("pv")
@@ -66,16 +64,17 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 		clientPod = nil
 		pvc = nil
 		pv = nil
+		nodes := framework.GetReadySchedulableNodesOrDie(c)
+		if len(nodes.Items) < 1 {
+			framework.Skipf("Requires at least %d node", 1)
+		}
+		nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
 		volLabel = labels.Set{framework.VolumeSelectorKey: ns}
 		selector = metav1.SetAsLabelSelector(volLabel)
-		if vsp == nil {
-			vsp, err = getVSphere(c)
-			Expect(err).NotTo(HaveOccurred())
-		}
 		if volumePath == "" {
-			volumePath, err = createVSphereVolume(vsp, nil)
+			volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
 			Expect(err).NotTo(HaveOccurred())
 			pvConfig = framework.PersistentVolumeConfig{
 				NamePrefix: "vspherepv-",
@@ -103,10 +102,10 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 		By("Creating the Client Pod")
 		clientPod, err = framework.CreateClientPod(c, ns, pvc)
 		Expect(err).NotTo(HaveOccurred())
-		node = types.NodeName(clientPod.Spec.NodeName)
+		node = clientPod.Spec.NodeName
 		By("Verify disk should be attached to the node")
-		isAttached, err := verifyVSphereDiskAttached(c, vsp, volumePath, node)
+		isAttached, err := diskIsAttached(volumePath, node)
 		Expect(err).NotTo(HaveOccurred())
 		Expect(isAttached).To(BeTrue(), "disk is not attached with the node")
 	})
@@ -134,12 +133,8 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 	framework.AddCleanupAction(func() {
 		// Cleanup actions will be called even when the tests are skipped and leaves namespace unset.
 		if len(ns) > 0 && len(volumePath) > 0 {
-			client, err := framework.LoadClientset()
-			if err != nil {
-				return
-			}
-			framework.ExpectNoError(waitForVSphereDiskToDetach(client, vsp, volumePath, node))
-			vsp.DeleteVolume(volumePath)
+			framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node))
+			nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
 		}
 	})
@@ -218,6 +213,6 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
 		Expect(err).NotTo(HaveOccurred())
 		By("Verifying Persistent Disk detaches")
-		waitForVSphereDiskToDetach(c, vsp, volumePath, node)
+		waitForVSphereDiskToDetach(volumePath, node)
 	})
 })

View File

@ -25,9 +25,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors" apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -40,6 +38,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
volumePath string volumePath string
pv *v1.PersistentVolume pv *v1.PersistentVolume
pvc *v1.PersistentVolumeClaim pvc *v1.PersistentVolumeClaim
nodeInfo *NodeInfo
) )
BeforeEach(func() { BeforeEach(func() {
@ -51,15 +50,19 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
utils.SIGDescribe("persistentvolumereclaim:vsphere", func() { utils.SIGDescribe("persistentvolumereclaim:vsphere", func() {
BeforeEach(func() { BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
nodes := framework.GetReadySchedulableNodesOrDie(c)
if len(nodes.Items) < 1 {
framework.Skipf("Requires at least %d node", 1)
}
nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
pv = nil pv = nil
pvc = nil pvc = nil
volumePath = "" volumePath = ""
}) })
AfterEach(func() { AfterEach(func() {
vsp, err := getVSphere(c) testCleanupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, volumePath, pv, pvc)
Expect(err).NotTo(HaveOccurred())
testCleanupVSpherePersistentVolumeReclaim(vsp, c, ns, volumePath, pv, pvc)
}) })
/* /*
@ -75,10 +78,8 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
6. Verify PV is deleted automatically. 6. Verify PV is deleted automatically.
*/ */
It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() { It("should delete persistent volume when reclaimPolicy set to delete and associated claim is deleted", func() {
vsp, err := getVSphere(c) var err error
Expect(err).NotTo(HaveOccurred()) volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(vsp, c, ns, v1.PersistentVolumeReclaimDelete)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
deletePVCAfterBind(c, ns, pvc, pv) deletePVCAfterBind(c, ns, pvc, pv)
@ -105,10 +106,9 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
9. Verify PV should be detached from the node and automatically deleted. 9. Verify PV should be detached from the node and automatically deleted.
*/ */
It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func() { It("should not detach and unmount PV when associated pvc with delete as reclaimPolicy is deleted when it is in use by the pod", func() {
vsp, err := getVSphere(c) var err error
Expect(err).NotTo(HaveOccurred())
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(vsp, c, ns, v1.PersistentVolumeReclaimDelete) volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Wait for PV and PVC to Bind // Wait for PV and PVC to Bind
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc)) framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv, pvc))
@ -116,7 +116,6 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
By("Creating the Pod") By("Creating the Pod")
pod, err := framework.CreateClientPod(c, ns, pvc) pod, err := framework.CreateClientPod(c, ns, pvc)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
node := types.NodeName(pod.Spec.NodeName)
By("Deleting the Claim") By("Deleting the Claim")
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
@ -128,19 +127,19 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(HaveOccurred()) Expect(framework.WaitForPersistentVolumePhase(v1.VolumeFailed, c, pv.Name, 1*time.Second, 60*time.Second)).NotTo(HaveOccurred())
By("Verify the volume is attached to the node") By("Verify the volume is attached to the node")
isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(c, vsp, pv.Spec.VsphereVolume.VolumePath, node) isVolumeAttached, verifyDiskAttachedError := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
Expect(verifyDiskAttachedError).NotTo(HaveOccurred()) Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
Expect(isVolumeAttached).To(BeTrue()) Expect(isVolumeAttached).To(BeTrue())
By("Verify the volume is accessible and available in the pod") By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv}, vsp) verifyVSphereVolumesAccessible(c, pod, []*v1.PersistentVolume{pv})
framework.Logf("Verified that Volume is accessible in the POD after deleting PV claim") framework.Logf("Verified that Volume is accessible in the POD after deleting PV claim")
By("Deleting the Pod") By("Deleting the Pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
By("Verify PV is detached from the node after Pod is deleted") By("Verify PV is detached from the node after Pod is deleted")
Expect(waitForVSphereDiskToDetach(c, vsp, pv.Spec.VsphereVolume.VolumePath, types.NodeName(pod.Spec.NodeName))).NotTo(HaveOccurred()) Expect(waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)).NotTo(HaveOccurred())
By("Verify PV should be deleted automatically") By("Verify PV should be deleted automatically")
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second)) framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
@ -167,11 +166,10 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
*/ */
It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func() { It("should retain persistent volume when reclaimPolicy set to retain when associated claim is deleted", func() {
var err error
var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10) var volumeFileContent = "hello from vsphere cloud provider, Random Content is :" + strconv.FormatInt(time.Now().UnixNano(), 10)
vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred())
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(vsp, c, ns, v1.PersistentVolumeReclaimRetain) volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(c, nodeInfo, ns, v1.PersistentVolumeReclaimRetain)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
writeContentToVSpherePV(c, pvc, volumeFileContent) writeContentToVSpherePV(c, pvc, volumeFileContent)
@ -205,10 +203,10 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:ReclaimPolicy]", func() {
}) })
// Test Setup for persistentvolumereclaim tests for vSphere Provider // Test Setup for persistentvolumereclaim tests for vSphere Provider
func testSetupVSpherePersistentVolumeReclaim(vsp *vsphere.VSphere, c clientset.Interface, ns string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy) (volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, err error) { func testSetupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, persistentVolumeReclaimPolicy v1.PersistentVolumeReclaimPolicy) (volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, err error) {
By("running testSetupVSpherePersistentVolumeReclaim") By("running testSetupVSpherePersistentVolumeReclaim")
By("creating vmdk") By("creating vmdk")
volumePath, err = createVSphereVolume(vsp, nil) volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
if err != nil { if err != nil {
return return
} }
@ -225,10 +223,11 @@ func testSetupVSpherePersistentVolumeReclaim(vsp *vsphere.VSphere, c clientset.I
} }
// Test Cleanup for persistentvolumereclaim tests for vSphere Provider // Test Cleanup for persistentvolumereclaim tests for vSphere Provider
func testCleanupVSpherePersistentVolumeReclaim(vsp *vsphere.VSphere, c clientset.Interface, ns string, volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) { func testCleanupVSpherePersistentVolumeReclaim(c clientset.Interface, nodeInfo *NodeInfo, ns string, volumePath string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
By("running testCleanupVSpherePersistentVolumeReclaim") By("running testCleanupVSpherePersistentVolumeReclaim")
if len(volumePath) > 0 { if len(volumePath) > 0 {
vsp.DeleteVolume(volumePath) err := nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
} }
if pv != nil { if pv != nil {
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name) framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)

View File

@ -56,11 +56,18 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
ssdlabels map[string]string ssdlabels map[string]string
vvollabels map[string]string vvollabels map[string]string
err error err error
nodeInfo *NodeInfo
) )
BeforeEach(func() { BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
c = f.ClientSet c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
Bootstrap(f)
nodes := framework.GetReadySchedulableNodesOrDie(c)
if len(nodes.Items) < 1 {
framework.Skipf("Requires at least %d node", 1)
}
nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
ssdlabels = make(map[string]string) ssdlabels = make(map[string]string)
ssdlabels["volume-type"] = "ssd" ssdlabels["volume-type"] = "ssd"
@ -73,11 +80,11 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
AfterEach(func() { AfterEach(func() {
By("Running clean up actions") By("Running clean up actions")
if framework.ProviderIs("vsphere") { if framework.ProviderIs("vsphere") {
testCleanupVSpherePVClabelselector(c, ns, volumePath, pv_ssd, pvc_ssd, pvc_vvol) testCleanupVSpherePVClabelselector(c, ns, nodeInfo, volumePath, pv_ssd, pvc_ssd, pvc_vvol)
} }
}) })
It("should bind volume with claim for given label", func() { It("should bind volume with claim for given label", func() {
volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, ns, ssdlabels, vvollabels) volumePath, pv_ssd, pvc_ssd, pvc_vvol, err = testSetupVSpherePVClabelselector(c, nodeInfo, ns, ssdlabels, vvollabels)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("wait for the pvc_ssd to bind with pv_ssd") By("wait for the pvc_ssd to bind with pv_ssd")
@ -101,12 +108,11 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:LabelSelector]", func() {
}) })
}) })
func testSetupVSpherePVClabelselector(c clientset.Interface, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) { func testSetupVSpherePVClabelselector(c clientset.Interface, nodeInfo *NodeInfo, ns string, ssdlabels map[string]string, vvollabels map[string]string) (volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim, err error) {
volumePath = "" volumePath = ""
By("creating vmdk") By("creating vmdk")
vsp, err := getVSphere(c)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
volumePath, err = createVSphereVolume(vsp, nil) volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
if err != nil { if err != nil {
return return
} }
@ -131,12 +137,10 @@ func testSetupVSpherePVClabelselector(c clientset.Interface, ns string, ssdlabel
return return
} }
func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) { func testCleanupVSpherePVClabelselector(c clientset.Interface, ns string, nodeInfo *NodeInfo, volumePath string, pv_ssd *v1.PersistentVolume, pvc_ssd *v1.PersistentVolumeClaim, pvc_vvol *v1.PersistentVolumeClaim) {
By("running testCleanupVSpherePVClabelselector") By("running testCleanupVSpherePVClabelselector")
if len(volumePath) > 0 { if len(volumePath) > 0 {
vsp, err := getVSphere(c) nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
vsp.DeleteVolume(volumePath)
} }
if pvc_ssd != nil { if pvc_ssd != nil {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name) framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc_ssd.Name, ns), "Failed to delete PVC ", pvc_ssd.Name)

View File

@ -17,11 +17,26 @@ limitations under the License.
package vsphere package vsphere
import ( import (
"fmt"
"github.com/vmware/govmomi" "github.com/vmware/govmomi"
"github.com/vmware/govmomi/find" "github.com/vmware/govmomi/find"
"github.com/vmware/govmomi/object" "github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/soap"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context" "golang.org/x/net/context"
"k8s.io/kubernetes/test/e2e/framework"
"path/filepath"
"strconv"
"strings" "strings"
"time"
)
const (
VolDir = "kubevols"
DefaultDiskCapacityKB = 2097152
DefaultDiskFormat = "thin"
DefaultSCSIControllerType = "lsiLogic"
VirtualMachineType = "VirtualMachine"
) )
// Represents a vSphere instance where one or more kubernetes nodes are running. // Represents a vSphere instance where one or more kubernetes nodes are running.
@ -30,6 +45,15 @@ type VSphere struct {
Client *govmomi.Client Client *govmomi.Client
} }
// VolumeOptions specifies various options for a volume.
type VolumeOptions struct {
Name string
CapacityKB int
DiskFormat string
SCSIControllerType string
Datastore string
}
// GetDatacenter returns the DataCenter Object for the given datacenterPath // GetDatacenter returns the DataCenter Object for the given datacenterPath
func (vs *VSphere) GetDatacenter(ctx context.Context, datacenterPath string) (*object.Datacenter, error) { func (vs *VSphere) GetDatacenter(ctx context.Context, datacenterPath string) (*object.Datacenter, error) {
Connect(ctx, vs) Connect(ctx, vs)
@ -37,6 +61,12 @@ func (vs *VSphere) GetDatacenter(ctx context.Context, datacenterPath string) (*o
return finder.Datacenter(ctx, datacenterPath) return finder.Datacenter(ctx, datacenterPath)
} }
// GetDatacenter returns the DataCenter Object for the given datacenterPath
func (vs *VSphere) GetDatacenterFromObjectReference(ctx context.Context, dc object.Reference) *object.Datacenter {
Connect(ctx, vs)
return object.NewDatacenter(vs.Client.Client, dc.Reference())
}
// GetAllDatacenter returns all the DataCenter Objects // GetAllDatacenter returns all the DataCenter Objects
func (vs *VSphere) GetAllDatacenter(ctx context.Context) ([]*object.Datacenter, error) { func (vs *VSphere) GetAllDatacenter(ctx context.Context) ([]*object.Datacenter, error) {
Connect(ctx, vs) Connect(ctx, vs)
@ -44,11 +74,159 @@ func (vs *VSphere) GetAllDatacenter(ctx context.Context) ([]*object.Datacenter,
return finder.DatacenterList(ctx, "*") return finder.DatacenterList(ctx, "*")
} }
// GetVMByUUID gets the VM object from the given vmUUID // GetVMByUUID gets the VM object Reference from the given vmUUID
func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Reference) (object.Reference, error) { func (vs *VSphere) GetVMByUUID(ctx context.Context, vmUUID string, dc object.Reference) (object.Reference, error) {
Connect(ctx, vs) Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dc.Reference()) datacenter := vs.GetDatacenterFromObjectReference(ctx, dc)
s := object.NewSearchIndex(vs.Client.Client) s := object.NewSearchIndex(vs.Client.Client)
vmUUID = strings.ToLower(strings.TrimSpace(vmUUID)) vmUUID = strings.ToLower(strings.TrimSpace(vmUUID))
return s.FindByUuid(ctx, datacenter, vmUUID, true, nil) return s.FindByUuid(ctx, datacenter, vmUUID, true, nil)
} }
// GetFolderByPath gets the Folder Object Reference from the given folder path
// folderPath should be the full path to folder
func (vs *VSphere) GetFolderByPath(ctx context.Context, dc object.Reference, folderPath string) (vmFolderMor types.ManagedObjectReference, err error) {
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dc.Reference())
finder := find.NewFinder(datacenter.Client(), true)
finder.SetDatacenter(datacenter)
vmFolder, err := finder.Folder(ctx, folderPath)
if err != nil {
framework.Logf("Failed to get the folder reference for %s. err: %+v", folderPath, err)
return vmFolderMor, err
}
return vmFolder.Reference(), nil
}
func (vs *VSphere) CreateVolume(volumeOptions *VolumeOptions, dataCenterRef types.ManagedObjectReference) (string, error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dataCenterRef)
var (
err error
directoryAlreadyPresent = false
)
if datacenter == nil {
err = fmt.Errorf("datacenter is nil")
return "", err
}
if volumeOptions == nil {
volumeOptions = &VolumeOptions{}
}
if volumeOptions.Datastore == "" {
volumeOptions.Datastore = vs.Config.DefaultDatastore
}
if volumeOptions.CapacityKB == 0 {
volumeOptions.CapacityKB = DefaultDiskCapacityKB
}
if volumeOptions.Name == "" {
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
}
if volumeOptions.DiskFormat == "" {
volumeOptions.DiskFormat = DefaultDiskFormat
}
if volumeOptions.SCSIControllerType == "" {
volumeOptions.SCSIControllerType = DefaultSCSIControllerType
}
finder := find.NewFinder(datacenter.Client(), true)
finder.SetDatacenter(datacenter)
ds, err := finder.Datastore(ctx, volumeOptions.Datastore)
if err != nil {
err = fmt.Errorf("Failed while searching for datastore: %s. err: %+v", volumeOptions.Datastore, err)
return "", err
}
directoryPath := filepath.Clean(ds.Path(VolDir)) + "/"
fileManager := object.NewFileManager(ds.Client())
err = fileManager.MakeDirectory(ctx, directoryPath, datacenter, false)
if err != nil {
if soap.IsSoapFault(err) {
soapFault := soap.ToSoapFault(err)
if _, ok := soapFault.VimFault().(types.FileAlreadyExists); ok {
directoryAlreadyPresent = true
framework.Logf("Directory with the path %+q is already present", directoryPath)
}
}
if !directoryAlreadyPresent {
framework.Logf("Cannot create dir %#v. err %s", directoryPath, err)
return "", err
}
}
framework.Logf("Created dir with path as %+q", directoryPath)
vmdkPath := directoryPath + volumeOptions.Name + ".vmdk"
// Create a virtual disk manager
vdm := object.NewVirtualDiskManager(ds.Client())
// Create specification for new virtual disk
vmDiskSpec := &types.FileBackedVirtualDiskSpec{
VirtualDiskSpec: types.VirtualDiskSpec{
AdapterType: volumeOptions.SCSIControllerType,
DiskType: volumeOptions.DiskFormat,
},
CapacityKb: int64(volumeOptions.CapacityKB),
}
// Create virtual disk
task, err := vdm.CreateVirtualDisk(ctx, vmdkPath, datacenter, vmDiskSpec)
if err != nil {
framework.Logf("Failed to create virtual disk: %s. err: %+v", vmdkPath, err)
return "", err
}
taskInfo, err := task.WaitForResult(ctx, nil)
if err != nil {
framework.Logf("Failed to complete virtual disk creation: %s. err: %+v", vmdkPath, err)
return "", err
}
volumePath := taskInfo.Result.(string)
canonicalDiskPath, err := getCanonicalVolumePath(ctx, datacenter, volumePath)
if err != nil {
return "", err
}
return canonicalDiskPath, nil
}
func (vs *VSphere) DeleteVolume(volumePath string, dataCenterRef types.ManagedObjectReference) error {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
Connect(ctx, vs)
datacenter := object.NewDatacenter(vs.Client.Client, dataCenterRef)
virtualDiskManager := object.NewVirtualDiskManager(datacenter.Client())
diskPath := removeStorageClusterORFolderNameFromVDiskPath(volumePath)
// Delete virtual disk
task, err := virtualDiskManager.DeleteVirtualDisk(ctx, diskPath, datacenter)
if err != nil {
framework.Logf("Failed to delete virtual disk. err: %v", err)
return err
}
err = task.Wait(ctx)
if err != nil {
framework.Logf("Failed to delete virtual disk. err: %v", err)
return err
}
return nil
}
func (vs *VSphere) IsVMPresent(vmName string, dataCenterRef types.ManagedObjectReference) (isVMPresent bool, err error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
Connect(ctx, vs)
folderMor, err := vs.GetFolderByPath(ctx, dataCenterRef, vs.Config.Folder)
if err != nil {
return
}
vmFolder := object.NewFolder(vs.Client.Client, folderMor)
vmFoldersChildren, err := vmFolder.Children(ctx)
if err != nil {
framework.Logf("Failed to get children from Folder: %s. err: %+v", vmFolder.InventoryPath, err)
return
}
for _, vmFoldersChild := range vmFoldersChildren {
if vmFoldersChild.Reference().Type == VirtualMachineType {
if object.NewVirtualMachine(vs.Client.Client, vmFoldersChild.Reference()).Name() == vmName {
return true, nil
}
}
}
return
}

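The CreateVolume method added above fills in every zero-valued field of VolumeOptions (kubevols directory, 2 GiB thin disk, lsiLogic adapter, the config's default datastore), so callers set only what they care about. A sketch of how a test might request a non-default disk through it; the wrapper function, disk name, and datastore value are placeholders, not part of this commit:

// Sketch: provisioning a custom VMDK via the CreateVolume/DeleteVolume methods
// added above. Any VolumeOptions field left zero falls back to the defaults in
// CreateVolume (kubevols dir, 2 GiB, "thin", "lsiLogic", config datastore).
package example

import (
	vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
)

func provisionCustomDisk(nodeInfo *vspheretest.NodeInfo) (volumePath string, cleanup func(), err error) {
	opts := &vspheretest.VolumeOptions{
		Name:       "e2e-vmdk-example", // placeholder name
		CapacityKB: 1 * 1024 * 1024,    // 1 GiB instead of the 2 GiB default
		DiskFormat: "eagerZeroedThick", // instead of the "thin" default
		Datastore:  "sharedVmfs-0",     // placeholder datastore
	}
	volumePath, err = nodeInfo.VSphere.CreateVolume(opts, nodeInfo.DataCenterRef)
	if err != nil {
		return "", nil, err
	}
	cleanup = func() { _ = nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef) }
	return volumePath, cleanup, nil
}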
View File

@ -25,9 +25,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1" storageV1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -71,6 +69,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
BeforeEach(func() { BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
Bootstrap(f)
client = f.ClientSet client = f.ClientSet
namespace = f.Namespace.Name namespace = f.Namespace.Name
nodeVolumeMapChan = make(chan map[string][]string) nodeVolumeMapChan = make(chan map[string][]string)
@ -90,6 +89,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
if len(nodes.Items) < 2 { if len(nodes.Items) < 2 {
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items)) framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
} }
//nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
// Verify volume count specified by the user can be satisfied // Verify volume count specified by the user can be satisfied
if volumeCount > volumesPerNode*len(nodes.Items) { if volumeCount > volumesPerNode*len(nodes.Items) {
framework.Skipf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), volumesPerNode*len(nodes.Items)) framework.Skipf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), volumesPerNode*len(nodes.Items))
@ -111,7 +111,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
It("vsphere scale tests", func() { It("vsphere scale tests", func() {
var pvcClaimList []string var pvcClaimList []string
nodeVolumeMap := make(map[k8stypes.NodeName][]string) nodeVolumeMap := make(map[string][]string)
// Volumes will be provisioned with each different types of Storage Class // Volumes will be provisioned with each different types of Storage Class
scArrays := make([]*storageV1.StorageClass, len(scNames)) scArrays := make([]*storageV1.StorageClass, len(scNames))
for index, scname := range scNames { for index, scname := range scNames {
@ -137,22 +137,19 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
scArrays[index] = sc scArrays[index] = sc
} }
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
volumeCountPerInstance := volumeCount / numberOfInstances volumeCountPerInstance := volumeCount / numberOfInstances
for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ { for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
if instanceCount == numberOfInstances-1 { if instanceCount == numberOfInstances-1 {
volumeCountPerInstance = volumeCount volumeCountPerInstance = volumeCount
} }
volumeCount = volumeCount - volumeCountPerInstance volumeCount = volumeCount - volumeCountPerInstance
go VolumeCreateAndAttach(client, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan, vsp) go VolumeCreateAndAttach(client, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan)
} }
// Get the list of all volumes attached to each node from the go routines by reading the data from the channel // Get the list of all volumes attached to each node from the go routines by reading the data from the channel
for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ { for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
for node, volumeList := range <-nodeVolumeMapChan { for node, volumeList := range <-nodeVolumeMapChan {
nodeVolumeMap[k8stypes.NodeName(node)] = append(nodeVolumeMap[k8stypes.NodeName(node)], volumeList...) nodeVolumeMap[node] = append(nodeVolumeMap[node], volumeList...)
} }
} }
podList, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{}) podList, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
@ -163,7 +160,7 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
By("Waiting for volumes to be detached from the node") By("Waiting for volumes to be detached from the node")
err = waitForVSphereDisksToDetach(client, vsp, nodeVolumeMap) err = waitForVSphereDisksToDetach(nodeVolumeMap)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
for _, pvcClaim := range pvcClaimList { for _, pvcClaim := range pvcClaimList {
@ -185,7 +182,7 @@ func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
} }
// VolumeCreateAndAttach peforms create and attach operations of vSphere persistent volumes at scale // VolumeCreateAndAttach peforms create and attach operations of vSphere persistent volumes at scale
func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string, vsp *vsphere.VSphere) { func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string) {
defer GinkgoRecover() defer GinkgoRecover()
nodeVolumeMap := make(map[string][]string) nodeVolumeMap := make(map[string][]string)
nodeSelectorIndex := 0 nodeSelectorIndex := 0
@ -215,7 +212,7 @@ func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*s
nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath) nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
} }
By("Verify the volume is accessible and available in the pod") By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp) verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
nodeSelectorIndex++ nodeSelectorIndex++
} }
nodeVolumeMapChan <- nodeVolumeMap nodeVolumeMapChan <- nodeVolumeMap

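The scale test fans volume creation out over several goroutines: each VolumeCreateAndAttach instance reports the volumes it attached, keyed by node name, on nodeVolumeMapChan, and the test body merges those maps before waiting for detach. A generic, self-contained sketch of that fan-out/aggregate pattern; worker count and names are illustrative only, not values from this commit:

// Sketch of the channel-based aggregation used by the scale test above.
package main

import "fmt"

func main() {
	const workers = 3
	ch := make(chan map[string][]string)

	for i := 0; i < workers; i++ {
		go func(i int) {
			// Each worker sends the volumes it attached, keyed by node name.
			ch <- map[string][]string{fmt.Sprintf("node-%d", i%2): {fmt.Sprintf("vol-%d", i)}}
		}(i)
	}

	// Merge one result per worker into a single node-to-volumes map.
	merged := make(map[string][]string)
	for i := 0; i < workers; i++ {
		for node, vols := range <-ch {
			merged[node] = append(merged[node], vols...)
		}
	}
	fmt.Println(merged)
}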
View File

@@ -22,7 +22,6 @@ import (
 	. "github.com/onsi/gomega"
 	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -60,6 +59,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
 		framework.SkipUnlessProviderIs("vsphere")
 		namespace = f.Namespace.Name
 		client = f.ClientSet
+		Bootstrap(f)
 	})
 	AfterEach(func() {
 		framework.Logf("Deleting all statefulset in namespace: %v", namespace)
@@ -104,9 +104,6 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
 		Expect(scaledownErr).NotTo(HaveOccurred())
 		statefulsetTester.WaitForStatusReadyReplicas(statefulset, replicas-1)
-		vsp, err := getVSphere(client)
-		Expect(err).NotTo(HaveOccurred())
 		// After scale down, verify vsphere volumes are detached from deleted pods
 		By("Verify Volumes are detached from Nodes after Statefulsets is scaled down")
 		for _, sspod := range ssPodsBeforeScaleDown.Items {
@@ -117,7 +114,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
 				if volumespec.PersistentVolumeClaim != nil {
 					vSpherediskPath := getvSphereVolumePathFromClaim(client, statefulset.Namespace, volumespec.PersistentVolumeClaim.ClaimName)
 					framework.Logf("Waiting for Volume: %q to detach from Node: %q", vSpherediskPath, sspod.Spec.NodeName)
-					Expect(waitForVSphereDiskToDetach(client, vsp, vSpherediskPath, types.NodeName(sspod.Spec.NodeName))).NotTo(HaveOccurred())
+					Expect(waitForVSphereDiskToDetach(vSpherediskPath, sspod.Spec.NodeName)).NotTo(HaveOccurred())
 				}
 			}
 		}
@@ -146,7 +143,7 @@ var _ = utils.SIGDescribe("vsphere statefulset", func() {
 				framework.Logf("Verify Volume: %q is attached to the Node: %q", vSpherediskPath, sspod.Spec.NodeName)
 				// Verify scale up has re-attached the same volumes and not introduced new volume
 				Expect(volumesBeforeScaleDown[vSpherediskPath] == "").To(BeFalse())
-				isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(client, vsp, vSpherediskPath, types.NodeName(sspod.Spec.NodeName))
+				isVolumeAttached, verifyDiskAttachedError := diskIsAttached(vSpherediskPath, sspod.Spec.NodeName)
 				Expect(isVolumeAttached).To(BeTrue())
 				Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
 			}

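Across these files the helper signatures lose the clientset and *vsphere.VSphere parameters and take the node name as a plain string, since the bootstrapped node map can resolve the VM on its own. A sketch of how a caller might combine the refactored helpers; the wrapper below is illustrative, not part of the commit, and it would have to live inside the test/e2e/storage/vsphere package (where those unexported helpers and the framework/clientset/v1/gomega imports already exist):

// Illustrative wrapper over the refactored helpers diskIsAttached and
// waitForVSphereDiskToDetach shown in this commit.
func expectDetachAfterPodDelete(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) {
	// Helpers now take the node name string directly; no clientset or
	// cloud-provider handle is threaded through.
	attached, err := diskIsAttached(volPath, pod.Spec.NodeName)
	Expect(err).NotTo(HaveOccurred())
	Expect(attached).To(BeTrue(), "disk is not attached with the node")

	framework.ExpectNoError(framework.DeletePodWithWait(f, client, pod))
	framework.ExpectNoError(waitForVSphereDiskToDetach(volPath, pod.Spec.NodeName))
}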
View File

@@ -25,7 +25,6 @@ import (
 	"k8s.io/api/core/v1"
 	storageV1 "k8s.io/api/storage/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -125,8 +124,7 @@ var _ = utils.SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", fun
 func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.Interface, namespace string, instanceId string, sc *storageV1.StorageClass, iterations int, wg *sync.WaitGroup) {
 	defer wg.Done()
 	defer GinkgoRecover()
-	vsp, err := getVSphere(f.ClientSet)
-	Expect(err).NotTo(HaveOccurred())
 	for iterationCount := 0; iterationCount < iterations; iterationCount++ {
 		logPrefix := fmt.Sprintf("Instance: [%v], Iteration: [%v] :", instanceId, iterationCount+1)
 		By(fmt.Sprintf("%v Creating PVC using the Storage Class: %v", logPrefix, sc.Name))
@@ -153,19 +151,19 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
 		Expect(err).NotTo(HaveOccurred())
 		By(fmt.Sprintf("%v Verifing the volume: %v is attached to the node VM: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
-		isVolumeAttached, verifyDiskAttachedError := verifyVSphereDiskAttached(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, types.NodeName(pod.Spec.NodeName))
+		isVolumeAttached, verifyDiskAttachedError := diskIsAttached(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
 		Expect(isVolumeAttached).To(BeTrue())
 		Expect(verifyDiskAttachedError).NotTo(HaveOccurred())
 		By(fmt.Sprintf("%v Verifing the volume: %v is accessible in the pod: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Name))
-		verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
+		verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
 		By(fmt.Sprintf("%v Deleting pod: %v", logPrefix, pod.Name))
 		err = framework.DeletePodWithWait(f, client, pod)
 		Expect(err).NotTo(HaveOccurred())
 		By(fmt.Sprintf("%v Waiting for volume: %v to be detached from the node: %v", logPrefix, persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName))
-		err = waitForVSphereDiskToDetach(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, types.NodeName(pod.Spec.NodeName))
+		err = waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
 		Expect(err).NotTo(HaveOccurred())
 		By(fmt.Sprintf("%v Deleting the Claim: %v", logPrefix, pvclaim.Name))

View File

@ -19,30 +19,31 @@ package vsphere
import ( import (
"fmt" "fmt"
"path/filepath" "path/filepath"
"strconv"
"time" "time"
"github.com/golang/glog"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vmware/govmomi/object" "github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/mo"
vim25types "github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1" storage "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/pkg/volume/util/volumehelper" "k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
"context"
"github.com/vmware/govmomi/find" "github.com/vmware/govmomi/find"
vimtypes "github.com/vmware/govmomi/vim25/types" vimtypes "github.com/vmware/govmomi/vim25/types"
"regexp"
"strings"
) )
const ( const (
@ -51,6 +52,8 @@ const (
storageclass2 = "sc-vsan" storageclass2 = "sc-vsan"
storageclass3 = "sc-spbm" storageclass3 = "sc-spbm"
storageclass4 = "sc-user-specified-ds" storageclass4 = "sc-user-specified-ds"
DummyDiskName = "kube-dummyDisk.vmdk"
ProviderPrefix = "vsphere://"
) )
// volumeState represents the state of a volume. // volumeState represents the state of a volume.
@ -61,37 +64,16 @@ const (
volumeStateAttached volumeState = 2 volumeStateAttached volumeState = 2
) )
// Sanity check for vSphere testing. Verify the persistent disk attached to the node.
func verifyVSphereDiskAttached(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) (bool, error) {
var (
isAttached bool
err error
)
if vsp == nil {
vsp, err = getVSphere(c)
Expect(err).NotTo(HaveOccurred())
}
isAttached, err = vsp.DiskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
return isAttached, err
}
// Wait until vsphere volumes are detached from the list of nodes or time out after 5 minutes // Wait until vsphere volumes are detached from the list of nodes or time out after 5 minutes
func waitForVSphereDisksToDetach(c clientset.Interface, vsp *vsphere.VSphere, nodeVolumes map[types.NodeName][]string) error { func waitForVSphereDisksToDetach(nodeVolumes map[string][]string) error {
var ( var (
err error err error
disksAttached = true disksAttached = true
detachTimeout = 5 * time.Minute detachTimeout = 5 * time.Minute
detachPollTime = 10 * time.Second detachPollTime = 10 * time.Second
) )
if vsp == nil {
vsp, err = getVSphere(c)
if err != nil {
return err
}
}
err = wait.Poll(detachPollTime, detachTimeout, func() (bool, error) { err = wait.Poll(detachPollTime, detachTimeout, func() (bool, error) {
attachedResult, err := vsp.DisksAreAttached(nodeVolumes) attachedResult, err := disksAreAttached(nodeVolumes)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -117,7 +99,7 @@ func waitForVSphereDisksToDetach(c clientset.Interface, vsp *vsphere.VSphere, no
} }
// Wait until vsphere vmdk moves to expected state on the given node, or time out after 6 minutes // Wait until vsphere vmdk moves to expected state on the given node, or time out after 6 minutes
func waitForVSphereDiskStatus(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName, expectedState volumeState) error { func waitForVSphereDiskStatus(volumePath string, nodeName string, expectedState volumeState) error {
var ( var (
err error err error
diskAttached bool diskAttached bool
@ -137,7 +119,7 @@ func waitForVSphereDiskStatus(c clientset.Interface, vsp *vsphere.VSphere, volum
} }
err = wait.Poll(pollTime, timeout, func() (bool, error) { err = wait.Poll(pollTime, timeout, func() (bool, error) {
diskAttached, err = verifyVSphereDiskAttached(c, vsp, volumePath, nodeName) diskAttached, err = diskIsAttached(volumePath, nodeName)
if err != nil { if err != nil {
return true, err return true, err
} }
@ -161,13 +143,13 @@ func waitForVSphereDiskStatus(c clientset.Interface, vsp *vsphere.VSphere, volum
} }
// Wait until vsphere vmdk is attached from the given node or time out after 6 minutes // Wait until vsphere vmdk is attached from the given node or time out after 6 minutes
func waitForVSphereDiskToAttach(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error { func waitForVSphereDiskToAttach(volumePath string, nodeName string) error {
return waitForVSphereDiskStatus(c, vsp, volumePath, nodeName, volumeStateAttached) return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateAttached)
} }
// Wait until vsphere vmdk is detached from the given node or time out after 6 minutes // Wait until vsphere vmdk is detached from the given node or time out after 6 minutes
func waitForVSphereDiskToDetach(c clientset.Interface, vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error { func waitForVSphereDiskToDetach(volumePath string, nodeName string) error {
return waitForVSphereDiskStatus(c, vsp, volumePath, nodeName, volumeStateDetached) return waitForVSphereDiskStatus(volumePath, nodeName, volumeStateDetached)
} }
// function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels // function to create vsphere volume spec with given VMDK volume path, Reclaim Policy and labels
@ -241,27 +223,6 @@ func getVSpherePersistentVolumeClaimSpec(namespace string, labels map[string]str
return pvc return pvc
} }
// function to create vmdk volume
func createVSphereVolume(vsp *vsphere.VSphere, volumeOptions *vclib.VolumeOptions) (string, error) {
var (
volumePath string
err error
)
if volumeOptions == nil {
volumeOptions = new(vclib.VolumeOptions)
volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
}
volumePath, err = vsp.CreateVolume(volumeOptions)
Expect(err).NotTo(HaveOccurred())
return volumePath, nil
}
// CreateVSphereVolume creates a vmdk volume
func CreateVSphereVolume(vsp *vsphere.VSphere, volumeOptions *vclib.VolumeOptions) (string, error) {
return createVSphereVolume(vsp, volumeOptions)
}
// function to write content to the volume backed by given PVC // function to write content to the volume backed by given PVC
func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) { func writeContentToVSpherePV(client clientset.Interface, pvc *v1.PersistentVolumeClaim, expectedContent string) {
utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data") utils.RunInPodWithVolume(client, pvc.Namespace, pvc.Name, "echo "+expectedContent+" > /mnt/test/data")
@ -426,12 +387,12 @@ func createEmptyFilesOnVSphereVolume(namespace string, podName string, filePaths
} }
// verify volumes are attached to the node and are accessible in pod // verify volumes are attached to the node and are accessible in pod
func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume, vsp *vsphere.VSphere) { func verifyVSphereVolumesAccessible(c clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume) {
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
namespace := pod.Namespace namespace := pod.Namespace
for index, pv := range persistentvolumes { for index, pv := range persistentvolumes {
// Verify disks are attached to the node // Verify disks are attached to the node
isAttached, err := verifyVSphereDiskAttached(c, vsp, pv.Spec.VsphereVolume.VolumePath, types.NodeName(nodeName)) isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath)) Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk %v is not attached with the node", pv.Spec.VsphereVolume.VolumePath))
// Verify Volumes are accessible // Verify Volumes are accessible
@ -450,29 +411,182 @@ func getvSphereVolumePathFromClaim(client clientset.Interface, namespace string,
return pv.Spec.VsphereVolume.VolumePath return pv.Spec.VsphereVolume.VolumePath
} }
func addNodesToVCP(vsp *vsphere.VSphere, c clientset.Interface) error { // Get canonical volume path for volume Path.
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{}) // Example1: The canonical path for volume path - [vsanDatastore] kubevols/volume.vmdk will be [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk
// Example2: The canonical path for volume path - [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/volume.vmdk will be same as volume Path.
func getCanonicalVolumePath(ctx context.Context, dc *object.Datacenter, volumePath string) (string, error) {
var folderID string
canonicalVolumePath := volumePath
dsPathObj, err := getDatastorePathObjFromVMDiskPath(volumePath)
if err != nil { if err != nil {
return err return "", err
} }
for _, node := range nodes.Items { dsPath := strings.Split(strings.TrimSpace(dsPathObj.Path), "/")
vsp.NodeAdded(&node) if len(dsPath) <= 1 {
return canonicalVolumePath, nil
} }
return nil datastore := dsPathObj.Datastore
dsFolder := dsPath[0]
// Get the datastore folder ID if datastore or folder doesn't exist in datastoreFolderIDMap
if !isValidUUID(dsFolder) {
dummyDiskVolPath := "[" + datastore + "] " + dsFolder + "/" + DummyDiskName
// Querying a non-existent dummy disk on the datastore folder.
// It would fail and return an folder ID in the error message.
_, err := getVirtualDiskPage83Data(ctx, dc, dummyDiskVolPath)
if err != nil {
re := regexp.MustCompile("File (.*?) was not found")
if match := re.FindStringSubmatch(err.Error()); len(match) > 1 {
canonicalVolumePath = match[1]
}
}
}
diskPath := getPathFromVMDiskPath(canonicalVolumePath)
if diskPath == "" {
return "", fmt.Errorf("Failed to parse canonicalVolumePath: %s in getcanonicalVolumePath method", canonicalVolumePath)
}
folderID = strings.Split(strings.TrimSpace(diskPath), "/")[0]
canonicalVolumePath = strings.Replace(volumePath, dsFolder, folderID, 1)
return canonicalVolumePath, nil
} }
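For reviewers, a minimal sketch (not part of this change) of the folder-ID extraction that getCanonicalVolumePath relies on; the error text and function name below are hypothetical examples, not taken from the commit:
func exampleFolderIDFromError() {
	// Hypothetical error text returned by QueryVirtualDiskUuid for a missing vmdk.
	errMsg := "File [vsanDatastore] 25d8b159-948c-4b73-e499-02001ad1b044/kube-dummyDisk.vmdk was not found"
	re := regexp.MustCompile("File (.*?) was not found")
	if match := re.FindStringSubmatch(errMsg); len(match) > 1 {
		// match[1] is the dummy disk path with the folder name replaced by its
		// folder ID; the caller then extracts the folder ID from that path.
		fmt.Println(match[1])
	}
}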
func getVSphere(c clientset.Interface) (*vsphere.VSphere, error) { // getPathFromVMDiskPath retrieves the path from VM Disk Path.
vsp, err := vsphere.GetVSphere() // Example: For vmDiskPath - [vsanDatastore] kubevols/volume.vmdk, the path is kubevols/volume.vmdk
func getPathFromVMDiskPath(vmDiskPath string) string {
datastorePathObj := new(object.DatastorePath)
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
framework.Logf("Failed to parse vmDiskPath: %s", vmDiskPath)
return ""
}
return datastorePathObj.Path
}
// getDatastorePathObjFromVMDiskPath gets the DatastorePath object from a VM disk path.
func getDatastorePathObjFromVMDiskPath(vmDiskPath string) (*object.DatastorePath, error) {
datastorePathObj := new(object.DatastorePath)
isSuccess := datastorePathObj.FromString(vmDiskPath)
if !isSuccess {
framework.Logf("Failed to parse volPath: %s", vmDiskPath)
return nil, fmt.Errorf("Failed to parse volPath: %s", vmDiskPath)
}
return datastorePathObj, nil
}
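A small sketch (not part of this change) of what govmomi's DatastorePath parser yields for a typical test volume path, to make the Datastore/Path split concrete; the function name and input path are illustrative only:
func exampleDatastorePathParsing() {
	dsPathObj := new(object.DatastorePath)
	if dsPathObj.FromString("[vsanDatastore] kubevols/e2e-vmdk-1234.vmdk") {
		// dsPathObj.Datastore is "vsanDatastore",
		// dsPathObj.Path is "kubevols/e2e-vmdk-1234.vmdk".
		fmt.Printf("datastore=%s path=%s\n", dsPathObj.Datastore, dsPathObj.Path)
	}
}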
// getVirtualDiskPage83Data gets the virtual disk UUID by diskPath
func getVirtualDiskPage83Data(ctx context.Context, dc *object.Datacenter, diskPath string) (string, error) {
if len(diskPath) > 0 && filepath.Ext(diskPath) != ".vmdk" {
diskPath += ".vmdk"
}
vdm := object.NewVirtualDiskManager(dc.Client())
// Returns uuid of vmdk virtual disk
diskUUID, err := vdm.QueryVirtualDiskUuid(ctx, diskPath, dc)
if err != nil { if err != nil {
glog.Warningf("QueryVirtualDiskUuid failed for diskPath: %q. err: %+v", diskPath, err)
return "", err
}
diskUUID = formatVirtualDiskUUID(diskUUID)
return diskUUID, nil
}
// formatVirtualDiskUUID removes any spaces and hyphens in UUID
// Example UUID input is 42375390-71f9-43a3-a770-56803bcd7baa and output after format is 4237539071f943a3a77056803bcd7baa
func formatVirtualDiskUUID(uuid string) string {
uuidWithNoSpace := strings.Replace(uuid, " ", "", -1)
uuidWithNoHyphens := strings.Replace(uuidWithNoSpace, "-", "", -1)
return strings.ToLower(uuidWithNoHyphens)
}
// isValidUUID checks if the string is a valid UUID.
func isValidUUID(uuid string) bool {
r := regexp.MustCompile("^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$")
return r.MatchString(uuid)
}
// removeStorageClusterORFolderNameFromVDiskPath removes the cluster or folder path from the vDiskPath
// for vDiskPath [DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, return value is [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
// for vDiskPath [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk, the return value remains the same: [sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk
func removeStorageClusterORFolderNameFromVDiskPath(vDiskPath string) string {
datastore := regexp.MustCompile("\\[(.*?)\\]").FindStringSubmatch(vDiskPath)[1]
if filepath.Base(datastore) != datastore {
vDiskPath = strings.Replace(vDiskPath, datastore, filepath.Base(datastore), 1)
}
return vDiskPath
}
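A quick sketch (not part of this change) showing the datastore-cluster prefix being stripped before a path is canonicalized against vCenter; the wrapper function and input path are hypothetical:
func exampleStripDatastoreCluster() {
	in := "[DatastoreCluster/sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk"
	out := removeStorageClusterORFolderNameFromVDiskPath(in)
	// out is "[sharedVmfs-0] kubevols/e2e-vmdk-1234.vmdk"
	fmt.Println(out)
}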
// isDiskAttached checks if disk is attached to the VM.
func isDiskAttached(ctx context.Context, vm *object.VirtualMachine, diskPath string) (bool, error) {
device, err := getVirtualDeviceByPath(ctx, vm, diskPath)
if err != nil {
return false, err
}
if device != nil {
return true, nil
}
return false, nil
}
// getVirtualDeviceByPath gets the virtual device by path
func getVirtualDeviceByPath(ctx context.Context, vm *object.VirtualMachine, diskPath string) (vim25types.BaseVirtualDevice, error) {
vmDevices, err := vm.Device(ctx)
if err != nil {
framework.Logf("Failed to get the devices for VM: %q. err: %+v", vm.InventoryPath, err)
return nil, err return nil, err
} }
addNodesToVCP(vsp, c)
return vsp, nil // filter vm devices to retrieve device for the given vmdk file identified by disk path
for _, device := range vmDevices {
if vmDevices.TypeName(device) == "VirtualDisk" {
virtualDevice := device.GetVirtualDevice()
if backing, ok := virtualDevice.Backing.(*vim25types.VirtualDiskFlatVer2BackingInfo); ok {
if matchVirtualDiskAndVolPath(backing.FileName, diskPath) {
framework.Logf("Found VirtualDisk backing with filename %q for diskPath %q", backing.FileName, diskPath)
return device, nil
}
}
}
}
return nil, nil
} }
// GetVSphere returns vsphere cloud provider func matchVirtualDiskAndVolPath(diskPath, volPath string) bool {
func GetVSphere(c clientset.Interface) (*vsphere.VSphere, error) { fileExt := ".vmdk"
return getVSphere(c) diskPath = strings.TrimSuffix(diskPath, fileExt)
volPath = strings.TrimSuffix(volPath, fileExt)
return diskPath == volPath
}
// convertVolPathsToDevicePaths strips the cluster or folder path from each volPath and converts it to its canonical path
func convertVolPathsToDevicePaths(ctx context.Context, nodeVolumes map[string][]string) (map[string][]string, error) {
vmVolumes := make(map[string][]string)
for nodeName, volPaths := range nodeVolumes {
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
datacenter := nodeInfo.VSphere.GetDatacenterFromObjectReference(ctx, nodeInfo.DataCenterRef)
for i, volPath := range volPaths {
deviceVolPath, err := convertVolPathToDevicePath(ctx, datacenter, volPath)
if err != nil {
framework.Logf("Failed to convert vsphere volume path %s to device path for volume %s. err: %+v", volPath, deviceVolPath, err)
return nil, err
}
volPaths[i] = deviceVolPath
}
vmVolumes[nodeName] = volPaths
}
return vmVolumes, nil
}
func convertVolPathToDevicePath(ctx context.Context, dc *object.Datacenter, volPath string) (string, error) {
volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
// Get the canonical volume path for volPath.
canonicalVolumePath, err := getCanonicalVolumePath(ctx, dc, volPath)
if err != nil {
framework.Logf("Failed to get canonical vsphere volume path for volume: %s. err: %+v", volPath, err)
return "", err
}
// If the canonical volume path does not have a .vmdk extension, add it before returning.
if len(canonicalVolumePath) > 0 && filepath.Ext(canonicalVolumePath) != ".vmdk" {
canonicalVolumePath += ".vmdk"
}
return canonicalVolumePath, nil
} }
// get .vmx file path for a virtual machine // get .vmx file path for a virtual machine
@ -567,3 +681,55 @@ func registerNodeVM(nodeName, workingDir, vmxFilePath string, rpool *object.Reso
poweronNodeVM(nodeName, vm) poweronNodeVM(nodeName, vm)
} }
func disksAreAttached(nodeVolumes map[string][]string) (nodeVolumesAttachMap map[string]map[string]bool, err error) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
disksAttached := make(map[string]map[string]bool)
if len(nodeVolumes) == 0 {
return disksAttached, nil
}
// Convert volPaths into canonical form so that they can be compared with the VM device paths.
vmVolumes, err := convertVolPathsToDevicePaths(ctx, nodeVolumes)
if err != nil {
framework.Logf("Failed to convert volPaths to devicePaths: %+v. err: %+v", nodeVolumes, err)
return nil, err
}
for vm, volumes := range vmVolumes {
volumeAttachedMap := make(map[string]bool)
for _, volume := range volumes {
attached, err := diskIsAttached(volume, vm)
if err != nil {
return nil, err
}
volumeAttachedMap[volume] = attached
}
disksAttached[vm] = volumeAttachedMap
}
return disksAttached, nil
}
// diskIsAttached returns whether the given volume is attached to the VM, using controllers supported by the plugin.
func diskIsAttached(volPath string, nodeName string) (bool, error) {
// Create context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
Connect(ctx, nodeInfo.VSphere)
vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
volPath = removeStorageClusterORFolderNameFromVDiskPath(volPath)
attached, err := isDiskAttached(ctx, vm, volPath)
if err != nil {
framework.Logf("diskIsAttached failed to determine whether disk %q is still attached on node %q",
volPath,
nodeName)
}
framework.Logf("diskIsAttached result: %v and error: %v, for volume: %s", attached, err, volPath)
return attached, err
}
func getUUIDFromProviderID(providerID string) string {
return strings.TrimPrefix(providerID, ProviderPrefix)
}
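To show how the helpers above are meant to be driven from a test, here is a hedged usage sketch (not part of this change); the node names, volume paths, and function name are hypothetical, and Bootstrap(f) must already have populated TestContext.NodeMapper:
func exampleDisksAreAttached() {
	nodeVolumes := map[string][]string{
		"kubernetes-node1": {"[vsanDatastore] kubevols/e2e-vmdk-1.vmdk"},
		"kubernetes-node2": {"[vsanDatastore] kubevols/e2e-vmdk-2.vmdk"},
	}
	attachedMap, err := disksAreAttached(nodeVolumes)
	if err != nil {
		framework.Logf("disksAreAttached failed: %+v", err)
		return
	}
	for node, volumes := range attachedMap {
		for volPath, attached := range volumes {
			framework.Logf("node %s, volume %s, attached: %t", node, volPath, attached)
		}
	}
}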

View File

@ -18,13 +18,13 @@ package vsphere
import ( import (
"fmt" "fmt"
"math/rand"
"time"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -47,6 +47,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
namespace string namespace string
scParameters map[string]string scParameters map[string]string
clusterDatastore string clusterDatastore string
nodeInfo *NodeInfo
) )
BeforeEach(func() { BeforeEach(func() {
@ -54,6 +55,11 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
Bootstrap(f) Bootstrap(f)
client = f.ClientSet client = f.ClientSet
namespace = f.Namespace.Name namespace = f.Namespace.Name
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
rand.Seed(time.Now().Unix())
nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodeList.Items[rand.Int()%len(nodeList.Items)].Name)
scParameters = make(map[string]string) scParameters = make(map[string]string)
clusterDatastore = GetAndExpectStringEnvVar(VCPClusterDatastore) clusterDatastore = GetAndExpectStringEnvVar(VCPClusterDatastore)
}) })
@ -70,21 +76,19 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
It("verify static provisioning on clustered datastore", func() { It("verify static provisioning on clustered datastore", func() {
var volumePath string var volumePath string
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
By("creating a test vsphere volume") By("creating a test vsphere volume")
volumeOptions := new(vclib.VolumeOptions) volumeOptions := new(VolumeOptions)
volumeOptions.CapacityKB = 2097152 volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + namespace volumeOptions.Name = "e2e-vmdk-" + namespace
volumeOptions.Datastore = clusterDatastore volumeOptions.Datastore = clusterDatastore
volumePath, err = createVSphereVolume(vsp, volumeOptions) volumePath, err := nodeInfo.VSphere.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defer func() { defer func() {
By("Deleting the vsphere volume") By("Deleting the vsphere volume")
vsp.DeleteVolume(volumePath) nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}() }()
podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil) podspec := getVSpherePodSpecWithVolumePaths([]string{volumePath}, nil, nil)
@ -98,10 +102,10 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
// get fresh pod info // get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{}) pod, err = client.CoreV1().Pods(namespace).Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
nodeName := types.NodeName(pod.Spec.NodeName) nodeName := pod.Spec.NodeName
By("Verifying volume is attached") By("Verifying volume is attached")
isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, nodeName) isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node: %v", volumePath, nodeName)) Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node: %v", volumePath, nodeName))
@ -110,7 +114,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Waiting for volumes to be detached from the node") By("Waiting for volumes to be detached from the node")
err = waitForVSphereDiskToDetach(client, vsp, volumePath, nodeName) err = waitForVSphereDiskToDetach(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })

View File

@ -17,15 +17,17 @@ limitations under the License.
package vsphere package vsphere
import ( import (
"math/rand"
"path/filepath" "path/filepath"
"time"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/vmware/govmomi/object"
"github.com/vmware/govmomi/vim25/types" "github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context" "golang.org/x/net/context"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stype "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -70,7 +72,8 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
namespace = f.Namespace.Name namespace = f.Namespace.Name
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if len(nodeList.Items) != 0 { if len(nodeList.Items) != 0 {
nodeName = nodeList.Items[0].Name rand.Seed(time.Now().Unix())
nodeName = nodeList.Items[rand.Int()%len(nodeList.Items)].Name
} else { } else {
framework.Failf("Unable to find ready and schedulable Node") framework.Failf("Unable to find ready and schedulable Node")
} }
@ -147,19 +150,21 @@ func invokeTest(f *framework.Framework, client clientset.Interface, namespace st
pod, err := client.CoreV1().Pods(namespace).Create(podSpec) pod, err := client.CoreV1().Pods(namespace).Create(podSpec)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
verifyVSphereDiskAttached(client, vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(nodeName))
By("Waiting for pod to be running") By("Waiting for pod to be running")
Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed()) Expect(framework.WaitForPodNameRunningInNamespace(client, pod.Name, namespace)).To(Succeed())
isAttached, err := diskIsAttached(pv.Spec.VsphereVolume.VolumePath, nodeName)
Expect(isAttached).To(BeTrue())
Expect(err).NotTo(HaveOccurred())
By("Verify Disk Format")
Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed") Expect(verifyDiskFormat(client, nodeName, pv.Spec.VsphereVolume.VolumePath, diskFormat)).To(BeTrue(), "DiskFormat Verification Failed")
var volumePaths []string var volumePaths []string
volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath) volumePaths = append(volumePaths, pv.Spec.VsphereVolume.VolumePath)
By("Delete pod and wait for volume to be detached from node") By("Delete pod and wait for volume to be detached from node")
deletePodAndWaitForVolumeToDetach(f, client, pod, vsp, nodeName, volumePaths) deletePodAndWaitForVolumeToDetach(f, client, pod, nodeName, volumePaths)
} }
@ -173,12 +178,9 @@ func verifyDiskFormat(client clientset.Interface, nodeName string, pvVolumePath
ctx, cancel := context.WithCancel(context.Background()) ctx, cancel := context.WithCancel(context.Background())
defer cancel() defer cancel()
vsp, err := getVSphere(client) nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)
Expect(err).NotTo(HaveOccurred()) vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
nodeInfo, err := vsp.NodeManager().GetNodeInfo(k8stype.NodeName(nodeName)) vmDevices, err := vm.Device(ctx)
Expect(err).NotTo(HaveOccurred())
vmDevices, err := nodeInfo.VM().Device(ctx)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
disks := vmDevices.SelectByType((*types.VirtualDisk)(nil)) disks := vmDevices.SelectByType((*types.VirtualDisk)(nil))

View File

@ -59,10 +59,6 @@ var _ = utils.SIGDescribe("Volume Disk Size [Feature:vsphere]", func() {
namespace = f.Namespace.Name namespace = f.Namespace.Name
scParameters = make(map[string]string) scParameters = make(map[string]string)
datastore = GetAndExpectStringEnvVar(StorageClassDatastoreName) datastore = GetAndExpectStringEnvVar(StorageClassDatastoreName)
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
if !(len(nodeList.Items) > 0) {
framework.Failf("Unable to find ready and schedulable Node")
}
}) })
It("verify dynamically provisioned pv using storageclass with an invalid disk size fails", func() { It("verify dynamically provisioned pv using storageclass with an invalid disk size fails", func() {

View File

@ -24,9 +24,7 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stype "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -75,8 +73,7 @@ var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() {
Bootstrap(f) Bootstrap(f)
client = f.ClientSet client = f.ClientSet
namespace = f.Namespace.Name namespace = f.Namespace.Name
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) Expect(framework.GetReadySchedulableNodesOrDie(f.ClientSet).Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
Expect(len(nodeList.Items)).NotTo(BeZero(), "Unable to find ready and schedulable Node")
}) })
It("verify fstype - ext3 formatted volume", func() { It("verify fstype - ext3 formatted volume", func() {
@ -99,28 +96,24 @@ func invokeTestForFstype(f *framework.Framework, client clientset.Interface, nam
framework.Logf("Invoking Test for fstype: %s", fstype) framework.Logf("Invoking Test for fstype: %s", fstype)
scParameters := make(map[string]string) scParameters := make(map[string]string)
scParameters["fstype"] = fstype scParameters["fstype"] = fstype
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
// Create Persistent Volume // Create Persistent Volume
By("Creating Storage Class With Fstype") By("Creating Storage Class With Fstype")
pvclaim, persistentvolumes := createVolume(client, namespace, scParameters) pvclaim, persistentvolumes := createVolume(client, namespace, scParameters)
// Create Pod and verify the persistent volume is accessible // Create Pod and verify the persistent volume is accessible
pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes, vsp) pod := createPodAndVerifyVolumeAccessible(client, namespace, pvclaim, persistentvolumes)
_, err = framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute) _, err := framework.LookForStringInPodExec(namespace, pod.Name, []string{"/bin/cat", "/mnt/volume1/fstype"}, expectedContent, time.Minute)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Detach and delete volume // Detach and delete volume
detachVolume(f, client, vsp, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
deleteVolume(client, pvclaim.Name, namespace) deleteVolume(client, pvclaim.Name, namespace)
} }
func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string) { func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interface, namespace string, fstype string) {
scParameters := make(map[string]string) scParameters := make(map[string]string)
scParameters["fstype"] = fstype scParameters["fstype"] = fstype
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
// Create Persistent Volume // Create Persistent Volume
By("Creating Storage Class With Invalid Fstype") By("Creating Storage Class With Invalid Fstype")
@ -136,7 +129,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{}) eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{})
// Detach and delete volume // Detach and delete volume
detachVolume(f, client, vsp, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath) detachVolume(f, client, pod, persistentvolumes[0].Spec.VsphereVolume.VolumePath)
deleteVolume(client, pvclaim.Name, namespace) deleteVolume(client, pvclaim.Name, namespace)
Expect(eventList.Items).NotTo(BeEmpty()) Expect(eventList.Items).NotTo(BeEmpty())
@ -167,7 +160,7 @@ func createVolume(client clientset.Interface, namespace string, scParameters map
return pvclaim, persistentvolumes return pvclaim, persistentvolumes
} }
func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume, vsp *vsphere.VSphere) *v1.Pod { func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace string, pvclaim *v1.PersistentVolumeClaim, persistentvolumes []*v1.PersistentVolume) *v1.Pod {
var pvclaims []*v1.PersistentVolumeClaim var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim) pvclaims = append(pvclaims, pvclaim)
By("Creating pod to attach PV to the node") By("Creating pod to attach PV to the node")
@ -177,16 +170,16 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st
// Asserts: Right disk is attached to the pod // Asserts: Right disk is attached to the pod
By("Verify the volume is accessible and available in the pod") By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp) verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
return pod return pod
} }
func detachVolume(f *framework.Framework, client clientset.Interface, vsp *vsphere.VSphere, pod *v1.Pod, volPath string) { func detachVolume(f *framework.Framework, client clientset.Interface, pod *v1.Pod, volPath string) {
By("Deleting pod") By("Deleting pod")
framework.DeletePodWithWait(f, client, pod) framework.DeletePodWithWait(f, client, pod)
By("Waiting for volumes to be detached from the node") By("Waiting for volumes to be detached from the node")
waitForVSphereDiskToDetach(client, vsp, volPath, k8stype.NodeName(pod.Spec.NodeName)) waitForVSphereDiskToDetach(volPath, pod.Spec.NodeName)
} }
func deleteVolume(client clientset.Interface, pvclaimName string, namespace string) { func deleteVolume(client clientset.Interface, pvclaimName string, namespace string) {

View File

@ -24,7 +24,6 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -54,6 +53,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
numNodes int numNodes int
nodeKeyValueLabelList []map[string]string nodeKeyValueLabelList []map[string]string
nodeNameList []string nodeNameList []string
nodeInfo *NodeInfo
) )
BeforeEach(func() { BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
@ -67,7 +67,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
if numNodes < 2 { if numNodes < 2 {
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items)) framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
} }
nodeInfo = TestContext.NodeMapper.GetNodeInfo(nodes.Items[0].Name)
for i := 0; i < numNodes; i++ { for i := 0; i < numNodes; i++ {
nodeName := nodes.Items[i].Name nodeName := nodes.Items[i].Name
nodeNameList = append(nodeNameList, nodeName) nodeNameList = append(nodeNameList, nodeName)
@ -80,15 +80,11 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
}) })
It("verify volume remains attached after master kubelet restart", func() { It("verify volume remains attached after master kubelet restart", func() {
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
// Create pod on each node // Create pod on each node
for i := 0; i < numNodes; i++ { for i := 0; i < numNodes; i++ {
By(fmt.Sprintf("%d: Creating a test vsphere volume", i)) By(fmt.Sprintf("%d: Creating a test vsphere volume", i))
volumePath, err := createVSphereVolume(vsp, nil) volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i])) By(fmt.Sprintf("Creating pod %d on node %v", i, nodeNameList[i]))
@ -105,9 +101,9 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
pods = append(pods, pod) pods = append(pods, pod)
nodeName := types.NodeName(pod.Spec.NodeName) nodeName := pod.Spec.NodeName
By(fmt.Sprintf("Verify volume %s is attached to the pod %v", volumePath, nodeName)) By(fmt.Sprintf("Verify volume %s is attached to the pod %s", volumePath, nodeName))
isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, types.NodeName(nodeName)) isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath)) Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
@ -115,7 +111,7 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
By("Restarting kubelet on master node") By("Restarting kubelet on master node")
masterAddress := framework.GetMasterHost() + ":22" masterAddress := framework.GetMasterHost() + ":22"
err = framework.RestartKubelet(masterAddress) err := framework.RestartKubelet(masterAddress)
Expect(err).NotTo(HaveOccurred(), "Unable to restart kubelet on master node") Expect(err).NotTo(HaveOccurred(), "Unable to restart kubelet on master node")
By("Verifying the kubelet on master node is up") By("Verifying the kubelet on master node is up")
@ -124,23 +120,22 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
for i, pod := range pods { for i, pod := range pods {
volumePath := volumePaths[i] volumePath := volumePaths[i]
nodeName := pod.Spec.NodeName
nodeName := types.NodeName(pod.Spec.NodeName)
By(fmt.Sprintf("After master restart, verify volume %v is attached to the pod %v", volumePath, nodeName)) By(fmt.Sprintf("After master restart, verify volume %v is attached to the pod %v", volumePath, nodeName))
isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePaths[i], types.NodeName(nodeName)) isAttached, err := diskIsAttached(volumePaths[i], nodeName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath)) Expect(isAttached).To(BeTrue(), fmt.Sprintf("disk: %s is not attached with the node", volumePath))
By(fmt.Sprintf("Deleting pod on node %v", nodeName)) By(fmt.Sprintf("Deleting pod on node %s", nodeName))
err = framework.DeletePodWithWait(f, client, pod) err = framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Waiting for volume %s to be detached from the node %v", volumePath, nodeName)) By(fmt.Sprintf("Waiting for volume %s to be detached from the node %s", volumePath, nodeName))
err = waitForVSphereDiskToDetach(client, vsp, volumePath, types.NodeName(nodeName)) err = waitForVSphereDiskToDetach(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Deleting volume %s", volumePath)) By(fmt.Sprintf("Deleting volume %s", volumePath))
err = vsp.DeleteVolume(volumePath) err = nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
}) })

View File

@ -24,14 +24,13 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"golang.org/x/net/context" "golang.org/x/net/context"
"github.com/vmware/govmomi/object"
vimtypes "github.com/vmware/govmomi/vim25/types" vimtypes "github.com/vmware/govmomi/vim25/types"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
extensions "k8s.io/api/extensions/v1beta1" extensions "k8s.io/api/extensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -46,8 +45,6 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
var ( var (
client clientset.Interface client clientset.Interface
namespace string namespace string
vsp *vsphere.VSphere
err error
) )
BeforeEach(func() { BeforeEach(func() {
@ -59,8 +56,6 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node") Expect(nodeList.Items).NotTo(BeEmpty(), "Unable to find ready and schedulable Node")
Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test") Expect(len(nodeList.Items) > 1).To(BeTrue(), "At least 2 nodes are required for this test")
vsp, err = getVSphere(client)
Expect(err).NotTo(HaveOccurred())
}) })
/* /*
@ -105,17 +100,17 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
podList, err := framework.GetPodsForDeployment(client, deployment) podList, err := framework.GetPodsForDeployment(client, deployment)
Expect(podList.Items).NotTo(BeEmpty()) Expect(podList.Items).NotTo(BeEmpty())
pod := podList.Items[0] pod := podList.Items[0]
node1 := types.NodeName(pod.Spec.NodeName) node1 := pod.Spec.NodeName
By(fmt.Sprintf("Verify disk is attached to the node: %v", node1)) By(fmt.Sprintf("Verify disk is attached to the node: %v", node1))
isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, node1) isAttached, err := diskIsAttached(volumePath, node1)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "Disk is not attached to the node") Expect(isAttached).To(BeTrue(), "Disk is not attached to the node")
By(fmt.Sprintf("Power off the node: %v", node1)) By(fmt.Sprintf("Power off the node: %v", node1))
nodeInfo, err := vsp.NodeManager().GetNodeInfo(node1)
Expect(err).NotTo(HaveOccurred()) nodeInfo := TestContext.NodeMapper.GetNodeInfo(node1)
vm := nodeInfo.VM() vm := object.NewVirtualMachine(nodeInfo.VSphere.Client.Client, nodeInfo.VirtualMachineRef)
ctx, _ := context.WithCancel(context.Background()) ctx, _ := context.WithCancel(context.Background())
_, err = vm.PowerOff(ctx) _, err = vm.PowerOff(ctx)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -129,11 +124,11 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
Expect(err).NotTo(HaveOccurred(), "Pod did not fail over to a different node") Expect(err).NotTo(HaveOccurred(), "Pod did not fail over to a different node")
By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2)) By(fmt.Sprintf("Waiting for disk to be attached to the new node: %v", node2))
err = waitForVSphereDiskToAttach(client, vsp, volumePath, node2) err = waitForVSphereDiskToAttach(volumePath, node2)
Expect(err).NotTo(HaveOccurred(), "Disk is not attached to the node") Expect(err).NotTo(HaveOccurred(), "Disk is not attached to the node")
By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1)) By(fmt.Sprintf("Waiting for disk to be detached from the previous node: %v", node1))
err = waitForVSphereDiskToDetach(client, vsp, volumePath, node1) err = waitForVSphereDiskToDetach(volumePath, node1)
Expect(err).NotTo(HaveOccurred(), "Disk is not detached from the node") Expect(err).NotTo(HaveOccurred(), "Disk is not detached from the node")
By(fmt.Sprintf("Power on the previous node: %v", node1)) By(fmt.Sprintf("Power on the previous node: %v", node1))
@ -144,10 +139,10 @@ var _ = utils.SIGDescribe("Node Poweroff [Feature:vsphere] [Slow] [Disruptive]",
}) })
// Wait until the pod failed over to a different node, or time out after 3 minutes // Wait until the pod failed over to a different node, or time out after 3 minutes
func waitForPodToFailover(client clientset.Interface, deployment *extensions.Deployment, oldNode types.NodeName) (types.NodeName, error) { func waitForPodToFailover(client clientset.Interface, deployment *extensions.Deployment, oldNode string) (string, error) {
var ( var (
err error err error
newNode types.NodeName newNode string
timeout = 3 * time.Minute timeout = 3 * time.Minute
pollTime = 10 * time.Second pollTime = 10 * time.Second
) )
@ -178,10 +173,10 @@ func waitForPodToFailover(client clientset.Interface, deployment *extensions.Dep
return getNodeForDeployment(client, deployment) return getNodeForDeployment(client, deployment)
} }
func getNodeForDeployment(client clientset.Interface, deployment *extensions.Deployment) (types.NodeName, error) { func getNodeForDeployment(client clientset.Interface, deployment *extensions.Deployment) (string, error) {
podList, err := framework.GetPodsForDeployment(client, deployment) podList, err := framework.GetPodsForDeployment(client, deployment)
if err != nil { if err != nil {
return "", err return "", err
} }
return types.NodeName(podList.Items[0].Spec.NodeName), nil return podList.Items[0].Spec.NodeName, nil
} }

View File

@ -25,9 +25,7 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1" storage "k8s.io/api/storage/v1"
k8stype "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -59,7 +57,6 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
persistentvolumes []*v1.PersistentVolume persistentvolumes []*v1.PersistentVolume
err error err error
volume_ops_scale int volume_ops_scale int
vsp *vsphere.VSphere
) )
BeforeEach(func() { BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
@ -77,8 +74,6 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
volume_ops_scale = DEFAULT_VOLUME_OPS_SCALE volume_ops_scale = DEFAULT_VOLUME_OPS_SCALE
} }
pvclaims = make([]*v1.PersistentVolumeClaim, volume_ops_scale) pvclaims = make([]*v1.PersistentVolumeClaim, volume_ops_scale)
vsp, err = getVSphere(client)
Expect(err).NotTo(HaveOccurred())
}) })
AfterEach(func() { AfterEach(func() {
By("Deleting PVCs") By("Deleting PVCs")
@ -115,14 +110,14 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Verify all volumes are accessible and available in the pod") By("Verify all volumes are accessible and available in the pod")
verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp) verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
By("Deleting pod") By("Deleting pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, client, pod)) framework.ExpectNoError(framework.DeletePodWithWait(f, client, pod))
By("Waiting for volumes to be detached from the node") By("Waiting for volumes to be detached from the node")
for _, pv := range persistentvolumes { for _, pv := range persistentvolumes {
waitForVSphereDiskToDetach(client, vsp, pv.Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName)) waitForVSphereDiskToDetach(pv.Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
} }
}) })
}) })

View File

@ -24,7 +24,6 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1" storageV1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
@ -161,7 +160,7 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
totalpvs [][]*v1.PersistentVolume totalpvs [][]*v1.PersistentVolume
totalpods []*v1.Pod totalpods []*v1.Pod
) )
nodeVolumeMap := make(map[types.NodeName][]string) nodeVolumeMap := make(map[string][]string)
latency = make(map[string]float64) latency = make(map[string]float64)
numPods := volumeCount / volumesPerPod numPods := volumeCount / volumesPerPod
@ -198,18 +197,14 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
elapsed = time.Since(start) elapsed = time.Since(start)
latency[AttachOp] = elapsed.Seconds() latency[AttachOp] = elapsed.Seconds()
// Verify access to the volumes
vsp, err := getVSphere(client)
Expect(err).NotTo(HaveOccurred())
for i, pod := range totalpods { for i, pod := range totalpods {
verifyVSphereVolumesAccessible(client, pod, totalpvs[i], vsp) verifyVSphereVolumesAccessible(client, pod, totalpvs[i])
} }
By("Deleting pods") By("Deleting pods")
start = time.Now() start = time.Now()
for _, pod := range totalpods { for _, pod := range totalpods {
err = framework.DeletePodWithWait(f, client, pod) err := framework.DeletePodWithWait(f, client, pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
elapsed = time.Since(start) elapsed = time.Since(start)
@ -217,12 +212,11 @@ func invokeVolumeLifeCyclePerformance(f *framework.Framework, client clientset.I
for i, pod := range totalpods { for i, pod := range totalpods {
for _, pv := range totalpvs[i] { for _, pv := range totalpvs[i] {
nodeName := types.NodeName(pod.Spec.NodeName) nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
nodeVolumeMap[nodeName] = append(nodeVolumeMap[nodeName], pv.Spec.VsphereVolume.VolumePath)
} }
} }
err = waitForVSphereDisksToDetach(client, vsp, nodeVolumeMap) err := waitForVSphereDisksToDetach(nodeVolumeMap)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Deleting the PVCs") By("Deleting the PVCs")

View File

@ -24,11 +24,8 @@ import (
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
vsphere "k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere/vclib"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
) )
@ -41,14 +38,14 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
var ( var (
c clientset.Interface c clientset.Interface
ns string ns string
vsp *vsphere.VSphere
volumePaths []string volumePaths []string
node1Name string node1Name string
node1KeyValueLabel map[string]string node1KeyValueLabel map[string]string
node2Name string node2Name string
node2KeyValueLabel map[string]string node2KeyValueLabel map[string]string
isNodeLabeled bool isNodeLabeled bool
err error nodeInfo *NodeInfo
vsp *VSphere
) )
BeforeEach(func() { BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere") framework.SkipUnlessProviderIs("vsphere")
@ -60,17 +57,17 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns) node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
isNodeLabeled = true isNodeLabeled = true
} }
nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
vsp = nodeInfo.VSphere
By("creating vmdk") By("creating vmdk")
vsp, err = getVSphere(c) volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred())
volumePath, err := createVSphereVolume(vsp, nil)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
}) })
AfterEach(func() { AfterEach(func() {
for _, volumePath := range volumePaths { for _, volumePath := range volumePaths {
vsp.DeleteVolume(volumePath) vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
} }
volumePaths = nil volumePaths = nil
}) })
@ -107,24 +104,24 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
It("should create and delete pod with the same volume source on the same worker node", func() { It("should create and delete pod with the same volume source on the same worker node", func() {
var volumeFiles []string var volumeFiles []string
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths) pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod // Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns) newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName) volumeFiles = append(volumeFiles, newEmptyFileName)
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the same node: %v", node1Name)) By(fmt.Sprintf("Creating pod on the same node: %v", node1Name))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths) pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod // Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns) newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName) volumeFiles = append(volumeFiles, newEmptyFileName)
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
}) })
/* /*
@ -147,23 +144,23 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() { It("should create and delete pod with the same volume source attach/detach to different worker nodes", func() {
var volumeFiles []string var volumeFiles []string
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths) pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod // Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns) newEmptyFileName := fmt.Sprintf("/mnt/volume1/%v_1.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName) volumeFiles = append(volumeFiles, newEmptyFileName)
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the another node: %v", node2Name)) By(fmt.Sprintf("Creating pod on the another node: %v", node2Name))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node2Name, node2KeyValueLabel, volumePaths) pod = createPodWithVolumeAndNodeSelector(c, ns, node2Name, node2KeyValueLabel, volumePaths)
newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns) newEmptyFileName = fmt.Sprintf("/mnt/volume1/%v_2.txt", ns)
volumeFiles = append(volumeFiles, newEmptyFileName) volumeFiles = append(volumeFiles, newEmptyFileName)
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod // Verify newly and previously created files present on the volume mounted on the pod
createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles) createAndVerifyFilesOnVolume(ns, pod.Name, []string{newEmptyFileName}, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node2Name, volumePaths) deletePodAndWaitForVolumeToDetach(f, c, pod, node2Name, volumePaths)
}) })
/* /*
@ -182,12 +179,12 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
It("should create and delete pod with multiple volumes from same datastore", func() { It("should create and delete pod with multiple volumes from same datastore", func() {
By("creating another vmdk") By("creating another vmdk")
volumePath, err := createVSphereVolume(vsp, nil) volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) By(fmt.Sprintf("Creating pod on the node: %v with volume: %v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths) pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod // Verify newly and previously created files present on the volume mounted on the pod
volumeFiles := []string{ volumeFiles := []string{
@ -195,9 +192,9 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
fmt.Sprintf("/mnt/volume2/%v_1.txt", ns), fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
} }
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles) createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths) pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod // Verify newly and previously created files present on the volume mounted on the pod
newEmptyFilesNames := []string{ newEmptyFilesNames := []string{
@ -224,17 +221,18 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
*/ */
It("should create and delete pod with multiple volumes from different datastore", func() { It("should create and delete pod with multiple volumes from different datastore", func() {
By("creating another vmdk on non default shared datastore") By("creating another vmdk on non default shared datastore")
var volumeOptions *vclib.VolumeOptions var volumeOptions *VolumeOptions
volumeOptions = new(vclib.VolumeOptions) volumeOptions = new(VolumeOptions)
volumeOptions.CapacityKB = 2097152 volumeOptions.CapacityKB = 2097152
volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10) volumeOptions.Name = "e2e-vmdk-" + strconv.FormatInt(time.Now().UnixNano(), 10)
volumeOptions.Datastore = GetAndExpectStringEnvVar(SecondSharedDatastore) volumeOptions.Datastore = GetAndExpectStringEnvVar(SecondSharedDatastore)
volumePath, err := createVSphereVolume(vsp, volumeOptions) volumePath, err := vsp.CreateVolume(volumeOptions, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod := createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths) pod := createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod // Verify newly and previously created files present on the volume mounted on the pod
@ -243,10 +241,10 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
fmt.Sprintf("/mnt/volume2/%v_1.txt", ns), fmt.Sprintf("/mnt/volume2/%v_1.txt", ns),
} }
createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles) createAndVerifyFilesOnVolume(ns, pod.Name, volumeFiles, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1])) By(fmt.Sprintf("Creating pod on the node: %v with volume :%v and volume: %v", node1Name, volumePaths[0], volumePaths[1]))
pod = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, volumePaths) pod = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, volumePaths)
// Create empty files on the mounted volumes on the pod to verify volume is writable // Create empty files on the mounted volumes on the pod to verify volume is writable
// Verify newly and previously created files present on the volume mounted on the pod // Verify newly and previously created files present on the volume mounted on the pod
newEmptyFileNames := []string{ newEmptyFileNames := []string{
@ -256,7 +254,7 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
volumeFiles = append(volumeFiles, newEmptyFileNames[0]) volumeFiles = append(volumeFiles, newEmptyFileNames[0])
volumeFiles = append(volumeFiles, newEmptyFileNames[1]) volumeFiles = append(volumeFiles, newEmptyFileNames[1])
createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFileNames, volumeFiles) createAndVerifyFilesOnVolume(ns, pod.Name, newEmptyFileNames, volumeFiles)
deletePodAndWaitForVolumeToDetach(f, c, pod, vsp, node1Name, volumePaths) deletePodAndWaitForVolumeToDetach(f, c, pod, node1Name, volumePaths)
}) })
/* /*
@ -289,24 +287,24 @@ var _ = utils.SIGDescribe("Volume Placement", func() {
framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name) framework.ExpectNoError(framework.DeletePodWithWait(f, c, podB), "defer: Failed to delete pod ", podB.Name)
By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name)) By(fmt.Sprintf("wait for volumes to be detached from the node: %v", node1Name))
for _, volumePath := range volumePaths { for _, volumePath := range volumePaths {
framework.ExpectNoError(waitForVSphereDiskToDetach(c, vsp, volumePath, types.NodeName(node1Name))) framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node1Name))
} }
}() }()
testvolumePathsPodA = append(testvolumePathsPodA, volumePaths[0]) testvolumePathsPodA = append(testvolumePathsPodA, volumePaths[0])
// Create another VMDK Volume // Create another VMDK Volume
By("creating another vmdk") By("creating another vmdk")
volumePath, err := createVSphereVolume(vsp, nil) volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
volumePaths = append(volumePaths, volumePath) volumePaths = append(volumePaths, volumePath)
testvolumePathsPodB = append(testvolumePathsPodA, volumePath) testvolumePathsPodB = append(testvolumePathsPodA, volumePath)
for index := 0; index < 5; index++ { for index := 0; index < 5; index++ {
By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0])) By(fmt.Sprintf("Creating pod-A on the node: %v with volume: %v", node1Name, testvolumePathsPodA[0]))
podA = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, testvolumePathsPodA) podA = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodA)
By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0])) By(fmt.Sprintf("Creating pod-B on the node: %v with volume: %v", node1Name, testvolumePathsPodB[0]))
podB = createPodWithVolumeAndNodeSelector(c, ns, vsp, node1Name, node1KeyValueLabel, testvolumePathsPodB) podB = createPodWithVolumeAndNodeSelector(c, ns, node1Name, node1KeyValueLabel, testvolumePathsPodB)
podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1) podAFileName := fmt.Sprintf("/mnt/volume1/podA_%v_%v.txt", ns, index+1)
podBFileName := fmt.Sprintf("/mnt/volume1/podB_%v_%v.txt", ns, index+1) podBFileName := fmt.Sprintf("/mnt/volume1/podB_%v_%v.txt", ns, index+1)
@@ -353,7 +351,7 @@ func testSetupVolumePlacement(client clientset.Interface, namespace string) (nod
return node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel
}
- func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, vsp *vsphere.VSphere, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod {
+ func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace string, nodeName string, nodeKeyValueLabel map[string]string, volumePaths []string) *v1.Pod {
var pod *v1.Pod
var err error
By(fmt.Sprintf("Creating pod on the node: %v", nodeName))
@@ -366,7 +364,7 @@ func createPodWithVolumeAndNodeSelector(client clientset.Interface, namespace st
By(fmt.Sprintf("Verify volume is attached to the node:%v", nodeName))
for _, volumePath := range volumePaths {
- isAttached, err := verifyVSphereDiskAttached(client, vsp, volumePath, types.NodeName(nodeName))
+ isAttached, err := diskIsAttached(volumePath, nodeName)
Expect(err).NotTo(HaveOccurred())
Expect(isAttached).To(BeTrue(), "disk:"+volumePath+" is not attached with the node")
}
@@ -383,12 +381,12 @@ func createAndVerifyFilesOnVolume(namespace string, podname string, newEmptyfile
verifyFilesExistOnVSphereVolume(namespace, podname, filesToCheck)
}
- func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, vsp *vsphere.VSphere, nodeName string, volumePaths []string) {
+ func deletePodAndWaitForVolumeToDetach(f *framework.Framework, c clientset.Interface, pod *v1.Pod, nodeName string, volumePaths []string) {
By("Deleting pod")
framework.ExpectNoError(framework.DeletePodWithWait(f, c, pod), "Failed to delete pod ", pod.Name)
By("Waiting for volume to be detached from the node")
for _, volumePath := range volumePaths {
- framework.ExpectNoError(waitForVSphereDiskToDetach(c, vsp, volumePath, types.NodeName(nodeName)))
+ framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, nodeName))
}
}
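Taken together, these placement-test hunks drop the production *vsphere.VSphere handle and lean on the test-owned NodeInfo for every vCenter interaction. The following is a minimal sketch of the resulting helper flow, assuming it compiles inside the test/e2e/storage/vsphere package where these unexported helpers live; the function name exampleAttachDetachCycle and its parameters are illustrative, not part of the commit.

// exampleAttachDetachCycle is an illustrative sketch, not part of the commit:
// it strings together the refactored helpers to show that the vSphere handle
// and datacenter reference now come from the test-owned NodeInfo.
func exampleAttachDetachCycle(f *framework.Framework, c clientset.Interface, ns string, nodeName string, nodeLabel map[string]string) {
	// Resolve the node's vSphere connection via the test NodeMapper.
	nodeInfo := TestContext.NodeMapper.GetNodeInfo(nodeName)

	// Create a VMDK in that node's datacenter and clean it up afterwards.
	volumePath, err := nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
	Expect(err).NotTo(HaveOccurred())
	defer nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)

	// Pod creation no longer takes a *vsphere.VSphere argument.
	pod := createPodWithVolumeAndNodeSelector(c, ns, nodeName, nodeLabel, []string{volumePath})

	// Attach verification is keyed by volume path and node name only.
	isAttached, err := diskIsAttached(volumePath, nodeName)
	Expect(err).NotTo(HaveOccurred())
	Expect(isAttached).To(BeTrue(), "disk:"+volumePath+" is not attached with the node")

	// Tear down and wait for detach with the slimmed-down signature.
	deletePodAndWaitForVolumeToDetach(f, c, pod, nodeName, []string{volumePath})
}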
View File
@@ -27,7 +27,6 @@ import (
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- k8stype "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -95,6 +94,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
scParameters map[string]string
policyName string
tagPolicy string
+ masterNode string
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
@@ -109,6 +109,9 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
if !(len(nodeList.Items) > 0) {
framework.Failf("Unable to find ready and schedulable Node")
}
+ masternodes, _ := framework.GetMasterAndWorkerNodesOrDie(client)
+ Expect(masternodes).NotTo(BeEmpty())
+ masterNode = masternodes.List()[0]
})
// Valid policy.
@@ -222,7 +225,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
scParameters[Datastore] = VsanDatastore
framework.Logf("Invoking test for SPBM storage policy: %+v", scParameters)
kubernetesClusterName := GetAndExpectStringEnvVar(KubernetesClusterName)
- invokeStaleDummyVMTestWithStoragePolicy(client, namespace, kubernetesClusterName, scParameters)
+ invokeStaleDummyVMTestWithStoragePolicy(client, masterNode, namespace, kubernetesClusterName, scParameters)
})
It("verify if a SPBM policy is not honored on a non-compatible datastore for dynamically provisioned pvc using storageclass", func() {
@@ -290,16 +293,14 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
- vsp, err := getVSphere(client)
- Expect(err).NotTo(HaveOccurred())
By("Verify the volume is accessible and available in the pod")
- verifyVSphereVolumesAccessible(client, pod, persistentvolumes, vsp)
+ verifyVSphereVolumesAccessible(client, pod, persistentvolumes)
By("Deleting pod")
framework.DeletePodWithWait(f, client, pod)
By("Waiting for volumes to be detached from the node")
- waitForVSphereDiskToDetach(client, vsp, persistentvolumes[0].Spec.VsphereVolume.VolumePath, k8stype.NodeName(pod.Spec.NodeName))
+ waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
}
func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, scParameters map[string]string) error {
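For the dynamically provisioned case, the post-provisioning checks now need only the PV's volume path and the node the pod landed on. Below is a rough sketch of that verification step under the same in-package assumption; exampleVerifyAndDetach is a made-up name, and the []*v1.PersistentVolume parameter type follows the obvious reading of the calls above rather than a signature shown in this diff.

// exampleVerifyAndDetach is an illustrative sketch of the post-provisioning
// checks: no *vsphere.VSphere handle is threaded through any more.
func exampleVerifyAndDetach(f *framework.Framework, client clientset.Interface, pod *v1.Pod, persistentvolumes []*v1.PersistentVolume) {
	// Confirm the provisioned volumes are usable from inside the pod.
	verifyVSphereVolumesAccessible(client, pod, persistentvolumes)

	// Delete the pod, then wait for the first volume to detach from the
	// node the pod was scheduled on.
	framework.DeletePodWithWait(f, client, pod)
	waitForVSphereDiskToDetach(persistentvolumes[0].Spec.VsphereVolume.VolumePath, pod.Spec.NodeName)
}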
@@ -321,7 +322,7 @@ func invokeInvalidPolicyTestNeg(client clientset.Interface, namespace string, sc
return fmt.Errorf("Failure message: %+q", eventList.Items[0].Message)
}
- func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, namespace string, clusterName string, scParameters map[string]string) {
+ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, masterNode string, namespace string, clusterName string, scParameters map[string]string) {
By("Creating Storage Class With storage policy params")
storageclass, err := client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec("storagepolicysc", scParameters))
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create storage class with err: %v", err))
@@ -348,7 +349,6 @@ func invokeStaleDummyVMTestWithStoragePolicy(client clientset.Interface, namespa
fnvHash.Write([]byte(vmName))
dummyVMFullName := DummyVMPrefixName + "-" + fmt.Sprint(fnvHash.Sum32())
errorMsg := "Dummy VM - " + vmName + "is still present. Failing the test.."
- vsp, err := getVSphere(client)
- Expect(err).NotTo(HaveOccurred())
- Expect(vsp.IsDummyVMPresent(dummyVMFullName)).NotTo(BeTrue(), errorMsg)
+ nodeInfo := TestContext.NodeMapper.GetNodeInfo(masterNode)
+ Expect(nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)).NotTo(BeTrue(), errorMsg)
}
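The stale dummy VM assertion likewise queries vCenter through the master node's NodeInfo rather than a production client. A compact sketch of the full lookup-and-assert path, again assuming the surrounding test package; exampleStaleVMCheck and its parameters are illustrative only.

// exampleStaleVMCheck is illustrative only: it strings together the master
// node lookup added in BeforeEach with the vCenter query used above to assert
// that the dummy VM left over from provisioning has been cleaned up.
func exampleStaleVMCheck(client clientset.Interface, dummyVMFullName, errorMsg string) {
	// Pick a master node; its NodeInfo carries the vSphere connection and
	// datacenter reference for the cluster.
	masternodes, _ := framework.GetMasterAndWorkerNodesOrDie(client)
	Expect(masternodes).NotTo(BeEmpty())
	masterNode := masternodes.List()[0]

	nodeInfo := TestContext.NodeMapper.GetNodeInfo(masterNode)

	// The dummy VM must not be present in the master's datacenter.
	Expect(nodeInfo.VSphere.IsVMPresent(dummyVMFullName, nodeInfo.DataCenterRef)).NotTo(BeTrue(), errorMsg)
}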