Merge pull request #54018 from vmware/vSphereScaleTest

Automatic merge from submit-queue (batch tested with PRs 55301, 55319, 54018, 55322, 55125). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

E2E scale test for vSphere Cloud Provider Volume lifecycle operations

This PR adds an E2E test for the vSphere Cloud Provider which will create/attach/detach volumes at scale with multiple goroutines, based on user-configurable values for the number of volumes, volumes per pod, and number of goroutines. (Since this is a scale test, the number of goroutines is kept low; it is only used to speed up the operations.)

The test performs the following tasks; a simplified sketch of the flow is shown after the list.

1. Create Storage Classes of 4 categories (Default, SC with a non-default datastore, SC with an SPBM policy, SC with VSAN storage capabilities).
2. Read VCP_SCALE_VOLUME_COUNT from the system environment.
3. Launch VCP_SCALE_INSTANCES goroutines for creating VCP_SCALE_VOLUME_COUNT volumes. Each goroutine is responsible for creating/attaching VCP_SCALE_VOLUME_COUNT/VCP_SCALE_INSTANCES volumes.
4. Read VCP_SCALE_VOLUMES_PER_POD from the system environment. Each pod will have VCP_SCALE_VOLUMES_PER_POD volumes attached to it.
5. Once all the goroutines are completed, delete all the pods and volumes.
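For illustration only, here is a minimal, self-contained sketch of the fan-out described in steps 2-5. It is not the test code itself; `mustAtoiEnv` and `createAndAttachVolumes` are hypothetical stand-ins for the framework helpers the test actually uses, and the environment variable names follow the list above.

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// mustAtoiEnv reads an integer from the environment and fails fast when the
// variable is unset or unparsable, mirroring how the test reads VCP_SCALE_*.
func mustAtoiEnv(key string) int {
	val := os.Getenv(key)
	if val == "" {
		panic(fmt.Sprintf("ENV %s is not set", key))
	}
	n, err := strconv.Atoi(val)
	if err != nil {
		panic(fmt.Sprintf("error parsing %s: %v", key, err))
	}
	return n
}

// createAndAttachVolumes is a hypothetical stand-in for the per-goroutine work:
// the real test provisions PVCs, creates pods with volumesPerPod claims each,
// and reports the resulting node->volume mapping on the channel.
func createAndAttachVolumes(count, volumesPerPod int, results chan<- map[string][]string) {
	results <- map[string][]string{"node-0": make([]string, count)}
}

func main() {
	volumeCount := mustAtoiEnv("VCP_SCALE_VOLUME_COUNT")
	volumesPerPod := mustAtoiEnv("VCP_SCALE_VOLUMES_PER_POD")
	instances := mustAtoiEnv("VCP_SCALE_INSTANCES")

	results := make(chan map[string][]string, instances)
	perInstance := volumeCount / instances
	remaining := volumeCount
	for i := 0; i < instances; i++ {
		n := perInstance
		if i == instances-1 {
			n = remaining // the last goroutine picks up any remainder
		}
		remaining -= n
		go createAndAttachVolumes(n, volumesPerPod, results)
	}

	// Collect one node->volume map per goroutine before cleaning up.
	nodeVolumeMap := map[string][]string{}
	for i := 0; i < instances; i++ {
		for node, vols := range <-results {
			nodeVolumeMap[node] = append(nodeVolumeMap[node], vols...)
		}
	}
	fmt.Printf("created and attached %d volumes across %d node(s)\n", volumeCount, len(nodeVolumeMap))
}
```

The real test splits the volume count across the goroutines in the same way (the last goroutine picks up any remainder) and then drains one node-to-volume map per goroutine from the channel before deleting the pods and PVCs.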

Which issue this PR fixes:
Fixes vmware#291

```release-note
None
```
Kubernetes Submit Queue 2017-11-08 20:23:28 -08:00 committed by GitHub
commit 1c9d6f53af
13 changed files with 322 additions and 29 deletions


@@ -790,12 +790,12 @@ func deletePD(pdName string) error {
// Returns a pod definition based on the namespace. The pod references the PVC's
// name.
func MakeWritePod(ns string, pvc *v1.PersistentVolumeClaim) *v1.Pod {
return MakePod(ns, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
return MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "touch /mnt/volume1/SUCCESS && (id -G | grep -E '\\b777\\b')")
}
// Returns a pod definition based on the namespace. The pod references the PVC's
// name. A slice of BASH commands can be supplied as args to be run by the pod
func MakePod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
func MakePod(ns string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) *v1.Pod {
if len(command) == 0 {
command = "while true; do sleep 1; done"
}
@@ -832,6 +832,9 @@ func MakePod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool,
}
podSpec.Spec.Containers[0].VolumeMounts = volumeMounts
podSpec.Spec.Volumes = volumes
if nodeSelector != nil {
podSpec.Spec.NodeSelector = nodeSelector
}
return podSpec
}
@@ -886,9 +889,9 @@ func MakeSecPod(ns string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bo
return podSpec
}
// create pod with given claims
func CreatePod(client clientset.Interface, namespace string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, pvclaims, isPrivileged, command)
// CreatePod with given claims based on node selector
func CreatePod(client clientset.Interface, namespace string, nodeSelector map[string]string, pvclaims []*v1.PersistentVolumeClaim, isPrivileged bool, command string) (*v1.Pod, error) {
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(pod)
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
@@ -928,7 +931,7 @@ func CreateSecPod(client clientset.Interface, namespace string, pvclaims []*v1.P
// Define and create a pod with a mounted PV. Pod runs infinite loop until killed.
func CreateClientPod(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.Pod, error) {
return CreatePod(c, ns, []*v1.PersistentVolumeClaim{pvc}, true, "")
return CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
}
// wait until all pvcs phase set to bound


@@ -309,8 +309,8 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
By("Creating pods for each static PV")
for _, config := range configs {
podConfig := framework.MakePod(ns, []*v1.PersistentVolumeClaim{config.pvc}, false, "")
config.pod, err = c.CoreV1().Pods(ns).Create(podConfig)
podConfig := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{config.pvc}, false, "")
config.pod, err = c.Core().Pods(ns).Create(podConfig)
Expect(err).NotTo(HaveOccurred())
}


@@ -23,6 +23,7 @@ go_library(
"volume_metrics.go",
"volume_provisioning.go",
"volumes.go",
"vsphere_scale.go",
"vsphere_statefulsets.go",
"vsphere_stress.go",
"vsphere_utils.go",


@@ -151,7 +151,7 @@ var _ = SIGDescribe("PersistentVolumes[Disruptive][Flaky]", func() {
framework.ExpectNoError(framework.WaitOnPVandPVC(c, ns, pv2, pvc2))
By("Attaching both PVC's to a single pod")
clientPod, err = framework.CreatePod(c, ns, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "")
clientPod, err = framework.CreatePod(c, ns, nil, []*v1.PersistentVolumeClaim{pvc1, pvc2}, true, "")
Expect(err).NotTo(HaveOccurred())
})
@@ -309,7 +309,7 @@ func initTestCase(f *framework.Framework, c clientset.Interface, pvConfig framew
}
}()
Expect(err).NotTo(HaveOccurred())
pod := framework.MakePod(ns, []*v1.PersistentVolumeClaim{pvc}, true, "")
pod := framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, "")
pod.Spec.NodeName = nodeName
framework.Logf("Creating NFS client pod.")
pod, err = c.CoreV1().Pods(ns).Create(pod)


@@ -292,7 +292,7 @@ var _ = SIGDescribe("PersistentVolumes", func() {
// If a file is detected in /mnt, fail the pod and do not restart it.
By("Verifying the mount has been cleaned.")
mount := pod.Spec.Containers[0].VolumeMounts[0].MountPath
pod = framework.MakePod(ns, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
pod = framework.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, fmt.Sprintf("[ $(ls -A %s | wc -l) -eq 0 ] && exit 0 || exit 1", mount))
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(c, pod.Name, ns))


@@ -87,7 +87,7 @@ var _ = SIGDescribe("[Serial] Volume metrics", func() {
claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, claims, false, "")
pod := framework.MakePod(ns, nil, claims, false, "")
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())
@@ -115,7 +115,7 @@ var _ = SIGDescribe("[Serial] Volume metrics", func() {
Expect(pvc).ToNot(Equal(nil))
claims := []*v1.PersistentVolumeClaim{pvc}
pod := framework.MakePod(ns, claims, false, "")
pod := framework.MakePod(ns, nil, claims, false, "")
pod, err = c.CoreV1().Pods(ns).Create(pod)
Expect(err).NotTo(HaveOccurred())


@@ -0,0 +1,247 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"os"
"strconv"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
storageV1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
k8stypes "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
)
/*
Perform vSphere volume lifecycle management at scale based on user-configurable values for the number of volumes.
The following actions will be performed as part of this test.
1. Create Storage Classes of 4 categories (Default, SC with a non-default datastore, SC with an SPBM policy, SC with VSAN storage capabilities).
2. Read VCP_SCALE_VOLUME_COUNT, VCP_SCALE_INSTANCES, VCP_SCALE_VOLUMES_PER_POD, VSPHERE_SPBM_POLICY_NAME, VSPHERE_DATASTORE from the system environment.
3. Launch VCP_SCALE_INSTANCES goroutines for creating VCP_SCALE_VOLUME_COUNT volumes. Each goroutine is responsible for creating/attaching VCP_SCALE_VOLUME_COUNT/VCP_SCALE_INSTANCES volumes.
4. Read VCP_SCALE_VOLUMES_PER_POD from the system environment. Each pod will have VCP_SCALE_VOLUMES_PER_POD volumes attached to it.
5. Once all the goroutines are completed, delete all the pods and volumes.
*/
const (
NodeLabelKey = "vsphere_e2e_label"
)
// NodeSelector holds the label key and value used to pin pods to a specific node
type NodeSelector struct {
labelKey string
labelValue string
}
var _ = SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
f := framework.NewDefaultFramework("vcp-at-scale")
var (
client clientset.Interface
namespace string
nodeSelectorList []*NodeSelector
volumeCount int
numberOfInstances int
volumesPerPod int
nodeVolumeMapChan chan map[string][]string
nodes *v1.NodeList
policyName string
datastoreName string
scNames = []string{storageclass1, storageclass2, storageclass3, storageclass4}
err error
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("vsphere")
client = f.ClientSet
namespace = f.Namespace.Name
nodeVolumeMapChan = make(chan map[string][]string)
// Read the environment variables
volumeCountStr := os.Getenv("VCP_SCALE_VOLUME_COUNT")
Expect(volumeCountStr).NotTo(BeEmpty(), "ENV VCP_SCALE_VOLUME_COUNT is not set")
volumeCount, err = strconv.Atoi(volumeCountStr)
Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_SCALE_VOLUME_COUNT")
volumesPerPodStr := os.Getenv("VCP_SCALE_VOLUME_PER_POD")
Expect(volumesPerPodStr).NotTo(BeEmpty(), "ENV VCP_SCALE_VOLUME_PER_POD is not set")
volumesPerPod, err = strconv.Atoi(volumesPerPodStr)
Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_SCALE_VOLUME_PER_POD")
numberOfInstancesStr := os.Getenv("VCP_SCALE_INSTANCES")
Expect(numberOfInstancesStr).NotTo(BeEmpty(), "ENV VCP_SCALE_INSTANCES is not set")
numberOfInstances, err = strconv.Atoi(numberOfInstancesStr)
Expect(err).NotTo(HaveOccurred(), "Error Parsing VCP_SCALE_INSTANCES")
Expect(numberOfInstances > 5).NotTo(BeTrue(), "Maximum allowed instances are 5")
Expect(numberOfInstances > volumeCount).NotTo(BeTrue(), "Number of instances should be less than the total volume count")
policyName = os.Getenv("VSPHERE_SPBM_POLICY_NAME")
datastoreName = os.Getenv("VSPHERE_DATASTORE")
Expect(policyName).NotTo(BeEmpty(), "ENV VSPHERE_SPBM_POLICY_NAME is not set")
Expect(datastoreName).NotTo(BeEmpty(), "ENV VSPHERE_DATASTORE is not set")
nodes = framework.GetReadySchedulableNodesOrDie(client)
if len(nodes.Items) < 2 {
framework.Skipf("Requires at least %d nodes (not %d)", 2, len(nodes.Items))
}
// Verify volume count specified by the user can be satisfied
if volumeCount > volumesPerNode*len(nodes.Items) {
framework.Skipf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), volumesPerNode*len(nodes.Items))
}
nodeSelectorList = createNodeLabels(client, namespace, nodes)
})
/*
Remove labels from all the nodes
*/
framework.AddCleanupAction(func() {
for _, node := range nodes.Items {
framework.RemoveLabelOffNode(client, node.Name, NodeLabelKey)
}
})
It("vsphere scale tests", func() {
var pvcClaimList []string
nodeVolumeMap := make(map[k8stypes.NodeName][]string)
// Volumes will be provisioned with each of the different types of Storage Class
scArrays := make([]*storageV1.StorageClass, len(scNames))
for index, scname := range scNames {
// Create vSphere Storage Class
By(fmt.Sprintf("Creating Storage Class : %q", scname))
var sc *storageV1.StorageClass
scParams := make(map[string]string)
var err error
switch scname {
case storageclass1:
scParams = nil
case storageclass2:
scParams[Policy_HostFailuresToTolerate] = "1"
case storageclass3:
scParams[SpbmStoragePolicy] = policyName
case storageclass4:
scParams[Datastore] = datastoreName
}
sc, err = client.StorageV1().StorageClasses().Create(getVSphereStorageClassSpec(scname, scParams))
Expect(sc).NotTo(BeNil(), "Storage class is empty")
Expect(err).NotTo(HaveOccurred(), "Failed to create storage class")
defer client.StorageV1().StorageClasses().Delete(scname, nil)
scArrays[index] = sc
}
vsp, err := vsphere.GetVSphere()
Expect(err).NotTo(HaveOccurred())
volumeCountPerInstance := volumeCount / numberOfInstances
for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
if instanceCount == numberOfInstances-1 {
volumeCountPerInstance = volumeCount
}
volumeCount = volumeCount - volumeCountPerInstance
go VolumeCreateAndAttach(client, namespace, scArrays, volumeCountPerInstance, volumesPerPod, nodeSelectorList, nodeVolumeMapChan, vsp)
}
// Get the list of all volumes attached to each node from the goroutines by reading the data from the channel
for instanceCount := 0; instanceCount < numberOfInstances; instanceCount++ {
for node, volumeList := range <-nodeVolumeMapChan {
nodeVolumeMap[k8stypes.NodeName(node)] = append(nodeVolumeMap[k8stypes.NodeName(node)], volumeList...)
}
}
podList, err := client.CoreV1().Pods(namespace).List(metav1.ListOptions{})
for _, pod := range podList.Items {
pvcClaimList = append(pvcClaimList, getClaimsForPod(&pod, volumesPerPod)...)
By("Deleting pod")
err = framework.DeletePodWithWait(f, client, &pod)
Expect(err).NotTo(HaveOccurred())
}
By("Waiting for volumes to be detached from the node")
err = waitForVSphereDisksToDetach(vsp, nodeVolumeMap)
Expect(err).NotTo(HaveOccurred())
for _, pvcClaim := range pvcClaimList {
err = framework.DeletePersistentVolumeClaim(client, pvcClaim, namespace)
Expect(err).NotTo(HaveOccurred())
}
})
})
// Get PVC claims for the pod
func getClaimsForPod(pod *v1.Pod, volumesPerPod int) []string {
pvcClaimList := make([]string, volumesPerPod)
for i, volumespec := range pod.Spec.Volumes {
if volumespec.PersistentVolumeClaim != nil {
pvcClaimList[i] = volumespec.PersistentVolumeClaim.ClaimName
}
}
return pvcClaimList
}
// VolumeCreateAndAttach performs create and attach operations of vSphere persistent volumes at scale
func VolumeCreateAndAttach(client clientset.Interface, namespace string, sc []*storageV1.StorageClass, volumeCountPerInstance int, volumesPerPod int, nodeSelectorList []*NodeSelector, nodeVolumeMapChan chan map[string][]string, vsp *vsphere.VSphere) {
defer GinkgoRecover()
nodeVolumeMap := make(map[string][]string)
nodeSelectorIndex := 0
for index := 0; index < volumeCountPerInstance; index = index + volumesPerPod {
if (volumeCountPerInstance - index) < volumesPerPod {
volumesPerPod = volumeCountPerInstance - index
}
pvclaims := make([]*v1.PersistentVolumeClaim, volumesPerPod)
for i := 0; i < volumesPerPod; i++ {
By("Creating PVC using the Storage Class")
pvclaim, err := framework.CreatePVC(client, namespace, getVSphereClaimSpecWithStorageClassAnnotation(namespace, "2Gi", sc[index%len(sc)]))
Expect(err).NotTo(HaveOccurred())
pvclaims[i] = pvclaim
}
By("Waiting for claim to be in bound phase")
persistentvolumes, err := framework.WaitForPVClaimBoundPhase(client, pvclaims, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
By("Creating pod to attach PV to the node")
nodeSelector := nodeSelectorList[nodeSelectorIndex%len(nodeSelectorList)]
// Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, map[string]string{nodeSelector.labelKey: nodeSelector.labelValue}, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
for _, pv := range persistentvolumes {
nodeVolumeMap[pod.Spec.NodeName] = append(nodeVolumeMap[pod.Spec.NodeName], pv.Spec.VsphereVolume.VolumePath)
}
By("Verify the volume is accessible and available in the pod")
verifyVSphereVolumesAccessible(pod, persistentvolumes, vsp)
nodeSelectorIndex++
}
// Each goroutine sends exactly one map and the test body receives exactly
// numberOfInstances maps, so the shared channel must not be closed here;
// closing it from more than one sender would panic.
nodeVolumeMapChan <- nodeVolumeMap
}
func createNodeLabels(client clientset.Interface, namespace string, nodes *v1.NodeList) []*NodeSelector {
var nodeSelectorList []*NodeSelector
for i, node := range nodes.Items {
labelVal := "vsphere_e2e_" + strconv.Itoa(i)
nodeSelector := &NodeSelector{
labelKey: NodeLabelKey,
labelValue: labelVal,
}
nodeSelectorList = append(nodeSelectorList, nodeSelector)
framework.AddOrUpdateLabelOnNode(client, node.Name, NodeLabelKey, labelVal)
}
return nodeSelectorList
}


@@ -18,6 +18,10 @@ package storage
import (
"fmt"
"os"
"strconv"
"sync"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
@@ -28,9 +32,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/test/e2e/framework"
"os"
"strconv"
"sync"
)
/*
@@ -45,13 +46,6 @@ import (
*/
var _ = SIGDescribe("vsphere cloud provider stress [Feature:vsphere]", func() {
f := framework.NewDefaultFramework("vcp-stress")
const (
volumesPerNode = 55
storageclass1 = "sc-default"
storageclass2 = "sc-vsan"
storageclass3 = "sc-spbm"
storageclass4 = "sc-user-specified-ds"
)
var (
client clientset.Interface
namespace string
@@ -159,7 +153,7 @@ func PerformVolumeLifeCycleInParallel(f *framework.Framework, client clientset.I
By(fmt.Sprintf("%v Creating Pod using the claim: %v", logPrefix, pvclaim.Name))
// Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, pvclaims, false, "")
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("%v Waiting for the Pod: %v to be in the running state", logPrefix, pod.Name))


@@ -38,6 +38,14 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
)
const (
volumesPerNode = 55
storageclass1 = "sc-default"
storageclass2 = "sc-vsan"
storageclass3 = "sc-spbm"
storageclass4 = "sc-user-specified-ds"
)
// Sanity check for vSphere testing. Verify the persistent disk attached to the node.
func verifyVSphereDiskAttached(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) (bool, error) {
var (
@@ -53,6 +61,46 @@ func verifyVSphereDiskAttached(vsp *vsphere.VSphere, volumePath string, nodeName
return isAttached, err
}
// Wait until vsphere volumes are detached from the list of nodes or time out after 5 minutes
func waitForVSphereDisksToDetach(vsp *vsphere.VSphere, nodeVolumes map[k8stype.NodeName][]string) error {
var (
err error
disksAttached = true
detachTimeout = 5 * time.Minute
detachPollTime = 10 * time.Second
)
if vsp == nil {
vsp, err = vsphere.GetVSphere()
if err != nil {
return err
}
}
err = wait.Poll(detachPollTime, detachTimeout, func() (bool, error) {
attachedResult, err := vsp.DisksAreAttached(nodeVolumes)
if err != nil {
return false, err
}
for nodeName, nodeVolumes := range attachedResult {
for volumePath, attached := range nodeVolumes {
if attached {
framework.Logf("Waiting for volumes %q to detach from %q.", volumePath, string(nodeName))
return false, nil
}
}
}
disksAttached = false
framework.Logf("Volume are successfully detached from all the nodes: %+v", nodeVolumes)
return true, nil
})
if err != nil {
return err
}
if disksAttached {
return fmt.Errorf("Gave up waiting for volumes to detach after %v", detachTimeout)
}
return nil
}
// Wait until vsphere vmdk is detached from the given node or time out after 5 minutes
func waitForVSphereDiskToDetach(vsp *vsphere.VSphere, volumePath string, nodeName types.NodeName) error {
var (


@@ -128,7 +128,7 @@ func invokeTestForInvalidFstype(f *framework.Framework, client clientset.Interfa
var pvclaims []*v1.PersistentVolumeClaim
pvclaims = append(pvclaims, pvclaim)
// Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, pvclaims, false, ExecCommand)
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, ExecCommand)
Expect(err).To(HaveOccurred())
eventList, err := client.CoreV1().Events(namespace).List(metav1.ListOptions{})
@@ -170,7 +170,7 @@ func createPodAndVerifyVolumeAccessible(client clientset.Interface, namespace st
pvclaims = append(pvclaims, pvclaim)
By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, pvclaims, false, ExecCommand)
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
// Asserts: Right disk is attached to the pod


@@ -109,7 +109,7 @@ var _ = SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
Expect(err).NotTo(HaveOccurred())
By("Creating pod to attach PVs to the node")
pod, err := framework.CreatePod(client, namespace, pvclaims, false, "")
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
By("Verify all volumes are accessible and available in the pod")


@@ -292,7 +292,7 @@ func invokeValidPolicyTest(f *framework.Framework, client clientset.Interface, n
By("Creating pod to attach PV to the node")
// Create pod to attach Volume to Node
pod, err := framework.CreatePod(client, namespace, pvclaims, false, "")
pod, err := framework.CreatePod(client, namespace, nil, pvclaims, false, "")
Expect(err).NotTo(HaveOccurred())
vsp, err := vsphere.GetVSphere()


@@ -98,7 +98,7 @@ func (t *PersistentVolumeUpgradeTest) Teardown(f *framework.Framework) {
// testPod creates a pod that consumes a pv and prints it out. The output is then verified.
func (t *PersistentVolumeUpgradeTest) testPod(f *framework.Framework, cmd string) {
pod := framework.MakePod(f.Namespace.Name, []*v1.PersistentVolumeClaim{t.pvc}, false, cmd)
pod := framework.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{t.pvc}, false, cmd)
expectedOutput := []string{pvTestData}
f.TestContainerOutput("pod consumes pv", pod, 0, expectedOutput)
}