Fix provisioning tests to allow delayed binding

Michelle Au 2019-03-27 18:40:36 -07:00
parent bbf6683e22
commit 634be030a8
5 changed files with 145 additions and 100 deletions
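The common thread in this diff: StorageClassTest.PvCheck now receives only the claim, because with delayed binding (WaitForFirstConsumer) no PV exists until a pod uses the claim. Checks that need the bound PV obtain it themselves, either from the return value of testsuites.PVWriteReadSingleNodeCheck or via the new framework.GetBoundPV helper. A minimal sketch of the new callback shape, mirroring the GCE pd-standard case in the hunks below (c, checkGCEPD and the Ginkgo/Gomega dot-imports are the ones already used there):

	test.PvCheck = func(claim *v1.PersistentVolumeClaim) {
		// Starting the tester pod is what triggers binding for a
		// WaitForFirstConsumer class; the bound PV is returned afterwards.
		volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
		Expect(volume).NotTo(BeNil(), "get bound PV")
		err := checkGCEPD(volume, "pd-standard")
		Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
	}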

View File

@@ -21,7 +21,7 @@ import (
"time"
. "github.com/onsi/ginkgo"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -1007,3 +1007,15 @@ func CreatePVSource(zone string) (*v1.PersistentVolumeSource, error) {
func DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
return TestContext.CloudConfig.Provider.DeletePVSource(pvSource)
}
func GetBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
// Get new copy of the claim
claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(pvc.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
// Get the bound PV
pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
return pv, err
}
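GetBoundPV re-reads the claim and then fetches the PV named in its spec.volumeName; checkProvisioning and PVWriteReadSingleNodeCheck below rely on it. A hedged usage sketch (pvc is assumed to be a claim that has already reached Bound; Expect is the Gomega dot-import used throughout these tests):

	pv, err := framework.GetBoundPV(client, pvc)
	Expect(err).NotTo(HaveOccurred(), "GetBoundPV")
	// The PV's claimRef points back at the claim it was provisioned for.
	Expect(pv.Spec.ClaimRef.Name).To(Equal(pvc.Name))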

View File

@@ -153,7 +153,7 @@ func testTopologyNegative(cs clientset.Interface, suffix, namespace string, dela
if delayBinding {
test.TestBindingWaitForFirstConsumer(nodeSelector, true /* expect unschedulable */)
} else {
test.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
test.PvCheck = func(claim *v1.PersistentVolumeClaim) {
// Ensure that a pod cannot be scheduled in an unsuitable zone.
pod := testsuites.StartInPodWithVolume(cs, namespace, claim.Name, "pvc-tester-unschedulable", "sleep 100000",
framework.NodeSelection{Selector: nodeSelector})

View File

@@ -108,14 +108,15 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
},
ClaimSize: repdMinSize,
ExpectedSize: repdMinSize,
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
var err error
err = checkGCEPD(volume, "pd-standard")
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil())
err := checkGCEPD(volume, "pd-standard")
Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
err = verifyZonesInPV(volume, sets.NewString(cloudZones...), true /* match */)
Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
},
},
{
@@ -128,16 +129,16 @@ func testVolumeProvisioning(c clientset.Interface, ns string) {
},
ClaimSize: repdMinSize,
ExpectedSize: repdMinSize,
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
var err error
err = checkGCEPD(volume, "pd-standard")
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil())
err := checkGCEPD(volume, "pd-standard")
Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
zones, err := framework.GetClusterZones(c)
Expect(err).NotTo(HaveOccurred(), "GetClusterZones")
err = verifyZonesInPV(volume, zones, false /* match */)
Expect(err).NotTo(HaveOccurred(), "verifyZonesInPV")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
},
},
}

View File

@@ -23,8 +23,9 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
storagev1 "k8s.io/api/storage/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -50,7 +51,7 @@ type StorageClassTest struct {
DelayBinding bool
ClaimSize string
ExpectedSize string
PvCheck func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume)
PvCheck func(claim *v1.PersistentVolumeClaim)
VolumeMode *v1.PersistentVolumeMode
AllowVolumeExpansion bool
}
@@ -146,6 +147,9 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
init()
defer cleanup()
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
PVWriteReadSingleNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
}
l.testCase.TestDynamicProvisioning()
})
@@ -158,6 +162,9 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
defer cleanup()
l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
PVWriteReadSingleNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
}
l.testCase.TestDynamicProvisioning()
})
@@ -178,26 +185,12 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
if len(nodes.Items) <= 1 {
framework.Skipf("need more than one node - skipping")
}
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PVMultiNodeCheck(l.cs, claim, volume, framework.NodeSelection{Name: l.config.ClientNodeName})
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
PVMultiNodeCheck(l.cs, claim, framework.NodeSelection{Name: l.config.ClientNodeName})
}
l.testCase.TestDynamicProvisioning()
})
It("should create and delete block persistent volumes", func() {
if !dInfo.Capabilities[CapBlock] {
framework.Skipf("Driver %q does not support BlockVolume - skipping", dInfo.Name)
}
init()
defer cleanup()
block := v1.PersistentVolumeBlock
l.testCase.VolumeMode = &block
l.pvc.Spec.VolumeMode = &block
l.testCase.TestDynamicProvisioning()
})
It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() {
if !dInfo.Capabilities[CapDataSource] {
framework.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name)
@@ -217,7 +210,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte
defer cleanupFunc()
l.pvc.Spec.DataSource = dataSource
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
By("checking whether the created volume has the pre-populated data")
command := fmt.Sprintf("grep '%s' /mnt/test/initialData", claim.Namespace)
RunInPodWithVolume(l.cs, claim.Namespace, claim.Name, "pvc-snapshot-tester", command, framework.NodeSelection{Name: l.config.ClientNodeName})
@@ -261,16 +254,39 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
}
}()
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
// Run the checker
if t.PvCheck != nil {
t.PvCheck(claim)
}
pv := t.checkProvisioning(client, claim, class)
By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
// Retain, there's no use waiting because the PV won't be auto-deleted and
// it's expected for the caller to do it.) Technically, the first few delete
// attempts may fail, as the volume is still attached to a node because
// kubelet is slowly cleaning up the previous pod, however it should succeed
// in a couple of minutes. Wait 20 minutes to recover from random cloud
// hiccups.
if pv != nil && pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
}
return pv
}
// checkProvisioning verifies that the claim is bound and has the correct properties
func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
By("checking the claim")
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// Get the bound PV
pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
pv, err := framework.GetBoundPV(client, claim)
Expect(err).NotTo(HaveOccurred())
// Check sizes
@@ -284,8 +300,20 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
// Check PV properties
By("checking the PV")
expectedAccessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
// Every access mode in PV should be in PVC
Expect(pv.Spec.AccessModes).NotTo(BeZero())
for _, pvMode := range pv.Spec.AccessModes {
found := false
for _, pvcMode := range claim.Spec.AccessModes {
if pvMode == pvcMode {
found = true
break
}
}
Expect(found).To(BeTrue())
}
Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))
if class == nil {
@@ -294,35 +322,15 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(*class.ReclaimPolicy))
Expect(pv.Spec.MountOptions).To(Equal(class.MountOptions))
}
if t.VolumeMode != nil {
if claim.Spec.VolumeMode != nil {
Expect(pv.Spec.VolumeMode).NotTo(BeNil())
Expect(*pv.Spec.VolumeMode).To(Equal(*t.VolumeMode))
Expect(*pv.Spec.VolumeMode).To(Equal(*claim.Spec.VolumeMode))
}
// Run the checker
if t.PvCheck != nil {
t.PvCheck(claim, pv)
}
By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil))
// Wait for the PV to get deleted if reclaim policy is Delete. (If it's
// Retain, there's no use waiting because the PV won't be auto-deleted and
// it's expected for the caller to do it.) Technically, the first few delete
// attempts may fail, as the volume is still attached to a node because
// kubelet is slowly cleaning up the previous pod, however it should succeed
// in a couple of minutes. Wait 20 minutes to recover from random cloud
// hiccups.
if pv.Spec.PersistentVolumeReclaimPolicy == v1.PersistentVolumeReclaimDelete {
By(fmt.Sprintf("deleting the claim's PV %q", pv.Name))
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(client, pv.Name, 5*time.Second, 20*time.Minute))
}
return pv
}
// PVWriteReadSingleNodeCheck checks that a PV retains data on a single node.
// PVWriteReadSingleNodeCheck checks that a PV retains data on a single node
// and returns the PV.
//
// It starts two pods:
// - The first pod writes 'hello world' to /mnt/test (= the volume) on one node.
@@ -335,16 +343,9 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
// persistent across pods.
//
// This is a common test that can be called from a StorageClassTest.PvCheck.
func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node framework.NodeSelection) {
By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) *v1.PersistentVolume {
By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data"
// We give the first pod the secondary responsibility of checking the volume has
// been mounted with the PV's mount options, if the PV was provisioned with any
for _, option := range volume.Spec.MountOptions {
// Get entry, get mount options at 6th word, replace brackets with commas
command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
}
command += " || (mount | grep 'on /mnt/test'; false)"
pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
defer func() {
// pod might be nil now.
@@ -357,9 +358,24 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
StopPod(client, pod)
pod = nil // Don't stop twice.
By(fmt.Sprintf("checking the created volume is readable and retains data on the same node %q", actualNodeName))
// Get a new copy of the PV
volume, err := framework.GetBoundPV(client, claim)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))
command = "grep 'hello world' /mnt/test/data"
// We give the second pod the additional responsibility of checking the volume has
// been mounted with the PV's mount options, if the PV was provisioned with any
for _, option := range volume.Spec.MountOptions {
// Get entry, get mount options at 6th word, replace brackets with commas
command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
}
command += " || (mount | grep 'on /mnt/test'; false)"
RunInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, framework.NodeSelection{Name: actualNodeName})
return volume
}
// PVMultiNodeCheck checks that a PV retains data when moved between nodes.
@@ -376,7 +392,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, claim *v1.Persistent
// persistent across pods and across nodes.
//
// This is a common test that can be called from a StorageClassTest.PvCheck.
func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume, node framework.NodeSelection) {
func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClaim, node framework.NodeSelection) {
Expect(node.Name).To(Equal(""), "this test only works when not locked onto a single node")
var pod *v1.Pod
@@ -385,7 +401,7 @@ func PVMultiNodeCheck(client clientset.Interface, claim *v1.PersistentVolumeClai
StopPod(client, pod)
}()
By(fmt.Sprintf("checking the created volume is writable and has the PV's mount options on node %+v", node))
By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data"
pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(client, pod.Name, pod.Namespace))
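The mount-option verification that used to ride on the writer pod now runs in the reader pod of PVWriteReadSingleNodeCheck, after the PV has been fetched with framework.GetBoundPV. A small standalone sketch (not part of the commit; "noatime" is an invented example option) of the shell command that loop builds:

	package main

	import "fmt"

	func main() {
		// Hypothetical PV.Spec.MountOptions, for illustration only.
		mountOptions := []string{"noatime"}

		command := "grep 'hello world' /mnt/test/data"
		for _, option := range mountOptions {
			// mount's 6th field looks like "(rw,noatime,...)"; the sed turns the
			// parentheses into commas so grep can match ",noatime," exactly.
			command += fmt.Sprintf(" && ( mount | grep 'on /mnt/test' | awk '{print $6}' | sed 's/^(/,/; s/)$/,/' | grep -q ,%s, )", option)
		}
		command += " || (mount | grep 'on /mnt/test'; false)"
		fmt.Println(command)
	}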

View File

@@ -33,7 +33,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
storage "k8s.io/api/storage/v1"
storagebeta "k8s.io/api/storage/v1beta1"
@@ -273,10 +273,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-ssd")
Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-ssd")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
},
},
{
@@ -288,10 +290,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-standard")
Expect(err).NotTo(HaveOccurred(), "checkGCEPD pd-standard")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
},
},
// AWS
@@ -305,10 +309,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkAWSEBS(volume, "gp2", false)
Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
},
},
{
@@ -321,10 +327,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "3.5Gi",
ExpectedSize: "4Gi", // 4 GiB is minimum for io1
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkAWSEBS(volume, "io1", false)
Expect(err).NotTo(HaveOccurred(), "checkAWSEBS io1")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
},
},
{
@@ -336,10 +344,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "500Gi", // minimum for sc1
ExpectedSize: "500Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkAWSEBS(volume, "sc1", false)
Expect(err).NotTo(HaveOccurred(), "checkAWSEBS sc1")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
},
},
{
@@ -351,10 +361,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "500Gi", // minimum for st1
ExpectedSize: "500Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkAWSEBS(volume, "st1", false)
Expect(err).NotTo(HaveOccurred(), "checkAWSEBS st1")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
},
},
{
@@ -366,10 +378,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkAWSEBS(volume, "gp2", true)
Expect(err).NotTo(HaveOccurred(), "checkAWSEBS gp2 encrypted")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
},
},
// OpenStack generic tests (works on all OpenStack deployments)
@@ -380,8 +394,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Parameters: map[string]string{},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
},
},
{
@@ -394,8 +408,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
},
},
// vSphere generic test
@@ -406,8 +420,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Parameters: map[string]string{},
ClaimSize: "1.5Gi",
ExpectedSize: "1.5Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
},
},
// Azure
@@ -418,8 +432,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
Parameters: map[string]string{},
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
},
},
}
@@ -477,10 +491,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
},
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) {
PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, claim, framework.NodeSelection{})
Expect(volume).NotTo(BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-standard")
Expect(err).NotTo(HaveOccurred(), "checkGCEPD")
testsuites.PVWriteReadSingleNodeCheck(c, claim, volume, framework.NodeSelection{})
},
}
test.Class = newStorageClass(test, ns, "reclaimpolicy")