e2e storage: eliminate context.TODO and cleanup callbacks

Tests should accept a context from Ginkgo and pass it through to all functions
that may block for a longer period of time, in particular all Kubernetes API
calls made through client-go. When a timeout occurs, the test then returns
immediately because everything it could be blocked on returns.
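For illustration, a minimal sketch of the resulting pattern (the claim name
"example-pvc" and the framework variable f are placeholders, not taken from
this commit): Ginkgo hands the spec a per-test context, and every client-go
call receives it instead of context.TODO().

    ginkgo.It("gets a claim without context.TODO", func(ctx context.Context) {
        // The call aborts as soon as the spec's context is canceled,
        // e.g. because the test timed out or was interrupted.
        pvc, err := f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Get(ctx, "example-pvc", metav1.GetOptions{})
        framework.ExpectNoError(err, "get claim")
        framework.Logf("claim %s is bound to %s", pvc.Name, pvc.Spec.VolumeName)
    })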

Cleanup code then needs to run in a separate Ginkgo node, typically one
registered with DeferCleanup, which ensures that it gets its own context that
has not timed out yet.
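A hedged sketch of the cleanup side (the storage class variable sc is a
placeholder): the callback registered with ginkgo.DeferCleanup takes a
context.Context parameter, so Ginkgo runs it in its own node with a context
that is unaffected by the spec's timeout.

    ginkgo.DeferCleanup(func(ctx context.Context) {
        // Runs after the spec, even when the spec itself timed out.
        err := f.ClientSet.StorageV1().StorageClasses().Delete(ctx, sc.Name, metav1.DeleteOptions{})
        if err != nil && !apierrors.IsNotFound(err) {
            framework.ExpectNoError(err, "delete storage class")
        }
    })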
Patrick Ohly 2022-10-07 18:56:23 +02:00
parent d8d3dc9476
commit f15d7f6cca
11 changed files with 213 additions and 246 deletions


@@ -513,7 +513,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 }
 for _, t := range tests {
 test := t
-ginkgo.It(t.name, func() {
+ginkgo.It(t.name, func(ctx context.Context) {
 init(testParameters{
 registerDriver: test.deployClusterRegistrar,
 podInfo: test.podInfoOnMount})
@@ -537,7 +537,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 csiInlineVolumesEnabled := test.expectEphemeral
 if test.expectPodInfo {
 ginkgo.By("checking for CSIInlineVolumes feature")
-csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(m.cs, f.Timeouts, f.Namespace.Name)
+csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(ctx, m.cs, f.Timeouts, f.Namespace.Name)
 framework.ExpectNoError(err, "failed to test for CSIInlineVolumes")
 }


@@ -50,7 +50,6 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
 ns string
 pvc *v1.PersistentVolumeClaim
 sc *storagev1.StorageClass
-cleanStorageClass func()
 nodeName string
 nodeKeyValueLabel map[string]string
 nodeLabelValue string
@@ -59,7 +58,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
 f := framework.NewDefaultFramework("mounted-volume-expand")
 f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
-ginkgo.BeforeEach(func() {
+ginkgo.BeforeEach(func(ctx context.Context) {
 e2eskipper.SkipUnlessProviderIs("aws", "gce")
 c = f.ClientSet
 ns = f.Namespace.Name
@@ -84,11 +83,10 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
 Parameters: make(map[string]string),
 }
-sc, cleanStorageClass = testsuites.SetupStorageClass(c, newStorageClass(test, ns, "resizing"))
+sc = testsuites.SetupStorageClass(ctx, c, newStorageClass(test, ns, "resizing"))
 if !*sc.AllowVolumeExpansion {
 framework.Failf("Class %s does not allow volume expansion", sc.Name)
 }
-ginkgo.DeferCleanup(cleanStorageClass)
 pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 ClaimSize: test.ClaimSize,


@@ -19,6 +19,8 @@ package storage
 import (
 "context"
 "fmt"
+"time"
 "github.com/onsi/ginkgo/v2"
 v1 "k8s.io/api/core/v1"
 storagev1 "k8s.io/api/storage/v1"
@@ -31,7 +33,6 @@ import (
 "k8s.io/kubernetes/test/e2e/storage/testsuites"
 "k8s.io/kubernetes/test/e2e/storage/utils"
 admissionapi "k8s.io/pod-security-admission/api"
-"time"
 )
 var _ = utils.SIGDescribe("Persistent Volume Claim and StorageClass", func() {
@@ -60,7 +61,7 @@ var _ = utils.SIGDescribe("Persistent Volume Claim and StorageClass", func() {
 })
 ginkgo.Describe("Retroactive StorageClass assignment [Serial][Disruptive][Feature:RetroactiveDefaultStorageClass]", func() {
-ginkgo.It("should assign default SC to PVCs that have no SC set", func() {
+ginkgo.It("should assign default SC to PVCs that have no SC set", func(ctx context.Context) {
 // Temporarily set all default storage classes as non-default
 restoreClasses := temporarilyUnsetDefaultClasses(client)
@@ -81,8 +82,7 @@ var _ = utils.SIGDescribe("Persistent Volume Claim and StorageClass", func() {
 }(pvc)
 // Create custom default SC
-storageClass, clearStorageClass := testsuites.SetupStorageClass(client, makeStorageClass(prefixSC))
-defer clearStorageClass()
+storageClass := testsuites.SetupStorageClass(ctx, client, makeStorageClass(prefixSC))
 // Wait for PVC to get updated with the new default SC
 pvc, err = waitForPVCStorageClass(client, namespace, pvc.Name, storageClass.Name, f.Timeouts.ClaimBound)


@@ -76,22 +76,22 @@ var _ = utils.SIGDescribe("Regional PD", func() {
 })
 ginkgo.Describe("RegionalPD", func() {
-ginkgo.It("should provision storage [Slow]", func() {
-testVolumeProvisioning(c, f.Timeouts, ns)
+ginkgo.It("should provision storage [Slow]", func(ctx context.Context) {
+testVolumeProvisioning(ctx, c, f.Timeouts, ns)
 })
-ginkgo.It("should provision storage with delayed binding [Slow]", func() {
-testRegionalDelayedBinding(c, ns, 1 /* pvcCount */)
-testRegionalDelayedBinding(c, ns, 3 /* pvcCount */)
+ginkgo.It("should provision storage with delayed binding [Slow]", func(ctx context.Context) {
+testRegionalDelayedBinding(ctx, c, ns, 1 /* pvcCount */)
+testRegionalDelayedBinding(ctx, c, ns, 3 /* pvcCount */)
 })
-ginkgo.It("should provision storage in the allowedTopologies [Slow]", func() {
-testRegionalAllowedTopologies(c, ns)
+ginkgo.It("should provision storage in the allowedTopologies [Slow]", func(ctx context.Context) {
+testRegionalAllowedTopologies(ctx, c, ns)
 })
-ginkgo.It("should provision storage in the allowedTopologies with delayed binding [Slow]", func() {
-testRegionalAllowedTopologiesWithDelayedBinding(c, ns, 1 /* pvcCount */)
-testRegionalAllowedTopologiesWithDelayedBinding(c, ns, 3 /* pvcCount */)
+ginkgo.It("should provision storage in the allowedTopologies with delayed binding [Slow]", func(ctx context.Context) {
+testRegionalAllowedTopologiesWithDelayedBinding(ctx, c, ns, 1 /* pvcCount */)
+testRegionalAllowedTopologiesWithDelayedBinding(ctx, c, ns, 3 /* pvcCount */)
 })
 ginkgo.It("should failover to a different zone when all nodes in one zone become unreachable [Slow] [Disruptive]", func() {
@@ -100,7 +100,7 @@ var _ = utils.SIGDescribe("Regional PD", func() {
 })
 })
-func testVolumeProvisioning(c clientset.Interface, t *framework.TimeoutContext, ns string) {
+func testVolumeProvisioning(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string) {
 cloudZones := getTwoRandomZones(c)
 // This test checks that dynamic provisioning can provision a volume
@@ -119,7 +119,7 @@ func testVolumeProvisioning(c clientset.Interface, t *framework.TimeoutContext,
 ClaimSize: repdMinSize,
 ExpectedSize: repdMinSize,
 PvCheck: func(claim *v1.PersistentVolumeClaim) {
-volume := testsuites.PVWriteReadSingleNodeCheck(c, t, claim, e2epod.NodeSelection{})
+volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, t, claim, e2epod.NodeSelection{})
 gomega.Expect(volume).NotTo(gomega.BeNil())
 err := checkGCEPD(volume, "pd-standard")
@@ -141,7 +141,7 @@ func testVolumeProvisioning(c clientset.Interface, t *framework.TimeoutContext,
 ClaimSize: repdMinSize,
 ExpectedSize: repdMinSize,
 PvCheck: func(claim *v1.PersistentVolumeClaim) {
-volume := testsuites.PVWriteReadSingleNodeCheck(c, t, claim, e2epod.NodeSelection{})
+volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, t, claim, e2epod.NodeSelection{})
 gomega.Expect(volume).NotTo(gomega.BeNil())
 err := checkGCEPD(volume, "pd-standard")
@@ -156,8 +156,7 @@ func testVolumeProvisioning(c clientset.Interface, t *framework.TimeoutContext,
 for _, test := range tests {
 test.Client = c
-computedStorageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, "" /* suffix */))
-defer clearStorageClass()
+computedStorageClass := testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, "" /* suffix */))
 test.Class = computedStorageClass
 test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 ClaimSize: test.ClaimSize,
@@ -165,7 +164,7 @@ func testVolumeProvisioning(c clientset.Interface, t *framework.TimeoutContext,
 VolumeMode: &test.VolumeMode,
 }, ns)
-test.TestDynamicProvisioning()
+test.TestDynamicProvisioning(ctx)
 }
 }
@@ -334,7 +333,7 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string)
 }
 }
-func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) {
+func testRegionalDelayedBinding(ctx context.Context, c clientset.Interface, ns string, pvcCount int) {
 test := testsuites.StorageClassTest{
 Client: c,
 Name: "Regional PD storage class with waitForFirstConsumer test on GCE",
@@ -350,9 +349,7 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int)
 suffix := "delayed-regional"
-computedStorageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, suffix))
-defer clearStorageClass()
-test.Class = computedStorageClass
+test.Class = testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, suffix))
 var claims []*v1.PersistentVolumeClaim
 for i := 0; i < pvcCount; i++ {
 claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
@@ -362,7 +359,7 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int)
 }, ns)
 claims = append(claims, claim)
 }
-pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
+pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(ctx, claims, nil /* node selector */, false /* expect unschedulable */)
 if node == nil {
 framework.Failf("unexpected nil node found")
 }
@@ -375,7 +372,7 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int)
 }
 }
-func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
+func testRegionalAllowedTopologies(ctx context.Context, c clientset.Interface, ns string) {
 test := testsuites.StorageClassTest{
 Name: "Regional PD storage class with allowedTopologies test on GCE",
 Provisioner: "kubernetes.io/gce-pd",
@@ -390,9 +387,7 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
 suffix := "topo-regional"
 test.Client = c
-computedStorageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, suffix))
-defer clearStorageClass()
-test.Class = computedStorageClass
+test.Class = testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, suffix))
 zones := getTwoRandomZones(c)
 addAllowedTopologiesToStorageClass(c, test.Class, zones)
 test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
@@ -402,11 +397,11 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) {
 VolumeMode: &test.VolumeMode,
 }, ns)
-pv := test.TestDynamicProvisioning()
+pv := test.TestDynamicProvisioning(ctx)
 checkZonesFromLabelAndAffinity(pv, sets.NewString(zones...), true)
 }
-func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns string, pvcCount int) {
+func testRegionalAllowedTopologiesWithDelayedBinding(ctx context.Context, c clientset.Interface, ns string, pvcCount int) {
 test := testsuites.StorageClassTest{
 Client: c,
 Timeouts: framework.NewTimeoutContextWithDefaults(),
@@ -421,9 +416,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s
 }
 suffix := "topo-delayed-regional"
-computedStorageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, suffix))
-defer clearStorageClass()
-test.Class = computedStorageClass
+test.Class = testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, suffix))
 topoZones := getTwoRandomZones(c)
 addAllowedTopologiesToStorageClass(c, test.Class, topoZones)
 var claims []*v1.PersistentVolumeClaim
@@ -435,7 +428,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s
 }, ns)
 claims = append(claims, claim)
 }
-pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */)
+pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(ctx, claims, nil /* node selector */, false /* expect unschedulable */)
 if node == nil {
 framework.Failf("unexpected nil node found")
 }


@@ -100,7 +100,7 @@ func (p *capacityTestSuite) DefineTests(driver storageframework.TestDriver, patt
 }
 }
-ginkgo.It("provides storage capacity information", func() {
+ginkgo.It("provides storage capacity information", func(ctx context.Context) {
 init()
 timeout := time.Minute
@@ -131,12 +131,12 @@ func (p *capacityTestSuite) DefineTests(driver storageframework.TestDriver, patt
 }
 // Create storage class and wait for capacity information.
-_, clearProvisionedStorageClass := SetupStorageClass(f.ClientSet, sc)
-defer clearProvisionedStorageClass()
+sc := SetupStorageClass(ctx, f.ClientSet, sc)
 listAll.Should(MatchCapacities(matcher), "after creating storage class")
 // Delete storage class again and wait for removal of storage capacity information.
-clearProvisionedStorageClass()
+err := f.ClientSet.StorageV1().StorageClasses().Delete(ctx, sc.Name, metav1.DeleteOptions{})
+framework.ExpectNoError(err, "delete storage class")
 listAll.ShouldNot(MatchCapacities(matchSC), "after deleting storage class")
 })
 }


@@ -118,7 +118,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
 f := framework.NewFrameworkWithCustomTimeouts("ephemeral", storageframework.GetDriverTimeouts(driver))
 f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
-init := func() {
+init := func(ctx context.Context) {
 if pattern.VolType == storageframework.CSIInlineVolume {
 eDriver, _ = driver.(storageframework.EphemeralTestDriver)
 }
@@ -126,7 +126,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
 // The GenericEphemeralVolume feature is GA, but
 // perhaps this test is run against an older Kubernetes
 // where the feature might be disabled.
-enabled, err := GenericEphemeralVolumesEnabled(f.ClientSet, f.Timeouts, f.Namespace.Name)
+enabled, err := GenericEphemeralVolumesEnabled(ctx, f.ClientSet, f.Timeouts, f.Namespace.Name)
 framework.ExpectNoError(err, "check GenericEphemeralVolume feature")
 if !enabled {
 e2eskipper.Skipf("Cluster doesn't support %q volumes -- skipping", pattern.VolType)
@@ -169,12 +169,12 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
 framework.ExpectNoError(err, "while cleaning up")
 }
-ginkgo.It("should create read-only inline ephemeral volume", func() {
+ginkgo.It("should create read-only inline ephemeral volume", func(ctx context.Context) {
 if pattern.VolMode == v1.PersistentVolumeBlock {
 e2eskipper.Skipf("raw block volumes cannot be read-only")
 }
-init()
+init(ctx)
 defer cleanup()
 l.testCase.ReadOnly = true
@@ -187,11 +187,11 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
 e2evolume.VerifyExecInPodSucceed(f, pod, command)
 return nil
 }
-l.testCase.TestEphemeral()
+l.testCase.TestEphemeral(ctx)
 })
-ginkgo.It("should create read/write inline ephemeral volume", func() {
-init()
+ginkgo.It("should create read/write inline ephemeral volume", func(ctx context.Context) {
+init(ctx)
 defer cleanup()
 l.testCase.ReadOnly = false
@@ -207,15 +207,15 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
 e2evolume.VerifyExecInPodSucceed(f, pod, command)
 return nil
 }
-l.testCase.TestEphemeral()
+l.testCase.TestEphemeral(ctx)
 })
-ginkgo.It("should support expansion of pvcs created for ephemeral pvcs", func() {
+ginkgo.It("should support expansion of pvcs created for ephemeral pvcs", func(ctx context.Context) {
 if pattern.VolType != storageframework.GenericEphemeralVolume {
 e2eskipper.Skipf("Skipping %s test for expansion", pattern.VolType)
 }
-init()
+init(ctx)
 defer cleanup()
 if !driver.GetDriverInfo().Capabilities[storageframework.CapOnlineExpansion] {
@@ -236,7 +236,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
 }
 }
 pvcName := fmt.Sprintf("%s-%s", podName, outerPodVolumeSpecName)
-pvc, err := f.ClientSet.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
+pvc, err := f.ClientSet.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(ctx, pvcName, metav1.GetOptions{})
 framework.ExpectNoError(err, "error getting ephemeral pvc")
 ginkgo.By("Expanding current pvc")
@@ -267,12 +267,12 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
 framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions")
 return nil
 }
-l.testCase.TestEphemeral()
+l.testCase.TestEphemeral(ctx)
 })
-ginkgo.It("should support two pods which have the same volume definition", func() {
-init()
+ginkgo.It("should support two pods which have the same volume definition", func(ctx context.Context) {
+init(ctx)
 defer cleanup()
 // We test in read-only mode if that is all that the driver supports,
@@ -285,7 +285,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
 l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} {
 // Create another pod with the same inline volume attributes.
-pod2 := StartInPodWithInlineVolume(f.ClientSet, f.Namespace.Name, "inline-volume-tester2", "sleep 100000",
+pod2 := StartInPodWithInlineVolume(ctx, f.ClientSet, f.Namespace.Name, "inline-volume-tester2", "sleep 100000",
 []v1.VolumeSource{pod.Spec.Volumes[0].VolumeSource},
 readOnly,
 l.testCase.Node)
@@ -302,24 +302,24 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
 e2evolume.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]")
 }
-defer StopPodAndDependents(f.ClientSet, f.Timeouts, pod2)
+defer StopPodAndDependents(ctx, f.ClientSet, f.Timeouts, pod2)
 return nil
 }
-l.testCase.TestEphemeral()
+l.testCase.TestEphemeral(ctx)
 })
-ginkgo.It("should support multiple inline ephemeral volumes", func() {
+ginkgo.It("should support multiple inline ephemeral volumes", func(ctx context.Context) {
 if pattern.BindingMode == storagev1.VolumeBindingImmediate &&
 pattern.VolType == storageframework.GenericEphemeralVolume {
 e2eskipper.Skipf("Multiple generic ephemeral volumes with immediate binding may cause pod startup failures when the volumes get created in separate topology segments.")
 }
-init()
+init(ctx)
 defer cleanup()
 l.testCase.NumInlineVolumes = 2
-l.testCase.TestEphemeral()
+l.testCase.TestEphemeral(ctx)
 })
 }
@@ -368,7 +368,7 @@ type EphemeralTest struct {
 }
 // TestEphemeral tests pod creation with one ephemeral volume.
-func (t EphemeralTest) TestEphemeral() {
+func (t EphemeralTest) TestEphemeral(ctx context.Context) {
 client := t.Client
 gomega.Expect(client).NotTo(gomega.BeNil(), "EphemeralTest.Client is required")
@@ -401,13 +401,13 @@ func (t EphemeralTest) TestEphemeral() {
 }
 volumes = append(volumes, volume)
 }
-pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node)
+pod := StartInPodWithInlineVolume(ctx, client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node)
 defer func() {
 // pod might be nil now.
-StopPodAndDependents(client, t.Timeouts, pod)
+StopPodAndDependents(ctx, client, t.Timeouts, pod)
 }()
 framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(client, pod.Name, pod.Namespace, t.Timeouts.PodStartSlow), "waiting for pod with inline volume")
-runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
+runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
 framework.ExpectNoError(err, "get pod")
 actualNodeName := runningPod.Spec.NodeName
@@ -417,12 +417,12 @@ func (t EphemeralTest) TestEphemeral() {
 runningPodData = t.RunningPodCheck(pod)
 }
-StopPodAndDependents(client, t.Timeouts, pod)
+StopPodAndDependents(ctx, client, t.Timeouts, pod)
 pod = nil // Don't stop twice.
 // There should be no dangling PVCs in the namespace now. There might be for
 // generic ephemeral volumes, if something went wrong...
-pvcs, err := client.CoreV1().PersistentVolumeClaims(t.Namespace).List(context.TODO(), metav1.ListOptions{})
+pvcs, err := client.CoreV1().PersistentVolumeClaims(t.Namespace).List(ctx, metav1.ListOptions{})
 framework.ExpectNoError(err, "list PVCs")
 gomega.Expect(pvcs.Items).Should(gomega.BeEmpty(), "no dangling PVCs")
@@ -433,7 +433,7 @@ func (t EphemeralTest) TestEphemeral() {
 // StartInPodWithInlineVolume starts a command in a pod with given volume(s) mounted to /mnt/test-<number> directory.
 // The caller is responsible for checking the pod and deleting it.
-func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command string, volumes []v1.VolumeSource, readOnly bool, node e2epod.NodeSelection) *v1.Pod {
+func StartInPodWithInlineVolume(ctx context.Context, c clientset.Interface, ns, podName, command string, volumes []v1.VolumeSource, readOnly bool, node e2epod.NodeSelection) *v1.Pod {
 pod := &v1.Pod{
 TypeMeta: metav1.TypeMeta{
 Kind: "Pod",
@@ -484,15 +484,15 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri
 })
 }
-pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
+pod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
 framework.ExpectNoError(err, "failed to create pod")
 return pod
 }
 // CSIInlineVolumesEnabled checks whether the running cluster has the CSIInlineVolumes feature gate enabled.
 // It does that by trying to create a pod that uses that feature.
-func CSIInlineVolumesEnabled(c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) {
-return VolumeSourceEnabled(c, t, ns, v1.VolumeSource{
+func CSIInlineVolumesEnabled(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) {
+return VolumeSourceEnabled(ctx, c, t, ns, v1.VolumeSource{
 CSI: &v1.CSIVolumeSource{
 Driver: "no-such-driver.example.com",
 },
@@ -501,9 +501,9 @@ func CSIInlineVolumesEnabled(c clientset.Interface, t *framework.TimeoutContext,
 // GenericEphemeralVolumesEnabled checks whether the running cluster has the GenericEphemeralVolume feature gate enabled.
 // It does that by trying to create a pod that uses that feature.
-func GenericEphemeralVolumesEnabled(c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) {
+func GenericEphemeralVolumesEnabled(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) {
 storageClassName := "no-such-storage-class"
-return VolumeSourceEnabled(c, t, ns, v1.VolumeSource{
+return VolumeSourceEnabled(ctx, c, t, ns, v1.VolumeSource{
 Ephemeral: &v1.EphemeralVolumeSource{
 VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
 Spec: v1.PersistentVolumeClaimSpec{
@@ -522,7 +522,7 @@ func GenericEphemeralVolumesEnabled(c clientset.Interface, t *framework.TimeoutC
 // VolumeSourceEnabled checks whether a certain kind of volume source is enabled by trying
 // to create a pod that uses it.
-func VolumeSourceEnabled(c clientset.Interface, t *framework.TimeoutContext, ns string, volume v1.VolumeSource) (bool, error) {
+func VolumeSourceEnabled(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string, volume v1.VolumeSource) (bool, error) {
 pod := &v1.Pod{
 TypeMeta: metav1.TypeMeta{
 Kind: "Pod",
@@ -554,12 +554,12 @@ func VolumeSourceEnabled(c clientset.Interface, t *framework.TimeoutContext, ns
 },
 }
-pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
+pod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
 switch {
 case err == nil:
 // Pod was created, feature supported.
-StopPodAndDependents(c, t, pod)
+StopPodAndDependents(ctx, c, t, pod)
 return true, nil
 case apierrors.IsInvalid(err):
 // "Invalid" because it uses a feature that isn't supported.


@@ -317,7 +317,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
 // [ node1 ]
 // | | <- same volume mode
 // [volume1] -> [restored volume1 snapshot]
-ginkgo.It("should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly][Feature:VolumeSnapshotDataSource][Feature:VolumeSourceXFS]", func() {
+ginkgo.It("should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly][Feature:VolumeSnapshotDataSource][Feature:VolumeSourceXFS]", func(ctx context.Context) {
 init()
 defer cleanup()
@@ -342,8 +342,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
 }
 testConfig := storageframework.ConvertTestConfig(l.config)
 dc := l.config.Framework.DynamicClient
-dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(f, testConfig, l.config, pattern, l.cs, dc, resource.Pvc, resource.Sc, sDriver, pattern.VolMode, expectedContent)
-defer cleanupFunc()
+dataSource := prepareSnapshotDataSourceForProvisioning(ctx, f, testConfig, l.config, pattern, l.cs, dc, resource.Pvc, resource.Sc, sDriver, pattern.VolMode, expectedContent)
 // Create 2nd PVC for testing
 pvc2 := &v1.PersistentVolumeClaim{
@@ -372,7 +371,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
 // [ node1 ]
 // | | <- same volume mode
 // [volume1] -> [cloned volume1]
-ginkgo.It("should concurrently access the volume and its clone from pods on the same node [LinuxOnly][Feature:VolumeSourceXFS]", func() {
+ginkgo.It("should concurrently access the volume and its clone from pods on the same node [LinuxOnly][Feature:VolumeSourceXFS]", func(ctx context.Context) {
 init()
 defer cleanup()
@@ -387,8 +386,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p
 l.resources = append(l.resources, resource)
 pvcs := []*v1.PersistentVolumeClaim{resource.Pvc}
 testConfig := storageframework.ConvertTestConfig(l.config)
-dataSource, cleanupFunc := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, resource.Pvc, resource.Sc, pattern.VolMode, expectedContent)
-defer cleanupFunc()
+dataSource := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, resource.Pvc, resource.Sc, pattern.VolMode, expectedContent)
 // Create 2nd PVC for testing
 pvc2 := &v1.PersistentVolumeClaim{


@ -141,6 +141,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
// Now do the more expensive test initialization. // Now do the more expensive test initialization.
l.config = driver.PrepareTest(f) l.config = driver.PrepareTest(f)
l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName)
ginkgo.DeferCleanup(l.migrationCheck.validateMigrationVolumeOpCounts)
l.cs = l.config.Framework.ClientSet l.cs = l.config.Framework.ClientSet
testVolumeSizeRange := p.GetTestSuiteInfo().SupportedSizeRange testVolumeSizeRange := p.GetTestSuiteInfo().SupportedSizeRange
driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange
@ -175,11 +176,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
} }
} }
cleanup := func() { ginkgo.It("should provision storage with mount options", func(ctx context.Context) {
l.migrationCheck.validateMigrationVolumeOpCounts()
}
ginkgo.It("should provision storage with mount options", func() {
if dInfo.SupportedMountOption == nil { if dInfo.SupportedMountOption == nil {
e2eskipper.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name) e2eskipper.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name)
} }
@ -188,19 +185,17 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
} }
init() init()
defer cleanup()
l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List() l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List()
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
PVWriteReadSingleNodeCheck(l.cs, f.Timeouts, claim, l.config.ClientNodeSelection) PVWriteReadSingleNodeCheck(ctx, l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
} }
_, clearProvisionedStorageClass := SetupStorageClass(l.testCase.Client, l.testCase.Class) SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)
defer clearProvisionedStorageClass()
l.testCase.TestDynamicProvisioning() l.testCase.TestDynamicProvisioning(ctx)
}) })
ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() { ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func(ctx context.Context) {
if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] { if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] {
e2eskipper.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name) e2eskipper.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name)
} }
@ -214,13 +209,11 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
} }
init() init()
defer cleanup()
dc := l.config.Framework.DynamicClient dc := l.config.Framework.DynamicClient
testConfig := storageframework.ConvertTestConfig(l.config) testConfig := storageframework.ConvertTestConfig(l.config)
expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name) expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent) dataSource := prepareSnapshotDataSourceForProvisioning(ctx, f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent)
defer cleanupFunc()
l.pvc.Spec.DataSource = dataSource l.pvc.Spec.DataSource = dataSource
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
@ -235,10 +228,10 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
} }
e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests) e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests)
} }
l.testCase.TestDynamicProvisioning() l.testCase.TestDynamicProvisioning(ctx)
}) })
ginkgo.It("should provision storage with any volume data source [Serial]", func() { ginkgo.It("should provision storage with any volume data source [Serial]", func(ctx context.Context) {
if len(dInfo.InTreePluginName) != 0 { if len(dInfo.InTreePluginName) != 0 {
e2eskipper.Skipf("AnyVolumeDataSource feature only works with CSI drivers - skipping") e2eskipper.Skipf("AnyVolumeDataSource feature only works with CSI drivers - skipping")
} }
@ -247,7 +240,6 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
} }
init() init()
defer cleanup()
ginkgo.By("Creating validator namespace") ginkgo.By("Creating validator namespace")
valNamespace, err := f.CreateNamespace(fmt.Sprintf("%s-val", f.Namespace.Name), map[string]string{ valNamespace, err := f.CreateNamespace(fmt.Sprintf("%s-val", f.Namespace.Name), map[string]string{
@ -414,18 +406,16 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests) e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests)
} }
_, clearProvisionedStorageClass := SetupStorageClass(l.testCase.Client, l.testCase.Class) SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)
defer clearProvisionedStorageClass()
l.testCase.TestDynamicProvisioning() l.testCase.TestDynamicProvisioning(ctx)
}) })
ginkgo.It("should provision storage with pvc data source", func() { ginkgo.It("should provision storage with pvc data source", func(ctx context.Context) {
if !dInfo.Capabilities[storageframework.CapPVCDataSource] { if !dInfo.Capabilities[storageframework.CapPVCDataSource] {
e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name) e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
} }
init() init()
defer cleanup()
if l.config.ClientNodeSelection.Name == "" { if l.config.ClientNodeSelection.Name == "" {
// Schedule all pods to the same topology segment (e.g. a cloud availability zone), some // Schedule all pods to the same topology segment (e.g. a cloud availability zone), some
@ -436,8 +426,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
} }
testConfig := storageframework.ConvertTestConfig(l.config) testConfig := storageframework.ConvertTestConfig(l.config)
expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name) expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent) dataSource := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
defer dataSourceCleanup()
l.pvc.Spec.DataSource = dataSource l.pvc.Spec.DataSource = dataSource
l.testCase.NodeSelection = testConfig.ClientNodeSelection l.testCase.NodeSelection = testConfig.ClientNodeSelection
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
@ -455,10 +444,10 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
// Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning. // Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning.
volumeAttachment := e2evolume.GetVolumeAttachmentName(f.ClientSet, testConfig, l.testCase.Provisioner, dataSource.Name, l.sourcePVC.Namespace) volumeAttachment := e2evolume.GetVolumeAttachmentName(f.ClientSet, testConfig, l.testCase.Provisioner, dataSource.Name, l.sourcePVC.Namespace)
e2evolume.WaitForVolumeAttachmentTerminated(volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision) e2evolume.WaitForVolumeAttachmentTerminated(volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision)
l.testCase.TestDynamicProvisioning() l.testCase.TestDynamicProvisioning(ctx)
}) })
ginkgo.It("should provision storage with pvc data source in parallel [Slow]", func() { ginkgo.It("should provision storage with pvc data source in parallel [Slow]", func(ctx context.Context) {
// Test cloning a single volume multiple times. // Test cloning a single volume multiple times.
if !dInfo.Capabilities[storageframework.CapPVCDataSource] { if !dInfo.Capabilities[storageframework.CapPVCDataSource] {
e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name) e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name)
@ -468,7 +457,6 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
} }
init() init()
defer cleanup()
if l.config.ClientNodeSelection.Name == "" { if l.config.ClientNodeSelection.Name == "" {
// Schedule all pods to the same topology segment (e.g. a cloud availability zone), some // Schedule all pods to the same topology segment (e.g. a cloud availability zone), some
@ -479,8 +467,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
} }
testConfig := storageframework.ConvertTestConfig(l.config) testConfig := storageframework.ConvertTestConfig(l.config)
expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name) expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name)
dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent) dataSource := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent)
defer dataSourceCleanup()
l.pvc.Spec.DataSource = dataSource l.pvc.Spec.DataSource = dataSource
var wg sync.WaitGroup var wg sync.WaitGroup
@ -511,13 +498,13 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
// Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning. // Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning.
volumeAttachment := e2evolume.GetVolumeAttachmentName(f.ClientSet, testConfig, l.testCase.Provisioner, dataSource.Name, l.sourcePVC.Namespace) volumeAttachment := e2evolume.GetVolumeAttachmentName(f.ClientSet, testConfig, l.testCase.Provisioner, dataSource.Name, l.sourcePVC.Namespace)
e2evolume.WaitForVolumeAttachmentTerminated(volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision) e2evolume.WaitForVolumeAttachmentTerminated(volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision)
t.TestDynamicProvisioning() t.TestDynamicProvisioning(ctx)
}(i) }(i)
} }
wg.Wait() wg.Wait()
}) })
ginkgo.It("should mount multiple PV pointing to the same storage on the same node", func() { ginkgo.It("should mount multiple PV pointing to the same storage on the same node", func(ctx context.Context) {
// csi-hostpath driver does not support this test case. In this test case, we have 2 PV containing the same underlying storage. // csi-hostpath driver does not support this test case. In this test case, we have 2 PV containing the same underlying storage.
// during the NodeStage call for the second volume, csi-hostpath fails the call, because it thinks the volume is already staged at a different path. // during the NodeStage call for the second volume, csi-hostpath fails the call, because it thinks the volume is already staged at a different path.
// Note: This is not an issue with driver like PD CSI where the NodeStage is a no-op for block mode. // Note: This is not an issue with driver like PD CSI where the NodeStage is a no-op for block mode.
@ -526,15 +513,13 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
} }
init() init()
defer cleanup()
l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) {
MultiplePVMountSingleNodeCheck(l.cs, f.Timeouts, claim, l.config.ClientNodeSelection) MultiplePVMountSingleNodeCheck(ctx, l.cs, f.Timeouts, claim, l.config.ClientNodeSelection)
} }
_, clearProvisionedStorageClass := SetupStorageClass(l.testCase.Client, l.testCase.Class) SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class)
defer clearProvisionedStorageClass()
l.testCase.TestDynamicProvisioning() l.testCase.TestDynamicProvisioning(ctx)
}) })
} }
@ -542,49 +527,50 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver,
// then it's returned as it is, if it doesn't exist then it's created first // then it's returned as it is, if it doesn't exist then it's created first
// and then returned, if the spec is nil then we return the `default` StorageClass // and then returned, if the spec is nil then we return the `default` StorageClass
func SetupStorageClass( func SetupStorageClass(
ctx context.Context,
client clientset.Interface, client clientset.Interface,
class *storagev1.StorageClass, class *storagev1.StorageClass,
) (*storagev1.StorageClass, func()) { ) *storagev1.StorageClass {
gomega.Expect(client).NotTo(gomega.BeNil(), "SetupStorageClass.client is required") gomega.Expect(client).NotTo(gomega.BeNil(), "SetupStorageClass.client is required")
var err error var err error
var computedStorageClass *storagev1.StorageClass var computedStorageClass *storagev1.StorageClass
var clearComputedStorageClass = func() {}
if class != nil { if class != nil {
computedStorageClass, err = client.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{}) computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{})
if err == nil { if err == nil {
// skip storageclass creation if it already exists // skip storageclass creation if it already exists
ginkgo.By("Storage class " + computedStorageClass.Name + " is already created, skipping creation.") ginkgo.By("Storage class " + computedStorageClass.Name + " is already created, skipping creation.")
} else { } else {
ginkgo.By("Creating a StorageClass") ginkgo.By("Creating a StorageClass")
class, err = client.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) class, err = client.StorageV1().StorageClasses().Create(ctx, class, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
computedStorageClass, err = client.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{}) computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
clearComputedStorageClass = func() { clearComputedStorageClass := func(ctx context.Context) {
framework.Logf("deleting storage class %s", computedStorageClass.Name) framework.Logf("deleting storage class %s", computedStorageClass.Name)
err := client.StorageV1().StorageClasses().Delete(context.TODO(), computedStorageClass.Name, metav1.DeleteOptions{}) err := client.StorageV1().StorageClasses().Delete(ctx, computedStorageClass.Name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) { if err != nil && !apierrors.IsNotFound(err) {
framework.ExpectNoError(err, "delete storage class") framework.ExpectNoError(err, "delete storage class")
} }
} }
ginkgo.DeferCleanup(clearComputedStorageClass)
} }
} else { } else {
// StorageClass is nil, so the default one will be used // StorageClass is nil, so the default one will be used
scName, err := e2epv.GetDefaultStorageClassName(client) scName, err := e2epv.GetDefaultStorageClassName(client)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("Wanted storage class is nil, fetching default StorageClass=" + scName) ginkgo.By("Wanted storage class is nil, fetching default StorageClass=" + scName)
computedStorageClass, err = client.StorageV1().StorageClasses().Get(context.TODO(), scName, metav1.GetOptions{}) computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
return computedStorageClass, clearComputedStorageClass return computedStorageClass
} }
// TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest // TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest
// it's assumed that the StorageClass `t.Class` is already provisioned, // it's assumed that the StorageClass `t.Class` is already provisioned,
// see #ProvisionStorageClass // see #ProvisionStorageClass
func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { func (t StorageClassTest) TestDynamicProvisioning(ctx context.Context) *v1.PersistentVolume {
var err error var err error
client := t.Client client := t.Client
gomega.Expect(client).NotTo(gomega.BeNil(), "StorageClassTest.Client is required") gomega.Expect(client).NotTo(gomega.BeNil(), "StorageClassTest.Client is required")
@ -593,16 +579,16 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
gomega.Expect(claim.GenerateName).NotTo(gomega.BeEmpty(), "StorageClassTest.Claim.GenerateName must not be empty") gomega.Expect(claim.GenerateName).NotTo(gomega.BeEmpty(), "StorageClassTest.Claim.GenerateName must not be empty")
class := t.Class class := t.Class
gomega.Expect(class).NotTo(gomega.BeNil(), "StorageClassTest.Class is required") gomega.Expect(class).NotTo(gomega.BeNil(), "StorageClassTest.Class is required")
class, err = client.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{}) class, err = client.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "StorageClass.Class "+class.Name+" couldn't be fetched from the cluster") framework.ExpectNoError(err, "StorageClass.Class "+class.Name+" couldn't be fetched from the cluster")
ginkgo.By(fmt.Sprintf("creating claim=%+v", claim)) ginkgo.By(fmt.Sprintf("creating claim=%+v", claim))
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim, metav1.CreateOptions{}) claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(ctx, claim, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
defer func() { defer func() {
framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
// typically this claim has already been deleted // typically this claim has already been deleted
err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{}) err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) { if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err) framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
} }
@ -632,10 +618,10 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
t.PvCheck(claim) t.PvCheck(claim)
} }
pv := t.checkProvisioning(client, claim, class) pv := t.checkProvisioning(ctx, client, claim, class)
ginkgo.By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name)) ginkgo.By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name))
framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{})) framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{}))
// Wait for the PV to get deleted if reclaim policy is Delete. (If it's // Wait for the PV to get deleted if reclaim policy is Delete. (If it's
// Retain, there's no use waiting because the PV won't be auto-deleted and // Retain, there's no use waiting because the PV won't be auto-deleted and
@ -653,25 +639,25 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume {
} }
// getBoundPV returns a PV details. // getBoundPV returns a PV details.
func getBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) { func getBoundPV(ctx context.Context, client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
// Get new copy of the claim // Get new copy of the claim
claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return nil, err return nil, err
} }
// Get the bound PV // Get the bound PV
pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{}) pv, err := client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{})
return pv, err return pv, err
} }
// checkProvisioning verifies that the claim is bound and has the correct properties // checkProvisioning verifies that the claim is bound and has the correct properties
func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume { func (t StorageClassTest) checkProvisioning(ctx context.Context, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume {
err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision) err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("checking the claim") ginkgo.By("checking the claim")
pv, err := getBoundPV(client, claim) pv, err := getBoundPV(ctx, client, claim)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Check sizes // Check sizes
@ -730,23 +716,23 @@ func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v
// persistent across pods. // persistent across pods.
// //
// This is a common test that can be called from a StorageClassTest.PvCheck. // This is a common test that can be called from a StorageClassTest.PvCheck.
func PVWriteReadSingleNodeCheck(client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) *v1.PersistentVolume { func PVWriteReadSingleNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) *v1.PersistentVolume {
ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node)) ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data" command := "echo 'hello world' > /mnt/test/data"
pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node) pod := StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node)
defer func() { defer func() {
// pod might be nil now. // pod might be nil now.
StopPod(client, pod) StopPod(ctx, client, pod)
}() }()
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow)) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod") framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName actualNodeName := runningPod.Spec.NodeName
StopPod(client, pod) StopPod(ctx, client, pod)
pod = nil // Don't stop twice. pod = nil // Don't stop twice.
// Get a new copy of the PV // Get a new copy of the PV
e2evolume, err := getBoundPV(client, claim) e2evolume, err := getBoundPV(ctx, client, claim)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName)) ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName))
@ -764,7 +750,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, timeouts *framework.
// agnhost doesn't support mount // agnhost doesn't support mount
command = "grep 'hello world' /mnt/test/data" command = "grep 'hello world' /mnt/test/data"
} }
RunInPodWithVolume(client, timeouts, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName}) RunInPodWithVolume(ctx, client, timeouts, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName})
return e2evolume return e2evolume
} }
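A note on how ctx reaches this helper from a test: PvCheck keeps its single-argument signature, so callers capture ctx from the enclosing Ginkgo node, as the dynamic-provisioning tests further down in this commit do. The sketch below is a minimal illustration of that calling pattern; the spec name, the "example" suffix, the "1Gi" size and the surrounding c, f, ns variables and newStorageClass helper are taken from the provisioning test file shown later, not from this hunk.

ginkgo.It("should provision and verify a volume", func(ctx context.Context) {
	test := testsuites.StorageClassTest{
		Client:    c,
		Timeouts:  f.Timeouts,
		ClaimSize: "1Gi",
		PvCheck: func(claim *v1.PersistentVolumeClaim) {
			// ctx is captured from the It closure above; PvCheck itself did not change.
			testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{})
		},
	}
	test.Class = testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, "example"))
	test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
		ClaimSize:        test.ClaimSize,
		StorageClassName: &test.Class.Name,
	}, ns)
	test.TestDynamicProvisioning(ctx)
})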
@ -783,23 +769,23 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, timeouts *framework.
// persistent across pods and across nodes. // persistent across pods and across nodes.
// //
// This is a common test that can be called from a StorageClassTest.PvCheck. // This is a common test that can be called from a StorageClassTest.PvCheck.
func PVMultiNodeCheck(client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) { func PVMultiNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
framework.ExpectEqual(node.Name, "", "this test only works when not locked onto a single node") framework.ExpectEqual(node.Name, "", "this test only works when not locked onto a single node")
var pod *v1.Pod var pod *v1.Pod
defer func() { defer func() {
// passing pod = nil is okay. // passing pod = nil is okay.
StopPod(client, pod) StopPod(ctx, client, pod)
}() }()
ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node)) ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node))
command := "echo 'hello world' > /mnt/test/data" command := "echo 'hello world' > /mnt/test/data"
pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node) pod = StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow)) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod") framework.ExpectNoError(err, "get pod")
actualNodeName := runningPod.Spec.NodeName actualNodeName := runningPod.Spec.NodeName
StopPod(client, pod) StopPod(ctx, client, pod)
pod = nil // Don't stop twice. pod = nil // Don't stop twice.
// Add node-anti-affinity. // Add node-anti-affinity.
@ -807,17 +793,17 @@ func PVMultiNodeCheck(client clientset.Interface, timeouts *framework.TimeoutCon
e2epod.SetAntiAffinity(&secondNode, actualNodeName) e2epod.SetAntiAffinity(&secondNode, actualNodeName)
ginkgo.By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode)) ginkgo.By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode))
command = "grep 'hello world' /mnt/test/data" command = "grep 'hello world' /mnt/test/data"
pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode) pod = StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow)) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow))
runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "get pod") framework.ExpectNoError(err, "get pod")
framework.ExpectNotEqual(runningPod.Spec.NodeName, actualNodeName, "second pod should have run on a different node") framework.ExpectNotEqual(runningPod.Spec.NodeName, actualNodeName, "second pod should have run on a different node")
StopPod(client, pod) StopPod(ctx, client, pod)
pod = nil pod = nil
} }
// TestBindingWaitForFirstConsumerMultiPVC tests the binding with WaitForFirstConsumer mode // TestBindingWaitForFirstConsumerMultiPVC tests the binding with WaitForFirstConsumer mode
func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) { func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Context, claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) {
var err error var err error
framework.ExpectNotEqual(len(claims), 0) framework.ExpectNotEqual(len(claims), 0)
namespace := claims[0].Namespace namespace := claims[0].Namespace
@ -826,7 +812,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
var claimNames []string var claimNames []string
var createdClaims []*v1.PersistentVolumeClaim var createdClaims []*v1.PersistentVolumeClaim
for _, claim := range claims { for _, claim := range claims {
c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim, metav1.CreateOptions{}) c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(ctx, claim, metav1.CreateOptions{})
claimNames = append(claimNames, c.Name) claimNames = append(claimNames, c.Name)
createdClaims = append(createdClaims, c) createdClaims = append(createdClaims, c)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -850,7 +836,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
ginkgo.By("checking the claims are in pending state") ginkgo.By("checking the claims are in pending state")
err = e2epv.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true) err = e2epv.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true)
framework.ExpectError(err) framework.ExpectError(err)
verifyPVCsPending(t.Client, createdClaims) verifyPVCsPending(ctx, t.Client, createdClaims)
ginkgo.By("creating a pod referring to the claims") ginkgo.By("creating a pod referring to the claims")
// Create a pod referring to the claim and wait for it to get to running // Create a pod referring to the claim and wait for it to get to running
@ -867,25 +853,25 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
}() }()
if expectUnschedulable { if expectUnschedulable {
// Verify that no claims are provisioned. // Verify that no claims are provisioned.
verifyPVCsPending(t.Client, createdClaims) verifyPVCsPending(ctx, t.Client, createdClaims)
return nil, nil return nil, nil
} }
// collect node details // collect node details
node, err := t.Client.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{}) node, err := t.Client.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.By("re-checking the claims to see they bound") ginkgo.By("re-checking the claims to see they bound")
var pvs []*v1.PersistentVolume var pvs []*v1.PersistentVolume
for _, claim := range createdClaims { for _, claim := range createdClaims {
// Get new copy of the claim // Get new copy of the claim
claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
// make sure claim did bind // make sure claim did bind
err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision) err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision)
framework.ExpectNoError(err) framework.ExpectNoError(err)
pv, err := t.Client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{}) pv, err := t.Client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
pvs = append(pvs, pv) pvs = append(pvs, pv)
} }
@ -895,20 +881,20 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P
// RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory. // RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory.
// It starts, checks, collects output and stops it. // It starts, checks, collects output and stops it.
func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod { func RunInPodWithVolume(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod {
pod := StartInPodWithVolume(c, ns, claimName, podName, command, node) pod := StartInPodWithVolume(ctx, c, ns, claimName, podName, command, node)
defer StopPod(c, pod) defer StopPod(ctx, c, pod)
framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow)) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow))
// get the latest status of the pod // get the latest status of the pod
pod, err := c.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) pod, err := c.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
return pod return pod
} }
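For completeness, a hypothetical call site for the updated RunInPodWithVolume, again with the context coming from the Ginkgo node; cs, f and pvc are assumed to come from the surrounding suite and the pod name is made up:

ginkgo.It("writes data through the claim", func(ctx context.Context) {
	command := "echo 'hello world' > /mnt/test/data; sync"
	// Starts the pod, waits for it to succeed, logs its output and deletes it before returning.
	RunInPodWithVolume(ctx, cs, f.Timeouts, pvc.Namespace, pvc.Name, "pvc-data-writer", command, e2epod.NodeSelection{})
})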
// StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory // StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory
// The caller is responsible for checking the pod and deleting it. // The caller is responsible for checking the pod and deleting it.
func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod { func StartInPodWithVolume(ctx context.Context, c clientset.Interface, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod {
return StartInPodWithVolumeSource(c, v1.VolumeSource{ return StartInPodWithVolumeSource(ctx, c, v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName, ClaimName: claimName,
}, },
@ -917,7 +903,7 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command
// StartInPodWithVolumeSource starts a command in a pod with given volume mounted to /mnt directory // StartInPodWithVolumeSource starts a command in a pod with given volume mounted to /mnt directory
// The caller is responsible for checking the pod and deleting it. // The caller is responsible for checking the pod and deleting it.
func StartInPodWithVolumeSource(c clientset.Interface, volSrc v1.VolumeSource, ns, podName, command string, node e2epod.NodeSelection) *v1.Pod { func StartInPodWithVolumeSource(ctx context.Context, c clientset.Interface, volSrc v1.VolumeSource, ns, podName, command string, node e2epod.NodeSelection) *v1.Pod {
pod := &v1.Pod{ pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
Kind: "Pod", Kind: "Pod",
@ -954,18 +940,18 @@ func StartInPodWithVolumeSource(c clientset.Interface, volSrc v1.VolumeSource, n
} }
e2epod.SetNodeSelection(&pod.Spec, node) e2epod.SetNodeSelection(&pod.Spec, node)
pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) pod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
framework.ExpectNoError(err, "Failed to create pod: %v", err) framework.ExpectNoError(err, "Failed to create pod: %v", err)
return pod return pod
} }
// StopPod first tries to log the output of the pod's container, then deletes the pod and // StopPod first tries to log the output of the pod's container, then deletes the pod and
// waits for that to succeed. // waits for that to succeed.
func StopPod(c clientset.Interface, pod *v1.Pod) { func StopPod(ctx context.Context, c clientset.Interface, pod *v1.Pod) {
if pod == nil { if pod == nil {
return return
} }
body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(context.TODO()).Raw() body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(ctx).Raw()
if err != nil { if err != nil {
framework.Logf("Error getting logs for pod %s: %v", pod.Name, err) framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
} else { } else {
@ -977,11 +963,11 @@ func StopPod(c clientset.Interface, pod *v1.Pod) {
// StopPodAndDependents first tries to log the output of the pod's container, // StopPodAndDependents first tries to log the output of the pod's container,
// then deletes the pod and waits for that to succeed. Also waits for all owned // then deletes the pod and waits for that to succeed. Also waits for all owned
// resources to be deleted. // resources to be deleted.
func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutContext, pod *v1.Pod) { func StopPodAndDependents(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pod *v1.Pod) {
if pod == nil { if pod == nil {
return return
} }
body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(context.TODO()).Raw() body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(ctx).Raw()
if err != nil { if err != nil {
framework.Logf("Error getting logs for pod %s: %v", pod.Name, err) framework.Logf("Error getting logs for pod %s: %v", pod.Name, err)
} else { } else {
@ -990,7 +976,7 @@ func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutCont
// We must wait explicitly for removal of the generic ephemeral volume PVs. // We must wait explicitly for removal of the generic ephemeral volume PVs.
// For that we must find them first... // For that we must find them first...
pvs, err := c.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) pvs, err := c.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{})
framework.ExpectNoError(err, "list PVs") framework.ExpectNoError(err, "list PVs")
var podPVs []v1.PersistentVolume var podPVs []v1.PersistentVolume
for _, pv := range pvs.Items { for _, pv := range pvs.Items {
@ -998,7 +984,7 @@ func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutCont
pv.Spec.ClaimRef.Namespace != pod.Namespace { pv.Spec.ClaimRef.Namespace != pod.Namespace {
continue continue
} }
pvc, err := c.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(context.TODO(), pv.Spec.ClaimRef.Name, metav1.GetOptions{}) pvc, err := c.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(ctx, pv.Spec.ClaimRef.Name, metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) { if err != nil && apierrors.IsNotFound(err) {
// Must have been some unrelated PV, otherwise the PVC should exist. // Must have been some unrelated PV, otherwise the PVC should exist.
continue continue
@ -1011,7 +997,7 @@ func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutCont
framework.Logf("Deleting pod %q in namespace %q", pod.Name, pod.Namespace) framework.Logf("Deleting pod %q in namespace %q", pod.Name, pod.Namespace)
deletionPolicy := metav1.DeletePropagationForeground deletionPolicy := metav1.DeletePropagationForeground
err = c.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, err = c.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name,
metav1.DeleteOptions{ metav1.DeleteOptions{
// If the pod is the owner of some resources (like ephemeral inline volumes), // If the pod is the owner of some resources (like ephemeral inline volumes),
// then we want to be sure that those are also gone before we return. // then we want to be sure that those are also gone before we return.
@ -1036,16 +1022,17 @@ func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutCont
} }
} }
func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) { func verifyPVCsPending(ctx context.Context, client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) {
for _, claim := range pvcs { for _, claim := range pvcs {
// Get new copy of the claim // Get new copy of the claim
claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
framework.ExpectEqual(claim.Status.Phase, v1.ClaimPending) framework.ExpectEqual(claim.Status.Phase, v1.ClaimPending)
} }
} }
func prepareSnapshotDataSourceForProvisioning( func prepareSnapshotDataSourceForProvisioning(
ctx context.Context,
f *framework.Framework, f *framework.Framework,
config e2evolume.TestConfig, config e2evolume.TestConfig,
perTestConfig *storageframework.PerTestConfig, perTestConfig *storageframework.PerTestConfig,
@ -1057,14 +1044,14 @@ func prepareSnapshotDataSourceForProvisioning(
sDriver storageframework.SnapshottableTestDriver, sDriver storageframework.SnapshottableTestDriver,
mode v1.PersistentVolumeMode, mode v1.PersistentVolumeMode,
injectContent string, injectContent string,
) (*v1.TypedLocalObjectReference, func()) { ) *v1.TypedLocalObjectReference {
_, clearComputedStorageClass := SetupStorageClass(client, class) SetupStorageClass(ctx, client, class)
if initClaim.ResourceVersion != "" { if initClaim.ResourceVersion != "" {
ginkgo.By("Skipping creation of PVC, it already exists") ginkgo.By("Skipping creation of PVC, it already exists")
} else { } else {
ginkgo.By("[Initialize dataSource]creating a initClaim") ginkgo.By("[Initialize dataSource]creating a initClaim")
updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(context.TODO(), initClaim, metav1.CreateOptions{}) updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(ctx, initClaim, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) { if apierrors.IsAlreadyExists(err) {
err = nil err = nil
} }
@ -1092,24 +1079,23 @@ func prepareSnapshotDataSourceForProvisioning(
Name: snapshotResource.Vs.GetName(), Name: snapshotResource.Vs.GetName(),
} }
cleanupFunc := func() { cleanupFunc := func(ctx context.Context) {
framework.Logf("deleting initClaim %q/%q", initClaim.Namespace, initClaim.Name) framework.Logf("deleting initClaim %q/%q", initClaim.Namespace, initClaim.Name)
err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Delete(context.TODO(), initClaim.Name, metav1.DeleteOptions{}) err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Delete(ctx, initClaim.Name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) { if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting initClaim %q. Error: %v", initClaim.Name, err) framework.Failf("Error deleting initClaim %q. Error: %v", initClaim.Name, err)
} }
err = snapshotResource.CleanupResource(f.Timeouts) err = snapshotResource.CleanupResource(f.Timeouts)
framework.ExpectNoError(err) framework.ExpectNoError(err)
clearComputedStorageClass()
} }
ginkgo.DeferCleanup(cleanupFunc)
return dataSourceRef, cleanupFunc return dataSourceRef
} }
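The cleanupFunc/DeferCleanup shape used here generalizes to other helpers that used to hand a cleanup callback back to the caller. Below is a minimal sketch under that assumption, with a hypothetical helper name and the usual e2e imports (ginkgo/v2, apierrors, metav1, clientset, framework) assumed:

func createClaimWithCleanup(ctx context.Context, client clientset.Interface, pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
	created, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
	framework.ExpectNoError(err, "create claim")
	// Register the deletion with DeferCleanup instead of returning it; the registered
	// function takes its own ctx parameter, mirroring cleanupFunc above.
	ginkgo.DeferCleanup(func(ctx context.Context) {
		err := client.CoreV1().PersistentVolumeClaims(created.Namespace).Delete(ctx, created.Name, metav1.DeleteOptions{})
		if err != nil && !apierrors.IsNotFound(err) {
			framework.Failf("Error deleting claim %q. Error: %v", created.Name, err)
		}
	})
	return created
}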
func preparePVCDataSourceForProvisioning( func preparePVCDataSourceForProvisioning(
ctx context.Context,
f *framework.Framework, f *framework.Framework,
config e2evolume.TestConfig, config e2evolume.TestConfig,
client clientset.Interface, client clientset.Interface,
@ -1117,15 +1103,15 @@ func preparePVCDataSourceForProvisioning(
class *storagev1.StorageClass, class *storagev1.StorageClass,
mode v1.PersistentVolumeMode, mode v1.PersistentVolumeMode,
injectContent string, injectContent string,
) (*v1.TypedLocalObjectReference, func()) { ) *v1.TypedLocalObjectReference {
_, clearComputedStorageClass := SetupStorageClass(client, class) SetupStorageClass(ctx, client, class)
if source.ResourceVersion != "" { if source.ResourceVersion != "" {
ginkgo.By("Skipping creation of PVC, it already exists") ginkgo.By("Skipping creation of PVC, it already exists")
} else { } else {
ginkgo.By("[Initialize dataSource]creating a source PVC") ginkgo.By("[Initialize dataSource]creating a source PVC")
var err error var err error
source, err = client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(context.TODO(), source, metav1.CreateOptions{}) source, err = client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(ctx, source, metav1.CreateOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
@ -1144,17 +1130,16 @@ func preparePVCDataSourceForProvisioning(
Name: source.GetName(), Name: source.GetName(),
} }
cleanupFunc := func() { cleanupFunc := func(ctx context.Context) {
framework.Logf("deleting source PVC %q/%q", source.Namespace, source.Name) framework.Logf("deleting source PVC %q/%q", source.Namespace, source.Name)
err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Delete(context.TODO(), source.Name, metav1.DeleteOptions{}) err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Delete(ctx, source.Name, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) { if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Error deleting source PVC %q. Error: %v", source.Name, err) framework.Failf("Error deleting source PVC %q. Error: %v", source.Name, err)
} }
clearComputedStorageClass()
} }
ginkgo.DeferCleanup(cleanupFunc)
return dataSourceRef, cleanupFunc return dataSourceRef
} }
// MultiplePVMountSingleNodeCheck checks that multiple PVs pointing to the same underlying storage can be mounted simultaneously on a single node. // MultiplePVMountSingleNodeCheck checks that multiple PVs pointing to the same underlying storage can be mounted simultaneously on a single node.
@ -1163,7 +1148,7 @@ func preparePVCDataSourceForProvisioning(
// - Start Pod1 using PVC1, PV1 (which points to an underlying volume v) on node N1. // - Start Pod1 using PVC1, PV1 (which points to an underlying volume v) on node N1.
// - Create PVC2, PV2 and prebind them. PV2 points to the same underlying volume v. // - Create PVC2, PV2 and prebind them. PV2 points to the same underlying volume v.
// - Start Pod2 using PVC2, PV2 (which points to an underlying volume v) on node N1. // - Start Pod2 using PVC2, PV2 (which points to an underlying volume v) on node N1.
func MultiplePVMountSingleNodeCheck(client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) { func MultiplePVMountSingleNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) {
pod1Config := e2epod.Config{ pod1Config := e2epod.Config{
NS: claim.Namespace, NS: claim.Namespace,
NodeSelection: node, NodeSelection: node,
@ -1178,7 +1163,7 @@ func MultiplePVMountSingleNodeCheck(client clientset.Interface, timeouts *framew
ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod1.Namespace, pod1.Name, pod1.Spec.NodeName)) ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod1.Namespace, pod1.Name, pod1.Spec.NodeName))
// Create new PV which points to the same underlying storage. Retain policy is used so that deletion of second PVC does not trigger the deletion of its bound PV and underlying storage. // Create new PV which points to the same underlying storage. Retain policy is used so that deletion of second PVC does not trigger the deletion of its bound PV and underlying storage.
e2evolume, err := getBoundPV(client, claim) e2evolume, err := getBoundPV(ctx, client, claim)
framework.ExpectNoError(err) framework.ExpectNoError(err)
pv2Config := e2epv.PersistentVolumeConfig{ pv2Config := e2epv.PersistentVolumeConfig{
NamePrefix: fmt.Sprintf("%s-", "pv"), NamePrefix: fmt.Sprintf("%s-", "pv"),


@ -125,7 +125,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
claimSize string claimSize string
originalMntTestData string originalMntTestData string
) )
init := func() { init := func(ctx context.Context) {
sDriver, _ = driver.(storageframework.SnapshottableTestDriver) sDriver, _ = driver.(storageframework.SnapshottableTestDriver)
dDriver, _ = driver.(storageframework.DynamicPVTestDriver) dDriver, _ = driver.(storageframework.DynamicPVTestDriver)
cleanupSteps = make([]func(), 0) cleanupSteps = make([]func(), 0)
@ -147,7 +147,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
// sync is available in the Linux and Windows versions of agnhost. // sync is available in the Linux and Windows versions of agnhost.
command := fmt.Sprintf("echo '%s' > %s; sync", originalMntTestData, datapath) command := fmt.Sprintf("echo '%s' > %s; sync", originalMntTestData, datapath)
pod = StartInPodWithVolumeSource(cs, *volumeResource.VolSource, f.Namespace.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection) pod = StartInPodWithVolumeSource(ctx, cs, *volumeResource.VolSource, f.Namespace.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection)
// At this point a pod is created with a PVC. How to proceed depends on which test is running. // At this point a pod is created with a PVC. How to proceed depends on which test is running.
} }
@ -172,11 +172,11 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
}) })
ginkgo.Context("", func() { ginkgo.Context("", func() {
ginkgo.It("should check snapshot fields, check restore correctly works, check deletion (ephemeral)", func() { ginkgo.It("should check snapshot fields, check restore correctly works, check deletion (ephemeral)", func(ctx context.Context) {
if pattern.VolType != storageframework.GenericEphemeralVolume { if pattern.VolType != storageframework.GenericEphemeralVolume {
e2eskipper.Skipf("volume type %q is not ephemeral", pattern.VolType) e2eskipper.Skipf("volume type %q is not ephemeral", pattern.VolType)
} }
init() init(ctx)
// delete the pod at the end of the test // delete the pod at the end of the test
cleanupSteps = append(cleanupSteps, func() { cleanupSteps = append(cleanupSteps, func() {
@ -251,9 +251,9 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
}, },
} }
restoredPod = StartInPodWithVolumeSource(cs, volSrc, restoredPVC.Namespace, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection) restoredPod = StartInPodWithVolumeSource(ctx, cs, volSrc, restoredPVC.Namespace, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection)
cleanupSteps = append(cleanupSteps, func() { cleanupSteps = append(cleanupSteps, func() {
StopPod(cs, restoredPod) StopPod(ctx, cs, restoredPod)
}) })
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow)) framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
if pattern.VolType != storageframework.GenericEphemeralVolume { if pattern.VolType != storageframework.GenericEphemeralVolume {
@ -273,11 +273,11 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
deleteVolumeSnapshot(f, dc, sr, pattern, vscontent) deleteVolumeSnapshot(f, dc, sr, pattern, vscontent)
}) })
ginkgo.It("should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)", func() { ginkgo.It("should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)", func(ctx context.Context) {
if pattern.VolType == storageframework.GenericEphemeralVolume { if pattern.VolType == storageframework.GenericEphemeralVolume {
e2eskipper.Skipf("volume type %q is ephemeral", pattern.VolType) e2eskipper.Skipf("volume type %q is ephemeral", pattern.VolType)
} }
init() init(ctx)
pvc = volumeResource.Pvc pvc = volumeResource.Pvc
sc = volumeResource.Sc sc = volumeResource.Sc
@ -305,7 +305,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
// Delete the pod to force NodeUnpublishVolume (unlike the ephemeral case where the pod is deleted at the end of the test). // Delete the pod to force NodeUnpublishVolume (unlike the ephemeral case where the pod is deleted at the end of the test).
ginkgo.By("[init] deleting the pod") ginkgo.By("[init] deleting the pod")
StopPod(cs, pod) StopPod(ctx, cs, pod)
// At this point we know that: // At this point we know that:
// - a pod was created with a PV that's supposed to have data // - a pod was created with a PV that's supposed to have data
@ -372,7 +372,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
// After writing data to a file `sync` flushes the data from memory to disk. // After writing data to a file `sync` flushes the data from memory to disk.
// sync is available in the Linux and Windows versions of agnhost. // sync is available in the Linux and Windows versions of agnhost.
command := fmt.Sprintf("echo '%s' > %s; sync", modifiedMntTestData, datapath) command := fmt.Sprintf("echo '%s' > %s; sync", modifiedMntTestData, datapath)
RunInPodWithVolume(cs, f.Timeouts, pvc.Namespace, pvc.Name, "pvc-snapshottable-data-tester", command, config.ClientNodeSelection) RunInPodWithVolume(ctx, cs, f.Timeouts, pvc.Namespace, pvc.Name, "pvc-snapshottable-data-tester", command, config.ClientNodeSelection)
ginkgo.By("creating a pvc from the snapshot") ginkgo.By("creating a pvc from the snapshot")
claimSize = pvc.Spec.Resources.Requests.Storage().String() claimSize = pvc.Spec.Resources.Requests.Storage().String()
@ -401,9 +401,9 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver,
}) })
ginkgo.By("starting a pod to use the snapshot") ginkgo.By("starting a pod to use the snapshot")
restoredPod = StartInPodWithVolume(cs, restoredPVC.Namespace, restoredPVC.Name, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection) restoredPod = StartInPodWithVolume(ctx, cs, restoredPVC.Namespace, restoredPVC.Name, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection)
cleanupSteps = append(cleanupSteps, func() { cleanupSteps = append(cleanupSteps, func() {
StopPod(cs, restoredPod) StopPod(ctx, cs, restoredPod)
}) })
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow)) framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow))
commands := e2evolume.GenerateReadFileCmd(datapath) commands := e2evolume.GenerateReadFileCmd(datapath)


@ -123,7 +123,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
// And one extra pod with a CSI volume should get Pending with a condition // And one extra pod with a CSI volume should get Pending with a condition
// that says it's unschedulable because of volume limit. // that says it's unschedulable because of volume limit.
// BEWARE: the test may create lot of volumes and it's really slow. // BEWARE: the test may create lot of volumes and it's really slow.
ginkgo.It("should support volume limits [Serial]", func() { ginkgo.It("should support volume limits [Serial]", func(ctx context.Context) {
driverInfo := driver.GetDriverInfo() driverInfo := driver.GetDriverInfo()
if !driverInfo.Capabilities[storageframework.CapVolumeLimits] { if !driverInfo.Capabilities[storageframework.CapVolumeLimits] {
ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name)) ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name))
@ -175,7 +175,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
// Create <limit> Pods. // Create <limit> Pods.
ginkgo.By(fmt.Sprintf("Creating %d Pod(s) with one volume each", limit)) ginkgo.By(fmt.Sprintf("Creating %d Pod(s) with one volume each", limit))
for i := 0; i < limit; i++ { for i := 0; i < limit; i++ {
pod := StartInPodWithVolumeSource(l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits", "sleep 1000000", selection) pod := StartInPodWithVolumeSource(ctx, l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits", "sleep 1000000", selection)
l.podNames = append(l.podNames, pod.Name) l.podNames = append(l.podNames, pod.Name)
l.pvcNames = append(l.pvcNames, ephemeral.VolumeClaimName(pod, &pod.Spec.Volumes[0])) l.pvcNames = append(l.pvcNames, ephemeral.VolumeClaimName(pod, &pod.Spec.Volumes[0]))
} }
@ -219,7 +219,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver,
} }
ginkgo.By("Creating an extra pod with one volume to exceed the limit") ginkgo.By("Creating an extra pod with one volume to exceed the limit")
pod := StartInPodWithVolumeSource(l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits-exceeded", "sleep 10000", selection) pod := StartInPodWithVolumeSource(ctx, l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits-exceeded", "sleep 10000", selection)
l.podNames = append(l.podNames, pod.Name) l.podNames = append(l.podNames, pod.Name)
ginkgo.By("Waiting for the pod to get unschedulable with the right message") ginkgo.By("Waiting for the pod to get unschedulable with the right message")


@ -148,7 +148,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}) })
ginkgo.Describe("DynamicProvisioner [Slow] [Feature:StorageProvider]", func() { ginkgo.Describe("DynamicProvisioner [Slow] [Feature:StorageProvider]", func() {
ginkgo.It("should provision storage with different parameters", func() { ginkgo.It("should provision storage with different parameters", func(ctx context.Context) {
// This test checks that dynamic provisioning can provision a volume // This test checks that dynamic provisioning can provision a volume
// that can be used to persist data among pods. // that can be used to persist data among pods.
@ -166,7 +166,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi", ClaimSize: "1.5Gi",
ExpectedSize: "2Gi", ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-ssd") err := checkGCEPD(volume, "pd-ssd")
@ -184,7 +184,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi", ClaimSize: "1.5Gi",
ExpectedSize: "2Gi", ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-standard") err := checkGCEPD(volume, "pd-standard")
@ -204,7 +204,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi", ClaimSize: "1.5Gi",
ExpectedSize: "2Gi", ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "gp2", false) err := checkAWSEBS(volume, "gp2", false)
@ -223,7 +223,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "3.5Gi", ClaimSize: "3.5Gi",
ExpectedSize: "4Gi", // 4 GiB is minimum for io1 ExpectedSize: "4Gi", // 4 GiB is minimum for io1
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "io1", false) err := checkAWSEBS(volume, "io1", false)
@ -241,7 +241,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "500Gi", // minimum for sc1 ClaimSize: "500Gi", // minimum for sc1
ExpectedSize: "500Gi", ExpectedSize: "500Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "sc1", false) err := checkAWSEBS(volume, "sc1", false)
@ -259,7 +259,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "500Gi", // minimum for st1 ClaimSize: "500Gi", // minimum for st1
ExpectedSize: "500Gi", ExpectedSize: "500Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "st1", false) err := checkAWSEBS(volume, "st1", false)
@ -277,7 +277,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1Gi", ClaimSize: "1Gi",
ExpectedSize: "1Gi", ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkAWSEBS(volume, "gp2", true) err := checkAWSEBS(volume, "gp2", true)
@ -294,7 +294,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1.5Gi", ClaimSize: "1.5Gi",
ExpectedSize: "1.5Gi", ExpectedSize: "1.5Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{})
}, },
}, },
// Azure // Azure
@ -307,7 +307,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1Gi", ClaimSize: "1Gi",
ExpectedSize: "1Gi", ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{})
}, },
}, },
} }
@ -331,8 +331,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test.Client = c test.Client = c
// overwrite StorageClass spec with provisioned StorageClass // overwrite StorageClass spec with provisioned StorageClass
storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, suffix)) storageClass := testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, suffix))
defer clearStorageClass()
test.Class = storageClass test.Class = storageClass
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
@ -341,11 +340,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
VolumeMode: &test.VolumeMode, VolumeMode: &test.VolumeMode,
}, ns) }, ns)
test.TestDynamicProvisioning() test.TestDynamicProvisioning(ctx)
} }
}) })
ginkgo.It("should provision storage with non-default reclaim policy Retain", func() { ginkgo.It("should provision storage with non-default reclaim policy Retain", func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gce", "gke") e2eskipper.SkipUnlessProviderIs("gce", "gke")
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
@ -360,7 +359,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ClaimSize: "1Gi", ClaimSize: "1Gi",
ExpectedSize: "1Gi", ExpectedSize: "1Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) { PvCheck: func(claim *v1.PersistentVolumeClaim) {
volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{})
gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV")
err := checkGCEPD(volume, "pd-standard") err := checkGCEPD(volume, "pd-standard")
@ -370,8 +369,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
test.Class = newStorageClass(test, ns, "reclaimpolicy") test.Class = newStorageClass(test, ns, "reclaimpolicy")
retain := v1.PersistentVolumeReclaimRetain retain := v1.PersistentVolumeReclaimRetain
test.Class.ReclaimPolicy = &retain test.Class.ReclaimPolicy = &retain
storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, test.Class) storageClass := testsuites.SetupStorageClass(ctx, test.Client, test.Class)
defer clearStorageClass()
test.Class = storageClass test.Class = storageClass
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
@ -380,7 +378,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
VolumeMode: &test.VolumeMode, VolumeMode: &test.VolumeMode,
}, ns) }, ns)
pv := test.TestDynamicProvisioning() pv := test.TestDynamicProvisioning(ctx)
ginkgo.By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased)) ginkgo.By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased))
framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second)) framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second))
@ -509,7 +507,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}) })
ginkgo.Describe("DynamicProvisioner External", func() { ginkgo.Describe("DynamicProvisioner External", func() {
ginkgo.It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func() { ginkgo.It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func(ctx context.Context) {
// external dynamic provisioner pods need additional permissions provided by the // external dynamic provisioner pods need additional permissions provided by the
// persistent-volume-provisioner clusterrole and a leader-locking role // persistent-volume-provisioner clusterrole and a leader-locking role
serviceAccountName := "default" serviceAccountName := "default"
@ -557,8 +555,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ExpectedSize: "1500Mi", ExpectedSize: "1500Mi",
} }
storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, "external")) storageClass := testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, "external"))
defer clearStorageClass()
test.Class = storageClass test.Class = storageClass
test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
@ -569,12 +566,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ginkgo.By("creating a claim with a external provisioning annotation") ginkgo.By("creating a claim with a external provisioning annotation")
test.TestDynamicProvisioning() test.TestDynamicProvisioning(ctx)
}) })
}) })
ginkgo.Describe("DynamicProvisioner Default", func() { ginkgo.Describe("DynamicProvisioner Default", func() {
ginkgo.It("should create and delete default persistent volumes [Slow]", func() { ginkgo.It("should create and delete default persistent volumes [Slow]", func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gce", "aws", "gke", "vsphere", "azure") e2eskipper.SkipUnlessProviderIs("gce", "aws", "gke", "vsphere", "azure")
e2epv.SkipIfNoDefaultStorageClass(c) e2epv.SkipIfNoDefaultStorageClass(c)
@ -592,11 +589,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
VolumeMode: &test.VolumeMode, VolumeMode: &test.VolumeMode,
}, ns) }, ns)
// NOTE: this test assumes that there's a default storageclass // NOTE: this test assumes that there's a default storageclass
storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, nil) test.Class = testsuites.SetupStorageClass(ctx, test.Client, nil)
test.Class = storageClass
defer clearStorageClass()
test.TestDynamicProvisioning() test.TestDynamicProvisioning(ctx)
}) })
// Modifying the default storage class can be disruptive to other tests that depend on it // Modifying the default storage class can be disruptive to other tests that depend on it
@ -679,7 +674,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
}) })
ginkgo.Describe("Invalid AWS KMS key", func() { ginkgo.Describe("Invalid AWS KMS key", func() {
ginkgo.It("should report an error and create no PV", func() { ginkgo.It("should report an error and create no PV", func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("aws") e2eskipper.SkipUnlessProviderIs("aws")
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Client: c, Client: c,
@ -691,9 +686,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
} }
ginkgo.By("creating a StorageClass") ginkgo.By("creating a StorageClass")
storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, "invalid-aws")) test.Class = testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, "invalid-aws"))
defer clearStorageClass()
test.Class = storageClass
ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner") ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner")
claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{