diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go index 8b97c69b100..cba6ce9481d 100644 --- a/test/e2e/storage/csi_mock_volume.go +++ b/test/e2e/storage/csi_mock_volume.go @@ -181,8 +181,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } m.driver = drivers.InitMockCSIDriver(driverOpts) - config, testCleanup := m.driver.PrepareTest(f) - m.testCleanups = append(m.testCleanups, testCleanup) + config := m.driver.PrepareTest(f) m.config = config m.provisioner = config.GetUniqueDriverName() @@ -514,7 +513,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { } for _, t := range tests { test := t - ginkgo.It(t.name, func() { + ginkgo.It(t.name, func(ctx context.Context) { init(testParameters{ registerDriver: test.deployClusterRegistrar, podInfo: test.podInfoOnMount}) @@ -538,7 +537,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { csiInlineVolumesEnabled := test.expectEphemeral if test.expectPodInfo { ginkgo.By("checking for CSIInlineVolumes feature") - csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(m.cs, f.Timeouts, f.Namespace.Name) + csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(ctx, m.cs, f.Timeouts, f.Namespace.Name) framework.ExpectNoError(err, "failed to test for CSIInlineVolumes") } diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index 6d567ae3e52..c9e04a08eb4 100644 --- a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -207,7 +207,7 @@ func (h *hostpathCSIDriver) GetSnapshotClass(config *storageframework.PerTestCon return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns) } -func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { +func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { // Create secondary namespace which will be used for creating driver driverNamespace := utils.CreateDriverNamespace(f) driverns := driverNamespace.Name @@ -286,8 +286,9 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*storageframewo driverns, cleanup, cancelLogging) + ginkgo.DeferCleanup(cleanupFunc) - return config, cleanupFunc + return config } // mockCSI @@ -533,7 +534,7 @@ func (m *mockCSIDriver) GetSnapshotClass(config *storageframework.PerTestConfig, return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns) } -func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { +func (m *mockCSIDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { m.clientSet = f.ClientSet // Create secondary namespace which will be used for creating driver @@ -691,12 +692,12 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.P cleanup, cancelLogging) - cleanupFunc := func() { + ginkgo.DeferCleanup(func() { embeddedCleanup() driverCleanupFunc() - } + }) - return config, cleanupFunc + return config } func (m *mockCSIDriver) interceptGRPC(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { @@ -863,7 +864,7 @@ func (g *gcePDCSIDriver) GetSnapshotClass(config *storageframework.PerTestConfig return utils.GenerateSnapshotClassSpec(snapshotter, parameters, ns) } -func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { +func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { testns := 
f.Namespace.Name cfg := &storageframework.PerTestConfig{ Driver: g, @@ -873,7 +874,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*storageframework. if framework.ProviderIs("gke") { framework.Logf("The csi gce-pd driver is automatically installed in GKE. Skipping driver installation.") - return cfg, func() {} + return cfg } ginkgo.By("deploying csi gce-pd driver") @@ -920,13 +921,14 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*storageframework. driverns, cleanup, cancelLogging) + ginkgo.DeferCleanup(cleanupFunc) return &storageframework.PerTestConfig{ Driver: g, Prefix: "gcepd", Framework: f, DriverNamespace: driverNamespace, - }, cleanupFunc + } } // WaitForCSIDriverRegistrationOnAllNodes waits for the CSINode object to be updated diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index 7574ad30455..4b22e38b4a7 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -158,7 +158,7 @@ func (n *nfsDriver) GetDynamicProvisionStorageClass(config *storageframework.Per return storageframework.GetStorageClass(provisioner, parameters, nil, ns) } -func (n *nfsDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { +func (n *nfsDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { cs := f.ClientSet ns := f.Namespace n.externalPluginName = fmt.Sprintf("example.com/nfs-%s", ns.Name) @@ -168,6 +168,10 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTe err := e2eauth.BindClusterRole(cs.RbacV1(), "cluster-admin", ns.Name, rbacv1.Subject{Kind: rbacv1.ServiceAccountKind, Namespace: ns.Name, Name: "default"}) framework.ExpectNoError(err) + ginkgo.DeferCleanup(func(ctx context.Context) { + clusterRoleBindingName := ns.Name + "--" + "cluster-admin" + cs.RbacV1().ClusterRoleBindings().Delete(ctx, clusterRoleBindingName, *metav1.NewDeleteOptions(0)) + }) err = e2eauth.WaitForAuthorizationUpdate(cs.AuthorizationV1(), serviceaccount.MakeUsername(ns.Name, "default"), @@ -176,16 +180,15 @@ func (n *nfsDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTe ginkgo.By("creating an external dynamic provisioner pod") n.externalProvisionerPod = utils.StartExternalProvisioner(cs, ns.Name, n.externalPluginName) + ginkgo.DeferCleanup(func() { + framework.ExpectNoError(e2epod.DeletePodWithWait(cs, n.externalProvisionerPod)) + }) return &storageframework.PerTestConfig{ - Driver: n, - Prefix: "nfs", - Framework: f, - }, func() { - framework.ExpectNoError(e2epod.DeletePodWithWait(cs, n.externalProvisionerPod)) - clusterRoleBindingName := ns.Name + "--" + "cluster-admin" - cs.RbacV1().ClusterRoleBindings().Delete(context.TODO(), clusterRoleBindingName, *metav1.NewDeleteOptions(0)) - } + Driver: n, + Prefix: "nfs", + Framework: f, + } } func (n *nfsDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { @@ -309,12 +312,12 @@ func (i *iSCSIDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2 return &pvSource, nil } -func (i *iSCSIDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { +func (i *iSCSIDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: i, Prefix: "iscsi", Framework: f, - }, func() {} + } } func (i *iSCSIDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) 
storageframework.TestVolume { @@ -503,12 +506,12 @@ func (r *rbdDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2ev return &pvSource, nil } -func (r *rbdDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { +func (r *rbdDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: r, Prefix: "rbd", Framework: f, - }, func() {} + } } func (r *rbdDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { @@ -618,12 +621,12 @@ func (c *cephFSDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e }, nil } -func (c *cephFSDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { +func (c *cephFSDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: c, Prefix: "cephfs", Framework: f, - }, func() {} + } } func (c *cephFSDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { @@ -695,12 +698,12 @@ func (h *hostPathDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume } } -func (h *hostPathDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { +func (h *hostPathDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: h, Prefix: "hostpath", Framework: f, - }, func() {} + } } func (h *hostPathDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { @@ -776,12 +779,12 @@ func (h *hostPathSymlinkDriver) GetVolumeSource(readOnly bool, fsType string, e2 } } -func (h *hostPathSymlinkDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { +func (h *hostPathSymlinkDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: h, Prefix: "hostpathsymlink", Framework: f, - }, func() {} + } } func (h *hostPathSymlinkDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { @@ -917,12 +920,12 @@ func (e *emptydirDriver) CreateVolume(config *storageframework.PerTestConfig, vo return nil } -func (e *emptydirDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { +func (e *emptydirDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: e, Prefix: "emptydir", Framework: f, - }, func() {} + } } // GCE @@ -1070,7 +1073,7 @@ func (g *gcePdDriver) GetDynamicProvisionStorageClass(config *storageframework.P return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns) } -func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { +func (g *gcePdDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { config := &storageframework.PerTestConfig{ Driver: g, Prefix: "gcepd", @@ -1084,7 +1087,7 @@ func (g *gcePdDriver) PrepareTest(f *framework.Framework) (*storageframework.Per }, } } - return config, func() {} + return config } @@ -1218,21 +1221,22 @@ func (v *vSphereDriver) GetDynamicProvisionStorageClass(config *storageframework return storageframework.GetStorageClass(provisioner, parameters, nil, ns) } -func (v *vSphereDriver) 
PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { - return &storageframework.PerTestConfig{ - Driver: v, - Prefix: "vsphere", - Framework: f, - }, func() { - // Driver Cleanup function - // Logout each vSphere client connection to prevent session leakage - nodes := vspheretest.GetReadySchedulableNodeInfos() - for _, node := range nodes { - if node.VSphere.Client != nil { - node.VSphere.Client.Logout(context.TODO()) - } +func (v *vSphereDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { + ginkgo.DeferCleanup(func() { + // Driver Cleanup function + // Logout each vSphere client connection to prevent session leakage + nodes := vspheretest.GetReadySchedulableNodeInfos() + for _, node := range nodes { + if node.VSphere.Client != nil { + node.VSphere.Client.Logout(context.TODO()) } } + }) + return &storageframework.PerTestConfig{ + Driver: v, + Prefix: "vsphere", + Framework: f, + } } func (v *vSphereDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { @@ -1364,12 +1368,12 @@ func (a *azureDiskDriver) GetDynamicProvisionStorageClass(config *storageframewo return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns) } -func (a *azureDiskDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { +func (a *azureDiskDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { return &storageframework.PerTestConfig{ Driver: a, Prefix: "azure", Framework: f, - }, func() {} + } } func (a *azureDiskDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { @@ -1503,7 +1507,7 @@ func (a *awsDriver) GetDynamicProvisionStorageClass(config *storageframework.Per return storageframework.GetStorageClass(provisioner, parameters, &delayedBinding, ns) } -func (a *awsDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { +func (a *awsDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { config := &storageframework.PerTestConfig{ Driver: a, Prefix: "aws", @@ -1517,7 +1521,7 @@ func (a *awsDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTe }, } } - return config, func() {} + return config } func (a *awsDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume { @@ -1641,7 +1645,7 @@ func (l *localDriver) GetDriverInfo() *storageframework.DriverInfo { func (l *localDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) { } -func (l *localDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) { +func (l *localDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig { var err error l.node, err = e2enode.GetRandomReadySchedulableNode(f.ClientSet) framework.ExpectNoError(err) @@ -1663,14 +1667,13 @@ func (l *localDriver) PrepareTest(f *framework.Framework) (*storageframework.Per } } + ginkgo.DeferCleanup(l.hostExec.Cleanup) return &storageframework.PerTestConfig{ - Driver: l, - Prefix: "local", - Framework: f, - ClientNodeSelection: e2epod.NodeSelection{Name: l.node.Name}, - }, func() { - l.hostExec.Cleanup() - } + Driver: l, + Prefix: "local", + Framework: f, + ClientNodeSelection: e2epod.NodeSelection{Name: l.node.Name}, + } } func (l *localDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) 
storageframework.TestVolume {
@@ -1862,12 +1865,12 @@ func (a *azureFileDriver) GetDynamicProvisionStorageClass(config *storageframewo
     return storageframework.GetStorageClass(provisioner, parameters, &immediateBinding, ns)
 }
 
-func (a *azureFileDriver) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
+func (a *azureFileDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
     return &storageframework.PerTestConfig{
         Driver:    a,
         Prefix:    "azure-file",
         Framework: f,
-    }, func() {}
+    }
 }
 
 func (a *azureFileDriver) CreateVolume(config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
diff --git a/test/e2e/storage/external/external.go b/test/e2e/storage/external/external.go
index 76305946b5a..1ae31c63ce7 100644
--- a/test/e2e/storage/external/external.go
+++ b/test/e2e/storage/external/external.go
@@ -415,12 +415,12 @@ func (d *driverDefinition) GetCSIDriverName(e2econfig *storageframework.PerTestC
     return d.DriverInfo.Name
 }
 
-func (d *driverDefinition) PrepareTest(f *framework.Framework) (*storageframework.PerTestConfig, func()) {
+func (d *driverDefinition) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
     e2econfig := &storageframework.PerTestConfig{
         Driver:              d,
         Prefix:              "external",
         Framework:           f,
         ClientNodeSelection: e2epod.NodeSelection{Name: d.ClientNodeName},
     }
-    return e2econfig, func() {}
+    return e2econfig
 }
diff --git a/test/e2e/storage/framework/testdriver.go b/test/e2e/storage/framework/testdriver.go
index 87d0d4e2316..5040071fd0c 100644
--- a/test/e2e/storage/framework/testdriver.go
+++ b/test/e2e/storage/framework/testdriver.go
@@ -47,9 +47,9 @@ type TestDriver interface {
     SkipUnsupportedTest(TestPattern)
 
     // PrepareTest is called at test execution time each time a new test case is about to start.
-    // It sets up all necessary resources and returns the per-test configuration
-    // plus a cleanup function that frees all allocated resources.
-    PrepareTest(f *framework.Framework) (*PerTestConfig, func())
+    // It sets up all necessary resources and returns the per-test configuration.
+    // Cleanup is handled via ginkgo.DeferCleanup inside PrepareTest.
+    PrepareTest(f *framework.Framework) *PerTestConfig
 }
 
 // TestVolume is the result of PreprovisionedVolumeTestDriver.CreateVolume.
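
// A minimal sketch (not part of this patch) of the PrepareTest contract described in the
// hunk above, for a hypothetical driver: teardown is registered with ginkgo.DeferCleanup
// inside PrepareTest and only the per-test configuration is returned. The package name,
// helper function and the "example" prefix below are illustrative assumptions, not code
// from this repository.
package example

import (
    "github.com/onsi/ginkgo/v2"

    "k8s.io/kubernetes/test/e2e/framework"
    storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
)

// prepareExampleDriver mirrors the shape of the reworked PrepareTest implementations.
func prepareExampleDriver(d storageframework.TestDriver, f *framework.Framework) *storageframework.PerTestConfig {
    // ... deploy the driver's namespaces, RBAC, pods, etc. here ...

    // Teardown is no longer returned to the caller; it is registered with Ginkgo and runs
    // when the spec finishes. DeferCleanup also accepts a func(ctx context.Context), as
    // the nfsDriver hunk earlier in this patch shows.
    ginkgo.DeferCleanup(func() {
        // ... undo the deployment here ...
    })

    return &storageframework.PerTestConfig{
        Driver:    d,
        Prefix:    "example",
        Framework: f,
    }
}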
diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go index 46b2913d06a..f837d98723a 100644 --- a/test/e2e/storage/mounted_volume_resize.go +++ b/test/e2e/storage/mounted_volume_resize.go @@ -50,7 +50,6 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun ns string pvc *v1.PersistentVolumeClaim sc *storagev1.StorageClass - cleanStorageClass func() nodeName string nodeKeyValueLabel map[string]string nodeLabelValue string @@ -59,7 +58,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun f := framework.NewDefaultFramework("mounted-volume-expand") f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("aws", "gce") c = f.ClientSet ns = f.Namespace.Name @@ -84,11 +83,10 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun Parameters: make(map[string]string), } - sc, cleanStorageClass = testsuites.SetupStorageClass(c, newStorageClass(test, ns, "resizing")) + sc = testsuites.SetupStorageClass(ctx, c, newStorageClass(test, ns, "resizing")) if !*sc.AllowVolumeExpansion { framework.Failf("Class %s does not allow volume expansion", sc.Name) } - ginkgo.DeferCleanup(cleanStorageClass) pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ ClaimSize: test.ClaimSize, diff --git a/test/e2e/storage/non_graceful_node_shutdown.go b/test/e2e/storage/non_graceful_node_shutdown.go index c9dbb6a0889..d6832b9774e 100644 --- a/test/e2e/storage/non_graceful_node_shutdown.go +++ b/test/e2e/storage/non_graceful_node_shutdown.go @@ -84,12 +84,11 @@ var _ = utils.SIGDescribe("[Feature:NodeOutOfServiceVolumeDetach] [Disruptive] [ // Install gce pd csi driver ginkgo.By("deploying csi gce-pd driver") driver := drivers.InitGcePDCSIDriver() - config, cleanup := driver.PrepareTest(f) + config := driver.PrepareTest(f) dDriver, ok := driver.(storageframework.DynamicPVTestDriver) if !ok { e2eskipper.Skipf("csi driver expected DynamicPVTestDriver but got %v", driver) } - defer cleanup() ginkgo.By("Creating a gce-pd storage class") sc := dDriver.GetDynamicProvisionStorageClass(config, "") _, err := c.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}) diff --git a/test/e2e/storage/pvc_storageclass.go b/test/e2e/storage/pvc_storageclass.go index 65c3f0dd32e..a611b9e645a 100644 --- a/test/e2e/storage/pvc_storageclass.go +++ b/test/e2e/storage/pvc_storageclass.go @@ -19,6 +19,8 @@ package storage import ( "context" "fmt" + "time" + "github.com/onsi/ginkgo/v2" v1 "k8s.io/api/core/v1" storagev1 "k8s.io/api/storage/v1" @@ -31,7 +33,6 @@ import ( "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" admissionapi "k8s.io/pod-security-admission/api" - "time" ) var _ = utils.SIGDescribe("Persistent Volume Claim and StorageClass", func() { @@ -60,7 +61,7 @@ var _ = utils.SIGDescribe("Persistent Volume Claim and StorageClass", func() { }) ginkgo.Describe("Retroactive StorageClass assignment [Serial][Disruptive][Feature:RetroactiveDefaultStorageClass]", func() { - ginkgo.It("should assign default SC to PVCs that have no SC set", func() { + ginkgo.It("should assign default SC to PVCs that have no SC set", func(ctx context.Context) { // Temporarily set all default storage classes as non-default restoreClasses := temporarilyUnsetDefaultClasses(client) @@ -81,8 +82,7 @@ var _ = utils.SIGDescribe("Persistent 
Volume Claim and StorageClass", func() { }(pvc) // Create custom default SC - storageClass, clearStorageClass := testsuites.SetupStorageClass(client, makeStorageClass(prefixSC)) - defer clearStorageClass() + storageClass := testsuites.SetupStorageClass(ctx, client, makeStorageClass(prefixSC)) // Wait for PVC to get updated with the new default SC pvc, err = waitForPVCStorageClass(client, namespace, pvc.Name, storageClass.Name, f.Timeouts.ClaimBound) diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index 3adb6401fe8..29623d99aba 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -76,22 +76,22 @@ var _ = utils.SIGDescribe("Regional PD", func() { }) ginkgo.Describe("RegionalPD", func() { - ginkgo.It("should provision storage [Slow]", func() { - testVolumeProvisioning(c, f.Timeouts, ns) + ginkgo.It("should provision storage [Slow]", func(ctx context.Context) { + testVolumeProvisioning(ctx, c, f.Timeouts, ns) }) - ginkgo.It("should provision storage with delayed binding [Slow]", func() { - testRegionalDelayedBinding(c, ns, 1 /* pvcCount */) - testRegionalDelayedBinding(c, ns, 3 /* pvcCount */) + ginkgo.It("should provision storage with delayed binding [Slow]", func(ctx context.Context) { + testRegionalDelayedBinding(ctx, c, ns, 1 /* pvcCount */) + testRegionalDelayedBinding(ctx, c, ns, 3 /* pvcCount */) }) - ginkgo.It("should provision storage in the allowedTopologies [Slow]", func() { - testRegionalAllowedTopologies(c, ns) + ginkgo.It("should provision storage in the allowedTopologies [Slow]", func(ctx context.Context) { + testRegionalAllowedTopologies(ctx, c, ns) }) - ginkgo.It("should provision storage in the allowedTopologies with delayed binding [Slow]", func() { - testRegionalAllowedTopologiesWithDelayedBinding(c, ns, 1 /* pvcCount */) - testRegionalAllowedTopologiesWithDelayedBinding(c, ns, 3 /* pvcCount */) + ginkgo.It("should provision storage in the allowedTopologies with delayed binding [Slow]", func(ctx context.Context) { + testRegionalAllowedTopologiesWithDelayedBinding(ctx, c, ns, 1 /* pvcCount */) + testRegionalAllowedTopologiesWithDelayedBinding(ctx, c, ns, 3 /* pvcCount */) }) ginkgo.It("should failover to a different zone when all nodes in one zone become unreachable [Slow] [Disruptive]", func() { @@ -100,7 +100,7 @@ var _ = utils.SIGDescribe("Regional PD", func() { }) }) -func testVolumeProvisioning(c clientset.Interface, t *framework.TimeoutContext, ns string) { +func testVolumeProvisioning(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string) { cloudZones := getTwoRandomZones(c) // This test checks that dynamic provisioning can provision a volume @@ -119,7 +119,7 @@ func testVolumeProvisioning(c clientset.Interface, t *framework.TimeoutContext, ClaimSize: repdMinSize, ExpectedSize: repdMinSize, PvCheck: func(claim *v1.PersistentVolumeClaim) { - volume := testsuites.PVWriteReadSingleNodeCheck(c, t, claim, e2epod.NodeSelection{}) + volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, t, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil()) err := checkGCEPD(volume, "pd-standard") @@ -141,7 +141,7 @@ func testVolumeProvisioning(c clientset.Interface, t *framework.TimeoutContext, ClaimSize: repdMinSize, ExpectedSize: repdMinSize, PvCheck: func(claim *v1.PersistentVolumeClaim) { - volume := testsuites.PVWriteReadSingleNodeCheck(c, t, claim, e2epod.NodeSelection{}) + volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, t, claim, e2epod.NodeSelection{}) 
gomega.Expect(volume).NotTo(gomega.BeNil()) err := checkGCEPD(volume, "pd-standard") @@ -156,8 +156,7 @@ func testVolumeProvisioning(c clientset.Interface, t *framework.TimeoutContext, for _, test := range tests { test.Client = c - computedStorageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, "" /* suffix */)) - defer clearStorageClass() + computedStorageClass := testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, "" /* suffix */)) test.Class = computedStorageClass test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ ClaimSize: test.ClaimSize, @@ -165,7 +164,7 @@ func testVolumeProvisioning(c clientset.Interface, t *framework.TimeoutContext, VolumeMode: &test.VolumeMode, }, ns) - test.TestDynamicProvisioning() + test.TestDynamicProvisioning(ctx) } } @@ -334,7 +333,7 @@ func addTaint(c clientset.Interface, ns string, nodes []v1.Node, podZone string) } } -func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) { +func testRegionalDelayedBinding(ctx context.Context, c clientset.Interface, ns string, pvcCount int) { test := testsuites.StorageClassTest{ Client: c, Name: "Regional PD storage class with waitForFirstConsumer test on GCE", @@ -350,9 +349,7 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) suffix := "delayed-regional" - computedStorageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, suffix)) - defer clearStorageClass() - test.Class = computedStorageClass + test.Class = testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, suffix)) var claims []*v1.PersistentVolumeClaim for i := 0; i < pvcCount; i++ { claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ @@ -362,7 +359,7 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) }, ns) claims = append(claims, claim) } - pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */) + pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(ctx, claims, nil /* node selector */, false /* expect unschedulable */) if node == nil { framework.Failf("unexpected nil node found") } @@ -375,7 +372,7 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) } } -func testRegionalAllowedTopologies(c clientset.Interface, ns string) { +func testRegionalAllowedTopologies(ctx context.Context, c clientset.Interface, ns string) { test := testsuites.StorageClassTest{ Name: "Regional PD storage class with allowedTopologies test on GCE", Provisioner: "kubernetes.io/gce-pd", @@ -390,9 +387,7 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) { suffix := "topo-regional" test.Client = c - computedStorageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, suffix)) - defer clearStorageClass() - test.Class = computedStorageClass + test.Class = testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, suffix)) zones := getTwoRandomZones(c) addAllowedTopologiesToStorageClass(c, test.Class, zones) test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ @@ -402,11 +397,11 @@ func testRegionalAllowedTopologies(c clientset.Interface, ns string) { VolumeMode: &test.VolumeMode, }, ns) - pv := test.TestDynamicProvisioning() + pv := test.TestDynamicProvisioning(ctx) checkZonesFromLabelAndAffinity(pv, 
sets.NewString(zones...), true) } -func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns string, pvcCount int) { +func testRegionalAllowedTopologiesWithDelayedBinding(ctx context.Context, c clientset.Interface, ns string, pvcCount int) { test := testsuites.StorageClassTest{ Client: c, Timeouts: framework.NewTimeoutContextWithDefaults(), @@ -421,9 +416,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s } suffix := "topo-delayed-regional" - computedStorageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, suffix)) - defer clearStorageClass() - test.Class = computedStorageClass + test.Class = testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, suffix)) topoZones := getTwoRandomZones(c) addAllowedTopologiesToStorageClass(c, test.Class, topoZones) var claims []*v1.PersistentVolumeClaim @@ -435,7 +428,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s }, ns) claims = append(claims, claim) } - pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */) + pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(ctx, claims, nil /* node selector */, false /* expect unschedulable */) if node == nil { framework.Failf("unexpected nil node found") } diff --git a/test/e2e/storage/testsuites/capacity.go b/test/e2e/storage/testsuites/capacity.go index 1c36dfb389c..70fb4449460 100644 --- a/test/e2e/storage/testsuites/capacity.go +++ b/test/e2e/storage/testsuites/capacity.go @@ -33,7 +33,6 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" - storageutils "k8s.io/kubernetes/test/e2e/storage/utils" admissionapi "k8s.io/pod-security-admission/api" ) @@ -81,10 +80,9 @@ func (p *capacityTestSuite) SkipUnsupportedTests(driver storageframework.TestDri func (p *capacityTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { var ( - dInfo = driver.GetDriverInfo() - dDriver storageframework.DynamicPVTestDriver - driverCleanup func() - sc *storagev1.StorageClass + dInfo = driver.GetDriverInfo() + dDriver storageframework.DynamicPVTestDriver + sc *storagev1.StorageClass ) // Beware that it also registers an AfterEach which renders f unusable. Any code using @@ -95,23 +93,15 @@ func (p *capacityTestSuite) DefineTests(driver storageframework.TestDriver, patt init := func() { dDriver, _ = driver.(storageframework.DynamicPVTestDriver) // Now do the more expensive test initialization. - config, cleanup := driver.PrepareTest(f) - driverCleanup = cleanup + config := driver.PrepareTest(f) sc = dDriver.GetDynamicProvisionStorageClass(config, pattern.FsType) if sc == nil { e2eskipper.Skipf("Driver %q does not define Dynamic Provision StorageClass - skipping", dInfo.Name) } } - cleanup := func() { - err := storageutils.TryFunc(driverCleanup) - driverCleanup = nil - framework.ExpectNoError(err, "while cleaning up driver") - } - - ginkgo.It("provides storage capacity information", func() { + ginkgo.It("provides storage capacity information", func(ctx context.Context) { init() - defer cleanup() timeout := time.Minute pollInterval := time.Second @@ -141,12 +131,12 @@ func (p *capacityTestSuite) DefineTests(driver storageframework.TestDriver, patt } // Create storage class and wait for capacity information. 
- _, clearProvisionedStorageClass := SetupStorageClass(f.ClientSet, sc) - defer clearProvisionedStorageClass() + sc := SetupStorageClass(ctx, f.ClientSet, sc) listAll.Should(MatchCapacities(matcher), "after creating storage class") // Delete storage class again and wait for removal of storage capacity information. - clearProvisionedStorageClass() + err := f.ClientSet.StorageV1().StorageClasses().Delete(ctx, sc.Name, metav1.DeleteOptions{}) + framework.ExpectNoError(err, "delete storage class") listAll.ShouldNot(MatchCapacities(matchSC), "after deleting storage class") }) } diff --git a/test/e2e/storage/testsuites/disruptive.go b/test/e2e/storage/testsuites/disruptive.go index 16e2707b7c1..f9574cf7f45 100644 --- a/test/e2e/storage/testsuites/disruptive.go +++ b/test/e2e/storage/testsuites/disruptive.go @@ -74,8 +74,7 @@ func (s *disruptiveTestSuite) SkipUnsupportedTests(driver storageframework.TestD func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *storageframework.PerTestConfig - driverCleanup func() + config *storageframework.PerTestConfig cs clientset.Interface ns *v1.Namespace @@ -97,7 +96,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa l.cs = f.ClientSet // Now do the more expensive test initialization. - l.config, l.driverCleanup = driver.PrepareTest(f) + l.config = driver.PrepareTest(f) testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) @@ -118,8 +117,6 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa l.resource = nil } - errs = append(errs, storageutils.TryFunc(l.driverCleanup)) - l.driverCleanup = nil framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") } diff --git a/test/e2e/storage/testsuites/ephemeral.go b/test/e2e/storage/testsuites/ephemeral.go index 817c41a49e2..20e633062c8 100644 --- a/test/e2e/storage/testsuites/ephemeral.go +++ b/test/e2e/storage/testsuites/ephemeral.go @@ -35,7 +35,6 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" - storageutils "k8s.io/kubernetes/test/e2e/storage/utils" admissionapi "k8s.io/pod-security-admission/api" ) @@ -104,8 +103,7 @@ func (p *ephemeralTestSuite) SkipUnsupportedTests(driver storageframework.TestDr func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *storageframework.PerTestConfig - driverCleanup func() + config *storageframework.PerTestConfig testCase *EphemeralTest resource *storageframework.VolumeResource @@ -120,7 +118,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat f := framework.NewFrameworkWithCustomTimeouts("ephemeral", storageframework.GetDriverTimeouts(driver)) f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - init := func() { + init := func(ctx context.Context) { if pattern.VolType == storageframework.CSIInlineVolume { eDriver, _ = driver.(storageframework.EphemeralTestDriver) } @@ -128,7 +126,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat // The GenericEphemeralVolume feature is GA, but // perhaps this test is run against an older Kubernetes // where the feature might be disabled. 
- enabled, err := GenericEphemeralVolumesEnabled(f.ClientSet, f.Timeouts, f.Namespace.Name) + enabled, err := GenericEphemeralVolumesEnabled(ctx, f.ClientSet, f.Timeouts, f.Namespace.Name) framework.ExpectNoError(err, "check GenericEphemeralVolume feature") if !enabled { e2eskipper.Skipf("Cluster doesn't support %q volumes -- skipping", pattern.VolType) @@ -138,7 +136,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat l = local{} // Now do the more expensive test initialization. - l.config, l.driverCleanup = driver.PrepareTest(f) + l.config = driver.PrepareTest(f) l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, e2evolume.SizeRange{}) switch pattern.VolType { @@ -167,17 +165,16 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat cleanup := func() { var cleanUpErrs []error cleanUpErrs = append(cleanUpErrs, l.resource.CleanupResource()) - cleanUpErrs = append(cleanUpErrs, storageutils.TryFunc(l.driverCleanup)) err := utilerrors.NewAggregate(cleanUpErrs) framework.ExpectNoError(err, "while cleaning up") } - ginkgo.It("should create read-only inline ephemeral volume", func() { + ginkgo.It("should create read-only inline ephemeral volume", func(ctx context.Context) { if pattern.VolMode == v1.PersistentVolumeBlock { e2eskipper.Skipf("raw block volumes cannot be read-only") } - init() + init(ctx) defer cleanup() l.testCase.ReadOnly = true @@ -190,11 +187,11 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat e2evolume.VerifyExecInPodSucceed(f, pod, command) return nil } - l.testCase.TestEphemeral() + l.testCase.TestEphemeral(ctx) }) - ginkgo.It("should create read/write inline ephemeral volume", func() { - init() + ginkgo.It("should create read/write inline ephemeral volume", func(ctx context.Context) { + init(ctx) defer cleanup() l.testCase.ReadOnly = false @@ -210,15 +207,15 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat e2evolume.VerifyExecInPodSucceed(f, pod, command) return nil } - l.testCase.TestEphemeral() + l.testCase.TestEphemeral(ctx) }) - ginkgo.It("should support expansion of pvcs created for ephemeral pvcs", func() { + ginkgo.It("should support expansion of pvcs created for ephemeral pvcs", func(ctx context.Context) { if pattern.VolType != storageframework.GenericEphemeralVolume { e2eskipper.Skipf("Skipping %s test for expansion", pattern.VolType) } - init() + init(ctx) defer cleanup() if !driver.GetDriverInfo().Capabilities[storageframework.CapOnlineExpansion] { @@ -239,7 +236,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat } } pvcName := fmt.Sprintf("%s-%s", podName, outerPodVolumeSpecName) - pvc, err := f.ClientSet.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(context.TODO(), pvcName, metav1.GetOptions{}) + pvc, err := f.ClientSet.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(ctx, pvcName, metav1.GetOptions{}) framework.ExpectNoError(err, "error getting ephemeral pvc") ginkgo.By("Expanding current pvc") @@ -270,12 +267,12 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat framework.ExpectEqual(len(pvcConditions), 0, "pvc should not have conditions") return nil } - l.testCase.TestEphemeral() + l.testCase.TestEphemeral(ctx) }) - ginkgo.It("should support two pods which have the same volume definition", func() { - init() + ginkgo.It("should support two pods which have the same volume definition", func(ctx context.Context) { + init(ctx) defer 
cleanup() // We test in read-only mode if that is all that the driver supports, @@ -288,7 +285,7 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat l.testCase.RunningPodCheck = func(pod *v1.Pod) interface{} { // Create another pod with the same inline volume attributes. - pod2 := StartInPodWithInlineVolume(f.ClientSet, f.Namespace.Name, "inline-volume-tester2", "sleep 100000", + pod2 := StartInPodWithInlineVolume(ctx, f.ClientSet, f.Namespace.Name, "inline-volume-tester2", "sleep 100000", []v1.VolumeSource{pod.Spec.Volumes[0].VolumeSource}, readOnly, l.testCase.Node) @@ -305,24 +302,24 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat e2evolume.VerifyExecInPodSucceed(f, pod2, "[ ! -f /mnt/test-0/hello-world ]") } - defer StopPodAndDependents(f.ClientSet, f.Timeouts, pod2) + defer StopPodAndDependents(ctx, f.ClientSet, f.Timeouts, pod2) return nil } - l.testCase.TestEphemeral() + l.testCase.TestEphemeral(ctx) }) - ginkgo.It("should support multiple inline ephemeral volumes", func() { + ginkgo.It("should support multiple inline ephemeral volumes", func(ctx context.Context) { if pattern.BindingMode == storagev1.VolumeBindingImmediate && pattern.VolType == storageframework.GenericEphemeralVolume { e2eskipper.Skipf("Multiple generic ephemeral volumes with immediate binding may cause pod startup failures when the volumes get created in separate topology segments.") } - init() + init(ctx) defer cleanup() l.testCase.NumInlineVolumes = 2 - l.testCase.TestEphemeral() + l.testCase.TestEphemeral(ctx) }) } @@ -371,7 +368,7 @@ type EphemeralTest struct { } // TestEphemeral tests pod creation with one ephemeral volume. -func (t EphemeralTest) TestEphemeral() { +func (t EphemeralTest) TestEphemeral(ctx context.Context) { client := t.Client gomega.Expect(client).NotTo(gomega.BeNil(), "EphemeralTest.Client is required") @@ -404,13 +401,13 @@ func (t EphemeralTest) TestEphemeral() { } volumes = append(volumes, volume) } - pod := StartInPodWithInlineVolume(client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node) + pod := StartInPodWithInlineVolume(ctx, client, t.Namespace, "inline-volume-tester", command, volumes, t.ReadOnly, t.Node) defer func() { // pod might be nil now. - StopPodAndDependents(client, t.Timeouts, pod) + StopPodAndDependents(ctx, client, t.Timeouts, pod) }() framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(client, pod.Name, pod.Namespace, t.Timeouts.PodStartSlow), "waiting for pod with inline volume") - runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") actualNodeName := runningPod.Spec.NodeName @@ -420,12 +417,12 @@ func (t EphemeralTest) TestEphemeral() { runningPodData = t.RunningPodCheck(pod) } - StopPodAndDependents(client, t.Timeouts, pod) + StopPodAndDependents(ctx, client, t.Timeouts, pod) pod = nil // Don't stop twice. // There should be no dangling PVCs in the namespace now. There might be for // generic ephemeral volumes, if something went wrong... 
- pvcs, err := client.CoreV1().PersistentVolumeClaims(t.Namespace).List(context.TODO(), metav1.ListOptions{}) + pvcs, err := client.CoreV1().PersistentVolumeClaims(t.Namespace).List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "list PVCs") gomega.Expect(pvcs.Items).Should(gomega.BeEmpty(), "no dangling PVCs") @@ -436,7 +433,7 @@ func (t EphemeralTest) TestEphemeral() { // StartInPodWithInlineVolume starts a command in a pod with given volume(s) mounted to /mnt/test- directory. // The caller is responsible for checking the pod and deleting it. -func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command string, volumes []v1.VolumeSource, readOnly bool, node e2epod.NodeSelection) *v1.Pod { +func StartInPodWithInlineVolume(ctx context.Context, c clientset.Interface, ns, podName, command string, volumes []v1.VolumeSource, readOnly bool, node e2epod.NodeSelection) *v1.Pod { pod := &v1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", @@ -487,15 +484,15 @@ func StartInPodWithInlineVolume(c clientset.Interface, ns, podName, command stri }) } - pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "failed to create pod") return pod } // CSIInlineVolumesEnabled checks whether the running cluster has the CSIInlineVolumes feature gate enabled. // It does that by trying to create a pod that uses that feature. -func CSIInlineVolumesEnabled(c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) { - return VolumeSourceEnabled(c, t, ns, v1.VolumeSource{ +func CSIInlineVolumesEnabled(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) { + return VolumeSourceEnabled(ctx, c, t, ns, v1.VolumeSource{ CSI: &v1.CSIVolumeSource{ Driver: "no-such-driver.example.com", }, @@ -504,9 +501,9 @@ func CSIInlineVolumesEnabled(c clientset.Interface, t *framework.TimeoutContext, // GenericEphemeralVolumesEnabled checks whether the running cluster has the GenericEphemeralVolume feature gate enabled. // It does that by trying to create a pod that uses that feature. -func GenericEphemeralVolumesEnabled(c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) { +func GenericEphemeralVolumesEnabled(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string) (bool, error) { storageClassName := "no-such-storage-class" - return VolumeSourceEnabled(c, t, ns, v1.VolumeSource{ + return VolumeSourceEnabled(ctx, c, t, ns, v1.VolumeSource{ Ephemeral: &v1.EphemeralVolumeSource{ VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{ Spec: v1.PersistentVolumeClaimSpec{ @@ -525,7 +522,7 @@ func GenericEphemeralVolumesEnabled(c clientset.Interface, t *framework.TimeoutC // VolumeSourceEnabled checks whether a certain kind of volume source is enabled by trying // to create a pod that uses it. 
-func VolumeSourceEnabled(c clientset.Interface, t *framework.TimeoutContext, ns string, volume v1.VolumeSource) (bool, error) { +func VolumeSourceEnabled(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string, volume v1.VolumeSource) (bool, error) { pod := &v1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", @@ -557,12 +554,12 @@ func VolumeSourceEnabled(c clientset.Interface, t *framework.TimeoutContext, ns }, } - pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) switch { case err == nil: // Pod was created, feature supported. - StopPodAndDependents(c, t, pod) + StopPodAndDependents(ctx, c, t, pod) return true, nil case apierrors.IsInvalid(err): // "Invalid" because it uses a feature that isn't supported. diff --git a/test/e2e/storage/testsuites/fsgroupchangepolicy.go b/test/e2e/storage/testsuites/fsgroupchangepolicy.go index 12f0f8a8cf4..542264119a6 100644 --- a/test/e2e/storage/testsuites/fsgroupchangepolicy.go +++ b/test/e2e/storage/testsuites/fsgroupchangepolicy.go @@ -96,10 +96,9 @@ func (s *fsGroupChangePolicyTestSuite) SkipUnsupportedTests(driver storageframew func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *storageframework.PerTestConfig - driverCleanup func() - driver storageframework.TestDriver - resource *storageframework.VolumeResource + config *storageframework.PerTestConfig + driver storageframework.TestDriver + resource *storageframework.VolumeResource } var l local @@ -112,7 +111,7 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestD e2eskipper.SkipIfNodeOSDistroIs("windows") l = local{} l.driver = driver - l.config, l.driverCleanup = driver.PrepareTest(f) + l.config = driver.PrepareTest(f) testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange l.resource = storageframework.CreateVolumeResource(l.driver, l.config, pattern, testVolumeSizeRange) } @@ -126,11 +125,6 @@ func (s *fsGroupChangePolicyTestSuite) DefineTests(driver storageframework.TestD l.resource = nil } - if l.driverCleanup != nil { - errs = append(errs, storageutils.TryFunc(l.driverCleanup)) - l.driverCleanup = nil - } - framework.ExpectNoError(errors.NewAggregate(errs), "while cleanup resource") } diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go index e668e93cb75..002fa166ef5 100644 --- a/test/e2e/storage/testsuites/multivolume.go +++ b/test/e2e/storage/testsuites/multivolume.go @@ -87,8 +87,7 @@ func (t *multiVolumeTestSuite) SkipUnsupportedTests(driver storageframework.Test func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *storageframework.PerTestConfig - driverCleanup func() + config *storageframework.PerTestConfig cs clientset.Interface ns *v1.Namespace @@ -114,7 +113,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p l.driver = driver // Now do the more expensive test initialization. 
- l.config, l.driverCleanup = driver.PrepareTest(f) + l.config = driver.PrepareTest(f) l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) } @@ -124,8 +123,6 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p errs = append(errs, resource.CleanupResource()) } - errs = append(errs, storageutils.TryFunc(l.driverCleanup)) - l.driverCleanup = nil framework.ExpectNoError(errors.NewAggregate(errs), "while cleanup resource") l.migrationCheck.validateMigrationVolumeOpCounts() } @@ -320,7 +317,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // [ node1 ] // | | <- same volume mode // [volume1] -> [restored volume1 snapshot] - ginkgo.It("should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly][Feature:VolumeSnapshotDataSource][Feature:VolumeSourceXFS]", func() { + ginkgo.It("should concurrently access the volume and restored snapshot from pods on the same node [LinuxOnly][Feature:VolumeSnapshotDataSource][Feature:VolumeSourceXFS]", func(ctx context.Context) { init() defer cleanup() @@ -345,8 +342,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p } testConfig := storageframework.ConvertTestConfig(l.config) dc := l.config.Framework.DynamicClient - dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(f, testConfig, l.config, pattern, l.cs, dc, resource.Pvc, resource.Sc, sDriver, pattern.VolMode, expectedContent) - defer cleanupFunc() + dataSource := prepareSnapshotDataSourceForProvisioning(ctx, f, testConfig, l.config, pattern, l.cs, dc, resource.Pvc, resource.Sc, sDriver, pattern.VolMode, expectedContent) // Create 2nd PVC for testing pvc2 := &v1.PersistentVolumeClaim{ @@ -375,7 +371,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p // [ node1 ] // | | <- same volume mode // [volume1] -> [cloned volume1] - ginkgo.It("should concurrently access the volume and its clone from pods on the same node [LinuxOnly][Feature:VolumeSourceXFS]", func() { + ginkgo.It("should concurrently access the volume and its clone from pods on the same node [LinuxOnly][Feature:VolumeSourceXFS]", func(ctx context.Context) { init() defer cleanup() @@ -390,8 +386,7 @@ func (t *multiVolumeTestSuite) DefineTests(driver storageframework.TestDriver, p l.resources = append(l.resources, resource) pvcs := []*v1.PersistentVolumeClaim{resource.Pvc} testConfig := storageframework.ConvertTestConfig(l.config) - dataSource, cleanupFunc := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, resource.Pvc, resource.Sc, pattern.VolMode, expectedContent) - defer cleanupFunc() + dataSource := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, resource.Pvc, resource.Sc, pattern.VolMode, expectedContent) // Create 2nd PVC for testing pvc2 := &v1.PersistentVolumeClaim{ diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index fcd9ffca269..f0b13e28479 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -114,8 +114,7 @@ func (p *provisioningTestSuite) SkipUnsupportedTests(driver storageframework.Tes func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *storageframework.PerTestConfig - driverCleanup func() + config *storageframework.PerTestConfig testCase *StorageClassTest cs clientset.Interface @@ 
-140,8 +139,9 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, l = local{} dDriver, _ = driver.(storageframework.DynamicPVTestDriver) // Now do the more expensive test initialization. - l.config, l.driverCleanup = driver.PrepareTest(f) + l.config = driver.PrepareTest(f) l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) + ginkgo.DeferCleanup(l.migrationCheck.validateMigrationVolumeOpCounts) l.cs = l.config.Framework.ClientSet testVolumeSizeRange := p.GetTestSuiteInfo().SupportedSizeRange driverVolumeSizeRange := dDriver.GetDriverInfo().SupportedSizeRange @@ -176,15 +176,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, } } - cleanup := func() { - err := storageutils.TryFunc(l.driverCleanup) - l.driverCleanup = nil - framework.ExpectNoError(err, "while cleaning up driver") - - l.migrationCheck.validateMigrationVolumeOpCounts() - } - - ginkgo.It("should provision storage with mount options", func() { + ginkgo.It("should provision storage with mount options", func(ctx context.Context) { if dInfo.SupportedMountOption == nil { e2eskipper.Skipf("Driver %q does not define supported mount option - skipping", dInfo.Name) } @@ -193,19 +185,17 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, } init() - defer cleanup() l.testCase.Class.MountOptions = dInfo.SupportedMountOption.Union(dInfo.RequiredMountOption).List() l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { - PVWriteReadSingleNodeCheck(l.cs, f.Timeouts, claim, l.config.ClientNodeSelection) + PVWriteReadSingleNodeCheck(ctx, l.cs, f.Timeouts, claim, l.config.ClientNodeSelection) } - _, clearProvisionedStorageClass := SetupStorageClass(l.testCase.Client, l.testCase.Class) - defer clearProvisionedStorageClass() + SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class) - l.testCase.TestDynamicProvisioning() + l.testCase.TestDynamicProvisioning(ctx) }) - ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func() { + ginkgo.It("should provision storage with snapshot data source [Feature:VolumeSnapshotDataSource]", func(ctx context.Context) { if !dInfo.Capabilities[storageframework.CapSnapshotDataSource] { e2eskipper.Skipf("Driver %q does not support populate data from snapshot - skipping", dInfo.Name) } @@ -219,13 +209,11 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, } init() - defer cleanup() dc := l.config.Framework.DynamicClient testConfig := storageframework.ConvertTestConfig(l.config) expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name) - dataSource, cleanupFunc := prepareSnapshotDataSourceForProvisioning(f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent) - defer cleanupFunc() + dataSource := prepareSnapshotDataSourceForProvisioning(ctx, f, testConfig, l.config, pattern, l.cs, dc, l.pvc, l.sc, sDriver, pattern.VolMode, expectedContent) l.pvc.Spec.DataSource = dataSource l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { @@ -240,10 +228,10 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, } e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests) } - l.testCase.TestDynamicProvisioning() + l.testCase.TestDynamicProvisioning(ctx) }) - ginkgo.It("should provision storage with any volume data source [Serial]", func() { + ginkgo.It("should provision storage with any volume data source 
[Serial]", func(ctx context.Context) { if len(dInfo.InTreePluginName) != 0 { e2eskipper.Skipf("AnyVolumeDataSource feature only works with CSI drivers - skipping") } @@ -252,7 +240,6 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, } init() - defer cleanup() ginkgo.By("Creating validator namespace") valNamespace, err := f.CreateNamespace(fmt.Sprintf("%s-val", f.Namespace.Name), map[string]string{ @@ -419,18 +406,16 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, e2evolume.TestVolumeClientSlow(f, testConfig, nil, "", tests) } - _, clearProvisionedStorageClass := SetupStorageClass(l.testCase.Client, l.testCase.Class) - defer clearProvisionedStorageClass() + SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class) - l.testCase.TestDynamicProvisioning() + l.testCase.TestDynamicProvisioning(ctx) }) - ginkgo.It("should provision storage with pvc data source", func() { + ginkgo.It("should provision storage with pvc data source", func(ctx context.Context) { if !dInfo.Capabilities[storageframework.CapPVCDataSource] { e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name) } init() - defer cleanup() if l.config.ClientNodeSelection.Name == "" { // Schedule all pods to the same topology segment (e.g. a cloud availability zone), some @@ -441,8 +426,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, } testConfig := storageframework.ConvertTestConfig(l.config) expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name) - dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent) - defer dataSourceCleanup() + dataSource := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent) l.pvc.Spec.DataSource = dataSource l.testCase.NodeSelection = testConfig.ClientNodeSelection l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { @@ -460,10 +444,10 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, // Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning. volumeAttachment := e2evolume.GetVolumeAttachmentName(f.ClientSet, testConfig, l.testCase.Provisioner, dataSource.Name, l.sourcePVC.Namespace) e2evolume.WaitForVolumeAttachmentTerminated(volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision) - l.testCase.TestDynamicProvisioning() + l.testCase.TestDynamicProvisioning(ctx) }) - ginkgo.It("should provision storage with pvc data source in parallel [Slow]", func() { + ginkgo.It("should provision storage with pvc data source in parallel [Slow]", func(ctx context.Context) { // Test cloning a single volume multiple times. if !dInfo.Capabilities[storageframework.CapPVCDataSource] { e2eskipper.Skipf("Driver %q does not support cloning - skipping", dInfo.Name) @@ -473,7 +457,6 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, } init() - defer cleanup() if l.config.ClientNodeSelection.Name == "" { // Schedule all pods to the same topology segment (e.g. 
a cloud availability zone), some @@ -484,8 +467,7 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, } testConfig := storageframework.ConvertTestConfig(l.config) expectedContent := fmt.Sprintf("Hello from namespace %s", f.Namespace.Name) - dataSource, dataSourceCleanup := preparePVCDataSourceForProvisioning(f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent) - defer dataSourceCleanup() + dataSource := preparePVCDataSourceForProvisioning(ctx, f, testConfig, l.cs, l.sourcePVC, l.sc, pattern.VolMode, expectedContent) l.pvc.Spec.DataSource = dataSource var wg sync.WaitGroup @@ -516,13 +498,13 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, // Cloning fails if the source disk is still in the process of detaching, so we wait for the VolumeAttachment to be removed before cloning. volumeAttachment := e2evolume.GetVolumeAttachmentName(f.ClientSet, testConfig, l.testCase.Provisioner, dataSource.Name, l.sourcePVC.Namespace) e2evolume.WaitForVolumeAttachmentTerminated(volumeAttachment, f.ClientSet, f.Timeouts.DataSourceProvision) - t.TestDynamicProvisioning() + t.TestDynamicProvisioning(ctx) }(i) } wg.Wait() }) - ginkgo.It("should mount multiple PV pointing to the same storage on the same node", func() { + ginkgo.It("should mount multiple PV pointing to the same storage on the same node", func(ctx context.Context) { // csi-hostpath driver does not support this test case. In this test case, we have 2 PV containing the same underlying storage. // during the NodeStage call for the second volume, csi-hostpath fails the call, because it thinks the volume is already staged at a different path. // Note: This is not an issue with driver like PD CSI where the NodeStage is a no-op for block mode. 
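Note (reviewer annotation, not part of the patch): the hunks above consistently drop the init()/defer cleanup() pairing in favor of registering teardown at the point where the resource is created, via ginkgo.DeferCleanup. The sketch below shows that bare pattern with Ginkgo v2 only; fakeDriver and prepareFakeDriver are made-up names used to illustrate the call shape, not framework APIs.

package example

import (
	"context"
	"fmt"

	"github.com/onsi/ginkgo/v2"
)

// fakeDriver and prepareFakeDriver are hypothetical stand-ins used only to
// illustrate the DeferCleanup call shape.
type fakeDriver struct{ name string }

func prepareFakeDriver(name string) *fakeDriver {
	d := &fakeDriver{name: name}
	// Teardown is registered where the resource is created, so callers no
	// longer need to capture and defer a returned cleanup function.
	ginkgo.DeferCleanup(func(ctx context.Context) {
		fmt.Printf("tearing down driver %s\n", d.name)
	})
	return d
}

var _ = ginkgo.Describe("DeferCleanup pattern", func() {
	ginkgo.It("needs no explicit defer cleanup()", func(ctx context.Context) {
		d := prepareFakeDriver("mock")
		_ = d
		// Registered callbacks run in reverse order once the spec finishes,
		// even if the spec fails part-way through.
	})
})

This is why the per-suite cleanup closures and the storageutils.TryFunc bookkeeping can simply be deleted in the hunks that follow.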
@@ -535,15 +517,13 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, } init() - defer cleanup() l.testCase.PvCheck = func(claim *v1.PersistentVolumeClaim) { - MultiplePVMountSingleNodeCheck(l.cs, f.Timeouts, claim, l.config.ClientNodeSelection) + MultiplePVMountSingleNodeCheck(ctx, l.cs, f.Timeouts, claim, l.config.ClientNodeSelection) } - _, clearProvisionedStorageClass := SetupStorageClass(l.testCase.Client, l.testCase.Class) - defer clearProvisionedStorageClass() + SetupStorageClass(ctx, l.testCase.Client, l.testCase.Class) - l.testCase.TestDynamicProvisioning() + l.testCase.TestDynamicProvisioning(ctx) }) } @@ -551,49 +531,50 @@ func (p *provisioningTestSuite) DefineTests(driver storageframework.TestDriver, // then it's returned as it is, if it doesn't exist then it's created first // and then returned, if the spec is nil then we return the `default` StorageClass func SetupStorageClass( + ctx context.Context, client clientset.Interface, class *storagev1.StorageClass, -) (*storagev1.StorageClass, func()) { +) *storagev1.StorageClass { gomega.Expect(client).NotTo(gomega.BeNil(), "SetupStorageClass.client is required") var err error var computedStorageClass *storagev1.StorageClass - var clearComputedStorageClass = func() {} if class != nil { - computedStorageClass, err = client.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{}) + computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{}) if err == nil { // skip storageclass creation if it already exists ginkgo.By("Storage class " + computedStorageClass.Name + " is already created, skipping creation.") } else { ginkgo.By("Creating a StorageClass") - class, err = client.StorageV1().StorageClasses().Create(context.TODO(), class, metav1.CreateOptions{}) + class, err = client.StorageV1().StorageClasses().Create(ctx, class, metav1.CreateOptions{}) framework.ExpectNoError(err) - computedStorageClass, err = client.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{}) + computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{}) framework.ExpectNoError(err) - clearComputedStorageClass = func() { + clearComputedStorageClass := func(ctx context.Context) { framework.Logf("deleting storage class %s", computedStorageClass.Name) - err := client.StorageV1().StorageClasses().Delete(context.TODO(), computedStorageClass.Name, metav1.DeleteOptions{}) + err := client.StorageV1().StorageClasses().Delete(ctx, computedStorageClass.Name, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.ExpectNoError(err, "delete storage class") } } + ginkgo.DeferCleanup(clearComputedStorageClass) } } else { // StorageClass is nil, so the default one will be used scName, err := e2epv.GetDefaultStorageClassName(client) framework.ExpectNoError(err) ginkgo.By("Wanted storage class is nil, fetching default StorageClass=" + scName) - computedStorageClass, err = client.StorageV1().StorageClasses().Get(context.TODO(), scName, metav1.GetOptions{}) + computedStorageClass, err = client.StorageV1().StorageClasses().Get(ctx, scName, metav1.GetOptions{}) framework.ExpectNoError(err) } - return computedStorageClass, clearComputedStorageClass + return computedStorageClass } // TestDynamicProvisioning tests dynamic provisioning with specified StorageClassTest // it's assumed that the StorageClass `t.Class` is already provisioned, // see #ProvisionStorageClass -func (t 
StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { +func (t StorageClassTest) TestDynamicProvisioning(ctx context.Context) *v1.PersistentVolume { var err error client := t.Client gomega.Expect(client).NotTo(gomega.BeNil(), "StorageClassTest.Client is required") @@ -602,16 +583,16 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { gomega.Expect(claim.GenerateName).NotTo(gomega.BeEmpty(), "StorageClassTest.Claim.GenerateName must not be empty") class := t.Class gomega.Expect(class).NotTo(gomega.BeNil(), "StorageClassTest.Class is required") - class, err = client.StorageV1().StorageClasses().Get(context.TODO(), class.Name, metav1.GetOptions{}) + class, err = client.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "StorageClass.Class "+class.Name+" couldn't be fetched from the cluster") ginkgo.By(fmt.Sprintf("creating claim=%+v", claim)) - claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim, metav1.CreateOptions{}) + claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(ctx, claim, metav1.CreateOptions{}) framework.ExpectNoError(err) defer func() { framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) // typically this claim has already been deleted - err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{}) + err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err) } @@ -641,10 +622,10 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { t.PvCheck(claim) } - pv := t.checkProvisioning(client, claim, class) + pv := t.checkProvisioning(ctx, client, claim, class) ginkgo.By(fmt.Sprintf("deleting claim %q/%q", claim.Namespace, claim.Name)) - framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(context.TODO(), claim.Name, metav1.DeleteOptions{})) + framework.ExpectNoError(client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(ctx, claim.Name, metav1.DeleteOptions{})) // Wait for the PV to get deleted if reclaim policy is Delete. (If it's // Retain, there's no use waiting because the PV won't be auto-deleted and @@ -662,25 +643,25 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { } // getBoundPV returns a PV details. 
-func getBoundPV(client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) { +func getBoundPV(ctx context.Context, client clientset.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) { // Get new copy of the claim - claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) if err != nil { return nil, err } // Get the bound PV - pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{}) + pv, err := client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{}) return pv, err } // checkProvisioning verifies that the claim is bound and has the correct properties -func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume { +func (t StorageClassTest) checkProvisioning(ctx context.Context, client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) *v1.PersistentVolume { err := e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision) framework.ExpectNoError(err) ginkgo.By("checking the claim") - pv, err := getBoundPV(client, claim) + pv, err := getBoundPV(ctx, client, claim) framework.ExpectNoError(err) // Check sizes @@ -739,23 +720,23 @@ func (t StorageClassTest) checkProvisioning(client clientset.Interface, claim *v // persistent across pods. // // This is a common test that can be called from a StorageClassTest.PvCheck. -func PVWriteReadSingleNodeCheck(client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) *v1.PersistentVolume { +func PVWriteReadSingleNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) *v1.PersistentVolume { ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node)) command := "echo 'hello world' > /mnt/test/data" - pod := StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node) + pod := StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-volume-tester-writer", command, node) defer func() { // pod might be nil now. - StopPod(client, pod) + StopPod(ctx, client, pod) }() framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow)) - runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") actualNodeName := runningPod.Spec.NodeName - StopPod(client, pod) + StopPod(ctx, client, pod) pod = nil // Don't stop twice. // Get a new copy of the PV - e2evolume, err := getBoundPV(client, claim) + e2evolume, err := getBoundPV(ctx, client, claim) framework.ExpectNoError(err) ginkgo.By(fmt.Sprintf("checking the created volume has the correct mount options, is readable and retains data on the same node %q", actualNodeName)) @@ -773,7 +754,7 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, timeouts *framework. 
// agnhost doesn't support mount command = "grep 'hello world' /mnt/test/data" } - RunInPodWithVolume(client, timeouts, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName}) + RunInPodWithVolume(ctx, client, timeouts, claim.Namespace, claim.Name, "pvc-volume-tester-reader", command, e2epod.NodeSelection{Name: actualNodeName}) return e2evolume } @@ -792,23 +773,23 @@ func PVWriteReadSingleNodeCheck(client clientset.Interface, timeouts *framework. // persistent across pods and across nodes. // // This is a common test that can be called from a StorageClassTest.PvCheck. -func PVMultiNodeCheck(client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) { +func PVMultiNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) { framework.ExpectEqual(node.Name, "", "this test only works when not locked onto a single node") var pod *v1.Pod defer func() { // passing pod = nil is okay. - StopPod(client, pod) + StopPod(ctx, client, pod) }() ginkgo.By(fmt.Sprintf("checking the created volume is writable on node %+v", node)) command := "echo 'hello world' > /mnt/test/data" - pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node) + pod = StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-writer-node1", command, node) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow)) - runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + runningPod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") actualNodeName := runningPod.Spec.NodeName - StopPod(client, pod) + StopPod(ctx, client, pod) pod = nil // Don't stop twice. // Add node-anti-affinity. 
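Note (illustrative, not part of the patch): the second recurring change is threading the spec's ctx into every helper and client call instead of context.TODO(), so in-flight API requests are cancelled when a spec is interrupted or times out. A minimal sketch of that call shape using plain client-go; startPausePod and stopPod are hypothetical helpers, not the framework's StartInPodWithVolume/StopPod, and the image name is only an example.

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// startPausePod shows the spec ctx being passed to the API call instead of
// context.TODO().
func startPausePod(ctx context.Context, c kubernetes.Interface, ns, name string) (*v1.Pod, error) {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: ns},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "pause", Image: "registry.k8s.io/pause:3.9"},
			},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
	// With ctx threaded through, this call is cancelled automatically when
	// the spec is interrupted or hits its timeout.
	return c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
}

func stopPod(ctx context.Context, c kubernetes.Interface, pod *v1.Pod) error {
	if pod == nil { // passing a nil pod is tolerated, mirroring StopPod above
		return nil
	}
	return c.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
}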
@@ -816,17 +797,17 @@ func PVMultiNodeCheck(client clientset.Interface, timeouts *framework.TimeoutCon e2epod.SetAntiAffinity(&secondNode, actualNodeName) ginkgo.By(fmt.Sprintf("checking the created volume is readable and retains data on another node %+v", secondNode)) command = "grep 'hello world' /mnt/test/data" - pod = StartInPodWithVolume(client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode) + pod = StartInPodWithVolume(ctx, client, claim.Namespace, claim.Name, "pvc-reader-node2", command, secondNode) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(client, pod.Name, pod.Namespace, timeouts.PodStartSlow)) - runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + runningPod, err = client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err, "get pod") framework.ExpectNotEqual(runningPod.Spec.NodeName, actualNodeName, "second pod should have run on a different node") - StopPod(client, pod) + StopPod(ctx, client, pod) pod = nil } // TestBindingWaitForFirstConsumerMultiPVC tests the binding with WaitForFirstConsumer mode -func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) { +func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Context, claims []*v1.PersistentVolumeClaim, nodeSelector map[string]string, expectUnschedulable bool) ([]*v1.PersistentVolume, *v1.Node) { var err error framework.ExpectNotEqual(len(claims), 0) namespace := claims[0].Namespace @@ -835,7 +816,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P var claimNames []string var createdClaims []*v1.PersistentVolumeClaim for _, claim := range claims { - c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(context.TODO(), claim, metav1.CreateOptions{}) + c, err := t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(ctx, claim, metav1.CreateOptions{}) claimNames = append(claimNames, c.Name) createdClaims = append(createdClaims, c) framework.ExpectNoError(err) @@ -859,7 +840,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P ginkgo.By("checking the claims are in pending state") err = e2epv.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, t.Client, namespace, claimNames, 2*time.Second /* Poll */, t.Timeouts.ClaimProvisionShort, true) framework.ExpectError(err) - verifyPVCsPending(t.Client, createdClaims) + verifyPVCsPending(ctx, t.Client, createdClaims) ginkgo.By("creating a pod referring to the claims") // Create a pod referring to the claim and wait for it to get to running @@ -876,25 +857,25 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P }() if expectUnschedulable { // Verify that no claims are provisioned. 
- verifyPVCsPending(t.Client, createdClaims) + verifyPVCsPending(ctx, t.Client, createdClaims) return nil, nil } // collect node details - node, err := t.Client.CoreV1().Nodes().Get(context.TODO(), pod.Spec.NodeName, metav1.GetOptions{}) + node, err := t.Client.CoreV1().Nodes().Get(ctx, pod.Spec.NodeName, metav1.GetOptions{}) framework.ExpectNoError(err) ginkgo.By("re-checking the claims to see they bound") var pvs []*v1.PersistentVolume for _, claim := range createdClaims { // Get new copy of the claim - claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) + claim, err = t.Client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) // make sure claim did bind err = e2epv.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, t.Client, claim.Namespace, claim.Name, framework.Poll, t.Timeouts.ClaimProvision) framework.ExpectNoError(err) - pv, err := t.Client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{}) + pv, err := t.Client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{}) framework.ExpectNoError(err) pvs = append(pvs, pv) } @@ -904,20 +885,20 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(claims []*v1.P // RunInPodWithVolume runs a command in a pod with given claim mounted to /mnt directory. // It starts, checks, collects output and stops it. -func RunInPodWithVolume(c clientset.Interface, t *framework.TimeoutContext, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod { - pod := StartInPodWithVolume(c, ns, claimName, podName, command, node) - defer StopPod(c, pod) +func RunInPodWithVolume(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod { + pod := StartInPodWithVolume(ctx, c, ns, claimName, podName, command, node) + defer StopPod(ctx, c, pod) framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespaceTimeout(c, pod.Name, pod.Namespace, t.PodStartSlow)) // get the latest status of the pod - pod, err := c.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) + pod, err := c.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{}) framework.ExpectNoError(err) return pod } // StartInPodWithVolume starts a command in a pod with given claim mounted to /mnt directory // The caller is responsible for checking the pod and deleting it. -func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod { - return StartInPodWithVolumeSource(c, v1.VolumeSource{ +func StartInPodWithVolume(ctx context.Context, c clientset.Interface, ns, claimName, podName, command string, node e2epod.NodeSelection) *v1.Pod { + return StartInPodWithVolumeSource(ctx, c, v1.VolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ ClaimName: claimName, }, @@ -926,7 +907,7 @@ func StartInPodWithVolume(c clientset.Interface, ns, claimName, podName, command // StartInPodWithVolumeSource starts a command in a pod with given volume mounted to /mnt directory // The caller is responsible for checking the pod and deleting it. 
-func StartInPodWithVolumeSource(c clientset.Interface, volSrc v1.VolumeSource, ns, podName, command string, node e2epod.NodeSelection) *v1.Pod { +func StartInPodWithVolumeSource(ctx context.Context, c clientset.Interface, volSrc v1.VolumeSource, ns, podName, command string, node e2epod.NodeSelection) *v1.Pod { pod := &v1.Pod{ TypeMeta: metav1.TypeMeta{ Kind: "Pod", @@ -963,18 +944,18 @@ func StartInPodWithVolumeSource(c clientset.Interface, volSrc v1.VolumeSource, n } e2epod.SetNodeSelection(&pod.Spec, node) - pod, err := c.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{}) + pod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) framework.ExpectNoError(err, "Failed to create pod: %v", err) return pod } // StopPod first tries to log the output of the pod's container, then deletes the pod and // waits for that to succeed. -func StopPod(c clientset.Interface, pod *v1.Pod) { +func StopPod(ctx context.Context, c clientset.Interface, pod *v1.Pod) { if pod == nil { return } - body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(context.TODO()).Raw() + body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(ctx).Raw() if err != nil { framework.Logf("Error getting logs for pod %s: %v", pod.Name, err) } else { @@ -986,11 +967,11 @@ func StopPod(c clientset.Interface, pod *v1.Pod) { // StopPodAndDependents first tries to log the output of the pod's container, // then deletes the pod and waits for that to succeed. Also waits for all owned // resources to be deleted. -func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutContext, pod *v1.Pod) { +func StopPodAndDependents(ctx context.Context, c clientset.Interface, timeouts *framework.TimeoutContext, pod *v1.Pod) { if pod == nil { return } - body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(context.TODO()).Raw() + body, err := c.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &v1.PodLogOptions{}).Do(ctx).Raw() if err != nil { framework.Logf("Error getting logs for pod %s: %v", pod.Name, err) } else { @@ -999,7 +980,7 @@ func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutCont // We must wait explicitly for removal of the generic ephemeral volume PVs. // For that we must find them first... - pvs, err := c.CoreV1().PersistentVolumes().List(context.TODO(), metav1.ListOptions{}) + pvs, err := c.CoreV1().PersistentVolumes().List(ctx, metav1.ListOptions{}) framework.ExpectNoError(err, "list PVs") var podPVs []v1.PersistentVolume for _, pv := range pvs.Items { @@ -1007,7 +988,7 @@ func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutCont pv.Spec.ClaimRef.Namespace != pod.Namespace { continue } - pvc, err := c.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(context.TODO(), pv.Spec.ClaimRef.Name, metav1.GetOptions{}) + pvc, err := c.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(ctx, pv.Spec.ClaimRef.Name, metav1.GetOptions{}) if err != nil && apierrors.IsNotFound(err) { // Must have been some unrelated PV, otherwise the PVC should exist. 
continue @@ -1020,7 +1001,7 @@ func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutCont framework.Logf("Deleting pod %q in namespace %q", pod.Name, pod.Namespace) deletionPolicy := metav1.DeletePropagationForeground - err = c.CoreV1().Pods(pod.Namespace).Delete(context.TODO(), pod.Name, + err = c.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{ // If the pod is the owner of some resources (like ephemeral inline volumes), // then we want to be sure that those are also gone before we return. @@ -1045,16 +1026,17 @@ func StopPodAndDependents(c clientset.Interface, timeouts *framework.TimeoutCont } } -func verifyPVCsPending(client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) { +func verifyPVCsPending(ctx context.Context, client clientset.Interface, pvcs []*v1.PersistentVolumeClaim) { for _, claim := range pvcs { // Get new copy of the claim - claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(context.TODO(), claim.Name, metav1.GetOptions{}) + claim, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{}) framework.ExpectNoError(err) framework.ExpectEqual(claim.Status.Phase, v1.ClaimPending) } } func prepareSnapshotDataSourceForProvisioning( + ctx context.Context, f *framework.Framework, config e2evolume.TestConfig, perTestConfig *storageframework.PerTestConfig, @@ -1066,14 +1048,14 @@ func prepareSnapshotDataSourceForProvisioning( sDriver storageframework.SnapshottableTestDriver, mode v1.PersistentVolumeMode, injectContent string, -) (*v1.TypedLocalObjectReference, func()) { - _, clearComputedStorageClass := SetupStorageClass(client, class) +) *v1.TypedLocalObjectReference { + SetupStorageClass(ctx, client, class) if initClaim.ResourceVersion != "" { ginkgo.By("Skipping creation of PVC, it already exists") } else { ginkgo.By("[Initialize dataSource]creating a initClaim") - updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(context.TODO(), initClaim, metav1.CreateOptions{}) + updatedClaim, err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Create(ctx, initClaim, metav1.CreateOptions{}) if apierrors.IsAlreadyExists(err) { err = nil } @@ -1101,24 +1083,23 @@ func prepareSnapshotDataSourceForProvisioning( Name: snapshotResource.Vs.GetName(), } - cleanupFunc := func() { + cleanupFunc := func(ctx context.Context) { framework.Logf("deleting initClaim %q/%q", initClaim.Namespace, initClaim.Name) - err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Delete(context.TODO(), initClaim.Name, metav1.DeleteOptions{}) + err := client.CoreV1().PersistentVolumeClaims(initClaim.Namespace).Delete(ctx, initClaim.Name, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting initClaim %q. 
Error: %v", initClaim.Name, err) } err = snapshotResource.CleanupResource(f.Timeouts) framework.ExpectNoError(err) - - clearComputedStorageClass() - } + ginkgo.DeferCleanup(cleanupFunc) - return dataSourceRef, cleanupFunc + return dataSourceRef } func preparePVCDataSourceForProvisioning( + ctx context.Context, f *framework.Framework, config e2evolume.TestConfig, client clientset.Interface, @@ -1126,15 +1107,15 @@ func preparePVCDataSourceForProvisioning( class *storagev1.StorageClass, mode v1.PersistentVolumeMode, injectContent string, -) (*v1.TypedLocalObjectReference, func()) { - _, clearComputedStorageClass := SetupStorageClass(client, class) +) *v1.TypedLocalObjectReference { + SetupStorageClass(ctx, client, class) if source.ResourceVersion != "" { ginkgo.By("Skipping creation of PVC, it already exists") } else { ginkgo.By("[Initialize dataSource]creating a source PVC") var err error - source, err = client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(context.TODO(), source, metav1.CreateOptions{}) + source, err = client.CoreV1().PersistentVolumeClaims(source.Namespace).Create(ctx, source, metav1.CreateOptions{}) framework.ExpectNoError(err) } @@ -1153,17 +1134,16 @@ func preparePVCDataSourceForProvisioning( Name: source.GetName(), } - cleanupFunc := func() { + cleanupFunc := func(ctx context.Context) { framework.Logf("deleting source PVC %q/%q", source.Namespace, source.Name) - err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Delete(context.TODO(), source.Name, metav1.DeleteOptions{}) + err := client.CoreV1().PersistentVolumeClaims(source.Namespace).Delete(ctx, source.Name, metav1.DeleteOptions{}) if err != nil && !apierrors.IsNotFound(err) { framework.Failf("Error deleting source PVC %q. Error: %v", source.Name, err) } - - clearComputedStorageClass() } + ginkgo.DeferCleanup(cleanupFunc) - return dataSourceRef, cleanupFunc + return dataSourceRef } // MultiplePVMountSingleNodeCheck checks that multiple PV pointing to the same underlying storage can be mounted simultaneously on a single node. @@ -1172,7 +1152,7 @@ func preparePVCDataSourceForProvisioning( // - Start Pod1 using PVC1, PV1 (which points to a underlying volume v) on node N1. // - Create PVC2, PV2 and prebind them. PV2 points to the same underlying volume v. // - Start Pod2 using PVC2, PV2 (which points to a underlying volume v) on node N1. -func MultiplePVMountSingleNodeCheck(client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) { +func MultiplePVMountSingleNodeCheck(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, claim *v1.PersistentVolumeClaim, node e2epod.NodeSelection) { pod1Config := e2epod.Config{ NS: claim.Namespace, NodeSelection: node, @@ -1187,7 +1167,7 @@ func MultiplePVMountSingleNodeCheck(client clientset.Interface, timeouts *framew ginkgo.By(fmt.Sprintf("Created Pod %s/%s on node %s", pod1.Namespace, pod1.Name, pod1.Spec.NodeName)) // Create new PV which points to the same underlying storage. Retain policy is used so that deletion of second PVC does not trigger the deletion of its bound PV and underlying storage. 
- e2evolume, err := getBoundPV(client, claim) + e2evolume, err := getBoundPV(ctx, client, claim) framework.ExpectNoError(err) pv2Config := e2epv.PersistentVolumeConfig{ NamePrefix: fmt.Sprintf("%s-", "pv"), diff --git a/test/e2e/storage/testsuites/readwriteoncepod.go b/test/e2e/storage/testsuites/readwriteoncepod.go index e28a54220ed..85124d50ef6 100644 --- a/test/e2e/storage/testsuites/readwriteoncepod.go +++ b/test/e2e/storage/testsuites/readwriteoncepod.go @@ -33,7 +33,6 @@ import ( e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" - storageutils "k8s.io/kubernetes/test/e2e/storage/utils" admissionapi "k8s.io/pod-security-admission/api" ) @@ -44,8 +43,7 @@ type readWriteOncePodTestSuite struct { var _ storageframework.TestSuite = &readWriteOncePodTestSuite{} type readWriteOncePodTest struct { - config *storageframework.PerTestConfig - driverCleanup func() + config *storageframework.PerTestConfig cs clientset.Interface volume *storageframework.VolumeResource @@ -96,7 +94,7 @@ func (t *readWriteOncePodTestSuite) DefineTests(driver storageframework.TestDriv init := func() { l = readWriteOncePodTest{} - l.config, l.driverCleanup = driver.PrepareTest(f) + l.config = driver.PrepareTest(f) l.cs = f.ClientSet l.pods = []*v1.Pod{} l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), driverInfo.InTreePluginName) @@ -114,7 +112,6 @@ func (t *readWriteOncePodTestSuite) DefineTests(driver storageframework.TestDriv err := l.volume.CleanupResource() errs = append(errs, err) - errs = append(errs, storageutils.TryFunc(l.driverCleanup)) framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") l.migrationCheck.validateMigrationVolumeOpCounts() } diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index 0e3b247d31c..1f9465ed639 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -112,10 +112,9 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, ginkgo.Describe("volume snapshot controller", func() { var ( - err error - config *storageframework.PerTestConfig - driverCleanup func() - cleanupSteps []func() + err error + config *storageframework.PerTestConfig + cleanupSteps []func() cs clientset.Interface dc dynamic.Interface @@ -126,7 +125,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, claimSize string originalMntTestData string ) - init := func() { + init := func(ctx context.Context) { sDriver, _ = driver.(storageframework.SnapshottableTestDriver) dDriver, _ = driver.(storageframework.DynamicPVTestDriver) cleanupSteps = make([]func(), 0) @@ -135,8 +134,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, dc = f.DynamicClient // Now do the more expensive test initialization. - config, driverCleanup = driver.PrepareTest(f) - cleanupSteps = append(cleanupSteps, driverCleanup) + config = driver.PrepareTest(f) cleanupSteps = append(cleanupSteps, func() { framework.ExpectNoError(volumeResource.CleanupResource()) @@ -149,7 +147,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, // sync is available in the Linux and Windows versions of agnhost. 
command := fmt.Sprintf("echo '%s' > %s; sync", originalMntTestData, datapath) - pod = StartInPodWithVolumeSource(cs, *volumeResource.VolSource, f.Namespace.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection) + pod = StartInPodWithVolumeSource(ctx, cs, *volumeResource.VolSource, f.Namespace.Name, "pvc-snapshottable-tester", command, config.ClientNodeSelection) // At this point a pod is created with a PVC. How to proceed depends on which test is running. } @@ -174,11 +172,11 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, }) ginkgo.Context("", func() { - ginkgo.It("should check snapshot fields, check restore correctly works, check deletion (ephemeral)", func() { + ginkgo.It("should check snapshot fields, check restore correctly works, check deletion (ephemeral)", func(ctx context.Context) { if pattern.VolType != storageframework.GenericEphemeralVolume { e2eskipper.Skipf("volume type %q is not ephemeral", pattern.VolType) } - init() + init(ctx) // delete the pod at the end of the test cleanupSteps = append(cleanupSteps, func() { @@ -253,9 +251,9 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, }, } - restoredPod = StartInPodWithVolumeSource(cs, volSrc, restoredPVC.Namespace, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection) + restoredPod = StartInPodWithVolumeSource(ctx, cs, volSrc, restoredPVC.Namespace, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection) cleanupSteps = append(cleanupSteps, func() { - StopPod(cs, restoredPod) + StopPod(ctx, cs, restoredPod) }) framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow)) if pattern.VolType != storageframework.GenericEphemeralVolume { @@ -275,11 +273,11 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, deleteVolumeSnapshot(f, dc, sr, pattern, vscontent) }) - ginkgo.It("should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)", func() { + ginkgo.It("should check snapshot fields, check restore correctly works after modifying source data, check deletion (persistent)", func(ctx context.Context) { if pattern.VolType == storageframework.GenericEphemeralVolume { e2eskipper.Skipf("volume type %q is ephemeral", pattern.VolType) } - init() + init(ctx) pvc = volumeResource.Pvc sc = volumeResource.Sc @@ -307,7 +305,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, // Delete the pod to force NodeUnpublishVolume (unlike the ephemeral case where the pod is deleted at the end of the test). ginkgo.By("[init] deleting the pod") - StopPod(cs, pod) + StopPod(ctx, cs, pod) // At this point we know that: // - a pod was created with a PV that's supposed to have data @@ -374,7 +372,7 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, // After writing data to a file `sync` flushes the data from memory to disk. // sync is available in the Linux and Windows versions of agnhost. 
command := fmt.Sprintf("echo '%s' > %s; sync", modifiedMntTestData, datapath) - RunInPodWithVolume(cs, f.Timeouts, pvc.Namespace, pvc.Name, "pvc-snapshottable-data-tester", command, config.ClientNodeSelection) + RunInPodWithVolume(ctx, cs, f.Timeouts, pvc.Namespace, pvc.Name, "pvc-snapshottable-data-tester", command, config.ClientNodeSelection) ginkgo.By("creating a pvc from the snapshot") claimSize = pvc.Spec.Resources.Requests.Storage().String() @@ -403,9 +401,9 @@ func (s *snapshottableTestSuite) DefineTests(driver storageframework.TestDriver, }) ginkgo.By("starting a pod to use the snapshot") - restoredPod = StartInPodWithVolume(cs, restoredPVC.Namespace, restoredPVC.Name, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection) + restoredPod = StartInPodWithVolume(ctx, cs, restoredPVC.Namespace, restoredPVC.Name, "restored-pvc-tester", "sleep 300", config.ClientNodeSelection) cleanupSteps = append(cleanupSteps, func() { - StopPod(cs, restoredPod) + StopPod(ctx, cs, restoredPod) }) framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(cs, restoredPod.Name, restoredPod.Namespace, f.Timeouts.PodStartSlow)) commands := e2evolume.GenerateReadFileCmd(datapath) diff --git a/test/e2e/storage/testsuites/snapshottable_stress.go b/test/e2e/storage/testsuites/snapshottable_stress.go index c6827386e2f..e56405b1424 100644 --- a/test/e2e/storage/testsuites/snapshottable_stress.go +++ b/test/e2e/storage/testsuites/snapshottable_stress.go @@ -128,18 +128,17 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD driverInfo = driver.GetDriverInfo() snapshottableDriver, _ = driver.(storageframework.SnapshottableTestDriver) cs = f.ClientSet - config, driverCleanup := driver.PrepareTest(f) + config := driver.PrepareTest(f) ctx, cancel := context.WithCancel(context.Background()) stressTest = &snapshottableStressTest{ - config: config, - driverCleanup: driverCleanup, - volumes: []*storageframework.VolumeResource{}, - snapshots: []*storageframework.SnapshotResource{}, - pods: []*v1.Pod{}, - testOptions: *driverInfo.VolumeSnapshotStressTestOptions, - ctx: ctx, - cancel: cancel, + config: config, + volumes: []*storageframework.VolumeResource{}, + snapshots: []*storageframework.SnapshotResource{}, + pods: []*v1.Pod{}, + testOptions: *driverInfo.VolumeSnapshotStressTestOptions, + ctx: ctx, + cancel: cancel, } } diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index 7616f3ed03c..4bf70fc32b8 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -99,8 +99,7 @@ func (s *subPathTestSuite) SkipUnsupportedTests(driver storageframework.TestDriv func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *storageframework.PerTestConfig - driverCleanup func() + config *storageframework.PerTestConfig hostExec storageutils.HostExec resource *storageframework.VolumeResource @@ -124,7 +123,7 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte l = local{} // Now do the more expensive test initialization. 
- l.config, l.driverCleanup = driver.PrepareTest(f) + l.config = driver.PrepareTest(f) l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), driver.GetDriverInfo().InTreePluginName) testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) @@ -181,8 +180,6 @@ func (s *subPathTestSuite) DefineTests(driver storageframework.TestDriver, patte l.resource = nil } - errs = append(errs, storageutils.TryFunc(l.driverCleanup)) - l.driverCleanup = nil framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") if l.hostExec != nil { diff --git a/test/e2e/storage/testsuites/topology.go b/test/e2e/storage/testsuites/topology.go index 9f41a4ddda7..39120d53fae 100644 --- a/test/e2e/storage/testsuites/topology.go +++ b/test/e2e/storage/testsuites/topology.go @@ -44,8 +44,7 @@ type topologyTestSuite struct { } type topologyTest struct { - config *storageframework.PerTestConfig - driverCleanup func() + config *storageframework.PerTestConfig migrationCheck *migrationOpCheck @@ -112,7 +111,7 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt l := topologyTest{} // Now do the more expensive test initialization. - l.config, l.driverCleanup = driver.PrepareTest(f) + l.config = driver.PrepareTest(f) l.resource = storageframework.VolumeResource{ Config: l.config, @@ -156,8 +155,6 @@ func (t *topologyTestSuite) DefineTests(driver storageframework.TestDriver, patt cleanup := func(l topologyTest) { t.CleanupResources(cs, &l) - err := storageutils.TryFunc(l.driverCleanup) - l.driverCleanup = nil framework.ExpectNoError(err, "while cleaning up driver") l.migrationCheck.validateMigrationVolumeOpCounts() diff --git a/test/e2e/storage/testsuites/volume_expand.go b/test/e2e/storage/testsuites/volume_expand.go index 09acce9908b..b91e27690ad 100644 --- a/test/e2e/storage/testsuites/volume_expand.go +++ b/test/e2e/storage/testsuites/volume_expand.go @@ -35,7 +35,6 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" - storageutils "k8s.io/kubernetes/test/e2e/storage/utils" admissionapi "k8s.io/pod-security-admission/api" ) @@ -102,8 +101,7 @@ func (v *volumeExpandTestSuite) SkipUnsupportedTests(driver storageframework.Tes func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *storageframework.PerTestConfig - driverCleanup func() + config *storageframework.PerTestConfig resource *storageframework.VolumeResource pod *v1.Pod @@ -122,7 +120,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, l = local{} // Now do the more expensive test initialization. 
- l.config, l.driverCleanup = driver.PrepareTest(f) + l.config = driver.PrepareTest(f) l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), driver.GetDriverInfo().InTreePluginName) testVolumeSizeRange := v.GetTestSuiteInfo().SupportedSizeRange l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) @@ -149,8 +147,6 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver, l.resource = nil } - errs = append(errs, storageutils.TryFunc(l.driverCleanup)) - l.driverCleanup = nil framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") l.migrationCheck.validateMigrationVolumeOpCounts() } diff --git a/test/e2e/storage/testsuites/volume_io.go b/test/e2e/storage/testsuites/volume_io.go index 6045a64d577..489ad92f4ac 100644 --- a/test/e2e/storage/testsuites/volume_io.go +++ b/test/e2e/storage/testsuites/volume_io.go @@ -41,7 +41,6 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" - storageutils "k8s.io/kubernetes/test/e2e/storage/utils" admissionapi "k8s.io/pod-security-admission/api" ) @@ -98,8 +97,7 @@ func (t *volumeIOTestSuite) SkipUnsupportedTests(driver storageframework.TestDri func (t *volumeIOTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *storageframework.PerTestConfig - driverCleanup func() + config *storageframework.PerTestConfig resource *storageframework.VolumeResource @@ -119,7 +117,7 @@ func (t *volumeIOTestSuite) DefineTests(driver storageframework.TestDriver, patt l = local{} // Now do the more expensive test initialization. - l.config, l.driverCleanup = driver.PrepareTest(f) + l.config = driver.PrepareTest(f) l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange @@ -137,11 +135,6 @@ func (t *volumeIOTestSuite) DefineTests(driver storageframework.TestDriver, patt l.resource = nil } - if l.driverCleanup != nil { - errs = append(errs, storageutils.TryFunc(l.driverCleanup)) - l.driverCleanup = nil - } - framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") l.migrationCheck.validateMigrationVolumeOpCounts() } diff --git a/test/e2e/storage/testsuites/volume_stress.go b/test/e2e/storage/testsuites/volume_stress.go index 4f035f9ef9d..f82dc1bced3 100644 --- a/test/e2e/storage/testsuites/volume_stress.go +++ b/test/e2e/storage/testsuites/volume_stress.go @@ -33,7 +33,6 @@ import ( e2epv "k8s.io/kubernetes/test/e2e/framework/pv" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" - storageutils "k8s.io/kubernetes/test/e2e/storage/utils" admissionapi "k8s.io/pod-security-admission/api" ) @@ -42,8 +41,7 @@ type volumeStressTestSuite struct { } type volumeStressTest struct { - config *storageframework.PerTestConfig - driverCleanup func() + config *storageframework.PerTestConfig migrationCheck *migrationOpCheck @@ -121,7 +119,7 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver, l = &volumeStressTest{} // Now do the more expensive test initialization. 
- l.config, l.driverCleanup = driver.PrepareTest(f) + l.config = driver.PrepareTest(f) l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) l.volumes = []*storageframework.VolumeResource{} l.pods = []*v1.Pod{} @@ -187,7 +185,6 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver, } wg.Wait() - errs = append(errs, storageutils.TryFunc(l.driverCleanup)) framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") l.migrationCheck.validateMigrationVolumeOpCounts() } diff --git a/test/e2e/storage/testsuites/volumelimits.go b/test/e2e/storage/testsuites/volumelimits.go index b73d63f54fc..84692be70b4 100644 --- a/test/e2e/storage/testsuites/volumelimits.go +++ b/test/e2e/storage/testsuites/volumelimits.go @@ -90,8 +90,7 @@ func (t *volumeLimitsTestSuite) SkipUnsupportedTests(driver storageframework.Tes func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *storageframework.PerTestConfig - testCleanup func() + config *storageframework.PerTestConfig cs clientset.Interface ns *v1.Namespace @@ -124,7 +123,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, // And one extra pod with a CSI volume should get Pending with a condition // that says it's unschedulable because of volume limit. // BEWARE: the test may create lot of volumes and it's really slow. - ginkgo.It("should support volume limits [Serial]", func() { + ginkgo.It("should support volume limits [Serial]", func(ctx context.Context) { driverInfo := driver.GetDriverInfo() if !driverInfo.Capabilities[storageframework.CapVolumeLimits] { ginkgo.Skip(fmt.Sprintf("driver %s does not support volume limits", driverInfo.Name)) @@ -137,8 +136,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, l.ns = f.Namespace l.cs = f.ClientSet - l.config, l.testCleanup = driver.PrepareTest(f) - defer l.testCleanup() + l.config = driver.PrepareTest(f) ginkgo.By("Picking a node") // Some CSI drivers are deployed to a single node (e.g csi-hostpath), @@ -177,7 +175,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, // Create Pods. 
ginkgo.By(fmt.Sprintf("Creating %d Pod(s) with one volume each", limit)) for i := 0; i < limit; i++ { - pod := StartInPodWithVolumeSource(l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits", "sleep 1000000", selection) + pod := StartInPodWithVolumeSource(ctx, l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits", "sleep 1000000", selection) l.podNames = append(l.podNames, pod.Name) l.pvcNames = append(l.pvcNames, ephemeral.VolumeClaimName(pod, &pod.Spec.Volumes[0])) } @@ -221,7 +219,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, } ginkgo.By("Creating an extra pod with one volume to exceed the limit") - pod := StartInPodWithVolumeSource(l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits-exceeded", "sleep 10000", selection) + pod := StartInPodWithVolumeSource(ctx, l.cs, *l.resource.VolSource, l.ns.Name, "volume-limits-exceeded", "sleep 10000", selection) l.podNames = append(l.podNames, pod.Name) ginkgo.By("Waiting for the pod to get unschedulable with the right message") @@ -255,8 +253,7 @@ func (t *volumeLimitsTestSuite) DefineTests(driver storageframework.TestDriver, l.ns = f.Namespace l.cs = f.ClientSet - l.config, l.testCleanup = driver.PrepareTest(f) - defer l.testCleanup() + l.config = driver.PrepareTest(f) nodeNames := []string{} if l.config.ClientNodeSelection.Name != "" { diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index 75a7b98eac4..1448c23f8a0 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -116,7 +116,7 @@ func (t *volumeModeTestSuite) DefineTests(driver storageframework.TestDriver, pa l.cs = f.ClientSet // Now do the more expensive test initialization. - l.config, l.driverCleanup = driver.PrepareTest(f) + l.config = driver.PrepareTest(f) l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) } diff --git a/test/e2e/storage/testsuites/volumeperf.go b/test/e2e/storage/testsuites/volumeperf.go index eaa1965be13..5c77f47eb7b 100644 --- a/test/e2e/storage/testsuites/volumeperf.go +++ b/test/e2e/storage/testsuites/volumeperf.go @@ -93,14 +93,13 @@ func (t *volumePerformanceTestSuite) SkipUnsupportedTests(driver storageframewor func (t *volumePerformanceTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *storageframework.PerTestConfig - testCleanup func() - cs clientset.Interface - ns *v1.Namespace - scName string - pvcs []*v1.PersistentVolumeClaim - options *storageframework.PerformanceTestOptions - stopCh chan struct{} + config *storageframework.PerTestConfig + cs clientset.Interface + ns *v1.Namespace + scName string + pvcs []*v1.PersistentVolumeClaim + options *storageframework.PerformanceTestOptions + stopCh chan struct{} } var ( dInfo *storageframework.DriverInfo @@ -142,7 +141,6 @@ func (t *volumePerformanceTestSuite) DefineTests(driver storageframework.TestDri ginkgo.By(fmt.Sprintf("Deleting Storage Class %s", l.scName)) err := l.cs.StorageV1().StorageClasses().Delete(context.TODO(), l.scName, metav1.DeleteOptions{}) framework.ExpectNoError(err) - l.testCleanup() }) ginkgo.It("should provision volumes at scale within performance constraints [Slow] [Serial]", func() { @@ -151,7 +149,7 @@ func (t *volumePerformanceTestSuite) DefineTests(driver storageframework.TestDri ns: f.Namespace, options: dInfo.PerformanceTestOptions, } - l.config, l.testCleanup = driver.PrepareTest(f) + l.config = driver.PrepareTest(f) 
// Stats for volume provisioning operation // TODO: Add stats for attach, resize and snapshot diff --git a/test/e2e/storage/testsuites/volumes.go b/test/e2e/storage/testsuites/volumes.go index bc35bacbc15..472e7169148 100644 --- a/test/e2e/storage/testsuites/volumes.go +++ b/test/e2e/storage/testsuites/volumes.go @@ -36,7 +36,6 @@ import ( e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2evolume "k8s.io/kubernetes/test/e2e/framework/volume" storageframework "k8s.io/kubernetes/test/e2e/storage/framework" - storageutils "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" admissionapi "k8s.io/pod-security-admission/api" ) @@ -118,8 +117,7 @@ func skipTestIfBlockNotSupported(driver storageframework.TestDriver) { func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, pattern storageframework.TestPattern) { type local struct { - config *storageframework.PerTestConfig - driverCleanup func() + config *storageframework.PerTestConfig resource *storageframework.VolumeResource @@ -137,7 +135,7 @@ func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, patte l = local{} // Now do the more expensive test initialization. - l.config, l.driverCleanup = driver.PrepareTest(f) + l.config = driver.PrepareTest(f) l.migrationCheck = newMigrationOpCheck(f.ClientSet, f.ClientConfig(), dInfo.InTreePluginName) testVolumeSizeRange := t.GetTestSuiteInfo().SupportedSizeRange l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange) @@ -153,8 +151,6 @@ func (t *volumesTestSuite) DefineTests(driver storageframework.TestDriver, patte l.resource = nil } - errs = append(errs, storageutils.TryFunc(l.driverCleanup)) - l.driverCleanup = nil framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource") l.migrationCheck.validateMigrationVolumeOpCounts() } diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index f6a2de8e29e..e3838770aa3 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -148,7 +148,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }) ginkgo.Describe("DynamicProvisioner [Slow] [Feature:StorageProvider]", func() { - ginkgo.It("should provision storage with different parameters", func() { + ginkgo.It("should provision storage with different parameters", func(ctx context.Context) { // This test checks that dynamic provisioning can provision a volume // that can be used to persist data among pods. 
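Note (illustrative, not part of the patch): the volume_provisioning.go call sites below rely on SetupStorageClass both creating the class and deferring its deletion. The sketch below captures that contract in simplified form; setupStorageClass here is an assumption-laden stand-in (no default-class lookup, minimal error handling), not the framework function.

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	storagev1 "k8s.io/api/storage/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// setupStorageClass: create (or reuse) the class, defer its deletion, and
// return only the class so callers no longer juggle a cleanup closure.
func setupStorageClass(ctx context.Context, c kubernetes.Interface, class *storagev1.StorageClass) (*storagev1.StorageClass, error) {
	created, err := c.StorageV1().StorageClasses().Create(ctx, class, metav1.CreateOptions{})
	if apierrors.IsAlreadyExists(err) {
		// Reuse a pre-existing class and leave it in place on cleanup.
		return c.StorageV1().StorageClasses().Get(ctx, class.Name, metav1.GetOptions{})
	}
	if err != nil {
		return nil, err
	}
	ginkgo.DeferCleanup(func(ctx context.Context) {
		// Deletion runs with Ginkgo's cleanup context; NotFound is tolerated
		// because a test may have deleted the class itself.
		if err := c.StorageV1().StorageClasses().Delete(ctx, created.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
			ginkgo.Fail("deleting storage class: " + err.Error())
		}
	})
	return created, nil
}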
@@ -166,7 +166,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "1.5Gi", ExpectedSize: "2Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { - volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) + volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkGCEPD(volume, "pd-ssd") @@ -184,7 +184,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "1.5Gi", ExpectedSize: "2Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { - volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) + volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkGCEPD(volume, "pd-standard") @@ -204,7 +204,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "1.5Gi", ExpectedSize: "2Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { - volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) + volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkAWSEBS(volume, "gp2", false) @@ -223,7 +223,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "3.5Gi", ExpectedSize: "4Gi", // 4 GiB is minimum for io1 PvCheck: func(claim *v1.PersistentVolumeClaim) { - volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) + volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkAWSEBS(volume, "io1", false) @@ -241,7 +241,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "500Gi", // minimum for sc1 ExpectedSize: "500Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { - volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) + volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkAWSEBS(volume, "sc1", false) @@ -259,7 +259,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "500Gi", // minimum for st1 ExpectedSize: "500Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { - volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) + volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkAWSEBS(volume, "st1", false) @@ -277,7 +277,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "1Gi", ExpectedSize: "1Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { - volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) + volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkAWSEBS(volume, "gp2", true) @@ -294,7 +294,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "1.5Gi", ExpectedSize: "1.5Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { - testsuites.PVWriteReadSingleNodeCheck(c, 
f.Timeouts, claim, e2epod.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) }, }, // Azure @@ -307,7 +307,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "1Gi", ExpectedSize: "1Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { - testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) + testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) }, }, } @@ -331,8 +331,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { test.Client = c // overwrite StorageClass spec with provisioned StorageClass - storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, suffix)) - defer clearStorageClass() + storageClass := testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, suffix)) test.Class = storageClass test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ @@ -341,11 +340,11 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { VolumeMode: &test.VolumeMode, }, ns) - test.TestDynamicProvisioning() + test.TestDynamicProvisioning(ctx) } }) - ginkgo.It("should provision storage with non-default reclaim policy Retain", func() { + ginkgo.It("should provision storage with non-default reclaim policy Retain", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "gke") test := testsuites.StorageClassTest{ @@ -360,7 +359,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ClaimSize: "1Gi", ExpectedSize: "1Gi", PvCheck: func(claim *v1.PersistentVolumeClaim) { - volume := testsuites.PVWriteReadSingleNodeCheck(c, f.Timeouts, claim, e2epod.NodeSelection{}) + volume := testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{}) gomega.Expect(volume).NotTo(gomega.BeNil(), "get bound PV") err := checkGCEPD(volume, "pd-standard") @@ -370,8 +369,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { test.Class = newStorageClass(test, ns, "reclaimpolicy") retain := v1.PersistentVolumeReclaimRetain test.Class.ReclaimPolicy = &retain - storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, test.Class) - defer clearStorageClass() + storageClass := testsuites.SetupStorageClass(ctx, test.Client, test.Class) test.Class = storageClass test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ @@ -380,7 +378,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { VolumeMode: &test.VolumeMode, }, ns) - pv := test.TestDynamicProvisioning() + pv := test.TestDynamicProvisioning(ctx) ginkgo.By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased)) framework.ExpectNoError(e2epv.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second)) @@ -509,7 +507,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }) ginkgo.Describe("DynamicProvisioner External", func() { - ginkgo.It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func() { + ginkgo.It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func(ctx context.Context) { // external dynamic provisioner pods need additional permissions provided by the // persistent-volume-provisioner clusterrole and a leader-locking role serviceAccountName := "default" @@ -557,8 +555,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ExpectedSize: "1500Mi", } - 
storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, "external")) - defer clearStorageClass() + storageClass := testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, "external")) test.Class = storageClass test.Claim = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ @@ -569,12 +566,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { ginkgo.By("creating a claim with a external provisioning annotation") - test.TestDynamicProvisioning() + test.TestDynamicProvisioning(ctx) }) }) ginkgo.Describe("DynamicProvisioner Default", func() { - ginkgo.It("should create and delete default persistent volumes [Slow]", func() { + ginkgo.It("should create and delete default persistent volumes [Slow]", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("gce", "aws", "gke", "vsphere", "azure") e2epv.SkipIfNoDefaultStorageClass(c) @@ -592,11 +589,9 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { VolumeMode: &test.VolumeMode, }, ns) // NOTE: this test assumes that there's a default storageclass - storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, nil) - test.Class = storageClass - defer clearStorageClass() + test.Class = testsuites.SetupStorageClass(ctx, test.Client, nil) - test.TestDynamicProvisioning() + test.TestDynamicProvisioning(ctx) }) // Modifying the default storage class can be disruptive to other tests that depend on it @@ -679,7 +674,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { }) ginkgo.Describe("Invalid AWS KMS key", func() { - ginkgo.It("should report an error and create no PV", func() { + ginkgo.It("should report an error and create no PV", func(ctx context.Context) { e2eskipper.SkipUnlessProviderIs("aws") test := testsuites.StorageClassTest{ Client: c, @@ -691,9 +686,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { } ginkgo.By("creating a StorageClass") - storageClass, clearStorageClass := testsuites.SetupStorageClass(test.Client, newStorageClass(test, ns, "invalid-aws")) - defer clearStorageClass() - test.Class = storageClass + test.Class = testsuites.SetupStorageClass(ctx, test.Client, newStorageClass(test, ns, "invalid-aws")) ginkgo.By("creating a claim object with a suffix for gluster dynamic provisioner") claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{