use DeferCleanup in all the places

Hemant Kumar 2022-12-13 15:54:55 -05:00
parent 6dd94f5663
commit 9e5d0828e0
12 changed files with 25 additions and 28 deletions
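The diffs below swap two earlier idioms, defer m.cleanup() and appending closures to m.testCleanups, for Ginkgo v2's ginkgo.DeferCleanup, which takes the cleanup function plus its arguments and runs it after the spec finishes. A minimal sketch of the pattern, not taken from this commit; the package name, the destroyResource helper, and the resource name are invented for illustration:

// Minimal sketch (not from this commit): registering cleanup with
// ginkgo.DeferCleanup instead of defer or an appended cleanup slice.
package defercleanup_test

import (
	"fmt"
	"testing"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

func TestDeferCleanup(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "DeferCleanup sketch")
}

// destroyResource stands in for helpers like destroyCSIDriver in the diff.
func destroyResource(name string) error {
	fmt.Printf("cleaning up %s\n", name)
	return nil
}

var _ = ginkgo.It("registers cleanup instead of deferring it", func() {
	// Old pattern: defer destroyResource("example"), or appending a closure
	// to a testCleanups slice that the test helper runs later.
	//
	// New pattern: hand the function and its arguments to DeferCleanup.
	// Ginkgo invokes it after the spec finishes, in reverse registration
	// order, even when the spec fails or is interrupted.
	ginkgo.DeferCleanup(destroyResource, "example")
})

That interrupt-safe, reverse-order execution is what the manual testCleanups slice and the scattered defer statements were emulating by hand.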

View File

@@ -196,9 +196,7 @@ func (m *mockDriverSetup) init(tp testParameters) {
 if tp.registerDriver {
 err = waitForCSIDriver(m.cs, m.config.GetUniqueDriverName())
 framework.ExpectNoError(err, "Failed to get CSIDriver %v", m.config.GetUniqueDriverName())
-m.testCleanups = append(m.testCleanups, func() {
-destroyCSIDriver(m.cs, m.config.GetUniqueDriverName())
-})
+ginkgo.DeferCleanup(destroyCSIDriver, m.cs, m.config.GetUniqueDriverName())
 }
 // Wait for the CSIDriver actually get deployed and CSINode object to be generated.

View File

@@ -72,7 +72,7 @@ var _ = utils.SIGDescribe("CSI Mock volume attach", func() {
 ginkgo.It(t.name, func(ctx context.Context) {
 var err error
 m.init(testParameters{registerDriver: test.deployClusterRegistrar, disableAttach: test.disableAttach})
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 volumeType := test.volumeType
 if volumeType == "" {
@@ -110,7 +110,7 @@ var _ = utils.SIGDescribe("CSI Mock volume attach", func() {
 ginkgo.It("should bringup pod after deploying CSIDriver attach=false [Slow]", func(ctx context.Context) {
 var err error
 m.init(testParameters{registerDriver: false, disableAttach: true})
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 _, claim, pod := m.createPod(pvcReference) // late binding as specified above
 if pod == nil {
@@ -156,13 +156,12 @@ var _ = utils.SIGDescribe("CSI Mock volume attach", func() {
 NewDriverName: "csi-mock-" + f.UniqueName,
 CanAttach: &canAttach,
 }
-cleanupCSIDriver, err := utils.CreateFromManifests(f, driverNamespace, func(item interface{}) error {
+err = utils.CreateFromManifests(f, driverNamespace, func(item interface{}) error {
 return utils.PatchCSIDeployment(f, o, item)
 }, "test/e2e/testing-manifests/storage-csi/mock/csi-mock-driverinfo.yaml")
 if err != nil {
 framework.Failf("fail to deploy CSIDriver object: %v", err)
 }
-m.testCleanups = append(m.testCleanups, cleanupCSIDriver)
 ginkgo.By("Wait for the pod in running status")
 err = e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)

View File

@@ -61,7 +61,7 @@ var _ = utils.SIGDescribe("CSI Mock fsgroup as mount option", func() {
 enableVolumeMountGroup: t.enableVolumeMountGroup,
 hooks: createFSGroupRequestPreHook(&nodeStageFsGroup, &nodePublishFsGroup),
 })
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 fsGroupVal := int64(rand.Int63n(20000) + 1024)
 fsGroup := &fsGroupVal

View File

@@ -72,7 +72,7 @@ var _ = utils.SIGDescribe("CSI Mock volume fsgroup policies", func() {
 registerDriver: true,
 fsGroupPolicy: &test.fsGroupPolicy,
 })
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 // kube-scheduler may need some time before it gets the CSIDriver object.
 // Without them, scheduling doesn't run as expected by the test.

View File

@@ -143,7 +143,7 @@ var _ = utils.SIGDescribe("CSI Mock volume node stage", func() {
 registerDriver: true,
 hooks: hooks,
 })
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 _, claim, pod := m.createPod(pvcReference)
 if pod == nil {
@@ -281,7 +281,7 @@ var _ = utils.SIGDescribe("CSI Mock volume node stage", func() {
 registerDriver: true,
 hooks: hooks,
 })
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 _, claim, pod := m.createPod(pvcReference)
 if pod == nil {

View File

@@ -101,7 +101,7 @@ var _ = utils.SIGDescribe("CSI Mock selinux on mount", func() {
 enableSELinuxMount: &t.seLinuxEnabled,
 hooks: createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts),
 })
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode}
 var podSELinuxOpts *v1.SELinuxOptions

View File

@@ -69,7 +69,7 @@ var _ = utils.SIGDescribe("CSI Mock volume service account token", func() {
 requiresRepublish: &csiServiceAccountTokenEnabled,
 })
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 _, _, pod := m.createPod(pvcReference)
 if pod == nil {

View File

@@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() {
 }
 ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
 defer cancel()
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 sc := m.driver.GetDynamicProvisionStorageClass(m.config, "")
 ginkgo.By("Creating storage class")
@@ -219,7 +219,7 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() {
 if !ok {
 e2eskipper.Skipf("mock driver does not support snapshots -- skipping")
 }
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 var sc *storagev1.StorageClass
 if dDriver, ok := m.driver.(storageframework.DynamicPVTestDriver); ok {
@@ -308,7 +308,7 @@ var _ = utils.SIGDescribe("CSI Mock volume snapshot", func() {
 if !ok {
 e2eskipper.Skipf("mock driver does not support snapshots -- skipping")
 }
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 metricsGrabber, err := e2emetrics.NewMetricsGrabber(m.config.Framework.ClientSet, nil, f.ClientConfig(), false, false, false, false, false, true)
 if err != nil {

View File

@@ -127,7 +127,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
 }
 m.init(params)
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 ctx, cancel := context.WithTimeout(ctx, csiPodRunningTimeout)
 defer cancel()
@@ -331,7 +331,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
 storageCapacity: test.storageCapacity,
 lateBinding: true,
 })
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 // The storage class uses a random name, therefore we have to create it first
 // before adding CSIStorageCapacity objects for it.
@@ -348,9 +348,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
 }
 createdCapacity, err := f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Create(context.Background(), capacity, metav1.CreateOptions{})
 framework.ExpectNoError(err, "create CSIStorageCapacity %+v", *capacity)
-m.testCleanups = append(m.testCleanups, func() {
-f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Delete(context.Background(), createdCapacity.Name, metav1.DeleteOptions{})
-})
+ginkgo.DeferCleanup(framework.IgnoreNotFound(f.ClientSet.StorageV1().CSIStorageCapacities(f.Namespace.Name).Delete), createdCapacity.Name, metav1.DeleteOptions{})
 }
 // kube-scheduler may need some time before it gets the CSIDriver and CSIStorageCapacity objects.
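The last hunk above also shows the variadic form: the clientset Delete call, wrapped by framework.IgnoreNotFound, is passed as the function, with the object name and delete options as trailing arguments. A small sketch of that usage, extending the example near the top of this commit (same package and imports); deleteCapacity and its arguments are hypothetical:

var _ = ginkgo.It("passes arguments through to the cleanup function", func() {
	// deleteCapacity stands in for the wrapped Delete call above. DeferCleanup
	// invokes it after the spec with the trailing arguments supplied here; a
	// non-nil error returned by a cleanup callback is reported as a spec
	// failure, which is why the real call filters out "not found" errors.
	deleteCapacity := func(name string, force bool) error {
		fmt.Printf("deleting capacity %s (force=%v)\n", name, force)
		return nil
	}
	ginkgo.DeferCleanup(deleteCapacity, "example-capacity", true)
})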

View File

@@ -78,7 +78,7 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() {
 }
 m.init(tp)
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 sc, pvc, pod := m.createPod(pvcReference)
 gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")
@@ -172,8 +172,7 @@ var _ = utils.SIGDescribe("CSI Mock volume expansion", func() {
 }
 m.init(params)
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 sc, pvc, pod := m.createPod(pvcReference)
 gomega.Expect(pod).NotTo(gomega.BeNil(), "while creating pod for resizing")

View File

@@ -44,7 +44,8 @@ var _ = utils.SIGDescribe("CSI Mock volume limit", func() {
 // define volume limit to be 2 for this test
 var err error
 m.init(testParameters{attachLimit: 2})
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 nodeName := m.config.ClientNodeSelection.Name
 driverName := m.config.GetUniqueDriverName()
@@ -75,7 +76,8 @@ var _ = utils.SIGDescribe("CSI Mock volume limit", func() {
 // define volume limit to be 2 for this test
 var err error
 m.init(testParameters{attachLimit: 1})
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 nodeName := m.config.ClientNodeSelection.Name
 driverName := m.config.GetUniqueDriverName()
@@ -100,7 +102,8 @@ var _ = utils.SIGDescribe("CSI Mock volume limit", func() {
 // define volume limit to be 2 for this test
 var err error
 m.init(testParameters{attachLimit: 1})
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 nodeName := m.config.ClientNodeSelection.Name
 driverName := m.config.GetUniqueDriverName()

View File

@@ -87,7 +87,7 @@ var _ = utils.SIGDescribe("CSI Mock workload info", func() {
 registerDriver: test.deployClusterRegistrar,
 podInfo: test.podInfoOnMount})
-defer m.cleanup()
+ginkgo.DeferCleanup(m.cleanup)
 withVolume := pvcReference
 if test.expectEphemeral {