e2e: remove cleanup actions

The framework.AddCleanupAction API was a workaround for Ginkgo v1 not invoking
AfterEach callbacks after a test failure. Ginkgo v2 not only fixed that, but
also added a DeferCleanup API which can be used to run some code if (and only
if!) the corresponding setup code ran. In several cases that makes the test
cleanup simpler.
This commit is contained in:
Patrick Ohly 2022-09-06 16:54:39 +02:00
parent b7529097ca
commit fc092eb145
12 changed files with 114 additions and 328 deletions

View File

@ -85,7 +85,6 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
var _ = ginkgo.SynchronizedAfterSuite(func() { var _ = ginkgo.SynchronizedAfterSuite(func() {
progressReporter.SetEndMsg() progressReporter.SetEndMsg()
CleanupSuite()
}, func() { }, func() {
AfterSuiteActions() AfterSuiteActions()
}) })

View File

@ -1,78 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"reflect"
"runtime"
"sync"
)
// CleanupActionHandle is an integer pointer type used as an opaque, unique
// token identifying one installed cleanup action. Each call to new(int)
// yields a distinct pointer, so handles compare unequal even though the
// pointed-to value is never read.
type CleanupActionHandle *int

// cleanupFuncHandle pairs a cleanup hook with the handle that identifies it,
// so the hook can later be removed via RemoveCleanupAction.
type cleanupFuncHandle struct {
	actionHandle CleanupActionHandle // identity token returned by AddCleanupAction
	actionHook   func()              // the cleanup function to invoke
}

// cleanupActionsLock guards cleanupHookList against concurrent
// registration, removal, and snapshotting.
var cleanupActionsLock sync.Mutex

// cleanupHookList holds the registered cleanup hooks in last-in-first-out
// order: AddCleanupAction prepends new entries.
var cleanupHookList = []cleanupFuncHandle{}
// AddCleanupAction installs a function that will be called in the event of
// the whole test being terminated, allowing arbitrary pieces of the overall
// test to hook into SynchronizedAfterSuite(). Hooks run in
// last-in-first-out order. The returned handle can be passed to
// RemoveCleanupAction to uninstall the hook.
func AddCleanupAction(fn func()) CleanupActionHandle {
	// A fresh *int is a unique identity token; its value is never used.
	handle := CleanupActionHandle(new(int))
	entry := cleanupFuncHandle{actionHandle: handle, actionHook: fn}
	cleanupActionsLock.Lock()
	defer cleanupActionsLock.Unlock()
	// Prepend so the newest hook runs first (LIFO).
	updated := make([]cleanupFuncHandle, 0, len(cleanupHookList)+1)
	updated = append(updated, entry)
	cleanupHookList = append(updated, cleanupHookList...)
	return handle
}
// RemoveCleanupAction removes a function that was installed by
// AddCleanupAction, identified by the handle that call returned.
// Removing a handle that is not (or no longer) registered is a no-op.
func RemoveCleanupAction(p CleanupActionHandle) {
	cleanupActionsLock.Lock()
	defer cleanupActionsLock.Unlock()
	for idx := range cleanupHookList {
		if cleanupHookList[idx].actionHandle != p {
			continue
		}
		// Splice out the matching entry; handles are unique, so stop here.
		cleanupHookList = append(cleanupHookList[:idx], cleanupHookList[idx+1:]...)
		return
	}
}
// RunCleanupActions runs all functions installed by AddCleanupAction. It does
// not remove them (see RemoveCleanupAction), and it invokes the hooks without
// holding the lock, so a hook may safely remove itself while running.
func RunCleanupActions() {
	// Snapshot the hook functions under the lock, then release it before
	// calling any of them.
	snapshot := func() []func() {
		cleanupActionsLock.Lock()
		defer cleanupActionsLock.Unlock()
		hooks := make([]func(), 0, len(cleanupHookList))
		for _, entry := range cleanupHookList {
			hooks = append(hooks, entry.actionHook)
		}
		return hooks
	}()
	// Run unlocked, newest-first (the list is kept in LIFO order).
	for _, hook := range snapshot {
		Logf("Running Cleanup Action: %v", runtime.FuncForPC(reflect.ValueOf(hook).Pointer()).Name())
		hook()
	}
}

View File

@ -98,11 +98,6 @@ type Framework struct {
// Flaky operation failures in an e2e test can be captured through this. // Flaky operation failures in an e2e test can be captured through this.
flakeReport *FlakeReport flakeReport *FlakeReport
// To make sure that this framework cleans up after itself, no matter what,
// we install a Cleanup action before each test and clear it after. If we
// should abort, the AfterSuite hook should run all Cleanup actions.
cleanupHandle CleanupActionHandle
// configuration for framework's client // configuration for framework's client
Options Options Options Options
@ -177,10 +172,6 @@ func (f *Framework) BeforeEach() {
// Registered later and thus runs before deleting namespaces. // Registered later and thus runs before deleting namespaces.
ginkgo.DeferCleanup(f.dumpNamespaceInfo) ginkgo.DeferCleanup(f.dumpNamespaceInfo)
// The fact that we need this feels like a bug in ginkgo.
// https://github.com/onsi/ginkgo/issues/222
f.cleanupHandle = AddCleanupAction(f.AfterEach)
if f.ClientSet == nil {
ginkgo.By("Creating a kubernetes client") ginkgo.By("Creating a kubernetes client")
config, err := LoadConfig() config, err := LoadConfig()
ExpectNoError(err) ExpectNoError(err)
@ -218,7 +209,6 @@ func (f *Framework) BeforeEach() {
f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver) f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
TestContext.CloudConfig.Provider.FrameworkBeforeEach(f) TestContext.CloudConfig.Provider.FrameworkBeforeEach(f)
}
if !f.SkipNamespaceCreation { if !f.SkipNamespaceCreation {
ginkgo.By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName)) ginkgo.By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName))
@ -356,8 +346,6 @@ func printSummaries(summaries []TestDataSummary, testBaseName string) {
// AfterEach deletes the namespace, after reading its events. // AfterEach deletes the namespace, after reading its events.
func (f *Framework) AfterEach() { func (f *Framework) AfterEach() {
RemoveCleanupAction(f.cleanupHandle)
// This should not happen. Given ClientSet is a public field a test must have updated it! // This should not happen. Given ClientSet is a public field a test must have updated it!
// Error out early before any API calls during cleanup. // Error out early before any API calls during cleanup.
if f.ClientSet == nil { if f.ClientSet == nil {

View File

@ -987,8 +987,6 @@ func generateDriverCleanupFunc(
driverName, testns, driverns string, driverName, testns, driverns string,
driverCleanup, cancelLogging func()) func() { driverCleanup, cancelLogging func()) func() {
cleanupHandle := new(framework.CleanupActionHandle)
// Cleanup CSI driver and namespaces. This function needs to be idempotent and can be // Cleanup CSI driver and namespaces. This function needs to be idempotent and can be
// concurrently called from defer (or AfterEach) and AfterSuite action hooks. // concurrently called from defer (or AfterEach) and AfterSuite action hooks.
cleanupFunc := func() { cleanupFunc := func() {
@ -1003,13 +1001,7 @@ func generateDriverCleanupFunc(
ginkgo.By(fmt.Sprintf("deleting the driver namespace: %s", driverns)) ginkgo.By(fmt.Sprintf("deleting the driver namespace: %s", driverns))
tryFunc(func() { f.DeleteNamespace(driverns) }) tryFunc(func() { f.DeleteNamespace(driverns) })
// cleanup function has already run and hence we don't need to run it again.
// We do this as very last action because in-case defer(or AfterEach) races
// with AfterSuite and test routine gets killed then this block still
// runs in AfterSuite
framework.RemoveCleanupAction(*cleanupHandle)
} }
*cleanupHandle = framework.AddCleanupAction(cleanupFunc)
return cleanupFunc return cleanupFunc
} }

View File

@ -55,7 +55,6 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
resizableSc *storagev1.StorageClass resizableSc *storagev1.StorageClass
node *v1.Node node *v1.Node
nodeName string nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string nodeKeyValueLabel map[string]string
nodeLabelValue string nodeLabelValue string
nodeKey string nodeKey string
@ -76,15 +75,11 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
framework.ExpectNoError(err) framework.ExpectNoError(err)
nodeName = node.Name nodeName = node.Name
nodeKey = "mounted_flexvolume_expand" nodeKey = "mounted_flexvolume_expand_" + ns
if !isNodeLabeled {
nodeLabelValue = ns nodeLabelValue = ns
nodeKeyValueLabel = make(map[string]string) nodeKeyValueLabel = map[string]string{nodeKey: nodeLabelValue}
nodeKeyValueLabel[nodeKey] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue) framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
isNodeLabeled = true ginkgo.DeferCleanup(framework.RemoveLabelOffNode, c, nodeName, nodeKey)
}
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Name: "flexvolume-resize", Name: "flexvolume-resize",
@ -109,24 +104,12 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
}, ns) }, ns)
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating pvc") framework.ExpectNoError(err, "Error creating pvc")
}) ginkgo.DeferCleanup(func() {
framework.AddCleanupAction(func() {
if len(nodeLabelValue) > 0 {
framework.RemoveLabelOffNode(c, nodeName, nodeKey)
}
})
ginkgo.AfterEach(func() {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize") framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil {
if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 { if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
} }
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, "" })
nodeKeyValueLabel = make(map[string]string)
}
}) })
ginkgo.It("Should verify mounted flex volumes can be resized", func() { ginkgo.It("Should verify mounted flex volumes can be resized", func() {

View File

@ -48,7 +48,6 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
pvc *v1.PersistentVolumeClaim pvc *v1.PersistentVolumeClaim
resizableSc *storagev1.StorageClass resizableSc *storagev1.StorageClass
nodeName string nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string nodeKeyValueLabel map[string]string
nodeLabelValue string nodeLabelValue string
nodeKey string nodeKey string
@ -71,15 +70,11 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
framework.ExpectNoError(err) framework.ExpectNoError(err)
nodeName = node.Name nodeName = node.Name
nodeKey = "mounted_flexvolume_expand" nodeKey = "mounted_flexvolume_expand_" + ns
if !isNodeLabeled {
nodeLabelValue = ns nodeLabelValue = ns
nodeKeyValueLabel = make(map[string]string) nodeKeyValueLabel = map[string]string{nodeKey: nodeLabelValue}
nodeKeyValueLabel[nodeKey] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue) framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
isNodeLabeled = true ginkgo.DeferCleanup(framework.RemoveLabelOffNode, c, nodeName, nodeKey)
}
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Name: "flexvolume-resize", Name: "flexvolume-resize",
@ -104,25 +99,12 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
}, ns) }, ns)
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating pvc: %v", err) framework.ExpectNoError(err, "Error creating pvc: %v", err)
ginkgo.DeferCleanup(func() {
})
framework.AddCleanupAction(func() {
if len(nodeLabelValue) > 0 {
framework.RemoveLabelOffNode(c, nodeName, nodeKey)
}
})
ginkgo.AfterEach(func() {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize") framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil {
if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 { if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
} }
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, "" })
nodeKeyValueLabel = make(map[string]string)
}
}) })
ginkgo.It("should be resizable when mounted", func() { ginkgo.It("should be resizable when mounted", func() {

View File

@ -52,7 +52,6 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
sc *storagev1.StorageClass sc *storagev1.StorageClass
cleanStorageClass func() cleanStorageClass func()
nodeName string nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string nodeKeyValueLabel map[string]string
nodeLabelValue string nodeLabelValue string
nodeKey string nodeKey string
@ -70,15 +69,11 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
framework.ExpectNoError(err) framework.ExpectNoError(err)
nodeName = node.Name nodeName = node.Name
nodeKey = "mounted_volume_expand" nodeKey = "mounted_volume_expand_" + ns
if !isNodeLabeled {
nodeLabelValue = ns nodeLabelValue = ns
nodeKeyValueLabel = make(map[string]string) nodeKeyValueLabel = map[string]string{nodeKey: nodeLabelValue}
nodeKeyValueLabel[nodeKey] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue) framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
isNodeLabeled = true ginkgo.DeferCleanup(framework.RemoveLabelOffNode, c, nodeName, nodeKey)
}
test := testsuites.StorageClassTest{ test := testsuites.StorageClassTest{
Name: "default", Name: "default",
@ -93,6 +88,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
if !*sc.AllowVolumeExpansion { if !*sc.AllowVolumeExpansion {
framework.Failf("Class %s does not allow volume expansion", sc.Name) framework.Failf("Class %s does not allow volume expansion", sc.Name)
} }
ginkgo.DeferCleanup(cleanStorageClass)
pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{ pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize, ClaimSize: test.ClaimSize,
@ -101,26 +97,13 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
}, ns) }, ns)
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating pvc") framework.ExpectNoError(err, "Error creating pvc")
}) ginkgo.DeferCleanup(func() {
framework.AddCleanupAction(func() {
if len(nodeLabelValue) > 0 {
framework.RemoveLabelOffNode(c, nodeName, nodeKey)
}
})
ginkgo.AfterEach(func() {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize") framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil {
if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 { if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
} }
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, "" })
nodeKeyValueLabel = make(map[string]string)
}
cleanStorageClass()
}) })
ginkgo.It("Should verify mounted devices can be resized", func() { ginkgo.It("Should verify mounted devices can be resized", func() {

View File

@ -75,9 +75,11 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func()
volLabel = labels.Set{e2epv.VolumeSelectorKey: ns} volLabel = labels.Set{e2epv.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel) selector = metav1.SetAsLabelSelector(volLabel)
if volumePath == "" {
volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef) volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.DeferCleanup(func() {
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
})
pvConfig = e2epv.PersistentVolumeConfig{ pvConfig = e2epv.PersistentVolumeConfig{
NamePrefix: "vspherepv-", NamePrefix: "vspherepv-",
Labels: volLabel, Labels: volLabel,
@ -94,16 +96,27 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func()
Selector: selector, Selector: selector,
StorageClassName: &emptyStorageClass, StorageClassName: &emptyStorageClass,
} }
}
ginkgo.By("Creating the PV and PVC") ginkgo.By("Creating the PV and PVC")
pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false) pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.DeferCleanup(func() {
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "AfterEach: failed to delete PV ", pv.Name)
})
ginkgo.DeferCleanup(func() {
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "AfterEach: failed to delete PVC ", pvc.Name)
})
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc)) framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))
ginkgo.By("Creating the Client Pod") ginkgo.By("Creating the Client Pod")
clientPod, err = e2epod.CreateClientPod(c, ns, pvc) clientPod, err = e2epod.CreateClientPod(c, ns, pvc)
framework.ExpectNoError(err) framework.ExpectNoError(err)
node = clientPod.Spec.NodeName node = clientPod.Spec.NodeName
ginkgo.DeferCleanup(func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name)
})
ginkgo.DeferCleanup(func() {
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node), "wait for vsphere disk to detach")
})
ginkgo.By("Verify disk should be attached to the node") ginkgo.By("Verify disk should be attached to the node")
isAttached, err := diskIsAttached(volumePath, node) isAttached, err := diskIsAttached(volumePath, node)
@ -113,42 +126,6 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func()
} }
}) })
ginkgo.AfterEach(func() {
framework.Logf("AfterEach: Cleaning up test resources")
if c != nil {
framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name)
if pv != nil {
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "AfterEach: failed to delete PV ", pv.Name)
}
if pvc != nil {
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "AfterEach: failed to delete PVC ", pvc.Name)
}
}
})
/*
Clean up
1. Wait and verify volume is detached from the node
2. Delete PV
3. Delete Volume (vmdk)
*/
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped, which leaves the namespace unset.
if len(ns) > 0 && len(volumePath) > 0 {
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node))
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
})
/*
Delete the PVC and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete.
Test Steps:
1. Delete PVC.
2. Delete POD, POD deletion should succeed.
*/
ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func() { ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func() {
ginkgo.By("Deleting the Claim") ginkgo.By("Deleting the Claim")
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name) framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)

View File

@ -107,18 +107,11 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
e2eskipper.Skipf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), volumesPerNode*len(nodes.Items)) e2eskipper.Skipf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), volumesPerNode*len(nodes.Items))
} }
nodeSelectorList = createNodeLabels(client, namespace, nodes) nodeSelectorList = createNodeLabels(client, namespace, nodes)
}) ginkgo.DeferCleanup(func() {
/*
Remove labels from all the nodes
*/
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped, which leaves the namespace unset.
if len(namespace) > 0 && nodes != nil {
for _, node := range nodes.Items { for _, node := range nodes.Items {
framework.RemoveLabelOffNode(client, node.Name, NodeLabelKey) framework.RemoveLabelOffNode(client, node.Name, NodeLabelKey)
} }
} })
}) })
ginkgo.It("vsphere scale tests", func() { ginkgo.It("vsphere scale tests", func() {

View File

@ -65,7 +65,6 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
client clientset.Interface client clientset.Interface
namespace string namespace string
nodeName string nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string nodeKeyValueLabel map[string]string
nodeLabelValue string nodeLabelValue string
) )
@ -74,20 +73,11 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
Bootstrap(f) Bootstrap(f)
client = f.ClientSet client = f.ClientSet
namespace = f.Namespace.Name namespace = f.Namespace.Name
if !isNodeLabeled {
nodeName = GetReadySchedulableRandomNodeInfo().Name nodeName = GetReadySchedulableRandomNodeInfo().Name
nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID()) nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
nodeKeyValueLabel = make(map[string]string) nodeKeyValueLabel = map[string]string{NodeLabelKey: nodeLabelValue}
nodeKeyValueLabel[NodeLabelKey] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue) framework.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue)
isNodeLabeled = true ginkgo.DeferCleanup(framework.RemoveLabelOffNode, client, nodeName, NodeLabelKey)
}
})
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped, which leaves the namespace unset.
if len(namespace) > 0 && len(nodeLabelValue) > 0 {
framework.RemoveLabelOffNode(client, nodeName, NodeLabelKey)
}
}) })
ginkgo.It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() { ginkgo.It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() {

View File

@ -50,7 +50,6 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() {
node1KeyValueLabel map[string]string node1KeyValueLabel map[string]string
node2Name string node2Name string
node2KeyValueLabel map[string]string node2KeyValueLabel map[string]string
isNodeLabeled bool
nodeInfo *NodeInfo nodeInfo *NodeInfo
vsp *VSphere vsp *VSphere
) )
@ -60,41 +59,29 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() {
c = f.ClientSet c = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
if !isNodeLabeled {
node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns) node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
isNodeLabeled = true ginkgo.DeferCleanup(func() {
nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
vsp = nodeInfo.VSphere
}
ginkgo.By("creating vmdk")
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath)
})
ginkgo.AfterEach(func() {
for _, volumePath := range volumePaths {
vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
volumePaths = nil
})
/*
Steps
1. Remove labels assigned to node 1 and node 2
2. Delete VMDK volume
*/
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped, which leaves the namespace unset.
if len(ns) > 0 {
if len(node1KeyValueLabel) > 0 { if len(node1KeyValueLabel) > 0 {
framework.RemoveLabelOffNode(c, node1Name, NodeLabelKey) framework.RemoveLabelOffNode(c, node1Name, NodeLabelKey)
} }
if len(node2KeyValueLabel) > 0 { if len(node2KeyValueLabel) > 0 {
framework.RemoveLabelOffNode(c, node2Name, NodeLabelKey) framework.RemoveLabelOffNode(c, node2Name, NodeLabelKey)
} }
}
}) })
nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
vsp = nodeInfo.VSphere
ginkgo.By("creating vmdk")
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath)
ginkgo.DeferCleanup(func() {
for _, volumePath := range volumePaths {
vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
volumePaths = nil
})
})
/* /*
Steps Steps

View File

@ -27,16 +27,6 @@ import (
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
) )
// CleanupSuite is the boilerplate that can be used after tests on ginkgo were run, on the SynchronizedAfterSuite step.
// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
// Here, the order of functions is reversed; first, the function which runs everywhere,
// and then the function that only runs on the first Ginkgo node.
func CleanupSuite() {
// Run on all Ginkgo nodes
framework.Logf("Running AfterSuite actions on all nodes")
framework.RunCleanupActions()
}
// AfterSuiteActions are actions that are run on ginkgo's SynchronizedAfterSuite // AfterSuiteActions are actions that are run on ginkgo's SynchronizedAfterSuite
func AfterSuiteActions() { func AfterSuiteActions() {
// Run only Ginkgo on node 1 // Run only Ginkgo on node 1