Mirror of https://github.com/k3s-io/kubernetes.git
Remove global framework variable
`f *framework.Framework` does not need to be global; it is used in only a few places. This also fixes vSphereDriver.PrepareTest() in in_tree.go, which schedules a ginkgo.DeferCleanup() callback that reads the global `f`, whose value is no longer valid by the time ginkgo runs the cleanup.
Parent: 1c97dbc8c9
Commit: ba099644b2
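
The bug pattern is worth spelling out. Below is a minimal, self-contained sketch of it; the names (framework, deferCleanup) are illustrative stand-ins, not the real e2e framework or ginkgo APIs. A deferred callback that reads a package-level variable sees whatever the variable holds when the callback finally runs, not when it was scheduled:

package main

import "fmt"

type framework struct{ name string }

// f mimics the package-level framework variable this commit removes.
var f *framework

var cleanups []func()

// deferCleanup stands in for ginkgo.DeferCleanup: it records a callback
// that only runs much later, after other tests may have re-bootstrapped.
func deferCleanup(fn func()) { cleanups = append(cleanups, fn) }

func main() {
	f = &framework{name: "test-a"}

	// Buggy pattern: the callback reads the global when it finally runs.
	deferCleanup(func() { fmt.Println("global cleanup sees:", f.name) })

	// Fixed pattern: capture the value the callback needs at schedule time.
	fw := f
	deferCleanup(func() { fmt.Println("captured cleanup sees:", fw.name) })

	// Another test bootstraps and overwrites the global.
	f = &framework{name: "test-b"}

	for _, fn := range cleanups {
		fn() // prints "test-b" for the buggy one, "test-a" for the fixed one
	}
}

Threading the framework (or just its ClientSet) through parameters, as this commit does, makes each callback carry its own valid reference instead of racing other tests for the global.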
@@ -1284,7 +1284,7 @@ func (v *vSphereDriver) PrepareTest(ctx context.Context, f *framework.Framework)
 	ginkgo.DeferCleanup(func(ctx context.Context) {
 		// Driver Cleanup function
 		// Logout each vSphere client connection to prevent session leakage
-		nodes := vspheretest.GetReadySchedulableNodeInfos(ctx)
+		nodes := vspheretest.GetReadySchedulableNodeInfos(ctx, f.ClientSet)
 		for _, node := range nodes {
 			if node.VSphere.Client != nil {
 				_ = node.VSphere.Client.Logout(ctx)

@@ -1301,7 +1301,7 @@ func (v *vSphereDriver) PrepareTest(ctx context.Context, f *framework.Framework)
 func (v *vSphereDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
 	f := config.Framework
 	vspheretest.Bootstrap(f)
-	nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo(ctx)
+	nodeInfo := vspheretest.GetReadySchedulableRandomNodeInfo(ctx, f.ClientSet)
 	volumePath, err := nodeInfo.VSphere.CreateVolume(&vspheretest.VolumeOptions{}, nodeInfo.DataCenterRef)
 	framework.ExpectNoError(err)
 	return &vSphereVolume{
@@ -26,21 +26,21 @@ import (
 
 var once sync.Once
 var waiting = make(chan bool)
-var f *framework.Framework
 
 // Bootstrap takes care of initializing necessary test context for vSphere tests
 func Bootstrap(fw *framework.Framework) {
 	done := make(chan bool)
-	f = fw
 	go func() {
-		once.Do(bootstrapOnce)
+		once.Do(func() {
+			bootstrapOnce(fw)
+		})
 		<-waiting
 		done <- true
 	}()
 	<-done
 }
 
-func bootstrapOnce() {
+func bootstrapOnce(f *framework.Framework) {
 	// 1. Read vSphere conf and get VSphere instances
 	vsphereInstances, err := GetVSphereInstances()
 	if err != nil {
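One detail in the hunk above: sync.Once.Do accepts only a zero-argument func(), so the framework now reaches bootstrapOnce through a closure. A minimal sketch of that pattern, with illustrative names:

package main

import (
	"fmt"
	"sync"
)

var once sync.Once

// initOnce stands in for bootstrapOnce: it takes an argument, so it
// cannot be handed to once.Do directly.
func initOnce(cfg string) { fmt.Println("bootstrapping with", cfg) }

func bootstrap(cfg string) {
	// once.Do requires func(); the closure captures cfg and forwards it.
	once.Do(func() { initOnce(cfg) })
}

func main() {
	bootstrap("vsphere.conf") // runs initOnce
	bootstrap("ignored.conf") // no-op: the Once has already fired
}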
@ -70,7 +70,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func()
|
||||
clientPod = nil
|
||||
pvc = nil
|
||||
pv = nil
|
||||
nodeInfo = GetReadySchedulableRandomNodeInfo(ctx)
|
||||
nodeInfo = GetReadySchedulableRandomNodeInfo(ctx, c)
|
||||
|
||||
volLabel = labels.Set{e2epv.VolumeSelectorKey: ns}
|
||||
selector = metav1.SetAsLabelSelector(volLabel)
|
||||
|
@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
|
||||
e2eskipper.SkipUnlessProviderIs("vsphere")
|
||||
ginkgo.DeferCleanup(testCleanupVSpherePersistentVolumeReclaim, c, nodeInfo, ns, volumePath, pv, pvc)
|
||||
Bootstrap(f)
|
||||
nodeInfo = GetReadySchedulableRandomNodeInfo(ctx)
|
||||
nodeInfo = GetReadySchedulableRandomNodeInfo(ctx, c)
|
||||
pv = nil
|
||||
pvc = nil
|
||||
volumePath = ""
|
||||
@ -81,7 +81,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:ReclaimPo
|
||||
volumePath, pv, pvc, err = testSetupVSpherePersistentVolumeReclaim(ctx, c, nodeInfo, ns, v1.PersistentVolumeReclaimDelete)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
deletePVCAfterBind(ctx, c, ns, pvc, pv)
|
||||
deletePVCAfterBind(ctx, c, ns, pvc, pv, f.Timeouts)
|
||||
pvc = nil
|
||||
|
||||
ginkgo.By("verify pv is deleted")
|
||||
@ -241,11 +241,11 @@ func testCleanupVSpherePersistentVolumeReclaim(ctx context.Context, c clientset.
|
||||
}
|
||||
|
||||
// func to wait until PV and PVC bind and once bind completes, delete the PVC
|
||||
func deletePVCAfterBind(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) {
|
||||
func deletePVCAfterBind(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, timeouts *framework.TimeoutContext) {
|
||||
var err error
|
||||
|
||||
ginkgo.By("wait for the pv and pvc to bind")
|
||||
framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, f.Timeouts, ns, pv, pvc))
|
||||
framework.ExpectNoError(e2epv.WaitOnPVandPVC(ctx, c, timeouts, ns, pv, pvc))
|
||||
|
||||
ginkgo.By("delete pvc")
|
||||
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
|
||||
|
@ -68,7 +68,7 @@ var _ = utils.SIGDescribe("PersistentVolumes [Feature:vsphere][Feature:LabelSele
|
||||
c = f.ClientSet
|
||||
ns = f.Namespace.Name
|
||||
Bootstrap(f)
|
||||
nodeInfo = GetReadySchedulableRandomNodeInfo(ctx)
|
||||
nodeInfo = GetReadySchedulableRandomNodeInfo(ctx, c)
|
||||
framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, f.Timeouts.NodeSchedulable))
|
||||
ssdlabels = make(map[string]string)
|
||||
ssdlabels["volume-type"] = "ssd"
|
||||
|
@@ -752,8 +752,8 @@ func getUUIDFromProviderID(providerID string) string {
 }
 
 // GetReadySchedulableNodeInfos returns NodeInfo objects for all nodes with Ready and schedulable state
-func GetReadySchedulableNodeInfos(ctx context.Context) []*NodeInfo {
-	nodeList, err := e2enode.GetReadySchedulableNodes(ctx, f.ClientSet)
+func GetReadySchedulableNodeInfos(ctx context.Context, c clientset.Interface) []*NodeInfo {
+	nodeList, err := e2enode.GetReadySchedulableNodes(ctx, c)
 	framework.ExpectNoError(err)
 	var nodesInfo []*NodeInfo
 	for _, node := range nodeList.Items {

@@ -768,8 +768,8 @@ func GetReadySchedulableNodeInfos(ctx context.Context) []*NodeInfo {
 // GetReadySchedulableRandomNodeInfo returns NodeInfo object for one of the Ready and Schedulable Node.
 // if multiple nodes are present with Ready and Schedulable state then one of the Node is selected randomly
 // and it's associated NodeInfo object is returned.
-func GetReadySchedulableRandomNodeInfo(ctx context.Context) *NodeInfo {
-	nodesInfo := GetReadySchedulableNodeInfos(ctx)
+func GetReadySchedulableRandomNodeInfo(ctx context.Context, c clientset.Interface) *NodeInfo {
+	nodesInfo := GetReadySchedulableNodeInfos(ctx, c)
 	gomega.Expect(nodesInfo).NotTo(gomega.BeEmpty())
 	return nodesInfo[rand.Int()%len(nodesInfo)]
 }
@ -57,7 +57,7 @@ var _ = utils.SIGDescribe("Volume Provisioning On Clustered Datastore [Feature:v
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
nodeInfo = GetReadySchedulableRandomNodeInfo(ctx)
|
||||
nodeInfo = GetReadySchedulableRandomNodeInfo(ctx, client)
|
||||
scParameters = make(map[string]string)
|
||||
clusterDatastore = GetAndExpectStringEnvVar(VCPClusterDatastore)
|
||||
})
|
||||
|
@ -74,7 +74,7 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
nodeName = GetReadySchedulableRandomNodeInfo(ctx).Name
|
||||
nodeName = GetReadySchedulableRandomNodeInfo(ctx, client).Name
|
||||
nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
|
||||
nodeKeyValueLabel = map[string]string{NodeLabelKey: nodeLabelValue}
|
||||
e2enode.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue)
|
||||
|
@ -80,7 +80,7 @@ var _ = utils.SIGDescribe("Volume FStype [Feature:vsphere]", func() {
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
gomega.Expect(GetReadySchedulableNodeInfos(ctx)).NotTo(gomega.BeEmpty())
|
||||
gomega.Expect(GetReadySchedulableNodeInfos(ctx, client)).NotTo(gomega.BeEmpty())
|
||||
})
|
||||
|
||||
ginkgo.It("verify fstype - ext3 formatted volume", func(ctx context.Context) {
|
||||
|
@ -70,7 +70,7 @@ var _ = utils.SIGDescribe("Volume Operations Storm [Feature:vsphere]", func() {
|
||||
Bootstrap(f)
|
||||
client = f.ClientSet
|
||||
namespace = f.Namespace.Name
|
||||
gomega.Expect(GetReadySchedulableNodeInfos(ctx)).NotTo(gomega.BeEmpty())
|
||||
gomega.Expect(GetReadySchedulableNodeInfos(ctx, client)).NotTo(gomega.BeEmpty())
|
||||
if scale := os.Getenv("VOLUME_OPS_SCALE"); scale != "" {
|
||||
volumeOpsScale, err = strconv.Atoi(scale)
|
||||
framework.ExpectNoError(err)
|
||||
|