change defer order to pass test

YamasouA 2025-02-22 20:17:27 +09:00
parent fcce8aaad8
commit b1f6cfcfae

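The reordering leans on Go's guarantee that deferred calls run last-in, first-out. Previously, defer collectorWG.Wait() was registered after defer tCtx.Cancel("workload is done"), so that wait ran before cancellation; after this change both Wait() defers are registered before the cancel, so cancellation fires first when runWorkload returns and goroutines that only exit on cancellation are unblocked before the WaitGroups block. A minimal standalone sketch of that ordering (plain Go, with a channel close standing in for the context cancel; none of the names below come from scheduler_perf):

package main

import (
	"fmt"
	"sync"
)

func run() {
	var wg sync.WaitGroup
	done := make(chan struct{}) // stands in for tCtx.Cancel / context cancellation

	wg.Add(1)
	go func() {
		defer wg.Done()
		<-done // the worker only exits once "cancel" has happened
		fmt.Println("worker stopped")
	}()

	// Defers run LIFO: close(done), registered last, fires first,
	// so wg.Wait() below cannot block forever.
	defer wg.Wait()
	defer close(done)

	fmt.Println("workload finished, unwinding defers")
}

func main() { run() }

Flipping the two defers (registering the wait last) would make wg.Wait() run before close(done) and deadlock, which is the failure mode the new ordering in runWorkload avoids.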

@@ -1496,9 +1496,6 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 	// Everything else started by this function gets stopped before it returns.
 	tCtx = ktesting.WithCancel(tCtx)
-	var wg sync.WaitGroup
-	defer wg.Wait()
-	defer tCtx.Cancel("workload is done")
 	var dataItems []DataItem
@@ -1507,14 +1504,12 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 	// the metrics collecting needs to be sure that the goroutines
 	// are stopped.
 	var collectorCtx ktesting.TContext
-	var collectorWG sync.WaitGroup
-	defer collectorWG.Wait()
 	executor := WorkloadExecutor{
 		tCtx: tCtx,
-		wg: wg,
+		wg: sync.WaitGroup{},
 		collectorCtx: collectorCtx,
-		collectorWG: collectorWG,
+		collectorWG: sync.WaitGroup{},
 		collectors: collectors,
 		numPodsScheduledPerNamespace: make(map[string]int),
 		podInformer: podInformer,
@@ -1525,6 +1520,10 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 		dataItems: dataItems,
 	}
+	defer executor.wg.Wait()
+	defer executor.collectorWG.Wait()
+	defer tCtx.Cancel("workload is done")
 	for opIndex, op := range unrollWorkloadTemplate(tCtx, tc.WorkloadTemplate, w) {
 		realOp, err := op.realOp.patchParams(w)
 		if err != nil {
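A likely reason the executor now owns zero-value WaitGroups instead of being handed copies of the locals (wg: wg, collectorWG: collectorWG): a sync.WaitGroup copied by value becomes an independent counter (go vet's copylocks check also flags such copies), so Add/Done calls made through the struct field are invisible to a Wait() on the original variable. A small sketch of that pitfall under made-up names (executor and outer are not from the real code):

package main

import (
	"fmt"
	"sync"
)

type executor struct {
	wg sync.WaitGroup
}

func main() {
	var outer sync.WaitGroup
	e := executor{wg: outer} // copies the WaitGroup: e.wg is an independent counter (go vet warns here)

	e.wg.Add(1)
	go func() {
		defer e.wg.Done()
		fmt.Println("tracked only by e.wg")
	}()

	outer.Wait() // returns immediately: outer never saw the Add
	e.wg.Wait()  // this counter actually tracks the goroutine
}

Deferring executor.wg.Wait() and executor.collectorWG.Wait() directly, as the new code does, waits on the counters that the workload goroutines actually use.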
@@ -1538,14 +1537,10 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 		switch concreteOp := realOp.(type) {
 		case *createNodesOp:
 			executor.runCreateNodesOp(opIndex, concreteOp)
 		case *createNamespacesOp:
 			executor.runCreateNamespaceOp(opIndex, concreteOp)
 		case *createPodsOp:
 			executor.runCreatePodsOp(opIndex, concreteOp)
-			if executor.collectorCtx != nil {
-				executor.collectorCtx.Cancel("cleaning up")
-			}
 		case *deletePodsOp:
 			executor.runDeletePodsOp(opIndex, concreteOp)
 		case *churnOp:
@@ -1666,9 +1661,7 @@ func (e *WorkloadExecutor) runCreatePodsOp(opIndex int, op *createPodsOp) {
 			e.tCtx.Fatalf("op %d: Metrics collection is overlapping. Probably second collector was started before stopping a previous one", opIndex)
 		}
 		e.collectorCtx, e.collectors = startCollectingMetrics(e.tCtx, &e.collectorWG, e.podInformer, e.testCase.MetricsCollectorConfig, e.throughputErrorMargin, opIndex, namespace, []string{namespace}, nil)
-		// e.collectorCtx.Cleanup(func() {
-		// e.collectorCtx.Cancel("cleaning up")
-		// })
+		defer e.collectorCtx.Cancel("cleaning up")
 	}
 	if err := createPodsRapidly(e.tCtx, namespace, op); err != nil {
 		e.tCtx.Fatalf("op %d: %v", opIndex, err)
@@ -1869,9 +1862,6 @@ func (e *WorkloadExecutor) runStartCollectingMetricsOp(opIndex int, op *startCol
 		e.tCtx.Fatalf("op %d: Metrics collection is overlapping. Probably second collector was started before stopping a previous one", opIndex)
 	}
 	e.collectorCtx, e.collectors = startCollectingMetrics(e.tCtx, &e.collectorWG, e.podInformer, e.testCase.MetricsCollectorConfig, e.throughputErrorMargin, opIndex, op.Name, op.Namespaces, op.LabelSelector)
-	// e.collectorCtx.Cleanup(func() {
-	// collectorCtx.Cancel("cleaning up")
-	// })
 }

 func createNamespaceIfNotPresent(tCtx ktesting.TContext, namespace string, podsPerNamespace *map[string]int) {