From eb6abf046233fc9c3a6c831c201755e8fb639fe3 Mon Sep 17 00:00:00 2001
From: Patrick Ohly
Date: Thu, 29 Feb 2024 12:42:51 +0100
Subject: [PATCH] scheduler_perf: automatically delete created objects

This is not relevant for namespaced objects, but matters for the
cluster-scoped ResourceClass during unit testing. This works right now
because there is only one such unit test, but will fail when adding a
second one.

Instead of passing a boolean flag down into all functions where it
might be needed, it's now a context value.
---
 test/integration/scheduler_perf/create.go    | 13 +++++++
 .../scheduler_perf/scheduler_perf.go         | 34 +++++++++++++++++--
 .../scheduler_perf/scheduler_test.go         |  2 +-
 3 files changed, 46 insertions(+), 3 deletions(-)

diff --git a/test/integration/scheduler_perf/create.go b/test/integration/scheduler_perf/create.go
index ddc0e350e75..d0b1a43c058 100644
--- a/test/integration/scheduler_perf/create.go
+++ b/test/integration/scheduler_perf/create.go
@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"time"
 
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -107,6 +108,18 @@ func (c *createAny) run(tCtx ktesting.TContext) {
 			}
 			_, err = resourceClient.Create(tCtx, obj, metav1.CreateOptions{})
 		}
+		if err == nil && shouldCleanup(tCtx) {
+			tCtx.CleanupCtx(func(tCtx ktesting.TContext) {
+				del := resourceClient.Delete
+				if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
+					del = resourceClient.Namespace(c.Namespace).Delete
+				}
+				err := del(tCtx, obj.GetName(), metav1.DeleteOptions{})
+				if !apierrors.IsNotFound(err) {
+					tCtx.ExpectNoError(err, fmt.Sprintf("deleting %s.%s %s", obj.GetKind(), obj.GetAPIVersion(), klog.KObj(obj)))
+				}
+			})
+		}
 		return err
 	}
 	// Retry, some errors (like CRD just created and type not ready for use yet) are temporary.
diff --git a/test/integration/scheduler_perf/scheduler_perf.go b/test/integration/scheduler_perf/scheduler_perf.go
index 366756c502d..b781fa4896b 100644
--- a/test/integration/scheduler_perf/scheduler_perf.go
+++ b/test/integration/scheduler_perf/scheduler_perf.go
@@ -641,6 +641,30 @@ func initTestOutput(tb testing.TB) io.Writer {
 	return output
 }
 
+type cleanupKeyType struct{}
+
+var cleanupKey = cleanupKeyType{}
+
+// shouldCleanup returns true if a function should clean up resources in the
+// apiserver when the test is done. This is true for unit tests (etcd and
+// apiserver get reused) and false for benchmarks (each benchmark starts with a
+// clean state, so cleaning up just wastes time).
+//
+// The default if not explicitly set in the context is true.
+func shouldCleanup(ctx context.Context) bool {
+	val := ctx.Value(cleanupKey)
+	if enabled, ok := val.(bool); ok {
+		return enabled
+	}
+	return true
+}
+
+// withCleanup sets whether cleaning up resources in the apiserver
+// should be done. The default is true.
+func withCleanup(tCtx ktesting.TContext, enabled bool) ktesting.TContext {
+	return ktesting.WithValue(tCtx, cleanupKey, enabled)
+}
+
 var perfSchedulingLabelFilter = flag.String("perf-scheduling-label-filter", "performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by BenchmarkPerfScheduling")
 
 // RunBenchmarkPerfScheduling runs the scheduler performance tests.
@@ -695,7 +719,12 @@ func RunBenchmarkPerfScheduling(b *testing.B, outOfTreePluginRegistry frameworkr
 				defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, feature, flag)()
 			}
 			informerFactory, tCtx := setupClusterForWorkload(tCtx, tc.SchedulerConfigPath, tc.FeatureGates, outOfTreePluginRegistry)
-			results := runWorkload(tCtx, tc, w, informerFactory, false)
+
+			// No need to clean up, each benchmark testcase starts with an empty
+			// etcd database.
+			tCtx = withCleanup(tCtx, false)
+
+			results := runWorkload(tCtx, tc, w, informerFactory)
 
 			dataItems.DataItems = append(dataItems.DataItems, results...)
 			if len(results) > 0 {
@@ -799,7 +828,7 @@ func setupClusterForWorkload(tCtx ktesting.TContext, configPath string, featureG
 	return mustSetupCluster(tCtx, cfg, featureGates, outOfTreePluginRegistry)
 }
 
-func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFactory informers.SharedInformerFactory, cleanup bool) []DataItem {
+func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFactory informers.SharedInformerFactory) []DataItem {
 	b, benchmarking := tCtx.TB().(*testing.B)
 	if benchmarking {
 		start := time.Now()
@@ -811,6 +840,7 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 			b.ReportMetric(duration.Seconds(), "runtime_seconds")
 		})
 	}
+	cleanup := shouldCleanup(tCtx)
 
 	// Disable error checking of the sampling interval length in the
 	// throughput collector by default. When running benchmarks, report
diff --git a/test/integration/scheduler_perf/scheduler_test.go b/test/integration/scheduler_perf/scheduler_test.go
index dacc201f8e4..1bd106f20f1 100644
--- a/test/integration/scheduler_perf/scheduler_test.go
+++ b/test/integration/scheduler_perf/scheduler_test.go
@@ -92,7 +92,7 @@ func TestScheduling(t *testing.T) {
 				t.Skipf("disabled by label filter %q", *testSchedulingLabelFilter)
 			}
 			tCtx := ktesting.WithTB(tCtx, t)
-			runWorkload(tCtx, tc, w, informerFactory, true)
+			runWorkload(tCtx, tc, w, informerFactory)
 		})
 	}
 })
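
The core technique in the scheduler_perf.go hunk is a context value keyed by an
unexported struct type, so that configuration reaches deeply nested helpers
without threading a parameter through every signature. The following is a
minimal, self-contained sketch of the same pattern using only the standard
library; main() and its printed output are illustrative and not part of the
patch or the ktesting package:

    package main

    import (
    	"context"
    	"fmt"
    )

    // An unexported struct type makes the key unique: no other package can
    // construct an identical key, so the stored value cannot collide with
    // other context users.
    type cleanupKeyType struct{}

    var cleanupKey = cleanupKeyType{}

    // shouldCleanup defaults to true when nothing was stored, matching the
    // patch: unit tests clean up unless a caller explicitly opts out.
    func shouldCleanup(ctx context.Context) bool {
    	if enabled, ok := ctx.Value(cleanupKey).(bool); ok {
    		return enabled
    	}
    	return true
    }

    // withCleanup derives a context that carries the flag.
    func withCleanup(ctx context.Context, enabled bool) context.Context {
    	return context.WithValue(ctx, cleanupKey, enabled)
    }

    func main() {
    	ctx := context.Background()
    	fmt.Println(shouldCleanup(ctx))                     // true: the default
    	fmt.Println(shouldCleanup(withCleanup(ctx, false))) // false: benchmark mode
    }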
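The create.go hunk registers a best-effort delete that tolerates objects which
are already gone by the time the test finishes. Outside the ktesting helpers,
the same idea looks roughly like the sketch below: createWithCleanup is a
hypothetical name, testing.TB.Cleanup stands in for tCtx.CleanupCtx, and the
dynamic client calls are the standard client-go ones:

    package example

    import (
    	"context"
    	"testing"

    	apierrors "k8s.io/apimachinery/pkg/api/errors"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    	"k8s.io/client-go/dynamic"
    )

    // createWithCleanup (hypothetical helper) creates obj and schedules its
    // deletion for the end of the test. A NotFound error during cleanup is
    // ignored: the test itself may have deleted the object already.
    func createWithCleanup(ctx context.Context, tb testing.TB, client dynamic.ResourceInterface, obj *unstructured.Unstructured) error {
    	if _, err := client.Create(ctx, obj, metav1.CreateOptions{}); err != nil {
    		return err
    	}
    	tb.Cleanup(func() {
    		// The test's own context may be canceled by now, so use a fresh one.
    		err := client.Delete(context.Background(), obj.GetName(), metav1.DeleteOptions{})
    		if err != nil && !apierrors.IsNotFound(err) {
    			tb.Errorf("deleting %s: %v", obj.GetName(), err)
    		}
    	})
    	return nil
    }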