scheduler_perf: automatically delete created objects

This is not relevant for namespaced objects, but it matters for the cluster-scoped
ResourceClass during unit testing. Right now this happens to work because there is
only one such unit test; adding a second one that creates the same cluster-scoped
object would fail.
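
For illustration only (not part of this commit): a sketch of that failure mode
using client-go's fake clientset, with a Namespace standing in for the
cluster-scoped ResourceClass. The package and test names are made up.

package cleanup

import (
	"context"
	"testing"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// Two "tests" sharing one apiserver state: without cleanup, the second
// create of the same cluster-scoped object fails with AlreadyExists.
func TestRecreateWithoutCleanup(t *testing.T) {
	client := fake.NewSimpleClientset()
	obj := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "cluster-scoped"}}

	// The first test creates the object and never deletes it.
	if _, err := client.CoreV1().Namespaces().Create(context.Background(), obj, metav1.CreateOptions{}); err != nil {
		t.Fatal(err)
	}

	// A second test doing the same against the reused state fails.
	_, err := client.CoreV1().Namespaces().Create(context.Background(), obj, metav1.CreateOptions{})
	if !apierrors.IsAlreadyExists(err) {
		t.Fatalf("expected AlreadyExists, got %v", err)
	}
}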

Instead of passing a boolean flag down into all functions where it might be
needed, the setting is now carried as a context value.
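
A minimal, self-contained sketch of that pattern, using plain context.Context
instead of the ktesting wrappers (the names mirror the diff below):

package main

import (
	"context"
	"fmt"
)

// Unexported key type: no other package can construct this key,
// so the entry cannot collide with anything else in the context.
type cleanupKeyType struct{}

var cleanupKey = cleanupKeyType{}

// withCleanup stores the flag once instead of threading a bool
// parameter through every function that might need it.
func withCleanup(ctx context.Context, enabled bool) context.Context {
	return context.WithValue(ctx, cleanupKey, enabled)
}

// shouldCleanup defaults to true when the flag was never set.
func shouldCleanup(ctx context.Context) bool {
	if enabled, ok := ctx.Value(cleanupKey).(bool); ok {
		return enabled
	}
	return true
}

func main() {
	ctx := context.Background()
	fmt.Println(shouldCleanup(ctx))                     // true: the unit test default
	fmt.Println(shouldCleanup(withCleanup(ctx, false))) // false: benchmarks opt out
}
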
Patrick Ohly 2024-02-29 12:42:51 +01:00
parent 47c92e2ab7
commit eb6abf0462
3 changed files with 46 additions and 3 deletions

View File

@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"time"
 
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -107,6 +108,18 @@ func (c *createAny) run(tCtx ktesting.TContext) {
 		}
 		_, err = resourceClient.Create(tCtx, obj, metav1.CreateOptions{})
 	}
+	if err == nil && shouldCleanup(tCtx) {
+		tCtx.CleanupCtx(func(tCtx ktesting.TContext) {
+			del := resourceClient.Delete
+			if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
+				del = resourceClient.Namespace(c.Namespace).Delete
+			}
+			err := del(tCtx, obj.GetName(), metav1.DeleteOptions{})
+			if !apierrors.IsNotFound(err) {
+				tCtx.ExpectNoError(err, fmt.Sprintf("deleting %s.%s %s", obj.GetKind(), obj.GetAPIVersion(), klog.KObj(obj)))
+			}
+		})
+	}
 	return err
 }
 // Retry, some errors (like CRD just created and type not ready for use yet) are temporary.
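
For reference, the scope-aware delete above as a standalone sketch against
client-go's dynamic client; deleteObject and its parameters are illustrative,
not code from this commit:

package cleanup

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/dynamic"
)

// deleteObject removes one object, picking the namespaced client only
// for namespace-scoped resources; a NotFound error is ignored because
// the object may already be gone.
func deleteObject(ctx context.Context, client dynamic.Interface, mapping *meta.RESTMapping, namespace, name string) error {
	resourceClient := client.Resource(mapping.Resource)
	del := resourceClient.Delete
	if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
		del = resourceClient.Namespace(namespace).Delete
	}
	if err := del(ctx, name, metav1.DeleteOptions{}); !apierrors.IsNotFound(err) {
		return err
	}
	return nil
}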

View File

@@ -641,6 +641,30 @@ func initTestOutput(tb testing.TB) io.Writer {
 	return output
 }
 
+type cleanupKeyType struct{}
+
+var cleanupKey = cleanupKeyType{}
+
+// shouldCleanup returns true if a function should clean up resources in the
+// apiserver when the test is done. This is true for unit tests (etcd and
+// apiserver get reused) and false for benchmarks (each benchmark starts with a
+// clean state, so cleaning up just wastes time).
+//
+// The default if not explicitly set in the context is true.
+func shouldCleanup(ctx context.Context) bool {
+	val := ctx.Value(cleanupKey)
+	if enabled, ok := val.(bool); ok {
+		return enabled
+	}
+	return true
+}
+
+// withCleanup sets whether cleaning up resources in the apiserver
+// should be done. The default is true.
+func withCleanup(tCtx ktesting.TContext, enabled bool) ktesting.TContext {
+	return ktesting.WithValue(tCtx, cleanupKey, enabled)
+}
+
 var perfSchedulingLabelFilter = flag.String("perf-scheduling-label-filter", "performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by BenchmarkPerfScheduling")
 
 // RunBenchmarkPerfScheduling runs the scheduler performance tests.
@@ -695,7 +719,12 @@ func RunBenchmarkPerfScheduling(b *testing.B, outOfTreePluginRegistry frameworkr
 				defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, feature, flag)()
 			}
 			informerFactory, tCtx := setupClusterForWorkload(tCtx, tc.SchedulerConfigPath, tc.FeatureGates, outOfTreePluginRegistry)
-			results := runWorkload(tCtx, tc, w, informerFactory, false)
+
+			// No need to clean up; each benchmark testcase starts with an empty
+			// etcd database.
+			tCtx = withCleanup(tCtx, false)
+
+			results := runWorkload(tCtx, tc, w, informerFactory)
 			dataItems.DataItems = append(dataItems.DataItems, results...)
 			if len(results) > 0 {
@@ -799,7 +828,7 @@ func setupClusterForWorkload(tCtx ktesting.TContext, configPath string, featureG
 	return mustSetupCluster(tCtx, cfg, featureGates, outOfTreePluginRegistry)
 }
 
-func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFactory informers.SharedInformerFactory, cleanup bool) []DataItem {
+func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFactory informers.SharedInformerFactory) []DataItem {
 	b, benchmarking := tCtx.TB().(*testing.B)
 	if benchmarking {
 		start := time.Now()
@@ -811,6 +840,7 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 			b.ReportMetric(duration.Seconds(), "runtime_seconds")
 		})
 	}
 
+	cleanup := shouldCleanup(tCtx)
 	// Disable error checking of the sampling interval length in the
 	// throughput collector by default. When running benchmarks, report

View File

@@ -92,7 +92,7 @@ func TestScheduling(t *testing.T) {
 					t.Skipf("disabled by label filter %q", *testSchedulingLabelFilter)
 				}
 				tCtx := ktesting.WithTB(tCtx, t)
-				runWorkload(tCtx, tc, w, informerFactory, true)
+				runWorkload(tCtx, tc, w, informerFactory)
 			})
 		}
 	})