Merge pull request #123588 from pohly/scheduler-perf-any-cleanup

scheduler_perf: automatically delete created objects
Kubernetes Prow Robot authored 2024-03-04 04:49:12 -08:00, committed by GitHub
commit 55d1518126
3 changed files with 46 additions and 3 deletions


@@ -21,6 +21,7 @@ import (
 	"fmt"
 	"time"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -113,6 +114,18 @@ func (c *createAny) run(tCtx ktesting.TContext) {
 		}
 		_, err = resourceClient.Create(tCtx, obj, options)
 	}
+	if err == nil && shouldCleanup(tCtx) {
+		tCtx.CleanupCtx(func(tCtx ktesting.TContext) {
+			del := resourceClient.Delete
+			if mapping.Scope.Name() == meta.RESTScopeNameNamespace {
+				del = resourceClient.Namespace(c.Namespace).Delete
+			}
+			err := del(tCtx, obj.GetName(), metav1.DeleteOptions{})
+			if !apierrors.IsNotFound(err) {
+				tCtx.ExpectNoError(err, fmt.Sprintf("deleting %s.%s %s", obj.GetKind(), obj.GetAPIVersion(), klog.KObj(obj)))
+			}
+		})
+	}
 	return err
 }
 // Retry, some errors (like CRD just created and type not ready for use yet) are temporary.
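The added block ties object deletion to the test's cleanup phase and tolerates objects that are already gone (NotFound). For illustration only, here is the same shape with a plain *testing.T and the client-go dynamic client instead of the scheduler_perf ktesting.TContext helpers; the function and parameters below are hypothetical and not part of this PR:

package example

import (
	"context"
	"testing"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
)

// createWithCleanup creates obj and, when cleanup is enabled, registers a
// deletion hook that runs when the test finishes. A NotFound error during
// cleanup is ignored because the test may already have deleted the object.
func createWithCleanup(ctx context.Context, t *testing.T, client dynamic.Interface,
	gvr schema.GroupVersionResource, namespace string, obj *unstructured.Unstructured, cleanup bool) error {
	// Cluster-scoped resources use the bare resource client, namespaced
	// ones the namespaced client, mirroring the scope check above.
	var ri dynamic.ResourceInterface = client.Resource(gvr)
	if namespace != "" {
		ri = client.Resource(gvr).Namespace(namespace)
	}
	if _, err := ri.Create(ctx, obj, metav1.CreateOptions{}); err != nil {
		return err
	}
	if cleanup {
		t.Cleanup(func() {
			err := ri.Delete(context.Background(), obj.GetName(), metav1.DeleteOptions{})
			if err != nil && !apierrors.IsNotFound(err) {
				t.Errorf("deleting %s %s: %v", obj.GetKind(), obj.GetName(), err)
			}
		})
	}
	return nil
}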


@@ -641,6 +641,30 @@ func initTestOutput(tb testing.TB) io.Writer {
 	return output
 }
+
+type cleanupKeyType struct{}
+
+var cleanupKey = cleanupKeyType{}
+
+// shouldCleanup returns true if a function should clean up resources in the
+// apiserver when the test is done. This is true for unit tests (etcd and
+// apiserver get reused) and false for benchmarks (each benchmark starts with a
+// clean state, so cleaning up just wastes time).
+//
+// The default if not explicitly set in the context is true.
+func shouldCleanup(ctx context.Context) bool {
+	val := ctx.Value(cleanupKey)
+	if enabled, ok := val.(bool); ok {
+		return enabled
+	}
+	return true
+}
+
+// withCleanup sets whether cleaning up resources in the apiserver
+// should be done. The default is true.
+func withCleanup(tCtx ktesting.TContext, enabled bool) ktesting.TContext {
+	return ktesting.WithValue(tCtx, cleanupKey, enabled)
+}
 
 var perfSchedulingLabelFilter = flag.String("perf-scheduling-label-filter", "performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by BenchmarkPerfScheduling")
 
 // RunBenchmarkPerfScheduling runs the scheduler performance tests.
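shouldCleanup and withCleanup are the standard context-value idiom: an unexported key type avoids collisions with keys from other packages, and an unset value falls back to the default (clean up). Since ktesting.TContext is also a context.Context, the same behavior can be sketched standalone with the standard library; the snippet below is purely illustrative, not code from this PR:

package main

import (
	"context"
	"fmt"
)

// Unexported key type: no other package can construct the same context key.
type cleanupKeyType struct{}

var cleanupKey = cleanupKeyType{}

// shouldCleanup reports whether cleanup is enabled; an unset value means true.
func shouldCleanup(ctx context.Context) bool {
	if enabled, ok := ctx.Value(cleanupKey).(bool); ok {
		return enabled
	}
	return true
}

// withCleanup returns a context with the cleanup flag set.
func withCleanup(ctx context.Context, enabled bool) context.Context {
	return context.WithValue(ctx, cleanupKey, enabled)
}

func main() {
	ctx := context.Background()
	fmt.Println(shouldCleanup(ctx))                     // true: unit tests keep the default
	fmt.Println(shouldCleanup(withCleanup(ctx, false))) // false: benchmarks opt out
}

In the PR, benchmarks call withCleanup(tCtx, false) once per workload, while the integration test keeps the default and gets automatic deletion.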
@@ -695,7 +719,12 @@ func RunBenchmarkPerfScheduling(b *testing.B, outOfTreePluginRegistry frameworkr
 				defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, feature, flag)()
 			}
 			informerFactory, tCtx := setupClusterForWorkload(tCtx, tc.SchedulerConfigPath, tc.FeatureGates, outOfTreePluginRegistry)
-			results := runWorkload(tCtx, tc, w, informerFactory, false)
+
+			// No need to clean up, each benchmark testcase starts with an empty
+			// etcd database.
+			tCtx = withCleanup(tCtx, false)
+
+			results := runWorkload(tCtx, tc, w, informerFactory)
 			dataItems.DataItems = append(dataItems.DataItems, results...)
 			if len(results) > 0 {
@@ -799,7 +828,7 @@ func setupClusterForWorkload(tCtx ktesting.TContext, configPath string, featureG
 	return mustSetupCluster(tCtx, cfg, featureGates, outOfTreePluginRegistry)
 }
 
-func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFactory informers.SharedInformerFactory, cleanup bool) []DataItem {
+func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFactory informers.SharedInformerFactory) []DataItem {
 	b, benchmarking := tCtx.TB().(*testing.B)
 	if benchmarking {
 		start := time.Now()
@@ -811,6 +840,7 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 			b.ReportMetric(duration.Seconds(), "runtime_seconds")
 		})
 	}
+	cleanup := shouldCleanup(tCtx)
 
 	// Disable error checking of the sampling interval length in the
 	// throughput collector by default. When running benchmarks, report


@@ -92,7 +92,7 @@ func TestScheduling(t *testing.T) {
 				t.Skipf("disabled by label filter %q", *testSchedulingLabelFilter)
 			}
 			tCtx := ktesting.WithTB(tCtx, t)
-			runWorkload(tCtx, tc, w, informerFactory, true)
+			runWorkload(tCtx, tc, w, informerFactory)
 		})
 	}
 })