Merge pull request #127662 from macsko/make_scheduler_perf_sleepop_duration_parametrizable

Make sleepOp duration parametrizable in scheduler_perf
Kubernetes Prow Robot
2024-09-26 20:10:01 +01:00
committed by GitHub
2 changed files with 21 additions and 20 deletions
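In short: the sleep op's hard-coded duration: 5s becomes a durationParam: $sleepDuration reference that each workload resolves from its own params, so small and large test cases can wait for different lengths. A minimal, self-contained sketch of that resolution step — the name resolveDuration and the map-based params are illustrative, not the exact scheduler_perf API:

package main

import (
	"fmt"
	"strings"
	"time"
)

// resolveDuration mirrors what the new patchParams does: strip the leading
// "$" from the reference (DurationParam[1:] in the diff), look the key up
// in the workload's params, and parse the value with time.ParseDuration.
func resolveDuration(params map[string]string, ref string) (time.Duration, error) {
	raw, ok := params[strings.TrimPrefix(ref, "$")]
	if !ok {
		return 0, fmt.Errorf("parameter %s is not defined", ref)
	}
	d, err := time.ParseDuration(raw)
	if err != nil {
		return 0, fmt.Errorf("invalid duration parameter %s: %w", ref, err)
	}
	return d, nil
}

func main() {
	params := map[string]string{"sleepDuration": "5s"}
	d, err := resolveDuration(params, "$sleepDuration")
	fmt.Println(d, err) // prints: 5s <nil>
}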

test/integration/scheduler_perf/config/performance-config.yaml

@@ -1450,7 +1450,7 @@
     skipWaitToCompletion: true
   # Wait to make sure gated pods are enqueued in scheduler.
   - opcode: sleep
-    duration: 5s
+    durationParam: $sleepDuration
   # Create pods that will be gradually deleted after being scheduled.
   - opcode: createPods
     countParam: $deletingPods
@@ -1471,6 +1471,7 @@
       gatedPods: 10
       deletingPods: 10
       measurePods: 10
+      sleepDuration: 1s
   - name: 1Node_10000GatedPods
     labels: [performance, short]
     threshold: 130
@@ -1478,6 +1479,7 @@
       gatedPods: 10000
       deletingPods: 20000
       measurePods: 20000
+      sleepDuration: 5s
 - name: SchedulingGatedPodsWithPodAffinityImpactForThroughput
   defaultPodTemplatePath: config/templates/pod-with-label.yaml
@@ -1491,7 +1493,7 @@
     skipWaitToCompletion: true
   - opcode: sleep
     # To produce a stable scheduler_perf result, here we make sure all gated Pods are stored in the scheduling queue.
-    duration: 5s
+    durationParam: $sleepDuration
   - opcode: createPods
     # The scheduling of those Pods will result in many cluster events (AssignedPodAdded)
     # and each of them will be processed by the scheduling queue.
@@ -1504,12 +1506,14 @@
     params:
       gatedPods: 10
       measurePods: 10
+      sleepDuration: 1s
   - name: 1Node_10000GatedPods
     labels: [performance, short]
     threshold: 110
     params:
       gatedPods: 10000
       measurePods: 20000
+      sleepDuration: 5s
 # This test case simulates the scheduling when pods selected to schedule have deletionTimestamp set.
 # There was a memory leak related to this path of code fixed in:

test/integration/scheduler_perf/scheduler_perf.go

@@ -835,22 +835,10 @@ func (bo barrierOp) patchParams(w *workload) (realOp, error) {
 type sleepOp struct {
 	// Must be "sleep".
 	Opcode operationCode
-	// duration of sleep.
-	Duration time.Duration
-}
-
-func (so *sleepOp) UnmarshalJSON(data []byte) (err error) {
-	var tmp struct {
-		Opcode   operationCode
-		Duration string
-	}
-	if err = json.Unmarshal(data, &tmp); err != nil {
-		return err
-	}
-	so.Opcode = tmp.Opcode
-	so.Duration, err = time.ParseDuration(tmp.Duration)
-	return err
+	// Duration of sleep.
+	Duration metav1.Duration
+	// Template parameter for Duration.
+	DurationParam string
 }
 
 func (so *sleepOp) isValid(_ bool) error {
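The hand-written UnmarshalJSON can be deleted because metav1.Duration (from k8s.io/apimachinery/pkg/apis/meta/v1) already implements json.Unmarshaler for strings like "5s"; scheduler_perf's YAML config is presumably decoded through the usual sigs.k8s.io/yaml JSON round-trip, so that is the hook that matters. A minimal demonstration of the behavior:

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// metav1.Duration unmarshals a JSON string via time.ParseDuration,
	// which is exactly what the deleted sleepOp.UnmarshalJSON did by hand.
	var d metav1.Duration
	if err := json.Unmarshal([]byte(`"1h30m"`), &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Duration) // prints: 1h30m0s
}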
@@ -861,7 +849,16 @@ func (so *sleepOp) collectsMetrics() bool {
 	return false
 }
 
-func (so sleepOp) patchParams(_ *workload) (realOp, error) {
+func (so sleepOp) patchParams(w *workload) (realOp, error) {
+	if so.DurationParam != "" {
+		durationStr, err := getParam[string](w.Params, so.DurationParam[1:])
+		if err != nil {
+			return nil, err
+		}
+		if so.Duration.Duration, err = time.ParseDuration(durationStr); err != nil {
+			return nil, fmt.Errorf("invalid duration parameter %s: %w", so.DurationParam, err)
+		}
+	}
 	return &so, nil
 }
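Two details worth noting in patchParams: the receiver is a value (so sleepOp), so each workload patches its own copy and a resolved $sleepDuration cannot leak into the next workload; and when DurationParam is empty the literal Duration field is left untouched, so plain duration: 5s configs keep working unchanged.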
@@ -1580,7 +1577,7 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 		case *sleepOp:
 			select {
 			case <-tCtx.Done():
-			case <-time.After(concreteOp.Duration):
+			case <-time.After(concreteOp.Duration.Duration):
 			}
 		case *startCollectingMetricsOp:
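The runWorkload change just reads through the metav1.Duration wrapper; the surrounding select keeps the sleep cancellable. The same pattern sketched standalone, assuming only that ktesting.TContext embeds a context.Context so tCtx.Done() is the usual cancellation signal:

package main

import (
	"context"
	"fmt"
	"time"
)

// sleepCtx blocks for d, or returns early if ctx is cancelled first —
// the same select used in runWorkload's *sleepOp case.
func sleepCtx(ctx context.Context, d time.Duration) {
	select {
	case <-ctx.Done():
	case <-time.After(d):
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	start := time.Now()
	sleepCtx(ctx, 5*time.Second) // returns after ~100ms, not 5s
	fmt.Println("slept for", time.Since(start).Round(time.Millisecond))
}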