diff --git a/test/integration/scheduler_perf/README.md b/test/integration/scheduler_perf/README.md
index 5ca8474fb33..46d7423cad3 100644
--- a/test/integration/scheduler_perf/README.md
+++ b/test/integration/scheduler_perf/README.md
@@ -158,7 +158,7 @@ the ci-benchmark-scheduler-perf periodic job will fail with an error log such as
 ```
 --- FAIL: BenchmarkPerfScheduling/SchedulingBasic/5000Nodes_10000Pods
 ...
-    scheduler_perf.go:1098: ERROR: op 2: expected SchedulingThroughput Average to be higher: got 256.12, want 270
+    scheduler_perf.go:1098: ERROR: op 2: SchedulingBasic/5000Nodes_10000Pods/namespace-2: expected SchedulingThroughput Average to be higher: got 256.12, want 270
 ```
 
 This allows to analyze which workload failed. Make sure that the failure is not an outlier
diff --git a/test/integration/scheduler_perf/config/performance-config.yaml b/test/integration/scheduler_perf/config/performance-config.yaml
index 8285b17ba27..a04055a438a 100644
--- a/test/integration/scheduler_perf/config/performance-config.yaml
+++ b/test/integration/scheduler_perf/config/performance-config.yaml
@@ -818,7 +818,7 @@
       measurePods: 1000
   - name: 5000Nodes_2000Pods
     labels: [performance]
-    threshold: 35
+    threshold: 24
     params:
       initNodes: 6000
       initPodsPerNamespace: 40
@@ -1070,7 +1070,6 @@
       measurePods: 10
       maxClaimsPerNode: 10
   - name: 2000pods_100nodes
-    labels: [performance, fast]
     params:
       # In this testcase, the number of nodes is smaller
       # than the limit for the PodScheduling slices.
@@ -1214,7 +1213,6 @@
       measurePods: 10
       maxClaimsPerNode: 10
   - name: 2000pods_100nodes
-    labels: [performance, fast]
     params:
       nodesWithDRA: 100
       nodesWithoutDRA: 0
@@ -1289,7 +1287,6 @@
       measureClaims: 2 # must be measurePods / 5
       maxClaimsPerNode: 2
   - name: 2000pods_100nodes
-    labels: [performance, fast]
     params:
       nodesWithDRA: 100
       nodesWithoutDRA: 0
diff --git a/test/integration/scheduler_perf/scheduler_perf.go b/test/integration/scheduler_perf/scheduler_perf.go
index c87ad55cec9..af571b21b0f 100644
--- a/test/integration/scheduler_perf/scheduler_perf.go
+++ b/test/integration/scheduler_perf/scheduler_perf.go
@@ -939,9 +939,9 @@ func compareMetricWithThreshold(items []DataItem, threshold float64, metricSelec
 	for _, item := range items {
 		if item.Labels["Metric"] == metricSelector.Name && labelsMatch(item.Labels, metricSelector.Labels) && !valueWithinThreshold(item.Data["Average"], threshold, metricSelector.ExpectLower) {
 			if metricSelector.ExpectLower {
-				return fmt.Errorf("expected %s Average to be lower: got %f, want %f", metricSelector.Name, item.Data["Average"], threshold)
+				return fmt.Errorf("%s: expected %s Average to be lower: got %f, want %f", item.Labels["Name"], metricSelector.Name, item.Data["Average"], threshold)
			}
-			return fmt.Errorf("expected %s Average to be higher: got %f, want %f", metricSelector.Name, item.Data["Average"], threshold)
+			return fmt.Errorf("%s: expected %s Average to be higher: got %f, want %f", item.Labels["Name"], metricSelector.Name, item.Data["Average"], threshold)
		}
	}
	return nil
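
For illustration, here is a minimal, self-contained Go sketch of the message format the scheduler_perf.go change introduces: a threshold failure is now prefixed with the workload name read from `item.Labels["Name"]`, so a CI log alone identifies which workload missed its threshold. The `DataItem` struct and `thresholdError` helper below are simplified stand-ins invented for this sketch, not the upstream API; in the real code the formatting happens inside `compareMetricWithThreshold` as shown in the diff above.

```go
package main

import "fmt"

// DataItem is a simplified stand-in for the scheduler_perf type: string
// labels (including "Metric" and, after this change, "Name") plus numeric
// aggregates such as "Average".
type DataItem struct {
	Labels map[string]string
	Data   map[string]float64
}

// thresholdError (hypothetical helper) builds the new error message with the
// workload name prepended, covering both the ExpectLower and default cases.
func thresholdError(item DataItem, metric string, threshold float64, expectLower bool) error {
	direction := "higher"
	if expectLower {
		direction = "lower"
	}
	return fmt.Errorf("%s: expected %s Average to be %s: got %f, want %f",
		item.Labels["Name"], metric, direction, item.Data["Average"], threshold)
}

func main() {
	item := DataItem{
		Labels: map[string]string{
			"Metric": "SchedulingThroughput",
			"Name":   "SchedulingBasic/5000Nodes_10000Pods/namespace-2",
		},
		Data: map[string]float64{"Average": 256.12},
	}
	fmt.Println(thresholdError(item, "SchedulingThroughput", 270, false))
	// Prints:
	// SchedulingBasic/5000Nodes_10000Pods/namespace-2: expected SchedulingThroughput Average to be higher: got 256.120000, want 270.000000
}
```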