Measure metrics only during a specific op in scheduler_perf

This commit is contained in:
Maciej Skoczeń 2024-07-29 12:39:37 +00:00
parent 7a4c962341
commit 5894e201fa
2 changed files with 18 additions and 8 deletions

View File

@ -1059,10 +1059,11 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
name := tCtx.Name()
// The first part is the same for each work load, therefore we can strip it.
name = name[strings.Index(name, "/")+1:]
collectors = getTestDataCollectors(collectorCtx, podInformer, fmt.Sprintf("%s/%s", name, namespace), namespace, tc.MetricsCollectorConfig, throughputErrorMargin)
collectors = getTestDataCollectors(podInformer, fmt.Sprintf("%s/%s", name, namespace), namespace, tc.MetricsCollectorConfig, throughputErrorMargin)
for _, collector := range collectors {
// Need loop-local variable for function below.
collector := collector
collector.init()
collectorWG.Add(1)
go func() {
defer collectorWG.Done()
@ -1299,16 +1300,17 @@ func createNamespaceIfNotPresent(tCtx ktesting.TContext, namespace string, podsP
}
// testDataCollector is the contract shared by the per-workload collectors
// (metricsCollector, throughputCollector below); runWorkload drives each
// instance through init -> run -> collect around the measured op.
type testDataCollector interface {
// init prepares the collector before the measurement window opens
// (e.g. metricsCollector resets the legacy metrics registry here).
init()
// run performs the ongoing collection work; the test context is handed
// over here rather than at construction time.
run(tCtx ktesting.TContext)
// collect returns the gathered measurements for reporting.
collect() []DataItem
}
// getTestDataCollectors builds the collectors (throughput + metrics) for one
// workload/namespace pair.
// NOTE(review): this diff hunk shows both the pre-commit signature (taking
// tCtx) and the post-commit one (without); only the second line survives —
// the context now reaches collectors via run(tCtx) instead of the constructor.
func getTestDataCollectors(tCtx ktesting.TContext, podInformer coreinformers.PodInformer, name, namespace string, mcc *metricsCollectorConfig, throughputErrorMargin float64) []testDataCollector {
func getTestDataCollectors(podInformer coreinformers.PodInformer, name, namespace string, mcc *metricsCollectorConfig, throughputErrorMargin float64) []testDataCollector {
if mcc == nil {
// Fall back to the default metric set when the test case does not
// configure one explicitly.
mcc = &defaultMetricsCollectorConfig
}
return []testDataCollector{
// Old call passing tCtx (removed by this commit), followed by the
// new context-free constructor call that replaces it.
newThroughputCollector(tCtx, podInformer, map[string]string{"Name": name}, []string{namespace}, throughputErrorMargin),
newThroughputCollector(podInformer, map[string]string{"Name": name}, []string{namespace}, throughputErrorMargin),
newMetricsCollector(mcc, map[string]string{"Name": name}),
}
}

View File

@ -266,22 +266,27 @@ func newMetricsCollector(config *metricsCollectorConfig, labels map[string]strin
}
}
// init implements testDataCollector. It wipes any metrics accumulated by
// earlier ops so the upcoming measurement window starts from a clean
// registry — this is what scopes the measurement to a specific op.
func (mc *metricsCollector) init() {
// Reset the metrics so that the measurements do not interfere with those collected during the previous steps.
legacyregistry.Reset()
}
// run implements testDataCollector. Metrics are read from the global legacy
// registry at collect() time, so there is no background work to start here.
func (*metricsCollector) run(tCtx ktesting.TContext) {
// metricCollector doesn't need to start before the tests, so nothing to do here.
}
func (pc *metricsCollector) collect() []DataItem {
func (mc *metricsCollector) collect() []DataItem {
var dataItems []DataItem
for metric, labelValsSlice := range pc.Metrics {
for metric, labelValsSlice := range mc.Metrics {
// no filter is specified, aggregate all the metrics within the same metricFamily.
if labelValsSlice == nil {
dataItem := collectHistogramVec(metric, pc.labels, nil)
dataItem := collectHistogramVec(metric, mc.labels, nil)
if dataItem != nil {
dataItems = append(dataItems, *dataItem)
}
} else {
for _, lvMap := range uniqueLVCombos(labelValsSlice) {
dataItem := collectHistogramVec(metric, pc.labels, lvMap)
dataItem := collectHistogramVec(metric, mc.labels, lvMap)
if dataItem != nil {
dataItems = append(dataItems, *dataItem)
}
@ -370,7 +375,7 @@ type throughputCollector struct {
errorMargin float64
}
func newThroughputCollector(tb ktesting.TB, podInformer coreinformers.PodInformer, labels map[string]string, namespaces []string, errorMargin float64) *throughputCollector {
func newThroughputCollector(podInformer coreinformers.PodInformer, labels map[string]string, namespaces []string, errorMargin float64) *throughputCollector {
return &throughputCollector{
podInformer: podInformer,
labels: labels,
@ -379,6 +384,9 @@ func newThroughputCollector(tb ktesting.TB, podInformer coreinformers.PodInforme
}
}
// init implements testDataCollector. Intentionally a no-op: the collector's
// state is set up in newThroughputCollector, and sampling of scheduled pods
// only begins when run(tCtx) is called.
func (tc *throughputCollector) init() {
}
func (tc *throughputCollector) run(tCtx ktesting.TContext) {
podsScheduled, _, err := getScheduledPods(tc.podInformer, tc.namespaces...)
if err != nil {