Mirror of https://github.com/k3s-io/kubernetes.git
Measure metrics only during a specific op in scheduler_perf
commit 5894e201fa
parent 7a4c962341
@@ -1059,10 +1059,11 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 			name := tCtx.Name()
 			// The first part is the same for each work load, therefore we can strip it.
 			name = name[strings.Index(name, "/")+1:]
-			collectors = getTestDataCollectors(collectorCtx, podInformer, fmt.Sprintf("%s/%s", name, namespace), namespace, tc.MetricsCollectorConfig, throughputErrorMargin)
+			collectors = getTestDataCollectors(podInformer, fmt.Sprintf("%s/%s", name, namespace), namespace, tc.MetricsCollectorConfig, throughputErrorMargin)
 			for _, collector := range collectors {
 				// Need loop-local variable for function below.
 				collector := collector
+				collector.init()
 				collectorWG.Add(1)
 				go func() {
 					defer collectorWG.Done()
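
Two details of this hunk are worth calling out: collectorCtx is no longer threaded into getTestDataCollectors, and the new collector.init() runs synchronously before the collector goroutine starts. The `collector := collector` context line above it is the classic pre-Go-1.22 loop-variable capture idiom; a minimal standalone sketch of why it matters (illustrative names, not scheduler_perf code):

package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []string{"throughput", "metrics"}
	var wg sync.WaitGroup
	for _, item := range items {
		item := item // loop-local copy; before Go 1.22, all closures shared one loop variable
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(item) // each goroutine sees its own element
		}()
	}
	wg.Wait()
}

Without the copy, every goroutine could observe the final value of the shared loop variable; the same reasoning applies to the collector goroutines started here.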
@@ -1299,16 +1300,17 @@ func createNamespaceIfNotPresent(tCtx ktesting.TContext, namespace string, podsP
 }
 
 type testDataCollector interface {
+	init()
 	run(tCtx ktesting.TContext)
 	collect() []DataItem
 }
 
-func getTestDataCollectors(tCtx ktesting.TContext, podInformer coreinformers.PodInformer, name, namespace string, mcc *metricsCollectorConfig, throughputErrorMargin float64) []testDataCollector {
+func getTestDataCollectors(podInformer coreinformers.PodInformer, name, namespace string, mcc *metricsCollectorConfig, throughputErrorMargin float64) []testDataCollector {
 	if mcc == nil {
 		mcc = &defaultMetricsCollectorConfig
 	}
 	return []testDataCollector{
-		newThroughputCollector(tCtx, podInformer, map[string]string{"Name": name}, []string{namespace}, throughputErrorMargin),
+		newThroughputCollector(podInformer, map[string]string{"Name": name}, []string{namespace}, throughputErrorMargin),
 		newMetricsCollector(mcc, map[string]string{"Name": name}),
 	}
 }
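
Taken together with the runWorkload hunk above, the expanded interface pins down a lifecycle: init() resets state synchronously before the goroutine launches, run() samples in the background while the op executes, and collect() is read once the op finishes. A self-contained sketch of that sequencing under toy types (none of these names come from the patch):

package main

import (
	"context"
	"fmt"
	"sync"
)

type dataItem struct {
	name  string
	value float64
}

type collector interface {
	init()                   // synchronous reset before any measurement
	run(ctx context.Context) // background sampling while the op executes
	collect() []dataItem     // read results once the op is done
}

type countingCollector struct{ n int }

func (c *countingCollector) init()                   { c.n = 0 }
func (c *countingCollector) run(ctx context.Context) { <-ctx.Done() }
func (c *countingCollector) collect() []dataItem {
	return []dataItem{{name: "ops", value: float64(c.n)}}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	collectors := []collector{&countingCollector{n: 99}} // stale count from a previous op
	var wg sync.WaitGroup
	for _, c := range collectors {
		c := c   // loop-local copy, as in the patch
		c.init() // reset before the goroutine starts, same ordering as runWorkload
		wg.Add(1)
		go func() {
			defer wg.Done()
			c.run(ctx)
		}()
	}
	// ... the measured op would execute here ...
	cancel()
	wg.Wait()
	for _, c := range collectors {
		fmt.Println(c.collect()) // [{ops 0}]: stale state was cleared by init
	}
}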
@@ -266,22 +266,27 @@ func newMetricsCollector(config *metricsCollectorConfig, labels map[string]strin
 	}
 }
 
+func (mc *metricsCollector) init() {
+	// Reset the metrics so that the measurements do not interfere with those collected during the previous steps.
+	legacyregistry.Reset()
+}
+
 func (*metricsCollector) run(tCtx ktesting.TContext) {
 	// metricCollector doesn't need to start before the tests, so nothing to do here.
 }
 
-func (pc *metricsCollector) collect() []DataItem {
+func (mc *metricsCollector) collect() []DataItem {
 	var dataItems []DataItem
-	for metric, labelValsSlice := range pc.Metrics {
+	for metric, labelValsSlice := range mc.Metrics {
 		// no filter is specified, aggregate all the metrics within the same metricFamily.
 		if labelValsSlice == nil {
-			dataItem := collectHistogramVec(metric, pc.labels, nil)
+			dataItem := collectHistogramVec(metric, mc.labels, nil)
 			if dataItem != nil {
 				dataItems = append(dataItems, *dataItem)
 			}
 		} else {
 			for _, lvMap := range uniqueLVCombos(labelValsSlice) {
-				dataItem := collectHistogramVec(metric, pc.labels, lvMap)
+				dataItem := collectHistogramVec(metric, mc.labels, lvMap)
 				if dataItem != nil {
 					dataItems = append(dataItems, *dataItem)
 				}
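
This hunk is the heart of the commit: metricsCollector.init() calls legacyregistry.Reset() so that metrics accumulated during earlier ops are discarded and collect() reports only the measured op; the receiver rename from pc to mc is cosmetic. A self-contained sketch of the reset-before-measure idea with a toy histogram (not the component-base registry):

package main

import "fmt"

type histogram struct{ samples []float64 }

func (h *histogram) observe(v float64) { h.samples = append(h.samples, v) }
func (h *histogram) reset()            { h.samples = nil }

type opCollector struct{ latency *histogram }

// init mirrors the pattern in the patch: clear state recorded during
// earlier, unmeasured phases so collect only sees the op under test.
func (c *opCollector) init()              { c.latency.reset() }
func (c *opCollector) collect() []float64 { return c.latency.samples }

func main() {
	h := &histogram{}
	h.observe(42) // recorded while earlier ops ran
	c := &opCollector{latency: h}
	c.init()                 // analogous to legacyregistry.Reset()
	h.observe(7)             // produced by the measured op
	fmt.Println(c.collect()) // [7]
}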
@@ -370,7 +375,7 @@ type throughputCollector struct {
 	errorMargin float64
 }
 
-func newThroughputCollector(tb ktesting.TB, podInformer coreinformers.PodInformer, labels map[string]string, namespaces []string, errorMargin float64) *throughputCollector {
+func newThroughputCollector(podInformer coreinformers.PodInformer, labels map[string]string, namespaces []string, errorMargin float64) *throughputCollector {
 	return &throughputCollector{
 		podInformer: podInformer,
 		labels:      labels,
@@ -379,6 +384,9 @@ func newThroughputCollector(tb ktesting.TB, podInformer coreinformers.PodInforme
 	}
 }
 
+func (tc *throughputCollector) init() {
+}
+
 func (tc *throughputCollector) run(tCtx ktesting.TContext) {
 	podsScheduled, _, err := getScheduledPods(tc.podInformer, tc.namespaces...)
 	if err != nil {
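
throughputCollector needs no reset, so its init() is an empty stub that only satisfies the widened interface. A hypothetical compile-time assertion (not part of the patch) that would verify both collectors still implement testDataCollector:

var _ testDataCollector = &throughputCollector{}
var _ testDataCollector = &metricsCollector{}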