sched: fix a bug where metrics of init Pods or already-collected Pods are re-collected

Wei Huang 2021-02-25 13:33:27 -08:00
parent 23d4b3b4f0
commit b93b4a2c96
4 changed files with 11 additions and 61 deletions

View File

@@ -281,21 +281,6 @@ func (hist *Histogram) Average() float64 {
 	return hist.GetSampleSum() / float64(hist.GetSampleCount())
 }
 
-// Clear clears all fields of the wrapped histogram
-func (hist *Histogram) Clear() {
-	if hist.SampleCount != nil {
-		*hist.SampleCount = 0
-	}
-	if hist.SampleSum != nil {
-		*hist.SampleSum = 0
-	}
-	for _, b := range hist.Bucket {
-		if b.CumulativeCount != nil {
-			*b.CumulativeCount = 0
-		}
-	}
-}
-
 // Validate makes sure the wrapped histogram has all necessary fields set and with valid values.
 func (hist *Histogram) Validate() error {
 	if hist.SampleCount == nil || hist.GetSampleCount() == 0 {
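For context, Average() above is plain sample arithmetic over the embedded Prometheus DTO. A minimal runnable sketch of the same computation, assuming the wrapper embeds *dto.Histogram from github.com/prometheus/client_model/go (the average helper below is illustrative, not the library's API):

```go
package main

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
)

// average mirrors what Average() computes over the raw DTO; this helper
// is illustrative, not part of the testutil package.
func average(h *dto.Histogram) float64 {
	if h.GetSampleCount() == 0 {
		return 0 // avoid division by zero on an empty histogram
	}
	return h.GetSampleSum() / float64(h.GetSampleCount())
}

func main() {
	count, sum := uint64(4), 10.0
	h := &dto.Histogram{SampleCount: &count, SampleSum: &sum}
	fmt.Println(average(h)) // 2.5
}
```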

View File

@@ -121,48 +121,6 @@ func TestHistogramQuantile(t *testing.T) {
 	}
 }
 
-func TestHistogramClear(t *testing.T) {
-	samples := []float64{0.5, 0.5, 0.5, 0.5, 1.5, 1.5, 1.5, 1.5, 3, 3, 3, 3, 6, 6, 6, 6}
-	bounds := []float64{1, 2, 4, 8}
-	h := samples2Histogram(samples, bounds)
-
-	if *h.SampleCount == 0 {
-		t.Errorf("Expected histogram .SampleCount to be non-zero")
-	}
-	if *h.SampleSum == 0 {
-		t.Errorf("Expected histogram .SampleSum to be non-zero")
-	}
-
-	for _, b := range h.Bucket {
-		if b.CumulativeCount != nil {
-			if *b.CumulativeCount == 0 {
-				t.Errorf("Expected histogram bucket to have non-zero cumulative count")
-			}
-		}
-	}
-
-	h.Clear()
-
-	if *h.SampleCount != 0 {
-		t.Errorf("Expected histogram .SampleCount to be zero, have %v instead", *h.SampleCount)
-	}
-
-	if *h.SampleSum != 0 {
-		t.Errorf("Expected histogram .SampleSum to be zero, have %v instead", *h.SampleSum)
-	}
-
-	for _, b := range h.Bucket {
-		if b.CumulativeCount != nil {
-			if *b.CumulativeCount != 0 {
-				t.Errorf("Expected histogram bucket to have zero cumulative count, have %v instead", *b.CumulativeCount)
-			}
-		}
-		if b.UpperBound != nil {
-			*b.UpperBound = 0
-		}
-	}
-}
-
 func TestHistogramValidate(t *testing.T) {
 	tests := []struct {
 		name string
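The deleted test built its fixture with samples2Histogram. A hedged sketch of what such a helper could look like, assuming cumulative bucket counts keyed by upper bound (the real helper in this test file may differ):

```go
package sketch

import dto "github.com/prometheus/client_model/go"

// samplesToHistogram is an illustrative stand-in for the test's
// samples2Histogram helper: each bucket's CumulativeCount counts every
// sample less than or equal to that bucket's upper bound.
func samplesToHistogram(samples, bounds []float64) *dto.Histogram {
	count := uint64(len(samples))
	var sum float64
	buckets := make([]*dto.Bucket, len(bounds))
	for i := range bounds {
		ub, cum := bounds[i], uint64(0)
		buckets[i] = &dto.Bucket{UpperBound: &ub, CumulativeCount: &cum}
	}
	for _, s := range samples {
		sum += s
		for i, ub := range bounds {
			if s <= ub {
				*buckets[i].CumulativeCount++
			}
		}
	}
	return &dto.Histogram{SampleCount: &count, SampleSum: &sum, Bucket: buckets}
}
```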

View File

@@ -42,6 +42,7 @@ import (
 	"k8s.io/client-go/restmapper"
 	"k8s.io/component-base/featuregate"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	"k8s.io/component-base/metrics/legacyregistry"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/test/integration/framework"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -363,6 +364,9 @@ func BenchmarkPerfScheduling(b *testing.B) {
 					defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, feature, flag)()
 				}
 				dataItems.DataItems = append(dataItems.DataItems, runWorkload(b, tc, w)...)
+				// Reset metrics to prevent metrics generated in the current workload
+				// from being carried over to the next workload.
+				legacyregistry.Reset()
 			})
 		}
 	})
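legacyregistry.Reset() wipes every metric registered with the global legacy registry, so each sub-benchmark starts from a clean slate. A minimal sketch of the pattern, where the workload names and runOne are hypothetical stand-ins for the real test cases and runWorkload:

```go
package benchsketch

import (
	"testing"

	"k8s.io/component-base/metrics/legacyregistry"
)

type workload struct{ name string }

// runOne is a hypothetical stand-in for runWorkload: it schedules pods
// and leaves scheduler metrics behind in the global registry.
func runOne(b *testing.B, w workload) {}

func BenchmarkWorkloads(b *testing.B) {
	for _, w := range []workload{{name: "small"}, {name: "large"}} {
		b.Run(w.name, func(b *testing.B) {
			runOne(b, w)
			// Wipe the global registry so this workload's metrics are not
			// re-collected by the next workload's measurements.
			legacyregistry.Reset()
		})
	}
}
```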
@@ -454,6 +458,13 @@ func runWorkload(b *testing.B, tc *testCase, w *workload) []DataItem {
 				mu.Unlock()
 			}
+			if !concreteOp.SkipWaitToCompletion {
+				// SkipWaitToCompletion=false indicates this step has waited for the Pods
+				// to be scheduled, so we reset the metrics in the global registry;
+				// otherwise metrics gathered in this step would be carried over to the next step.
+				legacyregistry.Reset()
+			}
 		case *churnOp:
 			var namespace string
 			if concreteOp.Namespace != nil {
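Without the reset, a step's collection window also contains every sample from earlier steps, which skews the reported averages and quantiles. A toy arithmetic illustration (all numbers invented):

```go
package main

import "fmt"

func main() {
	// Step 1 schedules 100 pods averaging 1.0s; step 2 averages 4.0s.
	step1Sum, step1Count := 100.0, 100.0
	step2Sum, step2Count := 400.0, 100.0

	// Without legacyregistry.Reset(), step 2's histogram still contains
	// step 1's samples, so its reported average is a blend of both.
	blended := (step1Sum + step2Sum) / (step1Count + step2Count)
	fmt.Println(blended) // 2.5 -- reported for step 2 instead of 4.0
}
```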

View File

@@ -205,10 +205,6 @@ func collectHistogram(metric string, labels map[string]string) *DataItem {
 	q99 := hist.Quantile(0.95)
 	avg := hist.Average()
 
-	// clear the metrics so that next test always starts with empty prometheus
-	// metrics (since the metrics are shared among all tests run inside the same binary)
-	hist.Clear()
-
 	msFactor := float64(time.Second) / float64(time.Millisecond)
 
 	// Copy labels and add "Metric" label for this metric.
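With Clear() gone, collectHistogram only reads from the registry; cleanup now happens via legacyregistry.Reset() at the step and workload boundaries above. A hedged sketch of that read path, assuming testutil.GetHistogramFromGatherer has roughly this shape; the real collectHistogram also matches labels and validates the histogram before use:

```go
package sketch

import (
	"fmt"
	"time"

	"k8s.io/component-base/metrics/legacyregistry"
	"k8s.io/component-base/metrics/testutil"
)

// collectQuantiles gathers one histogram metric from the global registry
// and converts its quantiles and average from seconds to milliseconds,
// mirroring the msFactor conversion above. Illustrative only.
func collectQuantiles(metric string) (q50, q90, q99, avg float64, err error) {
	hist, err := testutil.GetHistogramFromGatherer(legacyregistry.DefaultGatherer, metric)
	if err != nil {
		return 0, 0, 0, 0, fmt.Errorf("gathering %q: %w", metric, err)
	}
	ms := float64(time.Second) / float64(time.Millisecond)
	return hist.Quantile(0.50) * ms, hist.Quantile(0.90) * ms,
		hist.Quantile(0.99) * ms, hist.Average() * ms, nil
}
```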