mirror of https://github.com/k3s-io/kubernetes.git
Remove prometheus dependencies for scheduler

parent 76ead99a56
commit c959e23250
@@ -432,7 +432,6 @@ package_group(
     name = "vendor_githubcom_prometheus_CONSUMERS",
     packages = [
         "//cluster/images/etcd-version-monitor",
-        "//pkg/scheduler/framework/runtime",
         "//pkg/volume/util/operationexecutor",
         "//staging/src/k8s.io/apiserver/pkg/admission/metrics",
         "//staging/src/k8s.io/component-base/metrics/...",
go.mod
@@ -80,7 +80,6 @@ require (
     github.com/opencontainers/selinux v1.6.0
     github.com/pkg/errors v0.9.1
     github.com/pmezard/go-difflib v1.0.0
-    github.com/prometheus/client_golang v1.7.1
     github.com/prometheus/client_model v0.2.0
     github.com/prometheus/common v0.10.0
     github.com/quobyte/api v0.1.8
@@ -48,9 +48,8 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
+        "//staging/src/k8s.io/component-base/metrics/testutil:go_default_library",
         "//vendor/github.com/google/go-cmp/cmp:go_default_library",
-        "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
-        "//vendor/github.com/prometheus/client_model/go:go_default_library",
     ],
 )
@@ -26,12 +26,11 @@ import (
     "time"

     "github.com/google/go-cmp/cmp"
-    "github.com/prometheus/client_golang/prometheus"
-    dto "github.com/prometheus/client_model/go"
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/types"
+    "k8s.io/component-base/metrics/testutil"
     "k8s.io/kubernetes/pkg/scheduler/apis/config"
     "k8s.io/kubernetes/pkg/scheduler/framework"
     internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
@@ -2227,94 +2226,64 @@ func injectNormalizeRes(inj injectedResult, scores framework.NodeScoreList) *fra

 func collectAndComparePluginMetrics(t *testing.T, wantExtensionPoint, wantPlugin string, wantStatus framework.Code) {
     t.Helper()
-    m := collectHistogramMetric(metrics.PluginExecutionDuration)
-    if len(m.Label) != 3 {
-        t.Fatalf("Unexpected number of label pairs, got: %v, want: 2", len(m.Label))
-    }
-
-    if *m.Label[0].Value != wantExtensionPoint {
-        t.Errorf("Unexpected extension point label, got: %q, want %q", *m.Label[0].Value, wantExtensionPoint)
-    }
-
-    if *m.Label[1].Value != wantPlugin {
-        t.Errorf("Unexpected plugin label, got: %q, want %q", *m.Label[1].Value, wantPlugin)
-    }
-
-    if *m.Label[2].Value != wantStatus.String() {
-        t.Errorf("Unexpected status code label, got: %q, want %q", *m.Label[2].Value, wantStatus)
-    }
-
-    if *m.Histogram.SampleCount == 0 {
+    m := metrics.PluginExecutionDuration.WithLabelValues(wantPlugin, wantExtensionPoint, wantStatus.String())
+
+    count, err := testutil.GetHistogramMetricCount(m)
+    if err != nil {
+        t.Errorf("Failed to get %s sampleCount, err: %v", metrics.PluginExecutionDuration.Name, err)
+    }
+    if count == 0 {
         t.Error("Expect at least 1 sample")
     }

-    if *m.Histogram.SampleSum <= 0 {
-        t.Errorf("Expect latency to be greater than 0, got: %v", *m.Histogram.SampleSum)
+    value, err := testutil.GetHistogramMetricValue(m)
+    if err != nil {
+        t.Errorf("Failed to get %s value, err: %v", metrics.PluginExecutionDuration.Name, err)
+    }
+    if value <= 0 {
+        t.Errorf("Expect latency to be greater than 0, got: %v", value)
     }
 }

 func collectAndCompareFrameworkMetrics(t *testing.T, wantExtensionPoint string, wantStatus framework.Code) {
     t.Helper()
-    m := collectHistogramMetric(metrics.FrameworkExtensionPointDuration)
-
-    gotLabels := make(map[string]string, len(m.Label))
-    for _, p := range m.Label {
-        gotLabels[p.GetName()] = p.GetValue()
-    }
-    wantLabels := map[string]string{
-        "extension_point": wantExtensionPoint,
-        "status":          wantStatus.String(),
-        "profile":         testProfileName,
-    }
-    if diff := cmp.Diff(wantLabels, gotLabels); diff != "" {
-        t.Errorf("unexpected labels (-want,+got):\n%s", diff)
-    }
-
-    if *m.Histogram.SampleCount != 1 {
-        t.Errorf("Expect 1 sample, got: %v", *m.Histogram.SampleCount)
-    }
-
-    if *m.Histogram.SampleSum <= 0 {
-        t.Errorf("Expect latency to be greater than 0, got: %v", *m.Histogram.SampleSum)
+    m := metrics.FrameworkExtensionPointDuration.WithLabelValues(wantExtensionPoint, wantStatus.String(), testProfileName)
+
+    count, err := testutil.GetHistogramMetricCount(m)
+    if err != nil {
+        t.Errorf("Failed to get %s sampleCount, err: %v", metrics.FrameworkExtensionPointDuration.Name, err)
+    }
+    if count != 1 {
+        t.Errorf("Expect 1 sample, got: %v", count)
+    }
+
+    value, err := testutil.GetHistogramMetricValue(m)
+    if err != nil {
+        t.Errorf("Failed to get %s value, err: %v", metrics.FrameworkExtensionPointDuration.Name, err)
+    }
+    if value <= 0 {
+        t.Errorf("Expect latency to be greater than 0, got: %v", value)
     }
 }

 func collectAndComparePermitWaitDuration(t *testing.T, wantRes string) {
-    m := collectHistogramMetric(metrics.PermitWaitDuration)
+    m := metrics.PermitWaitDuration.WithLabelValues(wantRes)
+    count, err := testutil.GetHistogramMetricCount(m)
+    if err != nil {
+        t.Errorf("Failed to get %s sampleCount, err: %v", metrics.PermitWaitDuration.Name, err)
+    }
     if wantRes == "" {
-        if m != nil {
-            t.Errorf("PermitWaitDuration shouldn't be recorded but got %+v", m)
+        if count != 0 {
+            t.Errorf("Expect 0 sample, got: %v", count)
         }
+        return
     }
-    if wantRes != "" {
-        if len(m.Label) != 1 {
-            t.Fatalf("Unexpected number of label pairs, got: %v, want: 1", len(m.Label))
-        } else {
-            if *m.Label[0].Value != wantRes {
-                t.Errorf("Unexpected result label, got: %q, want %q", *m.Label[0].Value, wantRes)
-            }
-        }
-
-        if *m.Histogram.SampleCount != 1 {
-            t.Errorf("Expect 1 sample, got: %v", *m.Histogram.SampleCount)
-        }
-
-        if *m.Histogram.SampleSum <= 0 {
-            t.Errorf("Expect latency to be greater than 0, got: %v", *m.Histogram.SampleSum)
-        }
+    if count != 1 {
+        t.Errorf("Expect 1 sample, got: %v", count)
+    }
+
+    value, err := testutil.GetHistogramMetricValue(m)
+    if err != nil {
+        t.Errorf("Failed to get %s value, err: %v", metrics.PermitWaitDuration.Name, err)
+    }
+    if value <= 0 {
+        t.Errorf("Expect latency to be greater than 0, got: %v", value)
     }
 }
-
-func collectHistogramMetric(metric prometheus.Collector) *dto.Metric {
-    ch := make(chan prometheus.Metric, 100)
-    metric.Collect(ch)
-    select {
-    case got := <-ch:
-        m := &dto.Metric{}
-        got.Write(m)
-        return m
-    default:
-        return nil
-    }
-}
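The removed collectHistogramMetric helper pulled a raw dto.Metric off a prometheus.Collector channel and picked its label pairs apart by index. The replacement asks the metric vector for one exact label combination and lets component-base's testutil decode it, so the test file needs no prometheus imports at all. Below is a minimal runnable sketch of that pattern; the metric name, help text, buckets, labels, and observed value are illustrative stand-ins, not the scheduler's real definitions:

package main

import (
	"fmt"

	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
	"k8s.io/component-base/metrics/testutil"
)

func main() {
	// An illustrative histogram vector in the style of the scheduler's
	// PluginExecutionDuration metric (name and buckets are made up here).
	dur := metrics.NewHistogramVec(
		&metrics.HistogramOpts{
			Name:    "demo_plugin_execution_duration_seconds",
			Help:    "Demo duration of running a plugin at an extension point.",
			Buckets: metrics.ExponentialBuckets(0.00001, 1.5, 20),
		},
		[]string{"plugin", "extension_point", "status"},
	)
	// Component-base metrics are created lazily; registering instantiates them.
	legacyregistry.MustRegister(dur)

	// Record one observation, as the framework would after a plugin runs.
	dur.WithLabelValues("DemoPlugin", "Filter", "Success").Observe(0.000025)

	// Read it back through the label-scoped child, prometheus-free.
	m := dur.WithLabelValues("DemoPlugin", "Filter", "Success")
	count, err := testutil.GetHistogramMetricCount(m)
	if err != nil {
		panic(err)
	}
	sum, err := testutil.GetHistogramMetricValue(m)
	if err != nil {
		panic(err)
	}
	fmt.Printf("samples=%d sum=%gs\n", count, sum) // samples=1 sum=2.5e-05s
}

Asking WithLabelValues for a combination that was never observed yields a child with a zero sample count, which is what lets the new collectAndComparePermitWaitDuration above verify the "not recorded" case without the old nil check.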
@@ -318,7 +318,7 @@ func (hist *Histogram) Validate() error {
     return nil
 }

-// GetGaugeMetricValue extract metric value from GaugeMetric
+// GetGaugeMetricValue extracts metric value from GaugeMetric
 func GetGaugeMetricValue(m metrics.GaugeMetric) (float64, error) {
     metricProto := &dto.Metric{}
     if err := m.Write(metricProto); err != nil {
@@ -327,7 +327,7 @@ func GetGaugeMetricValue(m metrics.GaugeMetric) (float64, error) {
     return metricProto.Gauge.GetValue(), nil
 }

-// GetCounterMetricValue extract metric value from CounterMetric
+// GetCounterMetricValue extracts metric value from CounterMetric
 func GetCounterMetricValue(m metrics.CounterMetric) (float64, error) {
     metricProto := &dto.Metric{}
     if err := m.(metrics.Metric).Write(metricProto); err != nil {
@@ -336,7 +336,7 @@ func GetCounterMetricValue(m metrics.CounterMetric) (float64, error) {
     return metricProto.Counter.GetValue(), nil
 }

-// GetHistogramMetricValue extract sum of all samples from ObserverMetric
+// GetHistogramMetricValue extracts sum of all samples from ObserverMetric
 func GetHistogramMetricValue(m metrics.ObserverMetric) (float64, error) {
     metricProto := &dto.Metric{}
     if err := m.(metrics.Metric).Write(metricProto); err != nil {
@@ -345,6 +345,15 @@ func GetHistogramMetricValue(m metrics.ObserverMetric) (float64, error) {
     return metricProto.Histogram.GetSampleSum(), nil
 }

+// GetHistogramMetricCount extracts count of all samples from ObserverMetric
+func GetHistogramMetricCount(m metrics.ObserverMetric) (uint64, error) {
+    metricProto := &dto.Metric{}
+    if err := m.(metrics.Metric).Write(metricProto); err != nil {
+        return 0, fmt.Errorf("error writing m: %v", err)
+    }
+    return metricProto.Histogram.GetSampleCount(), nil
+}
+
 // LabelsMatch returns true if metric has all expected labels otherwise false
 func LabelsMatch(metric *dto.Metric, labelFilter map[string]string) bool {
     metricLabels := map[string]string{}
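The new GetHistogramMetricCount mirrors GetHistogramMetricValue one protobuf field over: both serialize the observer into a client_model dto.Metric and read the histogram's fields there, where SampleCount is a uint64 counter and SampleSum a float64, hence the differing return types. A combined sketch of that shared step; histogramCountAndSum is my illustrative name, not part of testutil:

package sketch

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"

	"k8s.io/component-base/metrics"
)

// histogramCountAndSum folds GetHistogramMetricCount and
// GetHistogramMetricValue into one call: write the metric into the
// client_model protobuf and read both histogram fields from it.
func histogramCountAndSum(m metrics.Metric) (uint64, float64, error) {
	metricProto := &dto.Metric{}
	if err := m.Write(metricProto); err != nil {
		return 0, 0, fmt.Errorf("error writing m: %v", err)
	}
	return metricProto.Histogram.GetSampleCount(), metricProto.Histogram.GetSampleSum(), nil
}

The testutil signatures take metrics.ObserverMetric and assert to metrics.Metric internally because the observer interface only exposes Observe; the Write method comes from the underlying metric type.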
vendor/modules.txt (vendored)
@@ -954,7 +954,6 @@ github.com/pquerna/cachecontrol
 # github.com/pquerna/cachecontrol => github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021
 github.com/pquerna/cachecontrol/cacheobject
-# github.com/prometheus/client_golang v1.7.1 => github.com/prometheus/client_golang v1.7.1
-## explicit
+# github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.7.1
 github.com/prometheus/client_golang/prometheus
 github.com/prometheus/client_golang/prometheus/internal