Fix metrics reporting in kube-apiserver

This commit is contained in:
wojtekt 2020-10-14 17:34:33 +02:00
parent b2de4a6159
commit 3d2a80643a
4 changed files with 136 additions and 0 deletions

View File

@ -436,6 +436,7 @@ package_group(
"//staging/src/k8s.io/component-base/metrics/...",
"//test/e2e_node",
"//test/integration/apiserver/flowcontrol",
"//test/integration/metrics",
"//vendor/...",
],
)

View File

@ -594,6 +594,9 @@ func (a *APIInstaller) registerResourceHandlers(path string, storage rest.Storag
requestScope = "resource"
operationSuffix = operationSuffix + "WithPath"
}
if strings.Index(action.Path, "/{name}") != -1 || action.Verb == "POST" {
requestScope = "resource"
}
if action.AllNamespaces {
requestScope = "cluster"
operationSuffix = operationSuffix + "ForAllNamespaces"

View File

@ -35,11 +35,13 @@ go_test(
embed = [":go_default_library"],
tags = ["integration"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/component-base/metrics/testutil:go_default_library",
"//test/integration/framework:go_default_library",
"//vendor/github.com/prometheus/common/model:go_default_library",
],
)

View File

@ -25,6 +25,9 @@ import (
"runtime"
"testing"
"github.com/prometheus/common/model"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
@ -111,3 +114,130 @@ func TestApiserverMetrics(t *testing.T) {
"etcd_request_duration_seconds_sum",
})
}
// TestApiserverMetricsLabels verifies that apiserver_request_total is
// exported with the expected label sets (group, version, resource,
// subresource, scope, verb) after exercising a mix of namespace-scoped
// and cluster-scoped API calls against a test apiserver.
func TestApiserverMetricsLabels(t *testing.T) {
	_, s, closeFn := framework.RunAMaster(nil)
	defer closeFn()

	client, err := clientset.NewForConfig(&restclient.Config{Host: s.URL, QPS: -1})
	if err != nil {
		t.Fatalf("Error in create clientset: %v", err)
	}

	var expected []model.Metric

	// labelsFor builds the label set we expect apiserver_request_total to
	// carry for a request with the given attributes.
	labelsFor := func(group, version, resource, subresource, scope, verb string) model.Metric {
		return model.Metric{
			"group":       model.LabelValue(group),
			"version":     model.LabelValue(version),
			"resource":    model.LabelValue(resource),
			"subresource": model.LabelValue(subresource),
			"scope":       model.LabelValue(scope),
			"verb":        model.LabelValue(verb),
		}
	}
	// mustSucceed fails the test on any API-call error; the first return
	// value of the wrapped call is intentionally discarded.
	mustSucceed := func(_ interface{}, err error) {
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
	}
	expect := func(m model.Metric) {
		expected = append(expected, m)
	}

	// Exercise a namespace-scoped resource (pods) so the corresponding
	// per-verb samples are emitted.
	pods := client.CoreV1().Pods(metav1.NamespaceDefault)
	newPod := func(labelValue string) *v1.Pod {
		return &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name:   "foo",
				Labels: map[string]string{"foo": labelValue},
			},
			Spec: v1.PodSpec{
				Containers: []v1.Container{
					{Name: "container", Image: "image"},
				},
			},
		}
	}

	mustSucceed(pods.Create(context.TODO(), newPod("foo"), metav1.CreateOptions{}))
	expect(labelsFor("", "v1", "pods", "", "resource", "POST"))
	mustSucceed(pods.Update(context.TODO(), newPod("bar"), metav1.UpdateOptions{}))
	expect(labelsFor("", "v1", "pods", "", "resource", "PUT"))
	mustSucceed(pods.UpdateStatus(context.TODO(), newPod("bar"), metav1.UpdateOptions{}))
	expect(labelsFor("", "v1", "pods", "status", "resource", "PUT"))
	mustSucceed(pods.Get(context.TODO(), "foo", metav1.GetOptions{}))
	expect(labelsFor("", "v1", "pods", "", "resource", "GET"))
	mustSucceed(pods.List(context.TODO(), metav1.ListOptions{}))
	expect(labelsFor("", "v1", "pods", "", "namespace", "LIST"))
	mustSucceed(nil, pods.Delete(context.TODO(), "foo", metav1.DeleteOptions{}))
	expect(labelsFor("", "v1", "pods", "", "resource", "DELETE"))

	// A LIST of a namespace-scoped resource across all namespaces is
	// reported with cluster scope.
	mustSucceed(client.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{}))
	expect(labelsFor("", "v1", "pods", "", "cluster", "LIST"))

	// Exercise a cluster-scoped resource (namespaces).
	namespaces := client.CoreV1().Namespaces()
	newNamespace := func(labelValue string) *v1.Namespace {
		return &v1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				Name:   "foo",
				Labels: map[string]string{"foo": labelValue},
			},
		}
	}

	mustSucceed(namespaces.Create(context.TODO(), newNamespace("foo"), metav1.CreateOptions{}))
	expect(labelsFor("", "v1", "namespaces", "", "resource", "POST"))
	mustSucceed(namespaces.Update(context.TODO(), newNamespace("bar"), metav1.UpdateOptions{}))
	expect(labelsFor("", "v1", "namespaces", "", "resource", "PUT"))
	mustSucceed(namespaces.UpdateStatus(context.TODO(), newNamespace("bar"), metav1.UpdateOptions{}))
	expect(labelsFor("", "v1", "namespaces", "status", "resource", "PUT"))
	mustSucceed(namespaces.Get(context.TODO(), "foo", metav1.GetOptions{}))
	expect(labelsFor("", "v1", "namespaces", "", "resource", "GET"))
	mustSucceed(namespaces.List(context.TODO(), metav1.ListOptions{}))
	expect(labelsFor("", "v1", "namespaces", "", "cluster", "LIST"))
	mustSucceed(nil, namespaces.Delete(context.TODO(), "foo", metav1.DeleteOptions{}))
	expect(labelsFor("", "v1", "namespaces", "", "resource", "DELETE"))

	// Scrape the metrics endpoint and check that every expected label set
	// appears on at least one apiserver_request_total sample.
	metrics, err := scrapeMetrics(s)
	if err != nil {
		t.Fatal(err)
	}
	samples, ok := metrics["apiserver_request_total"]
	if !ok {
		t.Fatalf("apiserver_request_total metric not exposed")
	}

	// matches reports whether sample carries every label/value pair in want
	// (the sample may carry additional labels, e.g. "code").
	matches := func(sample, want model.Metric) bool {
		for name, value := range want {
			if sample[name] != value {
				return false
			}
		}
		return true
	}

	for _, want := range expected {
		found := false
		for _, sample := range samples {
			if matches(sample.Metric, want) {
				found = true
				break
			}
		}
		if !found {
			t.Errorf("No sample found for %#v", want)
		}
	}
}