move MergeBuckets into component-base so we can properly support it for static-analysis

Change-Id: I137413b5b0328487a7bcd70eca79a8b0ae6dea51
Han Kang 2022-10-18 10:52:33 -07:00
parent 507cf76570
commit 287738c125
5 changed files with 136 additions and 17 deletions

View File

@@ -0,0 +1,43 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
	"github.com/prometheus/client_golang/prometheus"
)

// DefBuckets is a wrapper for prometheus.DefBuckets
var DefBuckets = prometheus.DefBuckets

// LinearBuckets is a wrapper for prometheus.LinearBuckets.
func LinearBuckets(start, width float64, count int) []float64 {
	return prometheus.LinearBuckets(start, width, count)
}

// ExponentialBuckets is a wrapper for prometheus.ExponentialBuckets.
func ExponentialBuckets(start, factor float64, count int) []float64 {
	return prometheus.ExponentialBuckets(start, factor, count)
}
// MergeBuckets merges the given bucket slices into a single slice.
func MergeBuckets(buckets ...[]float64) []float64 {
	result := make([]float64, 0)
	for _, s := range buckets {
		result = append(result, s...)
	}
	return result
}
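A minimal usage sketch (not part of this commit) of the new helper, assuming it is imported from k8s.io/component-base/metrics as above: MergeBuckets only concatenates its arguments, so callers should pass slices that are already in increasing order, as Prometheus expects for histogram buckets.

package main

import (
	"fmt"

	"k8s.io/component-base/metrics"
)

func main() {
	// Combine a generated range with a hand-picked list of boundaries.
	buckets := metrics.MergeBuckets(
		metrics.LinearBuckets(0.25, 0.25, 2), // 0.25, 0.5
		[]float64{1, 5, 10},
	)
	fmt.Println(buckets) // expected output: [0.25 0.5 1 5 10]
}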

View File

@@ -23,19 +23,6 @@ import (
	"github.com/prometheus/client_golang/prometheus"
)

// DefBuckets is a wrapper for prometheus.DefBuckets
var DefBuckets = prometheus.DefBuckets

// LinearBuckets is a wrapper for prometheus.LinearBuckets.
func LinearBuckets(start, width float64, count int) []float64 {
	return prometheus.LinearBuckets(start, width, count)
}

// ExponentialBuckets is a wrapper for prometheus.ExponentialBuckets.
func ExponentialBuckets(start, factor float64, count int) []float64 {
	return prometheus.ExponentialBuckets(start, factor, count)
}

// Histogram is our internal representation for our wrapping struct around prometheus
// histograms. Histogram implements both kubeCollector and ObserverMetric
type Histogram struct {

View File

@@ -480,15 +480,58 @@ func (c *metricDecoder) decodeBucketFunctionCall(v *ast.CallExpr) ([]float64, er
	if functionImport.String() != c.kubeMetricsImportName {
		return nil, newDecodeErrorf(v, errBuckets), true
	}
	firstArg, secondArg, thirdArg, err := decodeBucketArguments(v)
	if err != nil {
		return nil, err, true
	}
	switch functionName {
	case "LinearBuckets":
		firstArg, secondArg, thirdArg, err := decodeBucketArguments(v)
		if err != nil {
			return nil, err, true
		}
		return metrics.LinearBuckets(firstArg, secondArg, thirdArg), nil, true
	case "ExponentialBuckets":
		firstArg, secondArg, thirdArg, err := decodeBucketArguments(v)
		if err != nil {
			return nil, err, true
		}
		return metrics.ExponentialBuckets(firstArg, secondArg, thirdArg), nil, true
	case "MergeBuckets":
		merged := []float64{}
		for _, arg := range v.Args {
			cl, ok := arg.(*ast.CompositeLit)
			if ok {
				fs, err := decodeListOfFloats(cl, cl.Elts)
				if err != nil {
					return nil, err, true
				}
				merged = append(merged, fs...)
			} else {
				v2, ok := arg.(*ast.CallExpr)
				if !ok {
					return nil, newDecodeErrorf(v, errBuckets), true
				}
				se, ok = v2.Fun.(*ast.SelectorExpr)
				if ok {
					functionName := se.Sel.String()
					functionImport, ok := se.X.(*ast.Ident)
					if !ok {
						return nil, newDecodeErrorf(v, errBuckets), true
					}
					if functionImport.String() != c.kubeMetricsImportName {
						return nil, newDecodeErrorf(v, errBuckets), true
					}
					firstArg, secondArg, thirdArg, err := decodeBucketArguments(v2)
					if err != nil {
						return nil, err, true
					}
					switch functionName {
					case "LinearBuckets":
						merged = append(merged, metrics.LinearBuckets(firstArg, secondArg, thirdArg)...)
					case "ExponentialBuckets":
						merged = append(merged, metrics.ExponentialBuckets(firstArg, secondArg, thirdArg)...)
					}
				}
			}
		}
		return merged, nil, true
	}
	return nil, nil, false
}
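For illustration, a standalone sketch (not part of this commit) of the AST shapes the new MergeBuckets case distinguishes: every argument of the call is either an *ast.CompositeLit (a literal bucket list) or a nested *ast.CallExpr (a LinearBuckets/ExponentialBuckets call), which is exactly what the loop above handles.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
)

func main() {
	// Parse the kind of Buckets expression the static-analysis decoder walks.
	expr, err := parser.ParseExpr(`metrics.MergeBuckets(metrics.LinearBuckets(0.25, 0.25, 2), []float64{1, 5, 10})`)
	if err != nil {
		panic(err)
	}
	call := expr.(*ast.CallExpr)
	for _, arg := range call.Args {
		switch a := arg.(type) {
		case *ast.CallExpr:
			// Nested helper call; the decoder re-resolves its name and arguments.
			fmt.Println("call:", a.Fun.(*ast.SelectorExpr).Sel.Name)
		case *ast.CompositeLit:
			// Literal bucket list; the decoder converts its elements to floats.
			fmt.Println("literal with", len(a.Elts), "elements")
		}
	}
}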

View File

@@ -488,6 +488,21 @@ var (
			StabilityLevel: metrics.BETA,
		},
	)
	NetworkProgrammingLatency2 = metrics.NewHistogram(
		&metrics.HistogramOpts{
			Subsystem: "kube_proxy",
			Name: "network_programming_duration_seconds2",
			Help: "In Cluster Network Programming Latency in seconds",
			Buckets: metrics.MergeBuckets(
				metrics.LinearBuckets(0.25, 0.25, 2), // 0.25s, 0.50s
				[]float64{1, 5, 10, 59},              // 1s, 5s, 10s, 59s
				metrics.LinearBuckets(60, 5, 12),     // 60s, 65s, 70s, ... 115s
				metrics.LinearBuckets(120, 30, 7),    // 2min, 2.5min, 3min, ..., 5min
			),
			StabilityLevel: metrics.BETA,
		},
	)
)
var registerMetrics sync.Once
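As a sanity check (not part of this commit), expanding the same bucket expression by hand gives the boundaries the static-analysis pass should emit for this metric; they should line up with the generated stable-metrics testdata in the next file.

package main

import (
	"fmt"

	"k8s.io/component-base/metrics"
)

func main() {
	// Same Buckets expression as network_programming_duration_seconds2 above.
	buckets := metrics.MergeBuckets(
		metrics.LinearBuckets(0.25, 0.25, 2), // 0.25, 0.5
		[]float64{1, 5, 10, 59},
		metrics.LinearBuckets(60, 5, 12),  // 60, 65, ..., 115
		metrics.LinearBuckets(120, 30, 7), // 120, 150, ..., 300
	)
	fmt.Println(len(buckets), buckets) // expected: 25 boundaries, 0.25 through 300
}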

View File

@@ -84,6 +84,37 @@
  - 240
  - 270
  - 300
- name: network_programming_duration_seconds2
  subsystem: kube_proxy
  help: In Cluster Network Programming Latency in seconds
  type: Histogram
  stabilityLevel: BETA
  buckets:
  - 0.25
  - 0.5
  - 1
  - 5
  - 10
  - 59
  - 60
  - 65
  - 70
  - 75
  - 80
  - 85
  - 90
  - 95
  - 100
  - 105
  - 110
  - 115
  - 120
  - 150
  - 180
  - 210
  - 240
  - 270
  - 300
- name: certificate_manager_client_ttl_seconds
  subsystem: kubelet
  help: Gauge of the TTL (time-to-live) of the Kubelet's client certificate. The value