From 4c6e7247878477a1f2efc26df7f141258010374f Mon Sep 17 00:00:00 2001
From: Han Kang
Date: Wed, 2 Oct 2019 15:04:29 -0700
Subject: [PATCH] Fix double counting issue for request metrics on timeout.

Currently we record request metrics during the normal request flow, and we
also manually invoke `Record` in the timeout handler to record timeouts.
This means we effectively double count whenever a request times out. This
PR renames the `Record` function to `RecordRequestError` to more accurately
reflect the intended side effect of the function call.

Change-Id: Ie37fd0c1e501bd525640a434433d364a5fd6dde2
---
 .../pkg/endpoints/metrics/metrics.go          | 23 +++++++++++++------
 .../pkg/server/filters/maxinflight.go         |  2 +-
 .../apiserver/pkg/server/filters/timeout.go   |  2 +-
 .../pkg/apiserver/handler_proxy.go            |  2 +-
 4 files changed, 19 insertions(+), 10 deletions(-)

diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go
index 30ce24a4bd8..e092e3e55f3 100644
--- a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go
+++ b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go
@@ -178,7 +178,7 @@ var (
 		},
 		[]string{"group", "version", "kind"},
 	)
-	// Because of volatality of the base metric this is pre-aggregated one. Instead of reporing current usage all the time
+	// Because of volatility of the base metric this is pre-aggregated one. Instead of reporting current usage all the time
 	// it reports maximal usage during the last second.
 	currentInflightRequests = compbasemetrics.NewGaugeVec(
 		&compbasemetrics.GaugeOpts{
@@ -188,6 +188,15 @@ var (
 		},
 		[]string{"requestKind"},
 	)
+
+	requestErrorTotal = compbasemetrics.NewCounterVec(
+		&compbasemetrics.CounterOpts{
+			Name:           "apiserver_request_error_total",
+			Help:           "Number of requests which have resulted in an apiserver response error",
+			StabilityLevel: compbasemetrics.ALPHA,
+		},
+		[]string{"verb", "group", "version", "resource", "subresource", "scope", "component", "code"},
+	)
 	kubectlExeRegexp = regexp.MustCompile(`^.*((?i:kubectl\.exe))`)
 
 	metrics = []resettableCollector{
@@ -204,6 +213,7 @@ var (
 		WatchEvents,
 		WatchEventsSizes,
 		currentInflightRequests,
+		requestErrorTotal,
 	}
 )
 
@@ -237,10 +247,9 @@ func UpdateInflightRequestMetrics(nonmutating, mutating int) {
 	currentInflightRequests.WithLabelValues(MutatingKind).Set(float64(mutating))
 }
 
-// Record records a single request to the standard metrics endpoints. For use by handlers that perform their own
-// processing. All API paths should use InstrumentRouteFunc implicitly. Use this instead of MonitorRequest if
-// you already have a RequestInfo object.
-func Record(req *http.Request, requestInfo *request.RequestInfo, component, contentType string, code int, responseSizeInBytes int, elapsed time.Duration) {
+// RecordRequestError records the occurrence of a request error during apiserver handling (e.g. timeouts,
+// maxinflight throttling, proxyHandler errors).
+func RecordRequestError(req *http.Request, requestInfo *request.RequestInfo, component string, code int) {
 	if requestInfo == nil {
 		requestInfo = &request.RequestInfo{Verb: req.Method, Path: req.URL.Path}
 	}
@@ -252,9 +261,9 @@ func Record(req *http.Request, requestInfo *request.RequestInfo, component, cont
 	// However, we need to tweak it e.g. to differentiate GET from LIST.
 	verb := canonicalVerb(strings.ToUpper(req.Method), scope)
 	if requestInfo.IsResourceRequest {
-		MonitorRequest(req, verb, requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, contentType, code, responseSizeInBytes, elapsed)
+		requestErrorTotal.WithLabelValues(cleanVerb(verb, req), requestInfo.APIGroup, requestInfo.APIVersion, requestInfo.Resource, requestInfo.Subresource, scope, component, codeToString(code)).Inc()
 	} else {
-		MonitorRequest(req, verb, "", "", "", requestInfo.Path, scope, component, contentType, code, responseSizeInBytes, elapsed)
+		requestErrorTotal.WithLabelValues(cleanVerb(verb, req), "", "", "", requestInfo.Path, scope, component, codeToString(code)).Inc()
 	}
 }
 
diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go b/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go
index f7bc691c762..d77676b0a02 100644
--- a/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go
+++ b/staging/src/k8s.io/apiserver/pkg/server/filters/maxinflight.go
@@ -178,7 +178,7 @@ func WithMaxInFlightLimit(
 						}
 					}
 				}
-				metrics.Record(r, requestInfo, metrics.APIServerComponent, "", http.StatusTooManyRequests, 0, 0)
+				metrics.RecordRequestError(r, requestInfo, metrics.APIServerComponent, http.StatusTooManyRequests)
 				tooManyRequests(r, w)
 			}
 		}
diff --git a/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go b/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go
index 23ac0034d8d..251597584c8 100644
--- a/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go
+++ b/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go
@@ -59,7 +59,7 @@ func WithTimeoutForNonLongRunningRequests(handler http.Handler, longRunning apir
 
 		postTimeoutFn := func() {
 			cancel()
-			metrics.Record(req, requestInfo, metrics.APIServerComponent, "", http.StatusGatewayTimeout, 0, 0)
+			metrics.RecordRequestError(req, requestInfo, metrics.APIServerComponent, http.StatusGatewayTimeout)
 		}
 		return req, time.After(timeout), postTimeoutFn, apierrors.NewTimeoutError(fmt.Sprintf("request did not complete within %s", timeout), 0)
 	}
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go
index bfbe4c0955d..c49658cd0e2 100644
--- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go
+++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/handler_proxy.go
@@ -98,7 +98,7 @@ func proxyError(w http.ResponseWriter, req *http.Request, error string, code int
 		return
 	}
 	// TODO: record long-running request differently? The long-running check func does not necessarily match the one of the aggregated apiserver
-	endpointmetrics.Record(req, info, aggregatorComponent, "", code, 0, 0)
+	endpointmetrics.RecordRequestError(req, info, aggregatorComponent, code)
 }
 
 func (r *proxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
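
Usage sketch (illustration only, not part of the diff): how a handler that short-circuits
a request would report the failure with the renamed helper. It assumes only the
RecordRequestError signature and the APIServerComponent constant shown above, plus the
existing RequestInfoFrom helper from k8s.io/apiserver/pkg/endpoints/request. The filter
name withExampleReject and its X-Example-Reject trigger header are hypothetical, invented
purely for this example.

package filters

import (
	"net/http"

	"k8s.io/apiserver/pkg/endpoints/metrics"
	apirequest "k8s.io/apiserver/pkg/endpoints/request"
)

// withExampleReject is a hypothetical filter that rejects any request carrying
// the (invented) X-Example-Reject header. It records the failure through
// RecordRequestError, which only increments apiserver_request_error_total and
// no longer feeds the standard request metrics, so requests that complete
// through the normal flow are not double counted.
func withExampleReject(handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		if req.Header.Get("X-Example-Reject") == "" {
			handler.ServeHTTP(w, req)
			return
		}
		// RequestInfo may be missing from the context; RecordRequestError
		// tolerates a nil requestInfo and falls back to method/path.
		requestInfo, _ := apirequest.RequestInfoFrom(req.Context())
		metrics.RecordRequestError(req, requestInfo, metrics.APIServerComponent, http.StatusServiceUnavailable)
		http.Error(w, "rejected for illustration", http.StatusServiceUnavailable)
	})
}

Requests that complete normally continue to be instrumented by the standard handler-chain
metrics; only error short-circuits like this one touch the new counter, which is the
separation the rename is meant to make explicit.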