Merge pull request #96424 from deads2k/check

add timeout message in addition to metric
Kubernetes Prow Robot 2020-11-12 11:22:06 -08:00 committed by GitHub
commit 9bfffa2430
2 changed files with 20 additions and 8 deletions

staging/src/k8s.io/apiserver/pkg/server/filters/timeout_test.go

@@ -244,8 +244,12 @@ func TestErrConnKilled(t *testing.T) {
 		t.Fatal("expected to receive an error")
 	}
 
+	// we should only get one line for this, not the big stack from before
 	capturedOutput := readStdErr()
-	if len(capturedOutput) > 0 {
+	if strings.Count(capturedOutput, "\n") != 1 {
+		t.Errorf("unexpected output captured actual = %v", capturedOutput)
+	}
+	if !strings.Contains(capturedOutput, `timeout or abort while handling: GET "/"`) {
 		t.Errorf("unexpected output captured actual = %v", capturedOutput)
 	}
 }
@@ -330,8 +334,12 @@ func TestErrConnKilledHTTP2(t *testing.T) {
 		t.Fatal("expected to receive an error")
 	}
 
+	// we should only get one line for this, not the big stack from before
 	capturedOutput := readStdErr()
-	if len(capturedOutput) > 0 {
+	if strings.Count(capturedOutput, "\n") != 1 {
+		t.Errorf("unexpected output captured actual = %v", capturedOutput)
+	}
+	if !strings.Contains(capturedOutput, `timeout or abort while handling: GET "/"`) {
 		t.Errorf("unexpected output captured actual = %v", capturedOutput)
 	}
 }
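
The new assertions read everything the handler wrote to stderr and require exactly one line, and that that line contain the timeout message. readStdErr is a helper defined elsewhere in this test file; the standalone captureStderr below is a minimal sketch of how such a helper can work (redirect os.Stderr through a pipe), assuming the code under test writes to os.Stderr directly — it is not the upstream helper.

	package main

	import (
		"fmt"
		"io"
		"os"
		"strings"
	)

	// captureStderr temporarily swaps os.Stderr for a pipe, runs fn, and
	// returns everything fn wrote to stderr. (Sketch only; the real tests
	// use their own readStdErr helper.)
	func captureStderr(fn func()) (string, error) {
		old := os.Stderr
		r, w, err := os.Pipe()
		if err != nil {
			return "", err
		}
		os.Stderr = w
		defer func() { os.Stderr = old }()

		fn()

		if err := w.Close(); err != nil {
			return "", err
		}
		out, err := io.ReadAll(r)
		return string(out), err
	}

	func main() {
		out, err := captureStderr(func() {
			// Stand-in for the single line the panic-recovery filter now logs.
			fmt.Fprintf(os.Stderr, "timeout or abort while handling: %v %q\n", "GET", "/")
		})
		if err != nil {
			panic(err)
		}
		// The same two checks the updated tests make: exactly one line...
		if strings.Count(out, "\n") != 1 {
			fmt.Printf("unexpected output captured actual = %v\n", out)
		}
		// ...and it must contain the timeout message.
		if !strings.Contains(out, `timeout or abort while handling: GET "/"`) {
			fmt.Printf("unexpected output captured actual = %v\n", out)
		}
	}

Counting newlines rather than checking len(capturedOutput) > 0 is what lets the tests insist on the single-line message the new filter code emits, instead of forbidding output entirely.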

staging/src/k8s.io/apiserver/pkg/server/filters/wrap.go

@@ -17,13 +17,14 @@ limitations under the License.
 package filters
 
 import (
-	"k8s.io/apiserver/pkg/endpoints/request"
-	"k8s.io/klog/v2"
+	"fmt"
 	"net/http"
 
 	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apiserver/pkg/endpoints/metrics"
+	"k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/apiserver/pkg/server/httplog"
+	"k8s.io/klog/v2"
 )
 
 // WithPanicRecovery wraps an http Handler to recover and log panics (except in the special case of http.ErrAbortHandler panics, which suppress logging).
@@ -42,12 +43,15 @@ func WithPanicRecovery(handler http.Handler, resolver request.RequestInfoResolve
 			//
 			// Note that the ReallyCrash variable controls the behaviour of the HandleCrash function
 			// So it might actually crash, after calling the handlers
-			info, err := resolver.NewRequestInfo(req)
-			if err != nil {
+			if info, err := resolver.NewRequestInfo(req); err != nil {
 				metrics.RecordRequestAbort(req, nil)
-				return
+			} else {
+				metrics.RecordRequestAbort(req, info)
 			}
-			metrics.RecordRequestAbort(req, info)
+			// This call can have different handlers, but the default chain rate limits. Call it after the metrics are updated
+			// in case the rate limit delays it. If you outrun the rate for this one timed out requests, something has gone
+			// seriously wrong with your server, but generally having a logging signal for timeouts is useful.
+			runtime.HandleError(fmt.Errorf("timeout or abort while handling: %v %q", req.Method, req.URL.Path))
 			return
 		}
 		http.Error(w, "This request caused apiserver to panic. Look in the logs for details.", http.StatusInternalServerError)
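
The logging side of the change: instead of only recording the abort metric (RecordRequestAbort), the recovery path now also emits one line through runtime.HandleError, whose default handler chain logs with rate limiting — hence the comment about calling it after the metrics update. The stdlib-only sketch below shows the same recover, log one line, re-panic shape for the http.ErrAbortHandler sentinel; withPanicLogging, the listen address, and the plain log.Printf (standing in for the rate-limited runtime.HandleError chain) are illustrative assumptions, not the upstream code.

	package main

	import (
		"log"
		"net/http"
	)

	// withPanicLogging recovers a panicking handler. For the http.ErrAbortHandler
	// sentinel (the value apiserver's timeout filter panics with) it emits exactly
	// one log line and re-panics so net/http still aborts the connection without
	// printing a stack trace; any other panic gets a 500 plus a log entry.
	func withPanicLogging(handler http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			defer func() {
				r := recover()
				if r == nil {
					return
				}
				if r == http.ErrAbortHandler {
					// One line instead of a stack dump; net/http suppresses its
					// own trace for this sentinel value.
					log.Printf("timeout or abort while handling: %v %q", req.Method, req.URL.Path)
					panic(http.ErrAbortHandler) // still abort the connection
				}
				http.Error(w, "This request caused the server to panic. Look in the logs for details.", http.StatusInternalServerError)
				log.Printf("handler panic'd on %v %v: %v", req.Method, req.RequestURI, r)
			}()
			handler.ServeHTTP(w, req)
		})
	}

	func main() {
		// Simulate what a timeout filter does to a request that overruns its deadline.
		aborting := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
			panic(http.ErrAbortHandler)
		})
		log.Fatal(http.ListenAndServe("127.0.0.1:8080", withPanicLogging(aborting)))
	}

Re-panicking with http.ErrAbortHandler matters: net/http treats that sentinel specially and tears down the connection (or sends an HTTP/2 RST_STREAM) without logging a stack trace, so the one-line message is the only logging signal a timed-out request leaves behind.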