Merge pull request #127709 from pohly/log-client-go-rest

client-go/rest: finish context support

This change is contained in commit 427cd18f72.
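The diff below threads context.Context through client-go's REST layer: warning handlers and backoff managers gain ...WithContext variants, and request logging is routed through klog.FromContext. As a minimal, hedged sketch (not part of the PR; the handler type and function names are made up) of how a caller could opt in to the new Config hook:

package example

import (
	"context"

	"k8s.io/client-go/rest"
	"k8s.io/klog/v2"
)

// contextualWarnings is a hypothetical handler that forwards API warnings
// to the logger carried in the request context.
type contextualWarnings struct{}

func (contextualWarnings) HandleWarningHeaderWithContext(ctx context.Context, code int, agent, message string) {
	klog.FromContext(ctx).Info("API warning", "code", code, "agent", agent, "message", message)
}

// withContextualWarnings copies a config and sets the new field; per the
// change below, WarningHandlerWithContext wins when both handlers are set.
func withContextualWarnings(base *rest.Config) *rest.Config {
	cfg := rest.CopyConfig(base)
	cfg.WarningHandlerWithContext = contextualWarnings{}
	return cfg
}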
@@ -143,6 +143,7 @@ linters-settings: # please keep this alphabetized
contextual k8s.io/api/.*
contextual k8s.io/apimachinery/pkg/util/runtime/.*
contextual k8s.io/client-go/metadata/.*
contextual k8s.io/client-go/rest/.*
contextual k8s.io/client-go/tools/cache/.*
contextual k8s.io/client-go/tools/events/.*
contextual k8s.io/client-go/tools/record/.*

@@ -189,6 +189,7 @@ linters-settings: # please keep this alphabetized
contextual k8s.io/api/.*
contextual k8s.io/apimachinery/pkg/util/runtime/.*
contextual k8s.io/client-go/metadata/.*
contextual k8s.io/client-go/rest/.*
contextual k8s.io/client-go/tools/cache/.*
contextual k8s.io/client-go/tools/events/.*
contextual k8s.io/client-go/tools/record/.*

@@ -191,6 +191,7 @@ linters-settings: # please keep this alphabetized
contextual k8s.io/api/.*
contextual k8s.io/apimachinery/pkg/util/runtime/.*
contextual k8s.io/client-go/metadata/.*
contextual k8s.io/client-go/rest/.*
contextual k8s.io/client-go/tools/cache/.*
contextual k8s.io/client-go/tools/events/.*
contextual k8s.io/client-go/tools/record/.*

@@ -27,6 +27,7 @@ structured k8s.io/apiserver/pkg/server/options/encryptionconfig/.*
contextual k8s.io/api/.*
contextual k8s.io/apimachinery/pkg/util/runtime/.*
contextual k8s.io/client-go/metadata/.*
contextual k8s.io/client-go/rest/.*
contextual k8s.io/client-go/tools/cache/.*
contextual k8s.io/client-go/tools/events/.*
contextual k8s.io/client-go/tools/record/.*
@@ -318,6 +318,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=

1 staging/src/k8s.io/apiserver/go.sum generated

@@ -320,6 +320,7 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=

@@ -58,6 +58,7 @@ require (
	github.com/onsi/ginkgo/v2 v2.21.0 // indirect
	github.com/pkg/errors v0.9.1 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/stretchr/objx v0.5.2 // indirect
	github.com/x448/float16 v0.8.4 // indirect
	golang.org/x/sys v0.28.0 // indirect
	golang.org/x/text v0.21.0 // indirect

1 staging/src/k8s.io/client-go/go.sum generated

@@ -88,6 +88,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=

10 staging/src/k8s.io/client-go/rest/.mockery.yaml Normal file

@@ -0,0 +1,10 @@
---
dir: .
filename: "mock_{{.InterfaceName | snakecase}}_test.go"
boilerplate-file: ../../../../../hack/boilerplate/boilerplate.generatego.txt
outpkg: rest
with-expecter: true
packages:
  k8s.io/client-go/rest:
    interfaces:
      BackoffManager:
@@ -93,7 +93,7 @@ type RESTClient struct {
	content requestClientContentConfigProvider

	// creates BackoffManager that is passed to requests.
	createBackoffMgr func() BackoffManager
	createBackoffMgr func() BackoffManagerWithContext

	// rateLimiter is shared among all requests created by this client unless specifically
	// overridden.

@@ -101,7 +101,7 @@ type RESTClient struct {

	// warningHandler is shared among all requests created by this client.
	// If not set, defaultWarningHandler is used.
	warningHandler WarningHandler
	warningHandler WarningHandlerWithContext

	// Set specific behavior of the client. If not set http.DefaultClient will be used.
	Client *http.Client

@@ -178,7 +178,7 @@ func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter {
// readExpBackoffConfig handles the internal logic of determining what the
// backoff policy is. By default if no information is available, NoBackoff.
// TODO Generalize this see #17727 .
func readExpBackoffConfig() BackoffManager {
func readExpBackoffConfig() BackoffManagerWithContext {
	backoffBase := os.Getenv(envBackoffBase)
	backoffDuration := os.Getenv(envBackoffDuration)

@@ -35,6 +35,7 @@ import (
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/scheme"
	utiltesting "k8s.io/client-go/util/testing"
	"k8s.io/klog/v2/ktesting"

	"github.com/google/go-cmp/cmp"
)

@@ -335,26 +336,26 @@ func TestHTTPProxy(t *testing.T) {
}

func TestCreateBackoffManager(t *testing.T) {

	_, ctx := ktesting.NewTestContext(t)
	theUrl, _ := url.Parse("http://localhost")

	// 1 second base backoff + duration of 2 seconds -> exponential backoff for requests.
	t.Setenv(envBackoffBase, "1")
	t.Setenv(envBackoffDuration, "2")
	backoff := readExpBackoffConfig()
	backoff.UpdateBackoff(theUrl, nil, 500)
	backoff.UpdateBackoff(theUrl, nil, 500)
	if backoff.CalculateBackoff(theUrl)/time.Second != 2 {
	backoff.UpdateBackoffWithContext(ctx, theUrl, nil, 500)
	backoff.UpdateBackoffWithContext(ctx, theUrl, nil, 500)
	if backoff.CalculateBackoffWithContext(ctx, theUrl)/time.Second != 2 {
		t.Errorf("Backoff env not working.")
	}

	// 0 duration -> no backoff.
	t.Setenv(envBackoffBase, "1")
	t.Setenv(envBackoffDuration, "0")
	backoff.UpdateBackoff(theUrl, nil, 500)
	backoff.UpdateBackoff(theUrl, nil, 500)
	backoff.UpdateBackoffWithContext(ctx, theUrl, nil, 500)
	backoff.UpdateBackoffWithContext(ctx, theUrl, nil, 500)
	backoff = readExpBackoffConfig()
	if backoff.CalculateBackoff(theUrl)/time.Second != 0 {
	if backoff.CalculateBackoffWithContext(ctx, theUrl)/time.Second != 0 {
		t.Errorf("Zero backoff duration, but backoff still occurring.")
	}

@@ -362,9 +363,9 @@ func TestCreateBackoffManager(t *testing.T) {
	t.Setenv(envBackoffBase, "")
	t.Setenv(envBackoffDuration, "")
	backoff = readExpBackoffConfig()
	backoff.UpdateBackoff(theUrl, nil, 500)
	backoff.UpdateBackoff(theUrl, nil, 500)
	if backoff.CalculateBackoff(theUrl)/time.Second != 0 {
	backoff.UpdateBackoffWithContext(ctx, theUrl, nil, 500)
	backoff.UpdateBackoffWithContext(ctx, theUrl, nil, 500)
	if backoff.CalculateBackoffWithContext(ctx, theUrl)/time.Second != 0 {
		t.Errorf("Backoff should have been 0.")
	}
@@ -129,10 +129,23 @@ type Config struct {
	RateLimiter flowcontrol.RateLimiter

	// WarningHandler handles warnings in server responses.
	// If not set, the default warning handler is used.
	// See documentation for SetDefaultWarningHandler() for details.
	// If this and WarningHandlerWithContext are not set, the
	// default warning handler is used. If both are set,
	// WarningHandlerWithContext is used.
	//
	// See documentation for [SetDefaultWarningHandler] for details.
	//
	//logcheck:context // WarningHandlerWithContext should be used instead of WarningHandler in code which supports contextual logging.
	WarningHandler WarningHandler

	// WarningHandlerWithContext handles warnings in server responses.
	// If this and WarningHandler are not set, the
	// default warning handler is used. If both are set,
	// WarningHandlerWithContext is used.
	//
	// See documentation for [SetDefaultWarningHandler] for details.
	WarningHandlerWithContext WarningHandlerWithContext

	// The maximum length of time to wait before giving up on a server request. A value of zero means no timeout.
	Timeout time.Duration

@@ -381,12 +394,27 @@ func RESTClientForConfigAndClient(config *Config, httpClient *http.Client) (*RES
	}

	restClient, err := NewRESTClient(baseURL, versionedAPIPath, clientContent, rateLimiter, httpClient)
	if err == nil && config.WarningHandler != nil {
		restClient.warningHandler = config.WarningHandler
	}
	maybeSetWarningHandler(restClient, config.WarningHandler, config.WarningHandlerWithContext)
	return restClient, err
}

// maybeSetWarningHandler sets the handlerWithContext if non-nil,
// otherwise the handler with a wrapper if non-nil,
// and does nothing if both are nil.
//
// May be called for a nil client.
func maybeSetWarningHandler(c *RESTClient, handler WarningHandler, handlerWithContext WarningHandlerWithContext) {
	if c == nil {
		return
	}
	switch {
	case handlerWithContext != nil:
		c.warningHandler = handlerWithContext
	case handler != nil:
		c.warningHandler = warningLoggerNopContext{l: handler}
	}
}

// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows
// the config.Version to be empty.
func UnversionedRESTClientFor(config *Config) (*RESTClient, error) {
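warningLoggerNopContext is not shown in this hunk; presumably (an assumption based on the calls above, not a verbatim excerpt from the PR) it is a thin adapter along these lines, dropping the context and delegating to the legacy handler:

// Assumed shape of the adapter used by maybeSetWarningHandler above;
// the real definition lives elsewhere in the PR.
type warningLoggerNopContext struct {
	l WarningHandler
}

func (w warningLoggerNopContext) HandleWarningHeaderWithContext(_ context.Context, code int, agent string, message string) {
	w.l.HandleWarningHeader(code, agent, message)
}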
@@ -448,9 +476,7 @@ func UnversionedRESTClientForConfigAndClient(config *Config, httpClient *http.Cl
	}

	restClient, err := NewRESTClient(baseURL, versionedAPIPath, clientContent, rateLimiter, httpClient)
	if err == nil && config.WarningHandler != nil {
		restClient.warningHandler = config.WarningHandler
	}
	maybeSetWarningHandler(restClient, config.WarningHandler, config.WarningHandlerWithContext)
	return restClient, err
}

@@ -532,6 +558,7 @@ func InClusterConfig() (*Config, error) {
	tlsClientConfig := TLSClientConfig{}

	if _, err := certutil.NewPool(rootCAFile); err != nil {
		//nolint:logcheck // The decision to log this instead of returning an error goes back to ~2016. It's part of the client-go API now, so not changing it just to support contextual logging.
		klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
	} else {
		tlsClientConfig.CAFile = rootCAFile
@@ -616,15 +643,16 @@ func AnonymousClientConfig(config *Config) *Config {
			CAData: config.TLSClientConfig.CAData,
			NextProtos: config.TLSClientConfig.NextProtos,
		},
		RateLimiter: config.RateLimiter,
		WarningHandler: config.WarningHandler,
		UserAgent: config.UserAgent,
		DisableCompression: config.DisableCompression,
		QPS: config.QPS,
		Burst: config.Burst,
		Timeout: config.Timeout,
		Dial: config.Dial,
		Proxy: config.Proxy,
		RateLimiter: config.RateLimiter,
		WarningHandler: config.WarningHandler,
		WarningHandlerWithContext: config.WarningHandlerWithContext,
		UserAgent: config.UserAgent,
		DisableCompression: config.DisableCompression,
		QPS: config.QPS,
		Burst: config.Burst,
		Timeout: config.Timeout,
		Dial: config.Dial,
		Proxy: config.Proxy,
	}
}

@@ -658,17 +686,18 @@ func CopyConfig(config *Config) *Config {
			CAData: config.TLSClientConfig.CAData,
			NextProtos: config.TLSClientConfig.NextProtos,
		},
		UserAgent: config.UserAgent,
		DisableCompression: config.DisableCompression,
		Transport: config.Transport,
		WrapTransport: config.WrapTransport,
		QPS: config.QPS,
		Burst: config.Burst,
		RateLimiter: config.RateLimiter,
		WarningHandler: config.WarningHandler,
		Timeout: config.Timeout,
		Dial: config.Dial,
		Proxy: config.Proxy,
		UserAgent: config.UserAgent,
		DisableCompression: config.DisableCompression,
		Transport: config.Transport,
		WrapTransport: config.WrapTransport,
		QPS: config.QPS,
		Burst: config.Burst,
		RateLimiter: config.RateLimiter,
		WarningHandler: config.WarningHandler,
		WarningHandlerWithContext: config.WarningHandlerWithContext,
		Timeout: config.Timeout,
		Dial: config.Dial,
		Proxy: config.Proxy,
	}
	if config.ExecProvider != nil && config.ExecProvider.Config != nil {
		c.ExecProvider.Config = config.ExecProvider.Config.DeepCopyObject()
|
@ -41,6 +41,7 @@ import (
|
||||
"github.com/google/go-cmp/cmp"
|
||||
fuzz "github.com/google/gofuzz"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestIsConfigTransportTLS(t *testing.T) {
|
||||
@ -266,6 +267,19 @@ type fakeWarningHandler struct{}
|
||||
|
||||
func (f fakeWarningHandler) HandleWarningHeader(code int, agent string, message string) {}
|
||||
|
||||
type fakeWarningHandlerWithLogging struct {
|
||||
messages []string
|
||||
}
|
||||
|
||||
func (f *fakeWarningHandlerWithLogging) HandleWarningHeader(code int, agent string, message string) {
|
||||
f.messages = append(f.messages, message)
|
||||
}
|
||||
|
||||
type fakeWarningHandlerWithContext struct{}
|
||||
|
||||
func (f fakeWarningHandlerWithContext) HandleWarningHeaderWithContext(ctx context.Context, code int, agent string, message string) {
|
||||
}
|
||||
|
||||
type fakeNegotiatedSerializer struct{}
|
||||
|
||||
func (n *fakeNegotiatedSerializer) SupportedMediaTypes() []runtime.SerializerInfo {
|
||||
@ -330,6 +344,9 @@ func TestAnonymousAuthConfig(t *testing.T) {
|
||||
func(h *WarningHandler, f fuzz.Continue) {
|
||||
*h = &fakeWarningHandler{}
|
||||
},
|
||||
func(h *WarningHandlerWithContext, f fuzz.Continue) {
|
||||
*h = &fakeWarningHandlerWithContext{}
|
||||
},
|
||||
// Authentication does not require fuzzer
|
||||
func(r *AuthProviderConfigPersister, f fuzz.Continue) {},
|
||||
func(r *clientcmdapi.AuthProviderConfig, f fuzz.Continue) {
|
||||
@ -428,6 +445,9 @@ func TestCopyConfig(t *testing.T) {
|
||||
func(h *WarningHandler, f fuzz.Continue) {
|
||||
*h = &fakeWarningHandler{}
|
||||
},
|
||||
func(h *WarningHandlerWithContext, f fuzz.Continue) {
|
||||
*h = &fakeWarningHandlerWithContext{}
|
||||
},
|
||||
func(r *AuthProviderConfigPersister, f fuzz.Continue) {
|
||||
*r = fakeAuthProviderConfigPersister{}
|
||||
},
|
||||
@ -619,25 +639,69 @@ func TestConfigSprint(t *testing.T) {
|
||||
KeyData: []byte("fake key"),
|
||||
NextProtos: []string{"h2", "http/1.1"},
|
||||
},
|
||||
UserAgent: "gobot",
|
||||
Transport: &fakeRoundTripper{},
|
||||
WrapTransport: fakeWrapperFunc,
|
||||
QPS: 1,
|
||||
Burst: 2,
|
||||
RateLimiter: &fakeLimiter{},
|
||||
WarningHandler: fakeWarningHandler{},
|
||||
Timeout: 3 * time.Second,
|
||||
Dial: fakeDialFunc,
|
||||
Proxy: fakeProxyFunc,
|
||||
UserAgent: "gobot",
|
||||
Transport: &fakeRoundTripper{},
|
||||
WrapTransport: fakeWrapperFunc,
|
||||
QPS: 1,
|
||||
Burst: 2,
|
||||
RateLimiter: &fakeLimiter{},
|
||||
WarningHandler: fakeWarningHandler{},
|
||||
WarningHandlerWithContext: fakeWarningHandlerWithContext{},
|
||||
Timeout: 3 * time.Second,
|
||||
Dial: fakeDialFunc,
|
||||
Proxy: fakeProxyFunc,
|
||||
}
|
||||
want := fmt.Sprintf(
|
||||
`&rest.Config{Host:"localhost:8080", APIPath:"v1", ContentConfig:rest.ContentConfig{AcceptContentTypes:"application/json", ContentType:"application/json", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"gopher", Password:"--- REDACTED ---", BearerToken:"--- REDACTED ---", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"gopher2", UID:"uid123", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:api.AuthProviderConfig{Name: "gopher", Config: map[string]string{--- REDACTED ---}}, AuthConfigPersister:rest.AuthProviderConfigPersister(--- REDACTED ---), ExecProvider:api.ExecConfig{Command: "sudo", Args: []string{"--- REDACTED ---"}, Env: []ExecEnvVar{--- REDACTED ---}, APIVersion: "", ProvideClusterInfo: true, Config: runtime.Object(--- REDACTED ---), StdinUnavailable: false}, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"a.crt", KeyFile:"a.key", CAFile:"", CertData:[]uint8{0x2d, 0x2d, 0x2d, 0x20, 0x54, 0x52, 0x55, 0x4e, 0x43, 0x41, 0x54, 0x45, 0x44, 0x20, 0x2d, 0x2d, 0x2d}, KeyData:[]uint8{0x2d, 0x2d, 0x2d, 0x20, 0x52, 0x45, 0x44, 0x41, 0x43, 0x54, 0x45, 0x44, 0x20, 0x2d, 0x2d, 0x2d}, CAData:[]uint8(nil), NextProtos:[]string{"h2", "http/1.1"}}, UserAgent:"gobot", DisableCompression:false, Transport:(*rest.fakeRoundTripper)(%p), WrapTransport:(transport.WrapperFunc)(%p), QPS:1, Burst:2, RateLimiter:(*rest.fakeLimiter)(%p), WarningHandler:rest.fakeWarningHandler{}, Timeout:3000000000, Dial:(func(context.Context, string, string) (net.Conn, error))(%p), Proxy:(func(*http.Request) (*url.URL, error))(%p)}`,
|
||||
`&rest.Config{Host:"localhost:8080", APIPath:"v1", ContentConfig:rest.ContentConfig{AcceptContentTypes:"application/json", ContentType:"application/json", GroupVersion:(*schema.GroupVersion)(nil), NegotiatedSerializer:runtime.NegotiatedSerializer(nil)}, Username:"gopher", Password:"--- REDACTED ---", BearerToken:"--- REDACTED ---", BearerTokenFile:"", Impersonate:rest.ImpersonationConfig{UserName:"gopher2", UID:"uid123", Groups:[]string(nil), Extra:map[string][]string(nil)}, AuthProvider:api.AuthProviderConfig{Name: "gopher", Config: map[string]string{--- REDACTED ---}}, AuthConfigPersister:rest.AuthProviderConfigPersister(--- REDACTED ---), ExecProvider:api.ExecConfig{Command: "sudo", Args: []string{"--- REDACTED ---"}, Env: []ExecEnvVar{--- REDACTED ---}, APIVersion: "", ProvideClusterInfo: true, Config: runtime.Object(--- REDACTED ---), StdinUnavailable: false}, TLSClientConfig:rest.sanitizedTLSClientConfig{Insecure:false, ServerName:"", CertFile:"a.crt", KeyFile:"a.key", CAFile:"", CertData:[]uint8{0x2d, 0x2d, 0x2d, 0x20, 0x54, 0x52, 0x55, 0x4e, 0x43, 0x41, 0x54, 0x45, 0x44, 0x20, 0x2d, 0x2d, 0x2d}, KeyData:[]uint8{0x2d, 0x2d, 0x2d, 0x20, 0x52, 0x45, 0x44, 0x41, 0x43, 0x54, 0x45, 0x44, 0x20, 0x2d, 0x2d, 0x2d}, CAData:[]uint8(nil), NextProtos:[]string{"h2", "http/1.1"}}, UserAgent:"gobot", DisableCompression:false, Transport:(*rest.fakeRoundTripper)(%p), WrapTransport:(transport.WrapperFunc)(%p), QPS:1, Burst:2, RateLimiter:(*rest.fakeLimiter)(%p), WarningHandler:rest.fakeWarningHandler{}, WarningHandlerWithContext:rest.fakeWarningHandlerWithContext{}, Timeout:3000000000, Dial:(func(context.Context, string, string) (net.Conn, error))(%p), Proxy:(func(*http.Request) (*url.URL, error))(%p)}`,
|
||||
c.Transport, fakeWrapperFunc, c.RateLimiter, fakeDialFunc, fakeProxyFunc,
|
||||
)
|
||||
|
||||
for _, f := range []string{"%s", "%v", "%+v", "%#v"} {
|
||||
if got := fmt.Sprintf(f, c); want != got {
|
||||
t.Errorf("fmt.Sprintf(%q, c)\ngot: %q\nwant: %q", f, got, want)
|
||||
t.Errorf("fmt.Sprintf(%q, c)\ngot: %q\nwant: %q\ndiff: %s", f, got, want, cmp.Diff(want, got))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigWarningHandler(t *testing.T) {
|
||||
config := &Config{}
|
||||
config.GroupVersion = &schema.GroupVersion{}
|
||||
config.NegotiatedSerializer = &fakeNegotiatedSerializer{}
|
||||
handlerNoContext := &fakeWarningHandler{}
|
||||
handlerWithContext := &fakeWarningHandlerWithContext{}
|
||||
|
||||
t.Run("none", func(t *testing.T) {
|
||||
client, err := RESTClientForConfigAndClient(config, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Nil(t, client.warningHandler)
|
||||
})
|
||||
|
||||
t.Run("no-context", func(t *testing.T) {
|
||||
config := CopyConfig(config)
|
||||
handler := &fakeWarningHandlerWithLogging{}
|
||||
config.WarningHandler = handler
|
||||
client, err := RESTClientForConfigAndClient(config, nil)
|
||||
require.NoError(t, err)
|
||||
client.warningHandler.HandleWarningHeaderWithContext(context.Background(), 0, "", "message")
|
||||
assert.Equal(t, []string{"message"}, handler.messages)
|
||||
|
||||
})
|
||||
|
||||
t.Run("with-context", func(t *testing.T) {
|
||||
config := CopyConfig(config)
|
||||
config.WarningHandlerWithContext = handlerWithContext
|
||||
client, err := RESTClientForConfigAndClient(config, nil)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, handlerWithContext, client.warningHandler)
|
||||
})
|
||||
|
||||
t.Run("both", func(t *testing.T) {
|
||||
config := CopyConfig(config)
|
||||
config.WarningHandler = handlerNoContext
|
||||
config.WarningHandlerWithContext = handlerWithContext
|
||||
client, err := RESTClientForConfigAndClient(config, nil)
|
||||
require.NoError(t, err)
|
||||
assert.NotNil(t, client.warningHandler)
|
||||
assert.Equal(t, handlerWithContext, client.warningHandler)
|
||||
})
|
||||
}
|
||||
|
@ -242,6 +242,9 @@ func TestConfigToExecClusterRoundtrip(t *testing.T) {
|
||||
func(h *WarningHandler, f fuzz.Continue) {
|
||||
*h = &fakeWarningHandler{}
|
||||
},
|
||||
func(h *WarningHandlerWithContext, f fuzz.Continue) {
|
||||
*h = &fakeWarningHandlerWithContext{}
|
||||
},
|
||||
// Authentication does not require fuzzer
|
||||
func(r *AuthProviderConfigPersister, f fuzz.Continue) {},
|
||||
func(r *clientcmdapi.AuthProviderConfig, f fuzz.Continue) {
|
||||
@ -289,6 +292,7 @@ func TestConfigToExecClusterRoundtrip(t *testing.T) {
|
||||
expected.Burst = 0
|
||||
expected.RateLimiter = nil
|
||||
expected.WarningHandler = nil
|
||||
expected.WarningHandlerWithContext = nil
|
||||
expected.Timeout = 0
|
||||
expected.Dial = nil
|
||||
|
||||
|
168 staging/src/k8s.io/client-go/rest/mock_backoff_manager_test.go Normal file
@ -0,0 +1,168 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by mockery v2.40.3. DO NOT EDIT.
|
||||
|
||||
package rest
|
||||
|
||||
import (
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
|
||||
time "time"
|
||||
|
||||
url "net/url"
|
||||
)
|
||||
|
||||
// MockBackoffManager is an autogenerated mock type for the BackoffManager type
|
||||
type MockBackoffManager struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
type MockBackoffManager_Expecter struct {
|
||||
mock *mock.Mock
|
||||
}
|
||||
|
||||
func (_m *MockBackoffManager) EXPECT() *MockBackoffManager_Expecter {
|
||||
return &MockBackoffManager_Expecter{mock: &_m.Mock}
|
||||
}
|
||||
|
||||
// CalculateBackoff provides a mock function with given fields: actualURL
|
||||
func (_m *MockBackoffManager) CalculateBackoff(actualURL *url.URL) time.Duration {
|
||||
ret := _m.Called(actualURL)
|
||||
|
||||
if len(ret) == 0 {
|
||||
panic("no return value specified for CalculateBackoff")
|
||||
}
|
||||
|
||||
var r0 time.Duration
|
||||
if rf, ok := ret.Get(0).(func(*url.URL) time.Duration); ok {
|
||||
r0 = rf(actualURL)
|
||||
} else {
|
||||
r0 = ret.Get(0).(time.Duration)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// MockBackoffManager_CalculateBackoff_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CalculateBackoff'
|
||||
type MockBackoffManager_CalculateBackoff_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// CalculateBackoff is a helper method to define mock.On call
|
||||
// - actualURL *url.URL
|
||||
func (_e *MockBackoffManager_Expecter) CalculateBackoff(actualURL interface{}) *MockBackoffManager_CalculateBackoff_Call {
|
||||
return &MockBackoffManager_CalculateBackoff_Call{Call: _e.mock.On("CalculateBackoff", actualURL)}
|
||||
}
|
||||
|
||||
func (_c *MockBackoffManager_CalculateBackoff_Call) Run(run func(actualURL *url.URL)) *MockBackoffManager_CalculateBackoff_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(*url.URL))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockBackoffManager_CalculateBackoff_Call) Return(_a0 time.Duration) *MockBackoffManager_CalculateBackoff_Call {
|
||||
_c.Call.Return(_a0)
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockBackoffManager_CalculateBackoff_Call) RunAndReturn(run func(*url.URL) time.Duration) *MockBackoffManager_CalculateBackoff_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// Sleep provides a mock function with given fields: d
|
||||
func (_m *MockBackoffManager) Sleep(d time.Duration) {
|
||||
_m.Called(d)
|
||||
}
|
||||
|
||||
// MockBackoffManager_Sleep_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sleep'
|
||||
type MockBackoffManager_Sleep_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// Sleep is a helper method to define mock.On call
|
||||
// - d time.Duration
|
||||
func (_e *MockBackoffManager_Expecter) Sleep(d interface{}) *MockBackoffManager_Sleep_Call {
|
||||
return &MockBackoffManager_Sleep_Call{Call: _e.mock.On("Sleep", d)}
|
||||
}
|
||||
|
||||
func (_c *MockBackoffManager_Sleep_Call) Run(run func(d time.Duration)) *MockBackoffManager_Sleep_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(time.Duration))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockBackoffManager_Sleep_Call) Return() *MockBackoffManager_Sleep_Call {
|
||||
_c.Call.Return()
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockBackoffManager_Sleep_Call) RunAndReturn(run func(time.Duration)) *MockBackoffManager_Sleep_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// UpdateBackoff provides a mock function with given fields: actualURL, err, responseCode
|
||||
func (_m *MockBackoffManager) UpdateBackoff(actualURL *url.URL, err error, responseCode int) {
|
||||
_m.Called(actualURL, err, responseCode)
|
||||
}
|
||||
|
||||
// MockBackoffManager_UpdateBackoff_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateBackoff'
|
||||
type MockBackoffManager_UpdateBackoff_Call struct {
|
||||
*mock.Call
|
||||
}
|
||||
|
||||
// UpdateBackoff is a helper method to define mock.On call
|
||||
// - actualURL *url.URL
|
||||
// - err error
|
||||
// - responseCode int
|
||||
func (_e *MockBackoffManager_Expecter) UpdateBackoff(actualURL interface{}, err interface{}, responseCode interface{}) *MockBackoffManager_UpdateBackoff_Call {
|
||||
return &MockBackoffManager_UpdateBackoff_Call{Call: _e.mock.On("UpdateBackoff", actualURL, err, responseCode)}
|
||||
}
|
||||
|
||||
func (_c *MockBackoffManager_UpdateBackoff_Call) Run(run func(actualURL *url.URL, err error, responseCode int)) *MockBackoffManager_UpdateBackoff_Call {
|
||||
_c.Call.Run(func(args mock.Arguments) {
|
||||
run(args[0].(*url.URL), args[1].(error), args[2].(int))
|
||||
})
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockBackoffManager_UpdateBackoff_Call) Return() *MockBackoffManager_UpdateBackoff_Call {
|
||||
_c.Call.Return()
|
||||
return _c
|
||||
}
|
||||
|
||||
func (_c *MockBackoffManager_UpdateBackoff_Call) RunAndReturn(run func(*url.URL, error, int)) *MockBackoffManager_UpdateBackoff_Call {
|
||||
_c.Call.Return(run)
|
||||
return _c
|
||||
}
|
||||
|
||||
// NewMockBackoffManager creates a new instance of MockBackoffManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
|
||||
// The first argument is typically a *testing.T value.
|
||||
func NewMockBackoffManager(t interface {
|
||||
mock.TestingT
|
||||
Cleanup(func())
|
||||
}) *MockBackoffManager {
|
||||
mock := &MockBackoffManager{}
|
||||
mock.Mock.Test(t)
|
||||
|
||||
t.Cleanup(func() { mock.AssertExpectations(t) })
|
||||
|
||||
return mock
|
||||
}
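A brief sketch (not part of the PR; the test name and values are made up) of how the generated expecter API might be exercised; NewMockBackoffManager registers an expectations check via t.Cleanup as shown above:

func TestMockBackoffManagerSketch(t *testing.T) {
	m := NewMockBackoffManager(t)
	u, _ := url.Parse("http://localhost")

	// Declare expected calls and canned return values.
	m.EXPECT().UpdateBackoff(u, nil, 500).Return()
	m.EXPECT().CalculateBackoff(u).Return(2 * time.Second)

	// Exercise the mock through the plain BackoffManager interface.
	var bm BackoffManager = m
	bm.UpdateBackoff(u, nil, 500)
	if got := bm.CalculateBackoff(u); got != 2*time.Second {
		t.Errorf("CalculateBackoff = %v, want 2s", got)
	}
}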
@@ -21,8 +21,6 @@ import (
	"net/http"
	"sync"

	"k8s.io/klog/v2"

	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

@@ -65,7 +63,10 @@ func RegisterAuthProviderPlugin(name string, plugin Factory) error {
	if _, found := plugins[name]; found {
		return fmt.Errorf("auth Provider Plugin %q was registered twice", name)
	}
	klog.V(4).Infof("Registered Auth Provider Plugin %q", name)
	// RegisterAuthProviderPlugin gets called during the init phase before
	// logging is initialized and therefore should not emit logs. If you
	// need this message for debugging something, then uncomment it.
	// klog.V(4).Infof("Registered Auth Provider Plugin %q", name)
	plugins[name] = plugin
	return nil
}
@@ -54,7 +54,7 @@ import (
	"k8s.io/utils/clock"
)

var (
const (
	// longThrottleLatency defines threshold for logging requests. All requests being
	// throttled (via the provided rateLimiter) for more than longThrottleLatency will
	// be logged.

@@ -103,10 +103,10 @@ type Request struct {
	contentConfig ClientContentConfig
	contentTypeNotSet bool

	warningHandler WarningHandler
	warningHandler WarningHandlerWithContext

	rateLimiter flowcontrol.RateLimiter
	backoff BackoffManager
	backoff BackoffManagerWithContext
	timeout time.Duration
	maxRetries int

@@ -136,7 +136,7 @@ type Request struct {

// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
func NewRequest(c *RESTClient) *Request {
	var backoff BackoffManager
	var backoff BackoffManagerWithContext
	if c.createBackoffMgr != nil {
		backoff = c.createBackoffMgr()
	}

@@ -259,20 +259,47 @@ func (r *Request) Resource(resource string) *Request {
}

// BackOff sets the request's backoff manager to the one specified,
// or defaults to the stub implementation if nil is provided
// or defaults to the stub implementation if nil is provided.
//
// Deprecated: BackoffManager.Sleep ignores the caller's context. Use BackOffWithContext and BackoffManagerWithContext instead.
func (r *Request) BackOff(manager BackoffManager) *Request {
	if manager == nil {
		r.backoff = &NoBackoff{}
		return r
	}

	r.backoff = &backoffManagerNopContext{BackoffManager: manager}
	return r
}

// BackOffWithContext sets the request's backoff manager to the one specified,
// or defaults to the stub implementation if nil is provided.
func (r *Request) BackOffWithContext(manager BackoffManagerWithContext) *Request {
	if manager == nil {
		r.backoff = &NoBackoff{}
		return r
	}

	r.backoff = manager
	return r
}

// WarningHandler sets the handler this client uses when warning headers are encountered.
// If set to nil, this client will use the default warning handler (see SetDefaultWarningHandler).
// If set to nil, this client will use the default warning handler (see [SetDefaultWarningHandler]).
//
//logcheck:context // WarningHandlerWithContext should be used instead of WarningHandler in code which supports contextual logging.
func (r *Request) WarningHandler(handler WarningHandler) *Request {
	if handler == nil {
		r.warningHandler = nil
		return r
	}
	r.warningHandler = warningLoggerNopContext{l: handler}
	return r
}

// WarningHandlerWithContext sets the handler this client uses when warning headers are encountered.
// If set to nil, this client will use the default warning handler (see [SetDefaultWarningHandlerWithContext]).
func (r *Request) WarningHandlerWithContext(handler WarningHandlerWithContext) *Request {
	r.warningHandler = handler
	return r
}
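For callers, the new per-request setters compose like the existing builder methods. A small usage sketch (assumed, not taken from the PR; written as if inside package rest, or with a rest. prefix from outside):

// doWithContext issues a GET using the context-aware hooks added above.
// NoBackoff satisfies BackoffManagerWithContext, so it can be passed directly.
func doWithContext(ctx context.Context, c *RESTClient, handler WarningHandlerWithContext) Result {
	return c.Verb("GET").
		Resource("pods").
		BackOffWithContext(&NoBackoff{}).
		WarningHandlerWithContext(handler).
		Do(ctx)
}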
@@ -649,21 +676,17 @@ func (r *Request) tryThrottleWithInfo(ctx context.Context, retryInfo string) err
	}
	latency := time.Since(now)

	var message string
	switch {
	case len(retryInfo) > 0:
		message = fmt.Sprintf("Waited for %v, %s - request: %s:%s", latency, retryInfo, r.verb, r.URL().String())
	default:
		message = fmt.Sprintf("Waited for %v due to client-side throttling, not priority and fairness, request: %s:%s", latency, r.verb, r.URL().String())
	}

	if latency > longThrottleLatency {
		klog.V(3).Info(message)
	}
	if latency > extraLongThrottleLatency {
		// If the rate limiter latency is very high, the log message should be printed at a higher log level,
		// but we use a throttled logger to prevent spamming.
		globalThrottledLogger.Infof("%s", message)
		if retryInfo == "" {
			retryInfo = "client-side throttling, not priority and fairness"
		}
		klog.FromContext(ctx).V(3).Info("Waited before sending request", "delay", latency, "reason", retryInfo, "verb", r.verb, "URL", r.URL())

		if latency > extraLongThrottleLatency {
			// If the rate limiter latency is very high, the log message should be printed at a higher log level,
			// but we use a throttled logger to prevent spamming.
			globalThrottledLogger.info(klog.FromContext(ctx), "Waited before sending request", "delay", latency, "reason", retryInfo, "verb", r.verb, "URL", r.URL())
		}
	}
	metrics.RateLimiterLatency.Observe(ctx, r.verb, r.finalURLTemplate(), latency)

@@ -675,7 +698,7 @@ func (r *Request) tryThrottle(ctx context.Context) error {
}

type throttleSettings struct {
	logLevel klog.Level
	logLevel int
	minLogInterval time.Duration

	lastLogTime time.Time

@@ -700,9 +723,9 @@ var globalThrottledLogger = &throttledLogger{
	},
}

func (b *throttledLogger) attemptToLog() (klog.Level, bool) {
func (b *throttledLogger) attemptToLog(logger klog.Logger) (int, bool) {
	for _, setting := range b.settings {
		if bool(klog.V(setting.logLevel).Enabled()) {
		if bool(logger.V(setting.logLevel).Enabled()) {
			// Return early without write locking if possible.
			if func() bool {
				setting.lock.RLock()

@@ -724,9 +747,9 @@ func (b *throttledLogger) attemptToLog() (klog.Level, bool) {

// Infof will write a log message at each logLevel specified by the receiver's throttleSettings
// as long as it hasn't written a log message more recently than minLogInterval.
func (b *throttledLogger) Infof(message string, args ...interface{}) {
	if logLevel, ok := b.attemptToLog(); ok {
		klog.V(logLevel).Infof(message, args...)
func (b *throttledLogger) info(logger klog.Logger, message string, kv ...any) {
	if logLevel, ok := b.attemptToLog(logger); ok {
		logger.V(logLevel).Info(message, kv...)
	}
}
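Because the throttled logger now receives the logger from the request context, callers can route these messages (and the request/response body logs below) to a specific sink by attaching a logger to the context. A small, assumed example (the function name is made up):

// listPodsRaw attaches logger to the context so that throttling and body
// logging inside the request machinery use it via klog.FromContext.
func listPodsRaw(logger klog.Logger, client *RESTClient) ([]byte, error) {
	ctx := klog.NewContext(context.Background(), logger)
	return client.Get().Resource("pods").DoRaw(ctx)
}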
|
||||
|
||||
@ -739,7 +762,7 @@ func (r *Request) Watch(ctx context.Context) (watch.Interface, error) {
|
||||
|
||||
func (r *Request) watchInternal(ctx context.Context) (watch.Interface, runtime.Decoder, error) {
|
||||
if r.body == nil {
|
||||
logBody(ctx, 2, "Request Body", r.bodyBytes)
|
||||
logBody(klog.FromContext(ctx), 2, "Request Body", r.bodyBytes)
|
||||
}
|
||||
|
||||
// We specifically don't want to rate limit watches, so we
|
||||
@ -776,7 +799,7 @@ func (r *Request) watchInternal(ctx context.Context) (watch.Interface, runtime.D
|
||||
resp, err := client.Do(req)
|
||||
retry.After(ctx, r, resp, err)
|
||||
if err == nil && resp.StatusCode == http.StatusOK {
|
||||
return r.newStreamWatcher(resp)
|
||||
return r.newStreamWatcher(ctx, resp)
|
||||
}
|
||||
|
||||
done, transformErr := func() (bool, error) {
|
||||
@ -898,7 +921,7 @@ func (r WatchListResult) Into(obj runtime.Object) error {
|
||||
// to see what parameters are currently required.
|
||||
func (r *Request) WatchList(ctx context.Context) WatchListResult {
|
||||
if r.body == nil {
|
||||
logBody(ctx, 2, "Request Body", r.bodyBytes)
|
||||
logBody(klog.FromContext(ctx), 2, "Request Body", r.bodyBytes)
|
||||
}
|
||||
|
||||
if !clientfeatures.FeatureGates().Enabled(clientfeatures.WatchListClient) {
|
||||
@ -969,18 +992,18 @@ func (r *Request) handleWatchList(ctx context.Context, w watch.Interface, negoti
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Request) newStreamWatcher(resp *http.Response) (watch.Interface, runtime.Decoder, error) {
|
||||
func (r *Request) newStreamWatcher(ctx context.Context, resp *http.Response) (watch.Interface, runtime.Decoder, error) {
|
||||
contentType := resp.Header.Get("Content-Type")
|
||||
mediaType, params, err := mime.ParseMediaType(contentType)
|
||||
if err != nil {
|
||||
klog.V(4).Infof("Unexpected content type from the server: %q: %v", contentType, err)
|
||||
klog.FromContext(ctx).V(4).Info("Unexpected content type from the server", "contentType", contentType, "err", err)
|
||||
}
|
||||
objectDecoder, streamingSerializer, framer, err := r.contentConfig.Negotiator.StreamDecoder(mediaType, params)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
handleWarnings(resp.Header, r.warningHandler)
|
||||
handleWarnings(ctx, resp.Header, r.warningHandler)
|
||||
|
||||
frameReader := framer.NewFrameReader(resp.Body)
|
||||
watchEventDecoder := streaming.NewDecoder(frameReader, streamingSerializer)
|
||||
@ -1031,7 +1054,7 @@ func sanitize(req *Request, resp *http.Response, err error) (string, string) {
|
||||
// If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response.
|
||||
func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
|
||||
if r.body == nil {
|
||||
logBody(ctx, 2, "Request Body", r.bodyBytes)
|
||||
logBody(klog.FromContext(ctx), 2, "Request Body", r.bodyBytes)
|
||||
}
|
||||
|
||||
if r.err != nil {
|
||||
@ -1067,7 +1090,7 @@ func (r *Request) Stream(ctx context.Context) (io.ReadCloser, error) {
|
||||
|
||||
switch {
|
||||
case (resp.StatusCode >= 200) && (resp.StatusCode < 300):
|
||||
handleWarnings(resp.Header, r.warningHandler)
|
||||
handleWarnings(ctx, resp.Header, r.warningHandler)
|
||||
return resp.Body, nil
|
||||
|
||||
default:
|
||||
@ -1175,7 +1198,7 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
|
||||
}()
|
||||
|
||||
if r.err != nil {
|
||||
klog.V(4).Infof("Error in request: %v", r.err)
|
||||
klog.FromContext(ctx).V(4).Info("Error in request", "err", r.err)
|
||||
return r.err
|
||||
}
|
||||
|
||||
@ -1267,8 +1290,9 @@ func (r *Request) request(ctx context.Context, fn func(*http.Request, *http.Resp
|
||||
// - If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError
|
||||
// - http.Client.Do errors are returned directly.
|
||||
func (r *Request) Do(ctx context.Context) Result {
|
||||
logger := klog.FromContext(ctx)
|
||||
if r.body == nil {
|
||||
logBody(ctx, 2, "Request Body", r.bodyBytes)
|
||||
logBody(logger, 2, "Request Body", r.bodyBytes)
|
||||
}
|
||||
|
||||
var result Result
|
||||
@ -1276,7 +1300,7 @@ func (r *Request) Do(ctx context.Context) Result {
|
||||
result = r.transformResponse(ctx, resp, req)
|
||||
})
|
||||
if err != nil {
|
||||
return Result{err: err}
|
||||
return Result{err: err, logger: logger}
|
||||
}
|
||||
if result.err == nil || len(result.body) > 0 {
|
||||
metrics.ResponseSize.Observe(ctx, r.verb, r.URL().Host, float64(len(result.body)))
|
||||
@ -1286,14 +1310,15 @@ func (r *Request) Do(ctx context.Context) Result {
|
||||
|
||||
// DoRaw executes the request but does not process the response body.
|
||||
func (r *Request) DoRaw(ctx context.Context) ([]byte, error) {
|
||||
logger := klog.FromContext(ctx)
|
||||
if r.body == nil {
|
||||
logBody(ctx, 2, "Request Body", r.bodyBytes)
|
||||
logBody(logger, 2, "Request Body", r.bodyBytes)
|
||||
}
|
||||
|
||||
var result Result
|
||||
err := r.request(ctx, func(req *http.Request, resp *http.Response) {
|
||||
result.body, result.err = io.ReadAll(resp.Body)
|
||||
logBody(ctx, 2, "Response Body", result.body)
|
||||
logBody(logger, 2, "Response Body", result.body)
|
||||
if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent {
|
||||
result.err = r.transformUnstructuredResponseError(resp, req, result.body)
|
||||
}
|
||||
@ -1309,6 +1334,7 @@ func (r *Request) DoRaw(ctx context.Context) ([]byte, error) {
|
||||
|
||||
// transformResponse converts an API response into a structured API object
|
||||
func (r *Request) transformResponse(ctx context.Context, resp *http.Response, req *http.Request) Result {
|
||||
logger := klog.FromContext(ctx)
|
||||
var body []byte
|
||||
if resp.Body != nil {
|
||||
data, err := io.ReadAll(resp.Body)
|
||||
@ -1323,22 +1349,24 @@ func (r *Request) transformResponse(ctx context.Context, resp *http.Response, re
|
||||
// 2. Apiserver sends back the headers and then part of the body
|
||||
// 3. Apiserver closes connection.
|
||||
// 4. client-go should catch this and return an error.
|
||||
klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
|
||||
logger.V(2).Info("Stream error when reading response body, may be caused by closed connection", "err", err)
|
||||
streamErr := fmt.Errorf("stream error when reading response body, may be caused by closed connection. Please retry. Original error: %w", err)
|
||||
return Result{
|
||||
err: streamErr,
|
||||
err: streamErr,
|
||||
logger: logger,
|
||||
}
|
||||
default:
|
||||
klog.Errorf("Unexpected error when reading response body: %v", err)
|
||||
logger.Error(err, "Unexpected error when reading response body")
|
||||
unexpectedErr := fmt.Errorf("unexpected error when reading response body. Please retry. Original error: %w", err)
|
||||
return Result{
|
||||
err: unexpectedErr,
|
||||
err: unexpectedErr,
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Call depth is tricky. This one is okay for Do and DoRaw.
|
||||
logBody(ctx, 7, "Response Body", body)
|
||||
logBody(logger, 7, "Response Body", body)
|
||||
|
||||
// verify the content type is accurate
|
||||
var decoder runtime.Decoder
|
||||
@ -1350,7 +1378,7 @@ func (r *Request) transformResponse(ctx context.Context, resp *http.Response, re
|
||||
var err error
|
||||
mediaType, params, err := mime.ParseMediaType(contentType)
|
||||
if err != nil {
|
||||
return Result{err: errors.NewInternalError(err)}
|
||||
return Result{err: errors.NewInternalError(err), logger: logger}
|
||||
}
|
||||
decoder, err = r.contentConfig.Negotiator.Decoder(mediaType, params)
|
||||
if err != nil {
|
||||
@ -1359,13 +1387,14 @@ func (r *Request) transformResponse(ctx context.Context, resp *http.Response, re
|
||||
case resp.StatusCode == http.StatusSwitchingProtocols:
|
||||
// no-op, we've been upgraded
|
||||
case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent:
|
||||
return Result{err: r.transformUnstructuredResponseError(resp, req, body)}
|
||||
return Result{err: r.transformUnstructuredResponseError(resp, req, body), logger: logger}
|
||||
}
|
||||
return Result{
|
||||
body: body,
|
||||
contentType: contentType,
|
||||
statusCode: resp.StatusCode,
|
||||
warnings: handleWarnings(resp.Header, r.warningHandler),
|
||||
warnings: handleWarnings(ctx, resp.Header, r.warningHandler),
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1384,7 +1413,8 @@ func (r *Request) transformResponse(ctx context.Context, resp *http.Response, re
|
||||
statusCode: resp.StatusCode,
|
||||
decoder: decoder,
|
||||
err: err,
|
||||
warnings: handleWarnings(resp.Header, r.warningHandler),
|
||||
warnings: handleWarnings(ctx, resp.Header, r.warningHandler),
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
@ -1393,7 +1423,8 @@ func (r *Request) transformResponse(ctx context.Context, resp *http.Response, re
|
||||
contentType: contentType,
|
||||
statusCode: resp.StatusCode,
|
||||
decoder: decoder,
|
||||
warnings: handleWarnings(resp.Header, r.warningHandler),
|
||||
warnings: handleWarnings(ctx, resp.Header, r.warningHandler),
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
@ -1421,8 +1452,7 @@ func truncateBody(logger klog.Logger, body string) string {
|
||||
// whether the body is printable.
|
||||
//
|
||||
// It needs to be called by all functions which send or receive the data.
|
||||
func logBody(ctx context.Context, callDepth int, prefix string, body []byte) {
|
||||
logger := klog.FromContext(ctx)
|
||||
func logBody(logger klog.Logger, callDepth int, prefix string, body []byte) {
|
||||
if loggerV := logger.V(8); loggerV.Enabled() {
|
||||
loggerV := loggerV.WithCallDepth(callDepth)
|
||||
if bytes.IndexFunc(body, func(r rune) bool {
|
||||
@ -1524,6 +1554,7 @@ type Result struct {
|
||||
contentType string
|
||||
err error
|
||||
statusCode int
|
||||
logger klog.Logger
|
||||
|
||||
decoder runtime.Decoder
|
||||
}
|
||||
@ -1629,7 +1660,7 @@ func (r Result) Error() error {
|
||||
// to be backwards compatible with old servers that do not return a version, default to "v1"
|
||||
out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil)
|
||||
if err != nil {
|
||||
klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err)
|
||||
r.logger.V(5).Info("Body was not decodable (unable to check for Status)", "err", err)
|
||||
return r.err
|
||||
}
|
||||
switch t := out.(type) {
|
||||
|
@ -1489,6 +1489,7 @@ func TestDoRequestNewWay(t *testing.T) {
|
||||
|
||||
// This test assumes that the client implementation backs off exponentially, for an individual request.
|
||||
func TestBackoffLifecycle(t *testing.T) {
|
||||
_, ctx := ktesting.NewTestContext(t)
|
||||
count := 0
|
||||
testServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
count++
|
||||
@ -1508,22 +1509,30 @@ func TestBackoffLifecycle(t *testing.T) {
|
||||
seconds := []int{0, 1, 2, 4, 8, 0, 1, 2, 4, 0}
|
||||
request := c.Verb("POST").Prefix("backofftest").Suffix("abc")
|
||||
clock := testingclock.FakeClock{}
|
||||
request.backoff = &URLBackoff{
|
||||
// Use a fake backoff here to avoid flakes and speed the test up.
|
||||
Backoff: flowcontrol.NewFakeBackOff(
|
||||
time.Duration(1)*time.Second,
|
||||
time.Duration(200)*time.Second,
|
||||
&clock,
|
||||
)}
|
||||
request.backoff = stepClockDuringSleep{
|
||||
BackoffManagerWithContext: &URLBackoff{
|
||||
// Use a fake backoff here to avoid flakes and speed the test up.
|
||||
Backoff: flowcontrol.NewFakeBackOff(
|
||||
time.Duration(1)*time.Second,
|
||||
time.Duration(200)*time.Second,
|
||||
&clock,
|
||||
),
|
||||
},
|
||||
clock: &clock,
|
||||
}
|
||||
|
||||
for _, sec := range seconds {
|
||||
thisBackoff := request.backoff.CalculateBackoff(request.URL())
|
||||
thisBackoff := request.backoff.CalculateBackoffWithContext(ctx, request.URL())
|
||||
t.Logf("Current backoff %v", thisBackoff)
|
||||
if thisBackoff != time.Duration(sec)*time.Second {
|
||||
t.Errorf("Backoff is %v instead of %v", thisBackoff, sec)
|
||||
}
|
||||
|
||||
// This relies on advancing the fake clock by exactly the duration
|
||||
// that SleepWithContext is being called for while DoRaw is executing.
|
||||
// stepClockDuringSleep.SleepWithContext ensures that this happens.
|
||||
now := clock.Now()
|
||||
request.DoRaw(context.Background())
|
||||
request.DoRaw(ctx)
|
||||
elapsed := clock.Since(now)
|
||||
if clock.Since(now) != thisBackoff {
|
||||
t.Errorf("CalculatedBackoff not honored by clock: Expected time of %v, but got %v ", thisBackoff, elapsed)
|
||||
@ -1531,18 +1540,51 @@ func TestBackoffLifecycle(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
type stepClockDuringSleep struct {
|
||||
BackoffManagerWithContext
|
||||
clock *testingclock.FakeClock
|
||||
}
|
||||
|
||||
// SleepWithContext wraps the underlying SleepWithContext and ensures that once
|
||||
// that is sleeping, the fake clock advances by exactly the duration that
|
||||
// it is sleeping for.
|
||||
func (s stepClockDuringSleep) SleepWithContext(ctx context.Context, d time.Duration) {
|
||||
// This code is sensitive to both the implementation of
|
||||
// URLBackoff.SleepWithContext and of FakeClock.NewTimer:
|
||||
// - SleepWithContext must be a no-op when the duration is zero
|
||||
// => no need to step the fake clock
|
||||
// - SleepWithContext must use FakeClock.NewTimer, not FakeClock.Sleep
|
||||
// because the latter would advance time itself
|
||||
if d != 0 {
|
||||
go func() {
|
||||
// Poll until the caller is sleeping.
|
||||
for {
|
||||
if s.clock.HasWaiters() {
|
||||
s.clock.Step(d)
|
||||
return
|
||||
}
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
time.Sleep(time.Millisecond)
|
||||
}
|
||||
}()
|
||||
}
|
||||
s.BackoffManagerWithContext.SleepWithContext(ctx, d)
|
||||
}
|
||||
|
||||
type testBackoffManager struct {
|
||||
sleeps []time.Duration
|
||||
}
|
||||
|
||||
func (b *testBackoffManager) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
|
||||
func (b *testBackoffManager) UpdateBackoffWithContext(ctx context.Context, actualURL *url.URL, err error, responseCode int) {
|
||||
}
|
||||
|
||||
func (b *testBackoffManager) CalculateBackoff(actualUrl *url.URL) time.Duration {
|
||||
func (b *testBackoffManager) CalculateBackoffWithContext(ctx context.Context, actualURL *url.URL) time.Duration {
|
||||
return time.Duration(0)
|
||||
}
|
||||
|
||||
func (b *testBackoffManager) Sleep(d time.Duration) {
|
||||
func (b *testBackoffManager) SleepWithContext(ctx context.Context, d time.Duration) {
|
||||
b.sleeps = append(b.sleeps, d)
|
||||
}
|
||||
|
||||
@ -1568,7 +1610,7 @@ func TestCheckRetryClosesBody(t *testing.T) {
|
||||
expectedSleeps := []time.Duration{0, time.Second, time.Second, time.Second, time.Second}
|
||||
|
||||
c := testRESTClient(t, testServer)
|
||||
c.createBackoffMgr = func() BackoffManager { return backoff }
|
||||
c.createBackoffMgr = func() BackoffManagerWithContext { return backoff }
|
||||
_, err := c.Verb("POST").
|
||||
Prefix("foo", "bar").
|
||||
Suffix("baz").
|
||||
@ -2434,6 +2476,7 @@ func TestRequestPreflightCheck(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestThrottledLogger(t *testing.T) {
|
||||
logger := klog.Background()
|
||||
now := time.Now()
|
||||
oldClock := globalThrottledLogger.clock
|
||||
defer func() {
|
||||
@ -2448,7 +2491,7 @@ func TestThrottledLogger(t *testing.T) {
|
||||
wg.Add(10)
|
||||
for j := 0; j < 10; j++ {
|
||||
go func() {
|
||||
if _, ok := globalThrottledLogger.attemptToLog(); ok {
|
||||
if _, ok := globalThrottledLogger.attemptToLog(logger); ok {
|
||||
logMessages++
|
||||
}
|
||||
wg.Done()
|
||||
@ -2612,6 +2655,8 @@ type noSleepBackOff struct {
|
||||
|
||||
func (n *noSleepBackOff) Sleep(d time.Duration) {}
|
||||
|
||||
func (n *noSleepBackOff) SleepWithContext(ctx context.Context, d time.Duration) {}
|
||||
|
||||
func TestRequestWithRetry(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@ -2997,7 +3042,6 @@ const retryTestKey retryTestKeyType = iota
|
||||
// metric calls are invoked appropriately in right order.
|
||||
type withRateLimiterBackoffManagerAndMetrics struct {
|
||||
flowcontrol.RateLimiter
|
||||
*NoBackoff
|
||||
metrics.ResultMetric
|
||||
calculateBackoffSeq int64
|
||||
calculateBackoffFn func(i int64) time.Duration
|
||||
@ -3013,7 +3057,7 @@ func (lb *withRateLimiterBackoffManagerAndMetrics) Wait(ctx context.Context) err
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lb *withRateLimiterBackoffManagerAndMetrics) CalculateBackoff(actualUrl *url.URL) time.Duration {
|
||||
func (lb *withRateLimiterBackoffManagerAndMetrics) CalculateBackoffWithContext(ctx context.Context, actualURL *url.URL) time.Duration {
|
||||
lb.invokeOrderGot = append(lb.invokeOrderGot, "BackoffManager.CalculateBackoff")
|
||||
|
||||
waitFor := lb.calculateBackoffFn(lb.calculateBackoffSeq)
|
||||
@ -3021,11 +3065,11 @@ func (lb *withRateLimiterBackoffManagerAndMetrics) CalculateBackoff(actualUrl *u
|
||||
return waitFor
|
||||
}
|
||||
|
||||
func (lb *withRateLimiterBackoffManagerAndMetrics) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
|
||||
func (lb *withRateLimiterBackoffManagerAndMetrics) UpdateBackoffWithContext(ctx context.Context, actualURL *url.URL, err error, responseCode int) {
|
||||
lb.invokeOrderGot = append(lb.invokeOrderGot, "BackoffManager.UpdateBackoff")
|
||||
}
|
||||
|
||||
func (lb *withRateLimiterBackoffManagerAndMetrics) Sleep(d time.Duration) {
|
||||
func (lb *withRateLimiterBackoffManagerAndMetrics) SleepWithContext(ctx context.Context, d time.Duration) {
|
||||
lb.invokeOrderGot = append(lb.invokeOrderGot, "BackoffManager.Sleep")
|
||||
lb.sleepsGot = append(lb.sleepsGot, d.String())
|
||||
}
|
||||
@ -3206,7 +3250,6 @@ func testRetryWithRateLimiterBackoffAndMetrics(t *testing.T, key string, doFunc
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
interceptor := &withRateLimiterBackoffManagerAndMetrics{
|
||||
RateLimiter: flowcontrol.NewFakeAlwaysRateLimiter(),
|
||||
NoBackoff: &NoBackoff{},
|
||||
calculateBackoffFn: test.calculateBackoffFn,
|
||||
}
|
||||
|
||||
@ -4066,15 +4109,24 @@ func TestRequestLogging(t *testing.T) {
|
||||
testcases := map[string]struct {
|
||||
v int
|
||||
body any
|
||||
response *http.Response
|
||||
expectedOutput string
|
||||
}{
|
||||
"no-output": {
|
||||
v: 7,
|
||||
body: []byte("ping"),
|
||||
response: &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(strings.NewReader("pong")),
|
||||
},
|
||||
},
|
||||
"output": {
|
||||
v: 8,
|
||||
body: []byte("ping"),
|
||||
response: &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(strings.NewReader("pong")),
|
||||
},
|
||||
expectedOutput: `<location>] "Request Body" logger="TestLogger" body="ping"
|
||||
<location>] "Response Body" logger="TestLogger" body="pong"
|
||||
`,
|
||||
@ -4082,6 +4134,10 @@ func TestRequestLogging(t *testing.T) {
|
||||
"io-reader": {
|
||||
v: 8,
|
||||
body: strings.NewReader("ping"),
|
||||
response: &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(strings.NewReader("pong")),
|
||||
},
|
||||
// Cannot log the request body!
|
||||
expectedOutput: `<location>] "Response Body" logger="TestLogger" body="pong"
|
||||
`,
|
||||
@ -4089,13 +4145,38 @@ func TestRequestLogging(t *testing.T) {
|
||||
"truncate": {
|
||||
v: 8,
|
||||
body: []byte(strings.Repeat("a", 2000)),
|
||||
response: &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(strings.NewReader("pong")),
|
||||
},
|
||||
expectedOutput: fmt.Sprintf(`<location>] "Request Body" logger="TestLogger" body="%s [truncated 976 chars]"
|
||||
<location>] "Response Body" logger="TestLogger" body="pong"
|
||||
`, strings.Repeat("a", 1024)),
|
||||
},
|
||||
"warnings": {
|
||||
v: 8,
|
||||
body: []byte("ping"),
|
||||
response: &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Header: http.Header{
|
||||
"Warning": []string{
|
||||
`299 request-test "warning 1"`,
|
||||
`299 request-test-2 "warning 2"`,
|
||||
`300 request-test-3 "ignore code 300"`,
|
||||
},
|
||||
},
|
||||
Body: io.NopCloser(strings.NewReader("pong")),
|
||||
},
|
||||
expectedOutput: `<location>] "Request Body" logger="TestLogger" body="ping"
|
||||
<location>] "Response Body" logger="TestLogger" body="pong"
|
||||
warnings.go] "Warning: warning 1" logger="TestLogger"
|
||||
warnings.go] "Warning: warning 2" logger="TestLogger"
|
||||
`,
|
||||
},
|
||||
}
|
||||
|
||||
for name, tc := range testcases {
|
||||
//nolint:logcheck // Intentionally testing with plain klog here.
|
||||
t.Run(name, func(t *testing.T) {
|
||||
state := klog.CaptureState()
|
||||
defer state.Restore()
|
||||
@ -4106,12 +4187,10 @@ func TestRequestLogging(t *testing.T) {
|
||||
var fs flag.FlagSet
|
||||
klog.InitFlags(&fs)
|
||||
require.NoError(t, fs.Set("v", fmt.Sprintf("%d", tc.v)), "set verbosity")
|
||||
require.NoError(t, fs.Set("one_output", "true"), "set one_output")
|
||||
|
||||
client := clientForFunc(func(req *http.Request) (*http.Response, error) {
|
||||
return &http.Response{
|
||||
StatusCode: http.StatusOK,
|
||||
Body: io.NopCloser(strings.NewReader("pong")),
|
||||
}, nil
|
||||
return tc.response, nil
|
||||
})
|
||||
|
||||
req := NewRequestWithClient(nil, "", defaultContentConfig(), client).
|
||||
@ -4128,11 +4207,49 @@ func TestRequestLogging(t *testing.T) {
|
||||
// Compare log output:
|
||||
// - strip date/time/pid from each line (fixed length header)
|
||||
// - replace <location> with the actual call location
|
||||
// - strip line number from warnings.go (might change)
|
||||
state.Restore()
|
||||
expectedOutput := strings.ReplaceAll(tc.expectedOutput, "<location>", fmt.Sprintf("%s:%d", path.Base(file), line+1))
|
||||
actualOutput := buffer.String()
|
||||
actualOutput = regexp.MustCompile(`(?m)^.{30}`).ReplaceAllString(actualOutput, "")
|
||||
actualOutput = regexp.MustCompile(`(?m)^warnings\.go:\d+`).ReplaceAllString(actualOutput, "warnings.go")
|
||||
assert.Equal(t, expectedOutput, actualOutput)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRequestWarningHandler(t *testing.T) {
|
||||
t.Run("no-context", func(t *testing.T) {
|
||||
request := &Request{}
|
||||
handler := &fakeWarningHandlerWithLogging{}
|
||||
//nolint:logcheck
|
||||
assert.Equal(t, request, request.WarningHandler(handler))
|
||||
assert.NotNil(t, request.warningHandler)
|
||||
request.warningHandler.HandleWarningHeaderWithContext(context.Background(), 0, "", "message")
|
||||
assert.Equal(t, []string{"message"}, handler.messages)
|
||||
})
|
||||
|
||||
t.Run("with-context", func(t *testing.T) {
|
||||
request := &Request{}
|
||||
handler := &fakeWarningHandlerWithContext{}
|
||||
assert.Equal(t, request, request.WarningHandlerWithContext(handler))
|
||||
assert.Equal(t, request.warningHandler, handler)
|
||||
})
|
||||
|
||||
t.Run("nil-no-context", func(t *testing.T) {
|
||||
request := &Request{
|
||||
warningHandler: &fakeWarningHandlerWithContext{},
|
||||
}
|
||||
//nolint:logcheck
|
||||
assert.Equal(t, request, request.WarningHandler(nil))
|
||||
assert.Nil(t, request.warningHandler)
|
||||
})
|
||||
|
||||
t.Run("nil-with-context", func(t *testing.T) {
|
||||
request := &Request{
|
||||
warningHandler: &fakeWarningHandlerWithContext{},
|
||||
}
|
||||
assert.Equal(t, request, request.WarningHandlerWithContext(nil))
|
||||
assert.Nil(t, request.warningHandler)
|
||||
})
|
||||
}
|
||||
|
@ -17,6 +17,8 @@ limitations under the License.
package rest

import (
	"context"
	"fmt"
	"net/url"
	"time"

@ -32,12 +34,24 @@ import (
var serverIsOverloadedSet = sets.NewInt(429)
var maxResponseCode = 499

//go:generate mockery

// Deprecated: BackoffManager.Sleep ignores the caller's context. Use BackoffManagerWithContext instead.
type BackoffManager interface {
	UpdateBackoff(actualUrl *url.URL, err error, responseCode int)
	CalculateBackoff(actualUrl *url.URL) time.Duration
	UpdateBackoff(actualURL *url.URL, err error, responseCode int)
	CalculateBackoff(actualURL *url.URL) time.Duration
	Sleep(d time.Duration)
}

type BackoffManagerWithContext interface {
	UpdateBackoffWithContext(ctx context.Context, actualURL *url.URL, err error, responseCode int)
	CalculateBackoffWithContext(ctx context.Context, actualURL *url.URL) time.Duration
	SleepWithContext(ctx context.Context, d time.Duration)
}

var _ BackoffManager = &URLBackoff{}
var _ BackoffManagerWithContext = &URLBackoff{}

// URLBackoff struct implements the semantics on top of Backoff which
// we need for URL specific exponential backoff.
type URLBackoff struct {
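
Not part of this commit: a minimal caller-side sketch of the new context-aware interface. The package name, the helper sleepBeforeAttempt, and the choice of NoBackoff are illustrative assumptions.

package example

import (
	"context"
	"net/url"
	"time"

	"k8s.io/client-go/rest"
)

// sleepBeforeAttempt waits for the calculated backoff but returns early once
// ctx is canceled, which the deprecated BackoffManager.Sleep cannot do.
func sleepBeforeAttempt(ctx context.Context, bm rest.BackoffManagerWithContext, u *url.URL) {
	bm.SleepWithContext(ctx, bm.CalculateBackoffWithContext(ctx, u))
}

func demo() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	u, _ := url.Parse("https://example.invalid")
	sleepBeforeAttempt(ctx, &rest.NoBackoff{}, u)
}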
@ -49,11 +63,19 @@ type URLBackoff struct {
type NoBackoff struct {
}

func (n *NoBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
func (n *NoBackoff) UpdateBackoff(actualURL *url.URL, err error, responseCode int) {
	// do nothing.
}

func (n *NoBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration {
func (n *NoBackoff) UpdateBackoffWithContext(ctx context.Context, actualURL *url.URL, err error, responseCode int) {
	// do nothing.
}

func (n *NoBackoff) CalculateBackoff(actualURL *url.URL) time.Duration {
	return 0 * time.Second
}

func (n *NoBackoff) CalculateBackoffWithContext(ctx context.Context, actualURL *url.URL) time.Duration {
	return 0 * time.Second
}

@ -61,10 +83,21 @@ func (n *NoBackoff) Sleep(d time.Duration) {
	time.Sleep(d)
}

func (n *NoBackoff) SleepWithContext(ctx context.Context, d time.Duration) {
	if d == 0 {
		return
	}
	t := time.NewTimer(d)
	defer t.Stop()
	select {
	case <-ctx.Done():
	case <-t.C:
	}
}

// Disable makes the backoff trivial, i.e., sets it to zero. This might be used
// by tests which want to run 1000s of mock requests without slowing down.
func (b *URLBackoff) Disable() {
	klog.V(4).Infof("Disabling backoff strategy")
	b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second)
}

@ -76,32 +109,74 @@ func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string {
	// in the future.
	host, err := url.Parse(rawurl.String())
	if err != nil {
		klog.V(4).Infof("Error extracting url: %v", rawurl)
		panic("bad url!")
		panic(fmt.Sprintf("Error parsing bad URL %q: %v", rawurl, err))
	}
	return host.Host
}

// UpdateBackoff updates backoff metadata
func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) {
func (b *URLBackoff) UpdateBackoff(actualURL *url.URL, err error, responseCode int) {
	b.UpdateBackoffWithContext(context.Background(), actualURL, err, responseCode)
}

// UpdateBackoffWithContext updates backoff metadata
func (b *URLBackoff) UpdateBackoffWithContext(ctx context.Context, actualURL *url.URL, err error, responseCode int) {
	// range for retry counts that we store is [0,13]
	if responseCode > maxResponseCode || serverIsOverloadedSet.Has(responseCode) {
		b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now())
		b.Backoff.Next(b.baseUrlKey(actualURL), b.Backoff.Clock.Now())
		return
	} else if responseCode >= 300 || err != nil {
		klog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err)
		klog.FromContext(ctx).V(4).Info("Client is returning errors", "code", responseCode, "err", err)
	}

	//If we got this far, there is no backoff required for this URL anymore.
	b.Backoff.Reset(b.baseUrlKey(actualUrl))
	b.Backoff.Reset(b.baseUrlKey(actualURL))
}

// CalculateBackoff takes a url and backs off exponentially,
// based on its knowledge of existing failures.
func (b *URLBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration {
	return b.Backoff.Get(b.baseUrlKey(actualUrl))
func (b *URLBackoff) CalculateBackoff(actualURL *url.URL) time.Duration {
	return b.Backoff.Get(b.baseUrlKey(actualURL))
}

// CalculateBackoffWithContext takes a url and backs off exponentially,
// based on its knowledge of existing failures.
func (b *URLBackoff) CalculateBackoffWithContext(ctx context.Context, actualURL *url.URL) time.Duration {
	return b.Backoff.Get(b.baseUrlKey(actualURL))
}

func (b *URLBackoff) Sleep(d time.Duration) {
	b.Backoff.Clock.Sleep(d)
}

func (b *URLBackoff) SleepWithContext(ctx context.Context, d time.Duration) {
	if d == 0 {
		return
	}
	t := b.Backoff.Clock.NewTimer(d)
	defer t.Stop()
	select {
	case <-ctx.Done():
	case <-t.C():
	}
}

// backoffManagerNopContext wraps a BackoffManager and adds the *WithContext methods.
type backoffManagerNopContext struct {
	BackoffManager
}

var _ BackoffManager = &backoffManagerNopContext{}
var _ BackoffManagerWithContext = &backoffManagerNopContext{}

func (b *backoffManagerNopContext) UpdateBackoffWithContext(ctx context.Context, actualURL *url.URL, err error, responseCode int) {
	b.UpdateBackoff(actualURL, err, responseCode)
}

func (b *backoffManagerNopContext) CalculateBackoffWithContext(ctx context.Context, actualURL *url.URL) time.Duration {
	return b.CalculateBackoff(actualURL)
}

func (b *backoffManagerNopContext) SleepWithContext(ctx context.Context, d time.Duration) {
	b.Sleep(d)
}
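
Also not part of this commit: backoffManagerNopContext is unexported, so code outside package rest that still holds a legacy BackoffManager needs its own small adapter to satisfy BackoffManagerWithContext. A sketch, with the hypothetical name legacyAdapter:

package example

import (
	"context"
	"net/url"
	"time"

	"k8s.io/client-go/rest"
)

// legacyAdapter forwards the *WithContext calls to a context-unaware manager.
type legacyAdapter struct {
	rest.BackoffManager
}

func (a legacyAdapter) UpdateBackoffWithContext(_ context.Context, u *url.URL, err error, code int) {
	a.UpdateBackoff(u, err, code)
}

func (a legacyAdapter) CalculateBackoffWithContext(_ context.Context, u *url.URL) time.Duration {
	return a.CalculateBackoff(u)
}

func (a legacyAdapter) SleepWithContext(_ context.Context, d time.Duration) {
	a.Sleep(d) // the wrapped manager still ignores cancellation
}

var _ rest.BackoffManagerWithContext = legacyAdapter{}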
@ -17,10 +17,14 @@ limitations under the License.
package rest

import (
	"context"
	"errors"
	"net/url"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"k8s.io/client-go/util/flowcontrol"
)

@ -77,3 +81,38 @@ func TestURLBackoffFunctionality(t *testing.T) {
		t.Errorf("The final return code %v should have resulted in a backoff ! ", returnCodes[7])
	}
}

func TestBackoffManagerNopContext(t *testing.T) {
	mock := NewMockBackoffManager(t)

	sleepDuration := 42 * time.Second
	mock.On("Sleep", sleepDuration).Return()
	url := &url.URL{}
	mock.On("CalculateBackoff", url).Return(time.Second)
	err := errors.New("fake error")
	responseCode := 404
	mock.On("UpdateBackoff", url, err, responseCode).Return()

	ctx := context.Background()
	wrapper := backoffManagerNopContext{BackoffManager: mock}
	wrapper.SleepWithContext(ctx, sleepDuration)
	wrapper.CalculateBackoffWithContext(ctx, url)
	wrapper.UpdateBackoffWithContext(ctx, url, err, responseCode)
}

func TestNoBackoff(t *testing.T) {
	var backoff NoBackoff
	assert.Equal(t, 0*time.Second, backoff.CalculateBackoff(nil))
	assert.Equal(t, 0*time.Second, backoff.CalculateBackoffWithContext(context.Background(), nil))

	start := time.Now()
	backoff.Sleep(0 * time.Second)
	assert.WithinDuration(t, start, time.Now(), time.Minute /* pretty generous, but we don't want to flake */, time.Since(start), "backoff.Sleep")

	// Cancel right away to prevent sleeping.
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	start = time.Now()
	backoff.SleepWithContext(ctx, 10*time.Minute)
	assert.WithinDuration(t, start, time.Now(), time.Minute /* pretty generous, but we don't want to flake */, time.Since(start), "backoff.SleepWithContext")
}

@ -17,6 +17,7 @@ limitations under the License.
package rest

import (
	"context"
	"fmt"
	"io"
	"net/http"
@ -33,8 +34,15 @@ type WarningHandler interface {
	HandleWarningHeader(code int, agent string, text string)
}

// WarningHandlerWithContext is an interface for handling warning headers with
// support for contextual logging.
type WarningHandlerWithContext interface {
	// HandleWarningHeaderWithContext is called with the warn code, agent, and text when a warning header is encountered.
	HandleWarningHeaderWithContext(ctx context.Context, code int, agent string, text string)
}

var (
	defaultWarningHandler WarningHandler = WarningLogger{}
	defaultWarningHandler WarningHandlerWithContext = WarningLogger{}
	defaultWarningHandlerLock sync.RWMutex
)

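
A sketch (not from this commit) of a custom WarningHandlerWithContext; the type contextualWarningCounter and its counter field are made up for illustration.

package example

import (
	"context"
	"sync/atomic"

	"k8s.io/client-go/rest"
	"k8s.io/klog/v2"
)

// contextualWarningCounter counts warnings and logs them through the logger
// carried by the request context.
type contextualWarningCounter struct {
	count atomic.Int64
}

func (h *contextualWarningCounter) HandleWarningHeaderWithContext(ctx context.Context, code int, agent string, text string) {
	if code != 299 || text == "" {
		return
	}
	h.count.Add(1)
	klog.FromContext(ctx).Info("API warning", "agent", agent, "message", text)
}

var _ rest.WarningHandlerWithContext = &contextualWarningCounter{}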
@ -43,33 +51,68 @@ var (
// - NoWarnings suppresses warnings.
// - WarningLogger logs warnings.
// - NewWarningWriter() outputs warnings to the provided writer.
//
// logcheck:context // SetDefaultWarningHandlerWithContext should be used instead of SetDefaultWarningHandler in code which supports contextual logging.
func SetDefaultWarningHandler(l WarningHandler) {
	if l == nil {
		SetDefaultWarningHandlerWithContext(nil)
		return
	}
	SetDefaultWarningHandlerWithContext(warningLoggerNopContext{l: l})
}

// SetDefaultWarningHandlerWithContext is a variant of [SetDefaultWarningHandler] which supports contextual logging.
func SetDefaultWarningHandlerWithContext(l WarningHandlerWithContext) {
	defaultWarningHandlerLock.Lock()
	defer defaultWarningHandlerLock.Unlock()
	defaultWarningHandler = l
}
func getDefaultWarningHandler() WarningHandler {

func getDefaultWarningHandler() WarningHandlerWithContext {
	defaultWarningHandlerLock.RLock()
	defer defaultWarningHandlerLock.RUnlock()
	l := defaultWarningHandler
	return l
}

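
Usage sketch, not part of this commit: switching a process over to the context-aware default setter. The package name and the placement in init are assumptions.

package example

import "k8s.io/client-go/rest"

func init() {
	// Suppress client-side warning output for this process; NoWarnings now
	// implements both handler interfaces, so either setter would accept it.
	rest.SetDefaultWarningHandlerWithContext(rest.NoWarnings{})
}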
// NoWarnings is an implementation of WarningHandler that suppresses warnings.
type warningLoggerNopContext struct {
	l WarningHandler
}

func (w warningLoggerNopContext) HandleWarningHeaderWithContext(_ context.Context, code int, agent string, message string) {
	w.l.HandleWarningHeader(code, agent, message)
}

// NoWarnings is an implementation of [WarningHandler] and [WarningHandlerWithContext] that suppresses warnings.
type NoWarnings struct{}

func (NoWarnings) HandleWarningHeader(code int, agent string, message string) {}
func (NoWarnings) HandleWarningHeaderWithContext(ctx context.Context, code int, agent string, message string) {
}

// WarningLogger is an implementation of WarningHandler that logs code 299 warnings
var _ WarningHandler = NoWarnings{}
var _ WarningHandlerWithContext = NoWarnings{}

// WarningLogger is an implementation of [WarningHandler] and [WarningHandlerWithContext] that logs code 299 warnings
type WarningLogger struct{}

func (WarningLogger) HandleWarningHeader(code int, agent string, message string) {
	if code != 299 || len(message) == 0 {
		return
	}
	klog.Warning(message)
	klog.Background().Info("Warning: " + message)
}

func (WarningLogger) HandleWarningHeaderWithContext(ctx context.Context, code int, agent string, message string) {
	if code != 299 || len(message) == 0 {
		return
	}
	klog.FromContext(ctx).Info("Warning: " + message)
}

var _ WarningHandler = WarningLogger{}
var _ WarningHandlerWithContext = WarningLogger{}

type warningWriter struct {
	// out is the writer to output warnings to
	out io.Writer
@ -134,14 +177,14 @@ func (w *warningWriter) WarningCount() int {
	return w.writtenCount
}

func handleWarnings(headers http.Header, handler WarningHandler) []net.WarningHeader {
func handleWarnings(ctx context.Context, headers http.Header, handler WarningHandlerWithContext) []net.WarningHeader {
	if handler == nil {
		handler = getDefaultWarningHandler()
	}

	warnings, _ := net.ParseWarningHeaders(headers["Warning"])
	for _, warning := range warnings {
		handler.HandleWarningHeader(warning.Code, warning.Agent, warning.Text)
		handler.HandleWarningHeaderWithContext(ctx, warning.Code, warning.Agent, warning.Text)
	}
	return warnings
}
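
A per-request override sketch (not part of this commit). It assumes a standard clientset and relies on the Request.WarningHandlerWithContext setter exercised by the request tests earlier in this diff; the function listNodesQuietly is hypothetical.

package example

import (
	"context"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// listNodesQuietly drops warnings for this one call only; every other request
// keeps whatever default handler is configured.
func listNodesQuietly(ctx context.Context, cs *kubernetes.Clientset) error {
	return cs.CoreV1().RESTClient().
		Get().
		Resource("nodes").
		WarningHandlerWithContext(rest.NoWarnings{}).
		Do(ctx).
		Error()
}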
57
staging/src/k8s.io/client-go/rest/warnings_test.go
Normal file
@ -0,0 +1,57 @@
|
||||
/*
|
||||
Copyright 2024 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package rest
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDefaultWarningHandler(t *testing.T) {
|
||||
t.Run("default", func(t *testing.T) {
|
||||
assert.IsType(t, WarningHandlerWithContext(WarningLogger{}), getDefaultWarningHandler())
|
||||
})
|
||||
|
||||
deferRestore := func(t *testing.T) {
|
||||
handler := getDefaultWarningHandler()
|
||||
t.Cleanup(func() {
|
||||
SetDefaultWarningHandlerWithContext(handler)
|
||||
})
|
||||
}
|
||||
|
||||
t.Run("no-context", func(t *testing.T) {
|
||||
deferRestore(t)
|
||||
handler := &fakeWarningHandlerWithLogging{}
|
||||
//nolint:logcheck
|
||||
SetDefaultWarningHandler(handler)
|
||||
getDefaultWarningHandler().HandleWarningHeaderWithContext(context.Background(), 0, "", "message")
|
||||
assert.Equal(t, []string{"message"}, handler.messages)
|
||||
SetDefaultWarningHandler(nil)
|
||||
assert.Nil(t, getDefaultWarningHandler())
|
||||
})
|
||||
|
||||
t.Run("with-context", func(t *testing.T) {
|
||||
deferRestore(t)
|
||||
handler := &fakeWarningHandlerWithContext{}
|
||||
SetDefaultWarningHandlerWithContext(handler)
|
||||
assert.Equal(t, handler, getDefaultWarningHandler())
|
||||
SetDefaultWarningHandlerWithContext(nil)
|
||||
assert.Nil(t, getDefaultWarningHandler())
|
||||
})
|
||||
}
|
@ -209,18 +209,18 @@ func (r *withRetry) Before(ctx context.Context, request *Request) error {
		// we do a backoff sleep before the first attempt is made,
		// (preserving current behavior).
		if request.backoff != nil {
			request.backoff.Sleep(request.backoff.CalculateBackoff(url))
			request.backoff.SleepWithContext(ctx, request.backoff.CalculateBackoffWithContext(ctx, url))
		}
		return nil
	}

	// if we are here, we have made attempt(s) at least once before.
	if request.backoff != nil {
		delay := request.backoff.CalculateBackoff(url)
		delay := request.backoff.CalculateBackoffWithContext(ctx, url)
		if r.retryAfter.Wait > delay {
			delay = r.retryAfter.Wait
		}
		request.backoff.Sleep(delay)
		request.backoff.SleepWithContext(ctx, delay)
	}

	// We are retrying the request that we already send to
@ -231,7 +231,7 @@ func (r *withRetry) Before(ctx context.Context, request *Request) error {
		return err
	}

	klog.V(4).Infof("Got a Retry-After %s response for attempt %d to %v", r.retryAfter.Wait, r.retryAfter.Attempt, request.URL().String())
	klog.FromContext(ctx).V(4).Info("Got a Retry-After response", "delay", r.retryAfter.Wait, "attempt", r.retryAfter.Attempt, "url", request.URL())
	return nil
}

@ -258,9 +258,9 @@ func (r *withRetry) After(ctx context.Context, request *Request, resp *http.Resp

	if request.c.base != nil {
		if err != nil {
			request.backoff.UpdateBackoff(request.URL(), err, 0)
			request.backoff.UpdateBackoffWithContext(ctx, request.URL(), err, 0)
		} else {
			request.backoff.UpdateBackoff(request.URL(), err, resp.StatusCode)
			request.backoff.UpdateBackoffWithContext(ctx, request.URL(), err, resp.StatusCode)
		}
	}
}

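Sketch (not part of this commit): because Before and After now call the *WithContext variants, a deadline on the request context also bounds the backoff waits between retry attempts; the helper getWithDeadline is hypothetical.

package example

import (
	"context"
	"time"

	"k8s.io/client-go/rest"
)

func getWithDeadline(ctx context.Context, c *rest.RESTClient) error {
	ctx, cancel := context.WithTimeout(ctx, 5*time.Second)
	defer cancel()
	// If the server keeps answering 429, the retry backoff now ends as soon as
	// ctx does instead of sleeping past the deadline.
	return c.Get().AbsPath("/healthz").Do(ctx).Error()
}
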
1
staging/src/k8s.io/cloud-provider/go.sum
generated
@ -168,6 +168,7 @@ github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8w
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
|
@ -70,6 +70,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
|
1
staging/src/k8s.io/component-base/go.sum
generated
@ -131,6 +131,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
|
1
staging/src/k8s.io/component-helpers/go.sum
generated
@ -79,6 +79,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
|
1
staging/src/k8s.io/controller-manager/go.sum
generated
@ -164,6 +164,7 @@ github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8w
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
|
@ -139,6 +139,7 @@ github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8w
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
|
1
staging/src/k8s.io/endpointslice/go.sum
generated
@ -109,6 +109,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
|
1
staging/src/k8s.io/kube-aggregator/go.sum
generated
@ -167,6 +167,7 @@ github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8w
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
|
1
staging/src/k8s.io/kubelet/go.sum
generated
@ -131,6 +131,7 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
|
1
staging/src/k8s.io/metrics/go.sum
generated
@ -79,6 +79,7 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
|
1
staging/src/k8s.io/pod-security-admission/go.sum
generated
@ -164,6 +164,7 @@ github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8w
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
|
1
staging/src/k8s.io/sample-apiserver/go.sum
generated
@ -164,6 +164,7 @@ github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8w
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
|
2
staging/src/k8s.io/sample-controller/go.sum
generated
@ -79,6 +79,8 @@ github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
|
||||
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
|