diff --git a/cmd/kube-scheduler/app/options/options.go b/cmd/kube-scheduler/app/options/options.go
index 534ca693df8..338e5de7501 100644
--- a/cmd/kube-scheduler/app/options/options.go
+++ b/cmd/kube-scheduler/app/options/options.go
@@ -335,7 +335,7 @@ func makeLeaderElectionConfig(config componentbaseconfig.LeaderElectionConfigura
 // TODO remove masterOverride when CLI flags are removed.
 func createKubeConfig(config componentbaseconfig.ClientConnectionConfiguration, masterOverride string) (*restclient.Config, error) {
 	if len(config.Kubeconfig) == 0 && len(masterOverride) == 0 {
-		klog.Warningf("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
+		klog.Warning("Neither --kubeconfig nor --master was specified. Using default API client. This might not work.")
 	}
 
 	// This creates a client, first loading any specified kubeconfig
diff --git a/pkg/controller/volume/attachdetach/reconciler/reconciler.go b/pkg/controller/volume/attachdetach/reconciler/reconciler.go
index 247b49ca640..c3273013aee 100644
--- a/pkg/controller/volume/attachdetach/reconciler/reconciler.go
+++ b/pkg/controller/volume/attachdetach/reconciler/reconciler.go
@@ -332,7 +332,7 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach
 		// Log detailed message to system admin
 		nodeList := strings.Join(otherNodesStr, ", ")
 		detailedMsg := volumeToAttach.GenerateMsgDetailed("Multi-Attach error", fmt.Sprintf("Volume is already exclusively attached to node %s and can't be attached to another", nodeList))
-		klog.Warningf(detailedMsg)
+		klog.Warning(detailedMsg)
 		return
 	}
 
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index a930b210d49..d599a01acf4 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -565,7 +565,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 	klet.configMapManager = configMapManager
 
 	if klet.experimentalHostUserNamespaceDefaulting {
-		klog.Infof("Experimental host user namespace defaulting is enabled.")
+		klog.Info("Experimental host user namespace defaulting is enabled.")
 	}
 
 	machineInfo, err := klet.cadvisor.MachineInfo()
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_discovery_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_discovery_controller.go
index 090274c6330..aad0482633d 100644
--- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_discovery_controller.go
+++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/customresource_discovery_controller.go
@@ -204,9 +204,9 @@ func sortGroupDiscoveryByKubeAwareVersion(gd []metav1.GroupVersionForDiscovery)
 func (c *DiscoveryController) Run(stopCh <-chan struct{}, synchedCh chan<- struct{}) {
 	defer utilruntime.HandleCrash()
 	defer c.queue.ShutDown()
-	defer klog.Infof("Shutting down DiscoveryController")
+	defer klog.Info("Shutting down DiscoveryController")
 
-	klog.Infof("Starting DiscoveryController")
+	klog.Info("Starting DiscoveryController")
 
 	if !cache.WaitForCacheSync(stopCh, c.crdsSynced) {
 		utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go
index 5899a5625a4..c2d7d55718f 100644
--- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go
+++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/establish/establishing_controller.go
@@ -73,8 +73,8 @@ func (ec *EstablishingController) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer ec.queue.ShutDown()
 
-	klog.Infof("Starting EstablishingController")
-	defer klog.Infof("Shutting down EstablishingController")
+	klog.Info("Starting EstablishingController")
+	defer klog.Info("Shutting down EstablishingController")
 
 	if !cache.WaitForCacheSync(stopCh, ec.crdSynced) {
 		return
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go
index 4ccd33cd336..d559dca45d8 100644
--- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go
+++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/finalizer/crd_finalizer.go
@@ -263,8 +263,8 @@ func (c *CRDFinalizer) Run(workers int, stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer c.queue.ShutDown()
 
-	klog.Infof("Starting CRDFinalizer")
-	defer klog.Infof("Shutting down CRDFinalizer")
+	klog.Info("Starting CRDFinalizer")
+	defer klog.Info("Shutting down CRDFinalizer")
 
 	if !cache.WaitForCacheSync(stopCh, c.crdSynced) {
 		return
diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go
index 170304ead0a..d4165a09457 100644
--- a/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go
+++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/controller/status/naming_controller.go
@@ -288,8 +288,8 @@ func (c *NamingConditionController) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer c.queue.ShutDown()
 
-	klog.Infof("Starting NamingConditionController")
-	defer klog.Infof("Shutting down NamingConditionController")
+	klog.Info("Starting NamingConditionController")
+	defer klog.Info("Shutting down NamingConditionController")
 
 	if !cache.WaitForCacheSync(stopCh, c.crdSynced) {
 		return
diff --git a/staging/src/k8s.io/apimachinery/pkg/util/net/http.go b/staging/src/k8s.io/apimachinery/pkg/util/net/http.go
index 567a294e26c..ce69b8054b5 100644
--- a/staging/src/k8s.io/apimachinery/pkg/util/net/http.go
+++ b/staging/src/k8s.io/apimachinery/pkg/util/net/http.go
@@ -131,7 +131,7 @@ func SetTransportDefaults(t *http.Transport) *http.Transport {
 	t = SetOldTransportDefaults(t)
 	// Allow clients to disable http2 if needed.
 	if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 {
-		klog.Infof("HTTP2 has been explicitly disabled")
+		klog.Info("HTTP2 has been explicitly disabled")
 	} else if allowsHTTP2(t) {
 		if err := configureHTTP2Transport(t); err != nil {
 			klog.Warningf("Transport failed http2 configuration: %v", err)
diff --git a/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial.go b/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial.go
index 7b0704f596f..5042981c5dc 100644
--- a/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial.go
+++ b/staging/src/k8s.io/apimachinery/pkg/util/proxy/dial.go
@@ -69,7 +69,7 @@ func dialURL(ctx context.Context, url *url.URL, transport http.RoundTripper) (ne
 	}
 	if tlsConfig == nil {
 		// tls.Client requires non-nil config
-		klog.Warningf("using custom dialer with no TLSClientConfig. Defaulting to InsecureSkipVerify")
+		klog.Warning("using custom dialer with no TLSClientConfig. Defaulting to InsecureSkipVerify")
 		// tls.Handshake() requires ServerName or InsecureSkipVerify
 		tlsConfig = &tls.Config{
 			InsecureSkipVerify: true,
diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go
index e88e7ad28d6..43dcfa952e1 100644
--- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go
+++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authentication.go
@@ -37,7 +37,7 @@ import (
 // is invoked to serve the request.
 func WithAuthentication(handler http.Handler, auth authenticator.Request, failed http.Handler, apiAuds authenticator.Audiences) http.Handler {
 	if auth == nil {
-		klog.Warningf("Authentication is disabled")
+		klog.Warning("Authentication is disabled")
 		return handler
 	}
 	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
diff --git a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authorization.go b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authorization.go
index 8d115ff0910..5f0ce730b78 100644
--- a/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authorization.go
+++ b/staging/src/k8s.io/apiserver/pkg/endpoints/filters/authorization.go
@@ -44,7 +44,7 @@ const (
 // WithAuthorizationCheck passes all authorized requests on to handler, and returns a forbidden error otherwise.
 func WithAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime.NegotiatedSerializer) http.Handler {
 	if a == nil {
-		klog.Warningf("Authorization is disabled")
+		klog.Warning("Authorization is disabled")
 		return handler
 	}
 	return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go b/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go
index 75e9c8dffd1..46130aad4e9 100644
--- a/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go
+++ b/staging/src/k8s.io/apiserver/pkg/server/options/authentication.go
@@ -334,8 +334,8 @@ func (s *DelegatingAuthenticationOptions) ApplyTo(authenticationInfo *server.Aut
 		if err != nil {
 			if s.TolerateInClusterLookupFailure {
 				klog.Warningf("Error looking up in-cluster authentication configuration: %v", err)
-				klog.Warningf("Continuing without authentication configuration. This may treat all requests as anonymous.")
-				klog.Warningf("To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false")
+				klog.Warning("Continuing without authentication configuration. This may treat all requests as anonymous.")
+				klog.Warning("To require authentication configuration lookup to succeed, set --authentication-tolerate-lookup-failure=false")
 			} else {
 				return fmt.Errorf("unable to load configmap based request-header-client-ca-file: %v", err)
 			}
diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go b/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go
index 6b4129639fa..bfa02bc3f47 100644
--- a/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go
+++ b/staging/src/k8s.io/apiserver/pkg/server/options/authorization.go
@@ -173,7 +173,7 @@ func (s *DelegatingAuthorizationOptions) toAuthorizer(client kubernetes.Interfac
 	}
 
 	if client == nil {
-		klog.Warningf("No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.")
+		klog.Warning("No authorization-kubeconfig provided, so SubjectAccessReview of authorization tokens won't work.")
 	} else {
 		cfg := authorizerfactory.DelegatingAuthorizerConfig{
 			SubjectAccessReviewClient: client.AuthorizationV1().SubjectAccessReviews(),
diff --git a/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go b/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go
index a50ce5e3979..0a905490c90 100644
--- a/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go
+++ b/staging/src/k8s.io/client-go/tools/clientcmd/client_config.go
@@ -612,7 +612,7 @@ func (config *inClusterClientConfig) Possible() bool {
 // to the default config.
 func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) {
 	if kubeconfigPath == "" && masterUrl == "" {
-		klog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.")
+		klog.Warning("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.")
 		kubeconfig, err := restclient.InClusterConfig()
 		if err == nil {
 			return kubeconfig, nil
diff --git a/staging/src/k8s.io/client-go/transport/round_trippers.go b/staging/src/k8s.io/client-go/transport/round_trippers.go
index 056bc023c55..56df8ead12c 100644
--- a/staging/src/k8s.io/client-go/transport/round_trippers.go
+++ b/staging/src/k8s.io/client-go/transport/round_trippers.go
@@ -426,7 +426,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
 	}
 
 	if rt.levels[debugRequestHeaders] {
-		klog.Infof("Request Headers:")
+		klog.Info("Request Headers:")
 		for key, values := range reqInfo.RequestHeaders {
 			for _, value := range values {
 				value = maskValue(key, value)
@@ -448,7 +448,7 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
 		klog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
 	}
 	if rt.levels[debugResponseHeaders] {
-		klog.Infof("Response Headers:")
+		klog.Info("Response Headers:")
 		for key, values := range reqInfo.ResponseHeaders {
 			for _, value := range values {
 				klog.Infof(" %s: %s", key, value)
diff --git a/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go b/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go
index 01b140072de..04b97690de8 100644
--- a/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go
+++ b/staging/src/k8s.io/code-generator/cmd/conversion-gen/generators/conversion.go
@@ -133,7 +133,7 @@ type conversionFuncMap map[conversionPair]*types.Type
 // Returns all manually-defined conversion functions in the package.
 func getManualConversionFunctions(context *generator.Context, pkg *types.Package, manualMap conversionFuncMap) {
 	if pkg == nil {
-		klog.Warningf("Skipping nil package passed to getManualConversionFunctions")
+		klog.Warning("Skipping nil package passed to getManualConversionFunctions")
 		return
 	}
 	klog.V(5).Infof("Scanning for conversion functions in %v", pkg.Name)
@@ -641,7 +641,7 @@ func (g *genConversion) Init(c *generator.Context, w io.Writer) error {
 	if klog.V(5).Enabled() {
 		if m, ok := g.useUnsafe.(equalMemoryTypes); ok {
 			var result []string
-			klog.Infof("All objects without identical memory layout:")
+			klog.Info("All objects without identical memory layout:")
 			for k, v := range m {
 				if v {
 					continue
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go
index 506748954cc..52df3cb25fa 100644
--- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go
+++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiservice_controller.go
@@ -94,8 +94,8 @@ func (c *APIServiceRegistrationController) Run(stopCh <-chan struct{}, handlerSy
 	defer utilruntime.HandleCrash()
 	defer c.queue.ShutDown()
 
-	klog.Infof("Starting APIServiceRegistrationController")
-	defer klog.Infof("Shutting down APIServiceRegistrationController")
+	klog.Info("Starting APIServiceRegistrationController")
+	defer klog.Info("Shutting down APIServiceRegistrationController")
 
 	if !controllers.WaitForCacheSync("APIServiceRegistrationController", stopCh, c.apiServiceSynced) {
 		return
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go
index 5704656b645..6efad8da9ae 100644
--- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go
+++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister/autoregister_controller.go
@@ -138,8 +138,8 @@ func (c *autoRegisterController) Run(threadiness int, stopCh <-chan struct{}) {
 	// make sure the work queue is shutdown which will trigger workers to end
 	defer c.queue.ShutDown()
 
-	klog.Infof("Starting autoregister controller")
-	defer klog.Infof("Shutting down autoregister controller")
+	klog.Info("Starting autoregister controller")
+	defer klog.Info("Shutting down autoregister controller")
 
 	// wait for your secondary caches to fill before starting your work
 	if !controllers.WaitForCacheSync("autoregister", stopCh, c.apiServiceSynced) {
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go
index e61a2735862..98e6ecc6dad 100644
--- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go
+++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapi/controller.go
@@ -80,8 +80,8 @@ func (c *AggregationController) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer c.queue.ShutDown()
 
-	klog.Infof("Starting OpenAPI AggregationController")
-	defer klog.Infof("Shutting down OpenAPI AggregationController")
+	klog.Info("Starting OpenAPI AggregationController")
+	defer klog.Info("Shutting down OpenAPI AggregationController")
 
 	go wait.Until(c.runWorker, time.Second, stopCh)
 
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go
index 5d91c85fa66..2b112b23dfb 100644
--- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go
+++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/status/available_controller.go
@@ -472,8 +472,8 @@ func (c *AvailableConditionController) Run(threadiness int, stopCh <-chan struct
 	defer utilruntime.HandleCrash()
 	defer c.queue.ShutDown()
 
-	klog.Infof("Starting AvailableConditionController")
-	defer klog.Infof("Shutting down AvailableConditionController")
+	klog.Info("Starting AvailableConditionController")
+	defer klog.Info("Shutting down AvailableConditionController")
 
 	if !controllers.WaitForCacheSync("AvailableConditionController", stopCh, c.apiServiceSynced, c.servicesSynced, c.endpointsSynced) {
 		return
diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances.go
index cf69332d251..027c45dddce 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_instances.go
@@ -407,7 +407,7 @@ func (g *Cloud) AddSSHKeyToAllInstances(ctx context.Context, user string, keyDat
 // GetAllCurrentZones returns all the zones in which k8s nodes are currently running
 func (g *Cloud) GetAllCurrentZones() (sets.String, error) {
 	if g.nodeInformerSynced == nil {
-		klog.Warningf("Cloud object does not have informers set, should only happen in E2E binary.")
+		klog.Warning("Cloud object does not have informers set, should only happen in E2E binary.")
 		return g.GetAllZonesFromCloudProvider()
 	}
 	g.nodeZonesLock.Lock()
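Every hunk above applies the same pattern: klog's printf-style Infof/Warningf is swapped for the plain Info/Warning variants wherever the message carries no formatting directives or, as with detailedMsg in the reconciler hunk, is a non-constant value that should not be interpreted as a format string. A minimal standalone sketch of the difference, assuming klog v2's import path; the sample message is hypothetical and not taken from the diff:

```go
package main

import "k8s.io/klog/v2"

func main() {
	// A non-constant message that happens to contain a '%' rune, as a
	// generated string like detailedMsg can.
	msg := "volume is 95% full"

	// Passed as the format string, fmt treats "% f" as a verb with no
	// operand and logs roughly "volume is 95%!f(MISSING)ull"; such calls
	// also tend to trip printf-style static checks.
	klog.Warningf(msg)

	// Passed as a plain argument, the message is logged verbatim.
	klog.Warning(msg)

	klog.Flush()
}
```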