k3s-io/kubernetes (mirror of https://github.com/k3s-io/kubernetes.git)
Merge pull request #108159 from pohly/logcheck

logcheck update and golangci-lint integration

commit d60df5ba04
@@ -21,10 +21,17 @@ linters:
   #  - structcheck
   #  - varcheck
     - ineffassign
+    - logcheck
     - staticcheck
     - unused

 linters-settings: # please keep this alphabetized
+  custom:
+    logcheck:
+      # Installed there by hack/verify-golangci-lint.sh.
+      path: _output/local/bin/logcheck.so
+      description: structured logging checker
+      original-url: k8s.io/klog/hack/tools
   staticcheck:
     go: "1.17"
     checks: [

@@ -93,7 +93,7 @@ func LogOrWriteConfig(fileName string, cfg *config.KubeSchedulerConfiguration, c
 	}

 	if klogV.Enabled() {
-		klogV.Info("Using component config", "\n-------------------------Configuration File Contents Start Here---------------------- \n", buf.String(), "\n------------------------------------Configuration File Contents End Here---------------------------------\n")
+		klogV.InfoS("Using component config", "config", buf.String())
 	}

 	if len(fileName) > 0 {

@@ -149,11 +149,11 @@ func addUsersAndGroupsImpl(pathLoginDef, pathUsers, pathGroups string) (*UsersAn
 	var loginDef string
 	f, close, err := openFileWithLock(pathLoginDef)
 	if err != nil {
-		klog.V(1).Info("Could not open %q, using default system limits: %v", pathLoginDef, err)
+		klog.V(1).Infof("Could not open %q, using default system limits: %v", pathLoginDef, err)
 	} else {
 		loginDef, err = readFile(f)
 		if err != nil {
-			klog.V(1).Info("Could not read %q, using default system limits: %v", pathLoginDef, err)
+			klog.V(1).Infof("Could not read %q, using default system limits: %v", pathLoginDef, err)
 		}
 		close()
 	}

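Note: both fixes above are the same class of bug that logcheck catches: klog.Info and klog.Fatal have fmt.Sprint semantics, so printf verbs such as %q and %v in their first argument are never expanded; the f-variants are required for format strings. A minimal standalone sketch (the path and error values are made up for illustration; the calls in the diff are additionally gated behind V(1)):

    package main

    import "k8s.io/klog/v2"

    func main() {
        path, err := "/etc/login.defs", error(nil)

        // Bug: Info concatenates its arguments like fmt.Sprint, so the
        // verbs are printed literally and the values are appended after them.
        klog.Info("Could not open %q, using default system limits: %v", path, err)

        // Fix: Infof applies fmt.Sprintf semantics and expands the verbs.
        klog.Infof("Could not open %q, using default system limits: %v", path, err)

        klog.Flush()
    }
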
@@ -1,15 +0,0 @@
-cmd/kube-proxy/...
-cmd/kube-scheduler/app/config/...
-cmd/kube-scheduler/app/testing/...
-cmd/kubelet/...
-pkg/kubelet/...
-pkg/proxy/...
-pkg/scheduler/apis/...
-pkg/scheduler/framework/...
-pkg/scheduler/internal/cache/fake/...
-pkg/scheduler/internal/heap/...
-pkg/scheduler/internal/queue/...
-pkg/scheduler/metrics/...
-pkg/scheduler/profile/...
-pkg/scheduler/testing/...
-pkg/scheduler/util/...

hack/logcheck.conf (new file, 22 lines)
@@ -0,0 +1,22 @@
+# This file contains regular expressions that are matched against <pkg>/<file>,
+# for example k8s.io/cmd/kube-scheduler/app/config/config.go.
+#
+# By default, structured logging call parameters are checked, but usage of
+# those calls is not required. That is changed on a per-file basis.
+#
+# Remember to clean the golangci-lint cache when changing the configuration and
+# running the verify-golangci-lint.sh script multiple times, otherwise
+# golangci-lint will report stale results:
+#    _output/local/bin/golangci-lint cache clean
+
+# At this point we don't enforce the usage of structured logging calls except in
+# those packages that were migrated. This disables the check for other files.
+-structured .*
+
+# Now enable it again for migrated packages.
+structured k8s.io/kubernetes/cmd/kube-proxy/.*
+structured k8s.io/kubernetes/cmd/kube-scheduler/.*
+structured k8s.io/kubernetes/cmd/kubelet/.*
+structured k8s.io/kubernetes/pkg/kubelet/.*
+structured k8s.io/kubernetes/pkg/proxy/.*
+structured k8s.io/kubernetes/pkg/scheduler/.*

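Note: "structured logging call parameters are checked" means logcheck validates the message and key/value arguments of calls like klog.InfoS and klog.ErrorS; in files matched by a `structured` line it additionally reports any remaining unstructured calls (klog.Infof and friends). A sketch of both kinds of findings, with made-up values:

    package main

    import (
        "errors"

        "k8s.io/klog/v2"
    )

    func main() {
        err := errors.New("connection refused")

        // Well-formed structured calls: a constant message plus
        // alternating key/value pairs.
        klog.InfoS("Pod status updated", "pod", "kube-dns", "status", "ready")
        klog.ErrorS(err, "Failed to update pod status", "pod", "kube-dns")

        // logcheck flags parameter mistakes such as a dangling key
        // (odd number of key/value arguments):
        //   klog.InfoS("Pod status updated", "pod")
        // In packages matched by a `structured` line it also reports
        // remaining unstructured calls such as klog.Infof.

        klog.Flush()
    }
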
@@ -35,7 +35,6 @@ EXCLUDED_PATTERNS=(
   "verify-linkcheck.sh"           # runs in separate Jenkins job once per day due to high network usage
   "verify-*-dockerized.sh"        # Don't run any scripts that intended to be run dockerized
   "verify-govet-levee.sh"         # Do not run levee analysis by default while KEP-1933 implementation is in alpha.
-  "verify-structured-logging.sh"  # TODO(dims) Need to get this running with golang 1.18
 )

 # Exclude generated-files-remake in certain cases, if they're running in a separate job.

@@ -11,6 +11,6 @@ require (
 	github.com/google/go-flow-levee v0.1.5
 	gotest.tools/gotestsum v1.6.4
 	honnef.co/go/tools v0.2.2
-	k8s.io/klog/hack/tools v0.0.0-20210917071902-331d2323a192
+	k8s.io/klog/hack/tools v0.0.0-20220321210246-c697110cd8ac
 	sigs.k8s.io/zeitgeist v0.2.0
 )

@@ -1521,8 +1521,8 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 honnef.co/go/tools v0.2.2 h1:MNh1AVMyVX23VUHE2O27jm6lNj3vjO5DexS4A1xvnzk=
 honnef.co/go/tools v0.2.2/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY=
-k8s.io/klog/hack/tools v0.0.0-20210917071902-331d2323a192 h1:u27Xm1of9MTDM1CZW3hg0Vv04ohywEG152G8mpr9n8Y=
-k8s.io/klog/hack/tools v0.0.0-20210917071902-331d2323a192/go.mod h1:DXW3Mv8xqJvjXWiBSBHrK2O4mq5LMD0clqkv3b1g9HA=
+k8s.io/klog/hack/tools v0.0.0-20220321210246-c697110cd8ac h1:c2NQfDLtcwrPdD9pnUcRPlMJ719zgRzdlufHULIW7mU=
+k8s.io/klog/hack/tools v0.0.0-20220321210246-c697110cd8ac/go.mod h1:DXW3Mv8xqJvjXWiBSBHrK2O4mq5LMD0clqkv3b1g9HA=
 k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 mvdan.cc/gofumpt v0.3.0 h1:kTojdZo9AcEYbQYhGuLf/zszYthRdhDNDUi2JKTxas4=

@@ -36,14 +36,25 @@ PATH="${GOBIN}:${PATH}"
 export GO111MODULE=on

 # Install golangci-lint
-echo 'installing golangci-lint '
+echo "installing golangci-lint and logcheck plugin from hack/tools into ${GOBIN}"
 pushd "${KUBE_ROOT}/hack/tools" >/dev/null
   go install github.com/golangci/golangci-lint/cmd/golangci-lint
+  go build -o "${GOBIN}/logcheck.so" -buildmode=plugin k8s.io/klog/hack/tools/logcheck/plugin
 popd >/dev/null

 cd "${KUBE_ROOT}"

-# The config is in ${KUBE_ROOT}/.golangci.yaml
+# The config is in ${KUBE_ROOT}/.golangci.yaml where it will be found
+# even when golangci-lint is invoked in a sub-directory.
+#
+# The logcheck plugin currently has to be configured via env variables
+# (https://github.com/golangci/golangci-lint/issues/1512).
+#
+# Remember to clean the golangci-lint cache when changing
+# the configuration and running this script multiple times,
+# otherwise golangci-lint will report stale results:
+#    _output/local/bin/golangci-lint cache clean
+export LOGCHECK_CONFIG="${KUBE_ROOT}/hack/logcheck.conf"
 echo 'running golangci-lint ' >&2
 res=0
 if [[ "$#" -gt 0 ]]; then

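Note: `go build -buildmode=plugin` produces a shared object that golangci-lint loads at runtime. Under golangci-lint's Go-plugin contract as documented at the time, the plugin is a main package that exports an AnalyzerPlugin value whose GetAnalyzers method returns the analyzers to run. The sketch below is illustrative only and is not the actual logcheck plugin source (that lives in k8s.io/klog/hack/tools/logcheck/plugin):

    // A Go plugin must be package main; no main function is required.
    package main

    import (
        "golang.org/x/tools/go/analysis"
    )

    // exampleAnalyzer is a hypothetical placeholder; logcheck's real
    // analyzer inspects structured logging calls instead.
    var exampleAnalyzer = &analysis.Analyzer{
        Name: "example",
        Doc:  "placeholder analyzer that reports nothing",
        Run: func(pass *analysis.Pass) (interface{}, error) {
            return nil, nil
        },
    }

    type analyzerPlugin struct{}

    // GetAnalyzers implements the interface golangci-lint expects from
    // a plugin built with -buildmode=plugin.
    func (analyzerPlugin) GetAnalyzers() []*analysis.Analyzer {
        return []*analysis.Analyzer{exampleAnalyzer}
    }

    // AnalyzerPlugin is the exported symbol golangci-lint looks up
    // after loading the .so file.
    var AnalyzerPlugin analyzerPlugin
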
@@ -1,97 +0,0 @@
-#!/usr/bin/env bash
-
-# Copyright 2021 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script is used to avoid regressions after a package is migrated
-# to structured logging. once a package is completely migrated add
-# it .structured_logging file to avoid any future regressions.
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
-source "${KUBE_ROOT}/hack/lib/init.sh"
-source "${KUBE_ROOT}/hack/lib/util.sh"
-
-kube::golang::verify_go_version
-
-# Ensure that we find the binaries we build before anything else.
-export GOBIN="${KUBE_OUTPUT_BINPATH}"
-PATH="${GOBIN}:${PATH}"
-
-# Install logcheck
-pushd "${KUBE_ROOT}/hack/tools" >/dev/null
-  GO111MODULE=on go install k8s.io/klog/hack/tools/logcheck
-popd >/dev/null
-
-# We store packages that are migrated in .structured_logging
-migrated_packages_file="${KUBE_ROOT}/hack/.structured_logging"
-
-# Ensure that file is sorted.
-kube::util::check-file-in-alphabetical-order "${migrated_packages_file}"
-
-migrated_packages=()
-while IFS='' read -r line; do
-  migrated_packages+=("$KUBE_ROOT/$line")
-done < <(cat "${migrated_packages_file}")
-
-echo "Running structured logging static check on migrated packages"
-ret=0
-GOOS=linux logcheck "${migrated_packages[@]}" || ret=$?
-GOOS=windows logcheck "${migrated_packages[@]}" || ret=$?
-
-if [ $ret -eq 0 ]; then
-  echo "Structured logging static check passed on migrated packages :)"
-else
-  echo "Please fix above failures. You can locally test via:"
-  echo "hack/verify-structured-logging.sh"
-fi
-
-# Ignore migrated packages as they are already tested
-# Trim the trailing /... from given packages
-ignore_packages=$(grep -oE '^[a-zA-Z0-9/]+[^/\.]' < "${KUBE_ROOT}"/hack/.structured_logging)
-
-all_packages=()
-# shellcheck disable=SC2010
-for i in $(ls -d ./*/ | grep -v 'staging' | grep -v 'vendor' | grep -v 'hack')
-do
-  all_packages+=("$(go list ./"$i"/... 2> /dev/null | sed 's/k8s.io\/kubernetes\///g')")
-  all_packages+=(" ")
-done
-# We exclude vendor/ except vendor/k8s.io
-# This is because vendor/k8s.io is symlinked to staging
-# and needs to be checked
-all_packages+=("vendor/k8s.io")
-
-# Packages to test = all_packages - ignored_packages
-packages=()
-while IFS='' read -r line; do
-  if [ -z "$line" ]; then continue; fi
-  packages+=("$KUBE_ROOT/$line/...")
-done < <(echo "${all_packages[@]}" | tr " " "\n" | grep -v "$ignore_packages")
-
-echo -e "\nRunning structured logging static check on all other packages"
-GOOS=linux logcheck -allow-unstructured "${packages[@]}" || ret=$?
-GOOS=windows logcheck -allow-unstructured "${packages[@]}" || ret=$?
-
-if [ $ret -eq 0 ]; then
-  echo "Structured logging static check passed on all packages :)"
-else
-  echo "Please fix above failures. You can locally test via:"
-  echo "hack/verify-structured-logging.sh"
-fi
-
-exit $ret

@@ -406,6 +406,9 @@ func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error {
 	// all containers again.
 	status, err := g.runtime.GetPodStatus(pod.ID, pod.Name, pod.Namespace)
 	if err != nil {
+		// nolint:logcheck // Not using the result of klog.V inside the
+		// if branch is okay, we just use it to determine whether the
+		// additional "podStatus" key and its value should be added.
 		if klog.V(6).Enabled() {
 			klog.ErrorS(err, "PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name), "podStatus", status)
 		} else {

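Note: logcheck normally insists that the Verbose value returned by klog.V is the one used for the gated log call. Here V(6).Enabled() only decides whether the verbose "podStatus" value is attached; both branches log unconditionally through klog.ErrorS, so the lint is suppressed with a justified nolint comment. A self-contained sketch of the pattern, with hypothetical values:

    package main

    import (
        "errors"

        "k8s.io/klog/v2"
    )

    // logWriteStatus shows the gating pattern: V(6) is only a gate, and
    // both branches emit the error at the default verbosity.
    func logWriteStatus(err error, ns, name, status string) {
        // nolint:logcheck // klog.V(6) is used only as a gate here.
        if klog.V(6).Enabled() {
            klog.ErrorS(err, "PLEG: Write status", "pod", klog.KRef(ns, name), "podStatus", status)
        } else {
            klog.ErrorS(err, "PLEG: Write status", "pod", klog.KRef(ns, name))
        }
    }

    func main() {
        logWriteStatus(errors.New("runtime unavailable"), "default", "nginx", "Pending")
        klog.Flush()
    }
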
@@ -399,7 +399,7 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti
 	if _, ok := replicasPathInDeployment[requestGroupVersion.String()]; ok {
 		groupVersion = requestGroupVersion
 	} else {
-		klog.Fatal("Unrecognized group/version in request info %q", requestGroupVersion.String())
+		klog.Fatalf("Unrecognized group/version in request info %q", requestGroupVersion.String())
 	}
 }

@@ -300,7 +300,7 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti
 	if _, ok := replicasPathInReplicaSet[requestGroupVersion.String()]; ok {
 		groupVersion = requestGroupVersion
 	} else {
-		klog.Fatal("Unrecognized group/version in request info %q", requestGroupVersion.String())
+		klog.Fatalf("Unrecognized group/version in request info %q", requestGroupVersion.String())
 	}
 }

@@ -294,7 +294,7 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti
 	if _, ok := replicasPathInStatefulSet[requestGroupVersion.String()]; ok {
 		groupVersion = requestGroupVersion
 	} else {
-		klog.Fatal("Unrecognized group/version in request info %q", requestGroupVersion.String())
+		klog.Fatalf("Unrecognized group/version in request info %q", requestGroupVersion.String())
 	}
 }

@@ -266,7 +266,7 @@ func (i *scaleUpdatedObjectInfo) UpdatedObject(ctx context.Context, oldObj runti
 	if _, ok := replicasPathInReplicationController[requestGroupVersion.String()]; ok {
 		groupVersion = requestGroupVersion
 	} else {
-		klog.Fatal("Unrecognized group/version in request info %q", requestGroupVersion.String())
+		klog.Fatalf("Unrecognized group/version in request info %q", requestGroupVersion.String())
 	}
 }

pkg/scheduler/internal/cache/debugger/dumper.go (vendored, 10 lines)
@@ -44,10 +44,12 @@ func (d *CacheDumper) DumpAll() {
 // dumpNodes writes NodeInfo to the scheduler logs.
 func (d *CacheDumper) dumpNodes() {
 	dump := d.cache.Dump()
-	klog.InfoS("Dump of cached NodeInfo")
+	nodeInfos := make([]string, 0, len(dump.Nodes))
 	for name, nodeInfo := range dump.Nodes {
-		klog.Info(d.printNodeInfo(name, nodeInfo))
+		nodeInfos = append(nodeInfos, d.printNodeInfo(name, nodeInfo))
 	}
+	// Extra blank line added between node entries for readability.
+	klog.InfoS("Dump of cached NodeInfo", "nodes", strings.Join(nodeInfos, "\n\n"))
 }

 // dumpSchedulingQueue writes pods in the scheduling queue to the scheduler logs.
@@ -57,13 +59,13 @@ func (d *CacheDumper) dumpSchedulingQueue() {
 	for _, p := range pendingPods {
 		podData.WriteString(printPod(p))
 	}
-	klog.Infof("Dump of scheduling queue:\n%s", podData.String())
+	klog.InfoS("Dump of scheduling queue", "pods", podData.String())
 }

 // printNodeInfo writes parts of NodeInfo to a string.
 func (d *CacheDumper) printNodeInfo(name string, n *framework.NodeInfo) string {
 	var nodeData strings.Builder
-	nodeData.WriteString(fmt.Sprintf("\nNode name: %s\nDeleted: %t\nRequested Resources: %+v\nAllocatable Resources:%+v\nScheduled Pods(number: %v):\n",
+	nodeData.WriteString(fmt.Sprintf("Node name: %s\nDeleted: %t\nRequested Resources: %+v\nAllocatable Resources:%+v\nScheduled Pods(number: %v):\n",
 		name, n.Node() == nil, n.Requested, n.Allocatable, len(n.Pods)))
 	// Dumping Pod Info
 	for _, p := range n.Pods {

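Note: the dumper change collects all per-node summaries first and emits them as a single klog.InfoS call instead of one klog.Info per node. One structured call keeps the dump in a single log record that cannot be interleaved with other goroutines' output, and exposes it under a named key. A small sketch with made-up data:

    package main

    import (
        "fmt"
        "strings"

        "k8s.io/klog/v2"
    )

    func main() {
        // Hypothetical per-node summaries standing in for printNodeInfo output.
        nodes := map[string]int{"node-a": 3, "node-b": 7}

        // Collect first, then emit one structured entry.
        nodeInfos := make([]string, 0, len(nodes))
        for name, pods := range nodes {
            nodeInfos = append(nodeInfos, fmt.Sprintf("Node name: %s\nScheduled Pods: %d", name, pods))
        }
        klog.InfoS("Dump of cached NodeInfo", "nodes", strings.Join(nodeInfos, "\n\n"))
        klog.Flush()
    }
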
@@ -971,10 +971,10 @@ func ignoreExitCodes(err error, ignoredExitCodes ...int) error {
 func execWithLog(b iscsiDiskMounter, cmd string, args ...string) (string, error) {
 	start := time.Now()
 	out, err := b.exec.Command(cmd, args...).CombinedOutput()
-	if klog.V(5).Enabled() {
+	if klogV := klog.V(5); klogV.Enabled() {
 		d := time.Since(start)
-		klog.V(5).Infof("Executed %s %v in %v, err: %v", cmd, args, d, err)
-		klog.V(5).Infof("Output: %s", string(out))
+		klogV.Infof("Executed %s %v in %v, err: %v", cmd, args, d, err)
+		klogV.Infof("Output: %s", string(out))
 	}
 	return string(out), err
 }

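Note: this is the mechanical refactor that recurs through the rest of the commit. Binding the Verbose value once (`klogV := klog.V(5)`) means the verbosity level is evaluated a single time for the gate and all gated calls, and logcheck can verify that the calls use the same level as the gate. A minimal sketch:

    package main

    import (
        "time"

        "k8s.io/klog/v2"
    )

    func main() {
        start := time.Now()

        // Before: klog.V(5).Enabled() gates the block, but each inner
        // klog.V(5).Infof re-evaluates the verbosity level.
        //
        // After: bind the Verbose value once; gate and log calls share
        // a single check, and logcheck can see that they match.
        if klogV := klog.V(5); klogV.Enabled() {
            klogV.Infof("Executed in %v", time.Since(start))
        }
        klog.Flush()
    }
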
@@ -82,7 +82,7 @@ func (r *RBACAuthorizer) Authorize(ctx context.Context, requestAttributes author

 	// Build a detailed log of the denial.
 	// Make the whole block conditional so we don't do a lot of string-building we won't use.
-	if klog.V(5).Enabled() {
+	if klogV := klog.V(5); klogV.Enabled() {
 		var operation string
 		if requestAttributes.IsResourceRequest() {
 			b := &bytes.Buffer{}
@@ -116,7 +116,7 @@ func (r *RBACAuthorizer) Authorize(ctx context.Context, requestAttributes author
 			scope = "cluster-wide"
 		}

-		klog.Infof("RBAC: no rules authorize user %q with groups %q to %s %s", requestAttributes.GetUser().GetName(), requestAttributes.GetUser().GetGroups(), operation, scope)
+		klogV.Infof("RBAC: no rules authorize user %q with groups %q to %s %s", requestAttributes.GetUser().GetName(), requestAttributes.GetUser().GetGroups(), operation, scope)
 	}

 	reason := ""

@@ -49,8 +49,9 @@ func StorageWithCacher() generic.StorageDecorator {
 		if err != nil {
 			return s, d, err
 		}
-		if klog.V(5).Enabled() {
-			klog.V(5).InfoS("Storage caching is enabled", objectTypeToArgs(newFunc())...)
+		if klogV := klog.V(5); klogV.Enabled() {
+			//nolint:logcheck // It complains about the key/value pairs because it cannot check them.
+			klogV.InfoS("Storage caching is enabled", objectTypeToArgs(newFunc())...)
 		}

 		cacherConfig := cacherstorage.Config{

@@ -125,6 +125,10 @@ func WithPriorityAndFairness(
 			workEstimate := workEstimator(r, classification.FlowSchemaName, classification.PriorityLevelName)

 			fcmetrics.ObserveWorkEstimatedSeats(classification.PriorityLevelName, classification.FlowSchemaName, workEstimate.MaxSeats())
+			// nolint:logcheck // Not using the result of klog.V
+			// inside the if branch is okay, we just use it to
+			// determine whether the additional information should
+			// be added.
 			if klog.V(4).Enabled() {
 				httplog.AddKeyValue(ctx, "apf_iseats", workEstimate.InitialSeats)
 				httplog.AddKeyValue(ctx, "apf_fseats", workEstimate.FinalSeats)

@@ -32,19 +32,19 @@ type klogWrapper struct{}
 const klogWrapperDepth = 4

 func (klogWrapper) Info(args ...interface{}) {
-	if klog.V(5).Enabled() {
-		klog.V(5).InfoSDepth(klogWrapperDepth, fmt.Sprint(args...))
+	if klogV := klog.V(5); klogV.Enabled() {
+		klogV.InfoSDepth(klogWrapperDepth, fmt.Sprint(args...))
 	}
 }

 func (klogWrapper) Infoln(args ...interface{}) {
-	if klog.V(5).Enabled() {
-		klog.V(5).InfoSDepth(klogWrapperDepth, fmt.Sprintln(args...))
+	if klogV := klog.V(5); klogV.Enabled() {
+		klogV.InfoSDepth(klogWrapperDepth, fmt.Sprintln(args...))
 	}
 }

 func (klogWrapper) Infof(format string, args ...interface{}) {
-	if klog.V(5).Enabled() {
+	if klogV := klog.V(5); klogV.Enabled() {
 		klog.V(5).InfoSDepth(klogWrapperDepth, fmt.Sprintf(format, args...))
 	}
 }

@@ -434,8 +434,8 @@ func (cfgCtlr *configController) digestConfigObjects(newPLs []*flowcontrol.Prior

 		// if we are going to issue an update, be sure we track every name we update so we know if we update it too often.
 		currResult.updatedItems.Insert(fsu.flowSchema.Name)
-		if klog.V(4).Enabled() {
-			klog.V(4).Infof("%s writing Condition %s to FlowSchema %s, which had ResourceVersion=%s, because its previous value was %s, diff: %s",
+		if klogV := klog.V(4); klogV.Enabled() {
+			klogV.Infof("%s writing Condition %s to FlowSchema %s, which had ResourceVersion=%s, because its previous value was %s, diff: %s",
 				cfgCtlr.name, fsu.condition, fsu.flowSchema.Name, fsu.flowSchema.ResourceVersion, fcfmt.Fmt(fsu.oldValue), cmp.Diff(fsu.oldValue, fsu.condition))
 		}
 		fsIfc := cfgCtlr.flowcontrolClient.FlowSchemas()

@@ -573,9 +573,9 @@ func (qs *queueSet) shuffleShardLocked(hashValue uint64, descr1, descr2 interfac
 			bestQueueIdx = queueIdx
 		}
 	}
-	if klog.V(6).Enabled() {
+	if klogV := klog.V(6); klogV.Enabled() {
 		chosenQueue := qs.queues[bestQueueIdx]
-		klog.V(6).Infof("QS(%s) at t=%s R=%v: For request %#+v %#+v chose queue %d, with sum: %#v & %d seats in use & nextDispatchR=%v", qs.qCfg.Name, qs.clock.Now().Format(nsTimeFmt), qs.currentR, descr1, descr2, bestQueueIdx, chosenQueue.requests.QueueSum(), chosenQueue.seatsInUse, chosenQueue.nextDispatchR)
+		klogV.Infof("QS(%s) at t=%s R=%v: For request %#+v %#+v chose queue %d, with sum: %#v & %d seats in use & nextDispatchR=%v", qs.qCfg.Name, qs.clock.Now().Format(nsTimeFmt), qs.currentR, descr1, descr2, bestQueueIdx, chosenQueue.requests.QueueSum(), chosenQueue.seatsInUse, chosenQueue.nextDispatchR)
 	}
 	return bestQueueIdx
 }

@@ -1051,13 +1051,13 @@ func truncateBody(body string) string {
 // allocating a new string for the body output unless necessary. Uses a simple heuristic to determine
 // whether the body is printable.
 func glogBody(prefix string, body []byte) {
-	if klog.V(8).Enabled() {
+	if klogV := klog.V(8); klogV.Enabled() {
 		if bytes.IndexFunc(body, func(r rune) bool {
 			return r < 0x0a
 		}) != -1 {
-			klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body)))
+			klogV.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body)))
 		} else {
-			klog.Infof("%s: %s", prefix, truncateBody(string(body)))
+			klogV.Infof("%s: %s", prefix, truncateBody(string(body)))
 		}
 	}
 }

@@ -65,6 +65,7 @@ func TestZapLoggerInfo(t *testing.T) {
 			var buffer bytes.Buffer
 			writer := zapcore.AddSync(&buffer)
 			sampleInfoLogger, _ := NewJSONLogger(writer, nil, nil)
+			// nolint:logcheck // The linter cannot and doesn't need to check the key/value pairs.
 			sampleInfoLogger.Info(data.msg, data.keysValues...)
 			logStr := buffer.String()

@@ -93,6 +93,8 @@ func DefaultBehaviorOnFatal() {
 // klog.Fatal is invoked for extended information. This is intended for maintainer
 // debugging and out of a reasonable range for users.
 func fatal(msg string, code int) {
+	// nolint:logcheck // Not using the result of klog.V(99) inside the if
+	// branch is okay, we just use it to determine how to terminate.
 	if klog.V(99).Enabled() {
 		klog.FatalDepth(2, msg)
 	}

@@ -31,7 +31,7 @@ import (
 	"github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud/meta"
 	"github.com/google/go-cmp/cmp"
 	compute "google.golang.org/api/compute/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/sets"
 	cloudprovider "k8s.io/cloud-provider"

@@ -202,9 +202,9 @@ func (g *Cloud) ensureInternalLoadBalancer(clusterName, clusterID string, svc *v
 	// Delete existing forwarding rule before making changes to the backend service. For example - changing protocol
 	// of backend service without first deleting forwarding rule will throw an error since the linked forwarding
 	// rule would show the old protocol.
-	if klog.V(2).Enabled() {
+	if klogV := klog.V(2); klogV.Enabled() {
 		frDiff := cmp.Diff(existingFwdRule, newFwdRule)
-		klog.V(2).Infof("ensureInternalLoadBalancer(%v): forwarding rule changed - Existing - %+v\n, New - %+v\n, Diff(-existing, +new) - %s\n. Deleting existing forwarding rule.", loadBalancerName, existingFwdRule, newFwdRule, frDiff)
+		klogV.Infof("ensureInternalLoadBalancer(%v): forwarding rule changed - Existing - %+v\n, New - %+v\n, Diff(-existing, +new) - %s\n. Deleting existing forwarding rule.", loadBalancerName, existingFwdRule, newFwdRule, frDiff)
 	}
 	if err = ignoreNotFound(g.DeleteRegionForwardingRule(loadBalancerName, g.region)); err != nil {
 		return nil, err