Merge pull request #106978 from pohly/log-message-verbosity

log message verbosity
Commit 46c072d9d9, authored by Kubernetes Prow Robot on 2022-01-12 07:24:36 -08:00, committed by GitHub.
26 changed files with 184 additions and 130 deletions
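Every file below applies the same refactoring: evaluate klog.V(n) once, store the result in a klogV variable, and use that variable both for the Enabled() check and for the guarded log call. This avoids a second verbosity lookup and, more importantly, emits the message with the verbosity it was guarded by; a plain klog.Info inside the if block is written without any verbosity attached. A minimal standalone sketch of the before/after pattern (illustrative, not a file from this commit):

package main

import "k8s.io/klog/v2"

func before(items []string) {
	// Old pattern: klog.V(4) is evaluated for the check, but the message is
	// then written via klog.InfoS, i.e. without the V(4) level attached.
	if klog.V(4).Enabled() {
		klog.InfoS("processing items", "count", len(items))
	}
}

func after(items []string) {
	// New pattern: klog.V(4) is evaluated once; logging through the returned
	// Verbose value keeps the check and the emitted entry at the same level.
	if klogV := klog.V(4); klogV.Enabled() {
		klogV.InfoS("processing items", "count", len(items))
	}
}

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()
	before([]string{"a", "b"})
	after([]string{"a", "b"})
}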


@@ -82,7 +82,8 @@ func encodeConfig(cfg *config.KubeSchedulerConfiguration) (*bytes.Buffer, error)
 // LogOrWriteConfig logs the completed component config and writes it into the given file name as YAML, if either is enabled
 func LogOrWriteConfig(fileName string, cfg *config.KubeSchedulerConfiguration, completedProfiles []config.KubeSchedulerProfile) error {
-	if !(klog.V(2).Enabled() || len(fileName) > 0) {
+	klogV := klog.V(2)
+	if !klogV.Enabled() && len(fileName) == 0 {
 		return nil
 	}
 	cfg.Profiles = completedProfiles
@@ -92,8 +93,8 @@ func LogOrWriteConfig(fileName string, cfg *config.KubeSchedulerConfiguration, c
 		return err
 	}
-	if klog.V(2).Enabled() {
-		klog.Info("Using component config", "\n-------------------------Configuration File Contents Start Here---------------------- \n", buf.String(), "\n------------------------------------Configuration File Contents End Here---------------------------------\n")
+	if klogV.Enabled() {
+		klogV.Info("Using component config", "\n-------------------------Configuration File Contents Start Here---------------------- \n", buf.String(), "\n------------------------------------Configuration File Contents End Here---------------------------------\n")
 	}
 	if len(fileName) > 0 {

go.mod

@@ -36,6 +36,7 @@ require (
 	github.com/emicklei/go-restful v2.9.5+incompatible
 	github.com/evanphx/json-patch v4.12.0+incompatible
 	github.com/fsnotify/fsnotify v1.4.9
+	github.com/go-logr/logr v1.2.0
 	github.com/go-ozzo/ozzo-validation v3.5.0+incompatible // indirect
 	github.com/godbus/dbus/v5 v5.0.4
 	github.com/gogo/protobuf v1.3.2


@@ -1042,8 +1042,8 @@ func (nc *Controller) tryUpdateNodeHealth(ctx context.Context, node *v1.Node) (t
 	} else {
 		transitionTime = nodeHealth.readyTransitionTimestamp
 	}
-	if klog.V(5).Enabled() {
-		klog.Infof("Node %s ReadyCondition updated. Updating timestamp: %+v vs %+v.", node.Name, nodeHealth.status, node.Status)
+	if klogV := klog.V(5); klogV.Enabled() {
+		klogV.Infof("Node %s ReadyCondition updated. Updating timestamp: %+v vs %+v.", node.Name, nodeHealth.status, node.Status)
 	} else {
 		klog.V(3).Infof("Node %s ReadyCondition updated. Updating timestamp.", node.Name)
 	}


@@ -37,7 +37,7 @@ import (
 	"time"

 	apps "k8s.io/api/apps/v1"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -219,12 +219,10 @@ func (rsc *ReplicaSetController) getReplicaSetsWithSameController(rs *apps.Repli
 		}
 	}
-	if klog.V(2).Enabled() {
-		var relatedNames []string
-		for _, r := range relatedRSs {
-			relatedNames = append(relatedNames, r.Name)
-		}
-		klog.InfoS("Found related ReplicaSets", "replicaSet", klog.KObj(rs), "relatedReplicaSets", relatedNames)
+	// The if check is used to avoid the overhead for the KObjs call, see
+	// https://github.com/kubernetes/kubernetes/issues/106945.
+	if klogV := klog.V(2); klogV.Enabled() {
+		klogV.InfoS("Found related ReplicaSets", "replicaSet", klog.KObj(rs), "relatedReplicaSets", klog.KObjs(relatedRSs))
 	}
 	return relatedRSs
@@ -791,13 +789,7 @@ func (rsc *ReplicaSetController) getIndirectlyRelatedPods(rs *apps.ReplicaSet) (
 			relatedPods = append(relatedPods, pod)
 		}
 	}
-	if klog.V(4).Enabled() {
-		var relatedNames []string
-		for _, related := range relatedPods {
-			relatedNames = append(relatedNames, related.Name)
-		}
-		klog.Infof("Found %d related pods for %v %s/%s: %v", len(relatedPods), rsc.Kind, rs.Namespace, rs.Name, strings.Join(relatedNames, ", "))
-	}
+	klog.V(4).InfoS("Found related pods", "kind", rsc.Kind, "object", klog.KObj(rs), "pods", klog.KObjs(relatedPods))
 	return relatedPods, nil
 }
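The guard in getReplicaSetsWithSameController exists because Go evaluates call arguments eagerly: klog.KObjs builds a slice of object references before InfoS can decide whether the entry will be emitted at all (see issue #106945 referenced above). A hedged illustration, with a hypothetical expensiveRefs helper standing in for klog.KObjs:

package main

import "k8s.io/klog/v2"

// expensiveRefs stands in for klog.KObjs: it allocates a slice on every call.
// Hypothetical helper, for illustration only.
func expensiveRefs(names []string) []string {
	out := make([]string, 0, len(names))
	for _, n := range names {
		out = append(out, "ns/"+n)
	}
	return out
}

func main() {
	names := []string{"rs-1", "rs-2"}

	// Arguments are built before the call, so expensiveRefs runs even when
	// verbosity 2 is disabled:
	klog.V(2).InfoS("found related", "refs", expensiveRefs(names))

	// The guard skips the allocation entirely when V(2) is off:
	if klogV := klog.V(2); klogV.Enabled() {
		klogV.InfoS("found related", "refs", expensiveRefs(names))
	}
}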


@@ -25,7 +25,7 @@ import (
 	"k8s.io/klog/v2"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apiequality "k8s.io/apimachinery/pkg/api/equality"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -435,8 +435,8 @@ func (rq *Controller) Sync(discoveryFunc NamespacedResourcesFunc, period time.Du
 	defer rq.workerLock.Unlock()

 	// Something has changed, so track the new state and perform a sync.
-	if klog.V(2).Enabled() {
-		klog.Infof("syncing resource quota controller with updated resources from discovery: %s", printDiff(oldResources, newResources))
+	if klogV := klog.V(2); klogV.Enabled() {
+		klogV.Infof("syncing resource quota controller with updated resources from discovery: %s", printDiff(oldResources, newResources))
 	}
 	// Perform the monitor resync and wait for controllers to report cache sync.


@@ -171,9 +171,7 @@ func (rc *reconciler) reconcile() {
 		// See https://github.com/kubernetes/kubernetes/issues/93902
 		attachState := rc.actualStateOfWorld.GetAttachState(attachedVolume.VolumeName, attachedVolume.NodeName)
 		if attachState == cache.AttachStateDetached {
-			if klog.V(5).Enabled() {
-				klog.Infof(attachedVolume.GenerateMsgDetailed("Volume detached--skipping", ""))
-			}
+			klog.V(5).InfoS("Volume detached--skipping", "volume", attachedVolume)
 			continue
 		}
@@ -187,7 +185,7 @@ func (rc *reconciler) reconcile() {
 			timeout := elapsedTime > rc.maxWaitForUnmountDuration
 			// Check whether volume is still mounted. Skip detach if it is still mounted unless timeout
 			if attachedVolume.MountedByNode && !timeout {
-				klog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Cannot detach volume because it is still mounted", ""))
+				klog.V(5).InfoS("Cannot detach volume because it is still mounted", "volume", attachedVolume)
 				continue
 			}
@@ -207,21 +205,21 @@ func (rc *reconciler) reconcile() {
 			err = rc.nodeStatusUpdater.UpdateNodeStatuses()
 			if err != nil {
 				// Skip detaching this volume if unable to update node status
-				klog.Errorf(attachedVolume.GenerateErrorDetailed("UpdateNodeStatuses failed while attempting to report volume as attached", err).Error())
+				klog.ErrorS(err, "UpdateNodeStatuses failed while attempting to report volume as attached", "volume", attachedVolume)
 				continue
 			}

 			// Trigger detach volume which requires verifying safe to detach step
 			// If timeout is true, skip verifySafeToDetach check
-			klog.V(5).Infof(attachedVolume.GenerateMsgDetailed("Starting attacherDetacher.DetachVolume", ""))
+			klog.V(5).InfoS("Starting attacherDetacher.DetachVolume", "volume", attachedVolume)
 			verifySafeToDetach := !timeout
 			err = rc.attacherDetacher.DetachVolume(attachedVolume.AttachedVolume, verifySafeToDetach, rc.actualStateOfWorld)
 			if err == nil {
 				if !timeout {
-					klog.Infof(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", ""))
+					klog.InfoS("attacherDetacher.DetachVolume started", "volume", attachedVolume)
 				} else {
 					metrics.RecordForcedDetachMetric()
-					klog.Warningf(attachedVolume.GenerateMsgDetailed("attacherDetacher.DetachVolume started", fmt.Sprintf("This volume is not safe to detach, but maxWaitForUnmountDuration %v expired, force detaching", rc.maxWaitForUnmountDuration)))
+					klog.InfoS("attacherDetacher.DetachVolume started: this volume is not safe to detach, but maxWaitForUnmountDuration expired, force detaching", "duration", rc.maxWaitForUnmountDuration, "volume", attachedVolume)
 				}
 			}
 			if err != nil {
@@ -233,7 +231,7 @@ func (rc *reconciler) reconcile() {
 				if !exponentialbackoff.IsExponentialBackoff(err) {
 					// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
 					// Log all other errors.
-					klog.Errorf(attachedVolume.GenerateErrorDetailed("attacherDetacher.DetachVolume failed to start", err).Error())
+					klog.ErrorS(err, "attacherDetacher.DetachVolume failed to start", "volume", attachedVolume)
 				}
 			}
 		}
@@ -254,17 +252,13 @@ func (rc *reconciler) attachDesiredVolumes() {
 		if util.IsMultiAttachAllowed(volumeToAttach.VolumeSpec) {
 			// Don't even try to start an operation if there is already one running for the given volume and node.
 			if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, volumeToAttach.NodeName) {
-				if klog.V(10).Enabled() {
-					klog.Infof("Operation for volume %q is already running for node %q. Can't start attach", volumeToAttach.VolumeName, volumeToAttach.NodeName)
-				}
+				klog.V(10).Infof("Operation for volume %q is already running for node %q. Can't start attach", volumeToAttach.VolumeName, volumeToAttach.NodeName)
 				continue
 			}
 		} else {
 			// Don't even try to start an operation if there is already one running for the given volume
 			if rc.attacherDetacher.IsOperationPending(volumeToAttach.VolumeName, "" /* podName */, "" /* nodeName */) {
-				if klog.V(10).Enabled() {
-					klog.Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
-				}
+				klog.V(10).Infof("Operation for volume %q is already running. Can't start attach for %q", volumeToAttach.VolumeName, volumeToAttach.NodeName)
 				continue
 			}
 		}
@@ -277,9 +271,7 @@ func (rc *reconciler) attachDesiredVolumes() {
 		attachState := rc.actualStateOfWorld.GetAttachState(volumeToAttach.VolumeName, volumeToAttach.NodeName)
 		if attachState == cache.AttachStateAttached {
 			// Volume/Node exists, touch it to reset detachRequestedTime
-			if klog.V(5).Enabled() {
-				klog.Infof(volumeToAttach.GenerateMsgDetailed("Volume attached--touching", ""))
-			}
+			klog.V(5).InfoS("Volume attached--touching", "volume", volumeToAttach)
 			rc.actualStateOfWorld.ResetDetachRequestTime(volumeToAttach.VolumeName, volumeToAttach.NodeName)
 			continue
 		}
@@ -296,17 +288,15 @@ func (rc *reconciler) attachDesiredVolumes() {
 		}

 		// Volume/Node doesn't exist, spawn a goroutine to attach it
-		if klog.V(5).Enabled() {
-			klog.Infof(volumeToAttach.GenerateMsgDetailed("Starting attacherDetacher.AttachVolume", ""))
-		}
+		klog.V(5).InfoS("Starting attacherDetacher.AttachVolume", "volume", volumeToAttach)
 		err := rc.attacherDetacher.AttachVolume(volumeToAttach.VolumeToAttach, rc.actualStateOfWorld)
 		if err == nil {
-			klog.Infof(volumeToAttach.GenerateMsgDetailed("attacherDetacher.AttachVolume started", ""))
+			klog.InfoS("attacherDetacher.AttachVolume started", "volume", volumeToAttach)
 		}
 		if err != nil && !exponentialbackoff.IsExponentialBackoff(err) {
 			// Ignore exponentialbackoff.IsExponentialBackoff errors, they are expected.
 			// Log all other errors.
-			klog.Errorf(volumeToAttach.GenerateErrorDetailed("attacherDetacher.AttachVolume failed to start", err).Error())
+			klog.ErrorS(err, "attacherDetacher.AttachVolume failed to start", "volume", volumeToAttach)
 		}
 	}
 }
@@ -339,9 +329,7 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach
 			rc.recorder.Eventf(pod, v1.EventTypeWarning, kevents.FailedAttachVolume, simpleMsg)
 		}

 		// Log detailed message to system admin
-		nodeList := strings.Join(otherNodesStr, ", ")
-		detailedMsg := volumeToAttach.GenerateMsgDetailed("Multi-Attach error", fmt.Sprintf("Volume is already exclusively attached to node %s and can't be attached to another", nodeList))
-		klog.Warning(detailedMsg)
+		klog.InfoS("Multi-Attach error: volume is already exclusively attached and can't be attached to another node", "attachedTo", otherNodesStr, "volume", volumeToAttach)
 		return
 	}
@@ -377,10 +365,5 @@ func (rc *reconciler) reportMultiAttachError(volumeToAttach cache.VolumeToAttach
 	}

 	// Log all pods for system admin
-	podNames := []string{}
-	for _, pod := range pods {
-		podNames = append(podNames, pod.Namespace+"/"+pod.Name)
-	}
-	detailedMsg := volumeToAttach.GenerateMsgDetailed("Multi-Attach error", fmt.Sprintf("Volume is already used by pods %s on node %s", strings.Join(podNames, ", "), strings.Join(otherNodesStr, ", ")))
-	klog.Warningf(detailedMsg)
+	klog.InfoS("Multi-Attach error: volume is already used by pods", "pods", klog.KObjs(pods), "attachedTo", otherNodesStr, "volume", volumeToAttach)
 }


@@ -766,20 +766,22 @@ func thresholdsMet(thresholds []evictionapi.Threshold, observations signalObserv
 }

 func debugLogObservations(logPrefix string, observations signalObservations) {
-	if !klog.V(3).Enabled() {
+	klogV := klog.V(3)
+	if !klogV.Enabled() {
 		return
 	}
 	for k, v := range observations {
 		if !v.time.IsZero() {
-			klog.InfoS("Eviction manager:", "log", logPrefix, "signal", k, "resourceName", v.available, "capacity", v.capacity, "time", v.time)
+			klogV.InfoS("Eviction manager:", "log", logPrefix, "signal", k, "resourceName", v.available, "capacity", v.capacity, "time", v.time)
 		} else {
-			klog.InfoS("Eviction manager:", "log", logPrefix, "signal", k, "resourceName", v.available, "capacity", v.capacity)
+			klogV.InfoS("Eviction manager:", "log", logPrefix, "signal", k, "resourceName", v.available, "capacity", v.capacity)
 		}
 	}
 }

 func debugLogThresholdsWithObservation(logPrefix string, thresholds []evictionapi.Threshold, observations signalObservations) {
-	if !klog.V(3).Enabled() {
+	klogV := klog.V(3)
+	if !klogV.Enabled() {
 		return
 	}
 	for i := range thresholds {
@@ -787,9 +789,9 @@ func debugLogThresholdsWithObservation(logPrefix string, thresholds []evictionap
 		observed, found := observations[threshold.Signal]
 		if found {
 			quantity := evictionapi.GetThresholdQuantity(threshold.Value, observed.capacity)
-			klog.InfoS("Eviction manager: threshold observed resource", "log", logPrefix, "signal", threshold.Signal, "quantity", quantity, "resource", observed.available)
+			klogV.InfoS("Eviction manager: threshold observed resource", "log", logPrefix, "signal", threshold.Signal, "quantity", quantity, "resource", observed.available)
 		} else {
-			klog.InfoS("Eviction manager: threshold had no observation", "log", logPrefix, "signal", threshold.Signal)
+			klogV.InfoS("Eviction manager: threshold had no observation", "log", logPrefix, "signal", threshold.Signal)
 		}
 	}
 }


@@ -1796,16 +1796,26 @@ func (kl *Kubelet) syncTerminatingPod(ctx context.Context, pod *v1.Pod, podStatu
 		return err
 	}

 	var runningContainers []string
-	var containers []string
+	type container struct {
+		Name       string
+		State      string
+		ExitCode   int
+		FinishedAt string
+	}
+	var containers []container
+	klogV := klog.V(4)
+	klogVEnabled := klogV.Enabled()
 	for _, s := range podStatus.ContainerStatuses {
 		if s.State == kubecontainer.ContainerStateRunning {
 			runningContainers = append(runningContainers, s.ID.String())
 		}
-		containers = append(containers, fmt.Sprintf("(%s state=%s exitCode=%d finishedAt=%s)", s.Name, s.State, s.ExitCode, s.FinishedAt.UTC().Format(time.RFC3339Nano)))
+		if klogVEnabled {
+			containers = append(containers, container{Name: s.Name, State: string(s.State), ExitCode: s.ExitCode, FinishedAt: s.FinishedAt.UTC().Format(time.RFC3339Nano)})
+		}
 	}
-	if klog.V(4).Enabled() {
-		sort.Strings(containers)
-		klog.InfoS("Post-termination container state", "pod", klog.KObj(pod), "podUID", pod.UID, "containers", strings.Join(containers, " "))
-	}
+	if klogVEnabled {
+		sort.Slice(containers, func(i, j int) bool { return containers[i].Name < containers[j].Name })
+		klog.V(4).InfoS("Post-termination container state", "pod", klog.KObj(pod), "podUID", pod.UID, "containers", containers)
+	}
 	if len(runningContainers) > 0 {
 		return fmt.Errorf("detected running containers after a successful KillPod, CRI violation: %v", runningContainers)


@@ -407,13 +407,13 @@ func (g *GenericPLEG) updateCache(pod *kubecontainer.Pod, pid types.UID) error {
 	status, err := g.runtime.GetPodStatus(pod.ID, pod.Name, pod.Namespace)
 	if err != nil {
 		if klog.V(6).Enabled() {
-			klog.V(6).ErrorS(err, "PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name), "podStatus", status)
+			klog.ErrorS(err, "PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name), "podStatus", status)
 		} else {
-			klog.V(4).ErrorS(err, "PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name))
+			klog.ErrorS(err, "PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name))
 		}
 	} else {
-		if klog.V(6).Enabled() {
-			klog.V(6).InfoS("PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name), "podStatus", status)
+		if klogV := klog.V(6); klogV.Enabled() {
+			klogV.InfoS("PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name), "podStatus", status)
 		} else {
 			klog.V(4).InfoS("PLEG: Write status", "pod", klog.KRef(pod.Namespace, pod.Name))
 		}


@@ -439,7 +439,7 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp
 	// Perform some more extensive logging of container termination state to assist in
 	// debugging production races (generally not needed).
-	if klog.V(5).Enabled() {
+	if klogV := klog.V(5); klogV.Enabled() {
 		var containers []string
 		for _, s := range append(append([]v1.ContainerStatus(nil), status.InitContainerStatuses...), status.ContainerStatuses...) {
 			var current, previous string
@@ -466,7 +466,7 @@ func (m *manager) updateStatusInternal(pod *v1.Pod, status v1.PodStatus, forceUp
 			containers = append(containers, fmt.Sprintf("(%s state=%s previous=%s)", s.Name, current, previous))
 		}
 		sort.Strings(containers)
-		klog.InfoS("updateStatusInternal", "version", cachedStatus.version+1, "pod", klog.KObj(pod), "podUID", pod.UID, "containers", strings.Join(containers, " "))
+		klogV.InfoS("updateStatusInternal", "version", cachedStatus.version+1, "pod", klog.KObj(pod), "podUID", pod.UID, "containers", strings.Join(containers, " "))
 	}

 	// The intent here is to prevent concurrent updates to a pod's status from


@@ -248,9 +248,9 @@ func ensureConfiguration(wrapper configurationWrapper, strategy ensureStrategy,
 		return fmt.Errorf("failed to determine whether auto-update is required for %s type=%s name=%q error=%w", wrapper.TypeName(), configurationType, name, err)
 	}
 	if !update {
-		if klog.V(5).Enabled() {
-			// TODO: if we use structured logging here the diff gets escaped and very awkward to read in the log
-			klog.Infof("No update required for the %s type=%s name=%q diff: %s", wrapper.TypeName(), configurationType, name, cmp.Diff(current, bootstrap))
+		if klogV := klog.V(5); klogV.Enabled() {
+			klogV.InfoS("No update required", "wrapper", wrapper.TypeName(), "type", configurationType, "name", name,
+				"diff", cmp.Diff(current, bootstrap))
 		}
 		return nil
 	}


@@ -69,12 +69,10 @@ func (r *resourceAllocationScorer) score(
 	score := r.scorer(requested, allocatable)
-	if klog.V(10).Enabled() {
-		klog.InfoS("Listing internal info for allocatable resources, requested resources and score", "pod",
-			klog.KObj(pod), "node", klog.KObj(node), "resourceAllocationScorer", r.Name,
-			"allocatableResource", allocatable, "requestedResource", requested, "resourceScore", score,
-		)
-	}
+	klog.V(10).InfoS("Listing internal info for allocatable resources, requested resources and score", "pod",
+		klog.KObj(pod), "node", klog.KObj(node), "resourceAllocationScorer", r.Name,
+		"allocatableResource", allocatable, "requestedResource", requested, "resourceScore", score,
+	)

 	return score, nil
 }
@@ -107,9 +105,7 @@ func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(nodeInfo
 			return nodeInfo.Allocatable.ScalarResources[resource], (nodeInfo.Requested.ScalarResources[resource] + podRequest)
 		}
 	}
-	if klog.V(10).Enabled() {
-		klog.InfoS("Requested resource is omitted for node score calculation", "resourceName", resource)
-	}
+	klog.V(10).InfoS("Requested resource is omitted for node score calculation", "resourceName", resource)
 	return 0, 0
 }


@@ -220,12 +220,12 @@ func (ev *Evaluator) findCandidates(ctx context.Context, pod *v1.Pod, m framewor
 	}
 	offset, numCandidates := ev.GetOffsetAndNumCandidates(int32(len(potentialNodes)))
-	if klog.V(5).Enabled() {
+	if klogV := klog.V(5); klogV.Enabled() {
 		var sample []string
 		for i := offset; i < offset+10 && i < int32(len(potentialNodes)); i++ {
 			sample = append(sample, potentialNodes[i].Node().Name)
 		}
-		klog.InfoS("Selecting candidates from a pool of nodes", "potentialNodesCount", len(potentialNodes), "offset", offset, "sampleLength", len(sample), "sample", sample, "candidates", numCandidates)
+		klogV.InfoS("Selecting candidates from a pool of nodes", "potentialNodesCount", len(potentialNodes), "offset", offset, "sampleLength", len(sample), "sample", sample, "candidates", numCandidates)
 	}
 	candidates, nodeStatuses, err := ev.DryRunPreemption(ctx, pod, potentialNodes, pdbs, offset, numCandidates)
 	for node, nodeStatus := range unschedulableNodeStatus {


@@ -426,10 +426,12 @@ func prioritizeNodes(
 		return nil, scoreStatus.AsError()
 	}

-	if klog.V(10).Enabled() {
+	// Additional details logged at level 10 if enabled.
+	klogV := klog.V(10)
+	if klogV.Enabled() {
 		for plugin, nodeScoreList := range scoresMap {
 			for _, nodeScore := range nodeScoreList {
-				klog.InfoS("Plugin scored node for pod", "pod", klog.KObj(pod), "plugin", plugin, "node", nodeScore.Name, "score", nodeScore.Score)
+				klogV.InfoS("Plugin scored node for pod", "pod", klog.KObj(pod), "plugin", plugin, "node", nodeScore.Name, "score", nodeScore.Score)
 			}
 		}
 	}
@@ -467,8 +469,8 @@ func prioritizeNodes(
 				mu.Lock()
 				for i := range *prioritizedList {
 					host, score := (*prioritizedList)[i].Host, (*prioritizedList)[i].Score
-					if klog.V(10).Enabled() {
-						klog.InfoS("Extender scored node for pod", "pod", klog.KObj(pod), "extender", extenders[extIndex].Name(), "node", host, "score", score)
+					if klogV.Enabled() {
+						klogV.InfoS("Extender scored node for pod", "pod", klog.KObj(pod), "extender", extenders[extIndex].Name(), "node", host, "score", score)
 					}
 					combinedScores[host] += score * weight
 				}
@@ -484,9 +486,9 @@ func prioritizeNodes(
 		}
 	}

-	if klog.V(10).Enabled() {
+	if klogV.Enabled() {
 		for i := range result {
-			klog.InfoS("Calculated node's final score for pod", "pod", klog.KObj(pod), "node", result[i].Name, "score", result[i].Score)
+			klogV.InfoS("Calculated node's final score for pod", "pod", klog.KObj(pod), "node", result[i].Name, "score", result[i].Score)
 		}
 	}
 	return result, nil


@@ -617,9 +617,7 @@ func (sched *Scheduler) scheduleOne(ctx context.Context) {
 		sched.handleSchedulingFailure(fwk, assumedPodInfo, fmt.Errorf("binding rejected: %w", err), SchedulerError, clearNominatedNode)
 	} else {
 		// Calculating nodeResourceString can be heavy. Avoid it if klog verbosity is below 2.
-		if klog.V(2).Enabled() {
-			klog.InfoS("Successfully bound pod to node", "pod", klog.KObj(pod), "node", scheduleResult.SuggestedHost, "evaluatedNodes", scheduleResult.EvaluatedNodes, "feasibleNodes", scheduleResult.FeasibleNodes)
-		}
+		klog.V(2).InfoS("Successfully bound pod to node", "pod", klog.KObj(pod), "node", scheduleResult.SuggestedHost, "evaluatedNodes", scheduleResult.EvaluatedNodes, "feasibleNodes", scheduleResult.FeasibleNodes)
 		metrics.PodScheduled(fwk.ProfileName(), metrics.SinceInSeconds(start))
 		metrics.PodSchedulingAttempts.Observe(float64(podInfo.Attempts))
 		metrics.PodSchedulingDuration.WithLabelValues(getAttemptsLabel(podInfo)).Observe(metrics.SinceInSeconds(podInfo.InitialAttemptTimestamp))


@@ -25,6 +25,8 @@ import (
 	"fmt"
 	"time"

+	"github.com/go-logr/logr"
+
 	"k8s.io/klog/v2"
 	"k8s.io/mount-utils"
@@ -342,6 +344,33 @@ func (volume *VolumeToAttach) GenerateError(prefixMsg string, err error) (simple
 	return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
 }

+// String combines key fields of the volume for logging in text format.
+func (volume *VolumeToAttach) String() string {
+	volumeSpecName := "nil"
+	if volume.VolumeSpec != nil {
+		volumeSpecName = volume.VolumeSpec.Name()
+	}
+	return fmt.Sprintf("%s (UniqueName: %s) from node %s", volumeSpecName, volume.VolumeName, volume.NodeName)
+}
+
+// MarshalLog combines key fields of the volume for logging in a structured format.
+func (volume *VolumeToAttach) MarshalLog() interface{} {
+	volumeSpecName := "nil"
+	if volume.VolumeSpec != nil {
+		volumeSpecName = volume.VolumeSpec.Name()
+	}
+	return struct {
+		VolumeName, UniqueName, NodeName string
+	}{
+		VolumeName: volumeSpecName,
+		UniqueName: string(volume.VolumeName),
+		NodeName:   string(volume.NodeName),
+	}
+}
+
+var _ fmt.Stringer = &VolumeToAttach{}
+var _ logr.Marshaler = &VolumeToAttach{}
+
 // VolumeToMount represents a volume that should be attached to this node and
 // mounted to the PodName.
 type VolumeToMount struct {
@@ -530,6 +559,33 @@ func (volume *AttachedVolume) GenerateError(prefixMsg string, err error) (simple
 	return fmt.Errorf(simpleMsg), fmt.Errorf(detailedMsg)
 }

+// String combines key fields of the volume for logging in text format.
+func (volume *AttachedVolume) String() string {
+	volumeSpecName := "nil"
+	if volume.VolumeSpec != nil {
+		volumeSpecName = volume.VolumeSpec.Name()
+	}
+	return fmt.Sprintf("%s (UniqueName: %s) from node %s", volumeSpecName, volume.VolumeName, volume.NodeName)
+}
+
+// MarshalLog combines key fields of the volume for logging in a structured format.
+func (volume *AttachedVolume) MarshalLog() interface{} {
+	volumeSpecName := "nil"
+	if volume.VolumeSpec != nil {
+		volumeSpecName = volume.VolumeSpec.Name()
+	}
+	return struct {
+		VolumeName, UniqueName, NodeName string
+	}{
+		VolumeName: volumeSpecName,
+		UniqueName: string(volume.VolumeName),
+		NodeName:   string(volume.NodeName),
+	}
+}
+
+var _ fmt.Stringer = &AttachedVolume{}
+var _ logr.Marshaler = &AttachedVolume{}
+
 // MountedVolume represents a volume that has successfully been mounted to a pod.
 type MountedVolume struct {
 	// PodName is the unique identifier of the pod mounted to.
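The String and MarshalLog additions let a volume be passed directly as a structured-logging value: klog's text format can render it via fmt.Stringer, while logr backends that understand logr.Marshaler (a JSON sink, for instance) receive the struct returned by MarshalLog. A minimal sketch with a hypothetical demoVolume type; the commented output shapes are illustrative, not captured from a real run:

package main

import (
	"fmt"

	"k8s.io/klog/v2"
)

// demoVolume mimics the pattern above: it implements both fmt.Stringer
// (used by text output) and the logr.Marshaler method (used by structured
// sinks). Hypothetical type, for illustration only.
type demoVolume struct {
	Name, Node string
}

func (v demoVolume) String() string {
	return fmt.Sprintf("%s from node %s", v.Name, v.Node)
}

func (v demoVolume) MarshalLog() interface{} {
	return struct{ Name, Node string }{v.Name, v.Node}
}

func main() {
	// Text format: ... volume="myvol from node node-1"
	// JSON sink:   ... "volume":{"Name":"myvol","Node":"node-1"}
	klog.InfoS("attacherDetacher.AttachVolume started", "volume", demoVolume{"myvol", "node-1"})
}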


@@ -50,7 +50,7 @@ func StorageWithCacher() generic.StorageDecorator {
 			return s, d, err
 		}
 		if klog.V(5).Enabled() {
-			klog.InfoS("Storage caching is enabled", objectTypeToArgs(newFunc())...)
+			klog.V(5).InfoS("Storage caching is enabled", objectTypeToArgs(newFunc())...)
 		}

 		cacherConfig := cacherstorage.Config{


@@ -95,10 +95,12 @@ func DefaultStacktracePred(status int) bool {
 	return (status < http.StatusOK || status >= http.StatusInternalServerError) && status != http.StatusSwitchingProtocols
 }

+const withLoggingLevel = 3
+
 // WithLogging wraps the handler with logging.
 func WithLogging(handler http.Handler, pred StacktracePred) http.Handler {
 	return withLogging(handler, pred, func() bool {
-		return klog.V(3).Enabled()
+		return klog.V(withLoggingLevel).Enabled()
 	})
 }
@@ -280,7 +282,7 @@ func (rl *respLogger) Log() {
 		}
 	}

-	klog.InfoSDepth(1, "HTTP", keysAndValues...)
+	klog.V(withLoggingLevel).InfoSDepth(1, "HTTP", keysAndValues...)
 }

 // Header implements http.ResponseWriter.
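The new constant keeps the gate installed by WithLogging and the emitting call in Log() at the same verbosity; previously the gate was a literal 3 while the entry itself was written without any level. A tiny sketch of the resulting invariant (same shape as the change above, assumed semantics):

package main

import "k8s.io/klog/v2"

// One constant, two uses: the filter and the emitter cannot drift apart.
const withLoggingLevel = 3

func main() {
	if klog.V(withLoggingLevel).Enabled() {
		klog.V(withLoggingLevel).InfoSDepth(1, "HTTP")
	}
}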


@@ -33,19 +33,19 @@ const klogWrapperDepth = 4

 func (klogWrapper) Info(args ...interface{}) {
 	if klog.V(5).Enabled() {
-		klog.InfoDepth(klogWrapperDepth, args...)
+		klog.V(5).InfoSDepth(klogWrapperDepth, fmt.Sprint(args...))
 	}
 }

 func (klogWrapper) Infoln(args ...interface{}) {
 	if klog.V(5).Enabled() {
-		klog.InfoDepth(klogWrapperDepth, fmt.Sprintln(args...))
+		klog.V(5).InfoSDepth(klogWrapperDepth, fmt.Sprintln(args...))
 	}
 }

 func (klogWrapper) Infof(format string, args ...interface{}) {
 	if klog.V(5).Enabled() {
-		klog.InfoDepth(klogWrapperDepth, fmt.Sprintf(format, args...))
+		klog.V(5).InfoSDepth(klogWrapperDepth, fmt.Sprintf(format, args...))
 	}
 }
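For context on the depth argument: InfoSDepth skips the given number of stack frames when attributing the entry's source location, so messages funneled through this wrapper are credited to the etcd client call site rather than to the wrapper itself. A minimal sketch, assuming a one-level wrapper and therefore depth 1:

package main

import "k8s.io/klog/v2"

// logViaWrapper adds one stack frame between the caller and klog, so it
// passes depth 1 to keep the reported file/line at the real call site.
func logViaWrapper(msg string) {
	klog.InfoSDepth(1, msg)
}

func main() {
	logViaWrapper("hello") // attributed to this line, not to logViaWrapper
}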


@@ -608,9 +608,10 @@ func (meal *cfgMeal) digestFlowSchemasLocked(newFSs []*flowcontrol.FlowSchema) {
 	}
 	meal.cfgCtlr.flowSchemas = fsSeq
-	if klog.V(5).Enabled() {
+	klogV := klog.V(5)
+	if klogV.Enabled() {
 		for _, fs := range fsSeq {
-			klog.Infof("Using FlowSchema %s", fcfmt.Fmt(fs))
+			klogV.Infof("Using FlowSchema %s", fcfmt.Fmt(fs))
 		}
 	}
 }


@@ -637,8 +637,9 @@ func (qs *queueSet) enqueueLocked(request *request) {
 	if queue.requests.Length() == 0 && queue.requestsExecuting == 0 {
 		// the queues start R is set to the virtual time.
 		queue.nextDispatchR = qs.currentR
-		if klog.V(6).Enabled() {
-			klog.Infof("QS(%s) at t=%s R=%v: initialized queue %d start R due to request %#+v %#+v", qs.qCfg.Name, now.Format(nsTimeFmt), queue.nextDispatchR, queue.index, request.descr1, request.descr2)
+		klogV := klog.V(6)
+		if klogV.Enabled() {
+			klogV.Infof("QS(%s) at t=%s R=%v: initialized queue %d start R due to request %#+v %#+v", qs.qCfg.Name, now.Format(nsTimeFmt), queue.nextDispatchR, queue.index, request.descr1, request.descr2)
 		}
 	}
 	request.removeFromQueueLocked = queue.requests.Enqueue(request)
@@ -676,8 +677,9 @@ func (qs *queueSet) dispatchSansQueueLocked(ctx context.Context, workEstimate *f
 	metrics.AddRequestConcurrencyInUse(qs.qCfg.Name, fsName, req.MaxSeats())
 	qs.reqsObsPair.RequestsExecuting.Add(1)
 	qs.execSeatsObs.Add(float64(req.MaxSeats()))
-	if klog.V(5).Enabled() {
-		klog.Infof("QS(%s) at t=%s R=%v: immediate dispatch of request %q %#+v %#+v, qs will have %d executing", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, fsName, descr1, descr2, qs.totRequestsExecuting)
+	klogV := klog.V(5)
+	if klogV.Enabled() {
+		klogV.Infof("QS(%s) at t=%s R=%v: immediate dispatch of request %q %#+v %#+v, qs will have %d executing", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, fsName, descr1, descr2, qs.totRequestsExecuting)
 	}
 	return req
 }
@@ -717,8 +719,9 @@ func (qs *queueSet) dispatchLocked() bool {
 	metrics.AddRequestConcurrencyInUse(qs.qCfg.Name, request.fsName, request.MaxSeats())
 	qs.reqsObsPair.RequestsExecuting.Add(1)
 	qs.execSeatsObs.Add(float64(request.MaxSeats()))
-	if klog.V(6).Enabled() {
-		klog.Infof("QS(%s) at t=%s R=%v: dispatching request %#+v %#+v work %v from queue %d with start R %v, queue will have %d waiting & %d requests occupying %d seats, set will have %d seats occupied",
+	klogV := klog.V(6)
+	if klogV.Enabled() {
+		klogV.Infof("QS(%s) at t=%s R=%v: dispatching request %#+v %#+v work %v from queue %d with start R %v, queue will have %d waiting & %d requests occupying %d seats, set will have %d seats occupied",
 			qs.qCfg.Name, request.startTime.Format(nsTimeFmt), qs.currentR, request.descr1, request.descr2,
 			request.workEstimate, queue.index, queue.nextDispatchR, queue.requests.Length(), queue.requestsExecuting, queue.seatsInUse, qs.totSeatsInUse)
 	}
@@ -792,8 +795,9 @@ func (qs *queueSet) findDispatchQueueLocked() (*queue, *request) {
 	if !qs.canAccommodateSeatsLocked(oldestReqFromMinQueue.MaxSeats()) {
 		// since we have not picked the queue with the minimum virtual finish
 		// time, we are not going to advance the round robin index here.
-		if klog.V(4).Enabled() {
-			klog.Infof("QS(%s): request %v %v seats %d cannot be dispatched from queue %d, waiting for currently executing requests to complete, %d requests are occupying %d seats and the limit is %d",
+		klogV := klog.V(4)
+		if klogV.Enabled() {
+			klogV.Infof("QS(%s): request %v %v seats %d cannot be dispatched from queue %d, waiting for currently executing requests to complete, %d requests are occupying %d seats and the limit is %d",
 				qs.qCfg.Name, oldestReqFromMinQueue.descr1, oldestReqFromMinQueue.descr2, oldestReqFromMinQueue.MaxSeats(), minQueue.index, qs.totRequestsExecuting, qs.totSeatsInUse, qs.dCfg.ConcurrencyLimit)
 		}
 		return nil, nil
@@ -876,28 +880,29 @@ func (qs *queueSet) finishRequestLocked(r *request) {
 	}

 	defer func() {
+		klogV := klog.V(6)
 		if r.workEstimate.AdditionalLatency <= 0 {
 			// release the seats allocated to this request immediately
 			releaseSeatsLocked()
-			if !klog.V(6).Enabled() {
+			if !klogV.Enabled() {
 			} else if r.queue != nil {
-				klog.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished all use of %d seats, adjusted queue %d start R to %v due to service time %.9fs, queue will have %d requests with %#v waiting & %d requests occupying %d seats",
+				klogV.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished all use of %d seats, adjusted queue %d start R to %v due to service time %.9fs, queue will have %d requests with %#v waiting & %d requests occupying %d seats",
 					qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.MaxSeats(), r.queue.index,
 					r.queue.nextDispatchR, actualServiceDuration.Seconds(), r.queue.requests.Length(), r.queue.requests.QueueSum(), r.queue.requestsExecuting, r.queue.seatsInUse)
 			} else {
-				klog.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished all use of %d seats, qs will have %d requests occupying %d seats", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.InitialSeats, qs.totRequestsExecuting, qs.totSeatsInUse)
+				klogV.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished all use of %d seats, qs will have %d requests occupying %d seats", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.InitialSeats, qs.totRequestsExecuting, qs.totSeatsInUse)
 			}
 			return
 		}

 		additionalLatency := r.workEstimate.AdditionalLatency
-		if !klog.V(6).Enabled() {
+		if !klogV.Enabled() {
 		} else if r.queue != nil {
-			klog.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished main use of %d seats but lingering on %d seats for %v seconds, adjusted queue %d start R to %v due to service time %.9fs, queue will have %d requests with %#v waiting & %d requests occupying %d seats",
+			klogV.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished main use of %d seats but lingering on %d seats for %v seconds, adjusted queue %d start R to %v due to service time %.9fs, queue will have %d requests with %#v waiting & %d requests occupying %d seats",
 				qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.InitialSeats, r.workEstimate.FinalSeats, additionalLatency.Seconds(), r.queue.index,
 				r.queue.nextDispatchR, actualServiceDuration.Seconds(), r.queue.requests.Length(), r.queue.requests.QueueSum(), r.queue.requestsExecuting, r.queue.seatsInUse)
 		} else {
-			klog.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished main use of %d seats but lingering on %d seats for %v seconds, qs will have %d requests occupying %d seats", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.InitialSeats, r.workEstimate.FinalSeats, additionalLatency.Seconds(), qs.totRequestsExecuting, qs.totSeatsInUse)
+			klogV.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished main use of %d seats but lingering on %d seats for %v seconds, qs will have %d requests occupying %d seats", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.InitialSeats, r.workEstimate.FinalSeats, additionalLatency.Seconds(), qs.totRequestsExecuting, qs.totSeatsInUse)
 		}

 		// EventAfterDuration will execute the event func in a new goroutine,
 		// so the seats allocated to this request will be released after
@@ -908,13 +913,13 @@ func (qs *queueSet) finishRequestLocked(r *request) {
 			defer qs.lock.Unlock()
 			now := qs.clock.Now()
 			releaseSeatsLocked()
-			if !klog.V(6).Enabled() {
+			if !klogV.Enabled() {
 			} else if r.queue != nil {
-				klog.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished lingering on %d seats, queue %d will have %d requests with %#v waiting & %d requests occupying %d seats",
+				klogV.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished lingering on %d seats, queue %d will have %d requests with %#v waiting & %d requests occupying %d seats",
 					qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.FinalSeats, r.queue.index,
 					r.queue.requests.Length(), r.queue.requests.QueueSum(), r.queue.requestsExecuting, r.queue.seatsInUse)
 			} else {
-				klog.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished lingering on %d seats, qs will have %d requests occupying %d seats", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.FinalSeats, qs.totRequestsExecuting, qs.totSeatsInUse)
+				klogV.Infof("QS(%s) at t=%s R=%v: request %#+v %#+v finished lingering on %d seats, qs will have %d requests occupying %d seats", qs.qCfg.Name, now.Format(nsTimeFmt), qs.currentR, r.descr1, r.descr2, r.workEstimate.FinalSeats, qs.totRequestsExecuting, qs.totSeatsInUse)
 			}
 			qs.dispatchAsMuchAsPossibleLocked()
 		}, additionalLatency)
@@ -945,8 +950,8 @@ func (qs *queueSet) boundNextDispatchLocked(queue *queue) {
 	}
 	var virtualStartBound = oldestReqFromMinQueue.arrivalR
 	if queue.nextDispatchR < virtualStartBound {
-		if klog.V(4).Enabled() {
-			klog.InfoS("AntiWindup tweaked queue", "QS", qs.qCfg.Name, "queue", queue.index, "time", qs.clock.Now().Format(nsTimeFmt), "requestDescr1", oldestReqFromMinQueue.descr1, "requestDescr2", oldestReqFromMinQueue.descr2, "newVirtualStart", virtualStartBound, "deltaVirtualStart", (virtualStartBound - queue.nextDispatchR))
+		if klogV := klog.V(4); klogV.Enabled() {
+			klogV.InfoS("AntiWindup tweaked queue", "QS", qs.qCfg.Name, "queue", queue.index, "time", qs.clock.Now().Format(nsTimeFmt), "requestDescr1", oldestReqFromMinQueue.descr1, "requestDescr2", oldestReqFromMinQueue.descr2, "newVirtualStart", virtualStartBound, "deltaVirtualStart", (virtualStartBound - queue.nextDispatchR))
 		}
 		queue.nextDispatchR = virtualStartBound
 	}


@@ -44,7 +44,8 @@ type waitGroupCounter struct {
 var _ counter.GoRoutineCounter = (*waitGroupCounter)(nil)

 func (wgc *waitGroupCounter) Add(delta int) {
-	if klog.V(7).Enabled() {
+	klogV := klog.V(7)
+	if klogV.Enabled() {
 		var pcs [10]uintptr
 		nCallers := runtime.Callers(2, pcs[:])
 		frames := runtime.CallersFrames(pcs[:nCallers])
@@ -64,7 +65,7 @@ func (wgc *waitGroupCounter) Add(delta int) {
 				break
 			}
 		}
-		klog.InfoS("Add", "counter", fmt.Sprintf("%p", wgc), "delta", delta, "callers", callers)
+		klogV.InfoS("Add", "counter", fmt.Sprintf("%p", wgc), "delta", delta, "callers", callers)
 	}
 	wgc.wg.Add(delta)
 }


@@ -638,10 +638,11 @@ func (g *genConversion) preexists(inType, outType *types.Type) (*types.Type, boo
 }

 func (g *genConversion) Init(c *generator.Context, w io.Writer) error {
-	if klog.V(5).Enabled() {
+	klogV := klog.V(5)
+	if klogV.Enabled() {
 		if m, ok := g.useUnsafe.(equalMemoryTypes); ok {
 			var result []string
-			klog.Info("All objects without identical memory layout:")
+			klogV.Info("All objects without identical memory layout:")
 			for k, v := range m {
 				if v {
 					continue
@@ -650,7 +651,7 @@ func (g *genConversion) Init(c *generator.Context, w io.Writer) error {
 			}
 			sort.Strings(result)
 			for _, s := range result {
-				klog.Infof(s)
+				klogV.Info(s)
 			}
 		}
 	}


@@ -206,10 +206,11 @@ func (connection *VSphereConnection) NewClient(ctx context.Context) (*vim25.Clie
 	if err != nil {
 		return nil, err
 	}
-	if klog.V(3).Enabled() {
+	klogV := klog.V(3)
+	if klogV.Enabled() {
 		s, err := session.NewManager(client).UserSession(ctx)
 		if err == nil {
-			klog.Infof("New session ID for '%s' = %s", s.UserName, s.Key)
+			klogV.Infof("New session ID for '%s' = %s", s.UserName, s.Key)
 		}
 	}


@@ -453,8 +453,9 @@ func (a *Admission) EvaluatePod(ctx context.Context, nsPolicy api.Policy, nsPoli
 		a.Metrics.RecordError(false, attrs)
 	}

-	if klog.V(5).Enabled() {
-		klog.InfoS("PodSecurity evaluation", "policy", fmt.Sprintf("%v", nsPolicy), "op", attrs.GetOperation(), "resource", attrs.GetResource(), "namespace", attrs.GetNamespace(), "name", attrs.GetName())
+	klogV := klog.V(5)
+	if klogV.Enabled() {
+		klogV.InfoS("PodSecurity evaluation", "policy", fmt.Sprintf("%v", nsPolicy), "op", attrs.GetOperation(), "resource", attrs.GetResource(), "namespace", attrs.GetNamespace(), "name", attrs.GetName())
 	}

 	cachedResults := make(map[api.LevelVersion]policy.AggregateCheckResult)
 	response := allowedResponse()

vendor/modules.txt

@@ -254,6 +254,7 @@ github.com/fvbommel/sortorder
 # github.com/go-errors/errors v1.0.1 => github.com/go-errors/errors v1.0.1
 github.com/go-errors/errors
 # github.com/go-logr/logr v1.2.0 => github.com/go-logr/logr v1.2.0
+## explicit
 github.com/go-logr/logr
 # github.com/go-logr/zapr v1.2.0 => github.com/go-logr/zapr v1.2.0
 github.com/go-logr/zapr