Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-20 02:11:09 +00:00

Merge pull request #115060 from pohly/logcheck-update

hack: update logtools to v0.4.1

This commit is contained in:
commit f267dd8340
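
The substance of this commit is mechanical: the logcheck linter in logtools v0.4.1 rejects structured-logging keys that contain spaces, dots, dashes, or underscores, so the touched call sites rename their keys to plain lowerCamelCase identifiers. A minimal sketch of the convention, with illustrative message and key names rather than code taken from this tree:

// Sketch only: shows the key-naming rule the linter enforces.
package main

import (
    "flag"

    "k8s.io/klog/v2"
)

func main() {
    klog.InitFlags(nil)
    flag.Parse()
    defer klog.Flush()

    max := 131072

    // Rejected: keys with underscores, dots, dashes, or spaces, e.g.
    //   klog.InfoS("Setting nf_conntrack_max", "nf_conntrack_max", max)

    // Accepted: lowerCamelCase keys, which both the text and JSON
    // logging backends emit cleanly.
    klog.InfoS("Setting nf_conntrack_max", "nfConntrackMax", max)
}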
@@ -49,7 +49,7 @@ func (rct realConntracker) SetMax(max int) error {
 	if err := rct.setIntSysCtl("nf_conntrack_max", max); err != nil {
 		return err
 	}
-	klog.InfoS("Setting nf_conntrack_max", "nf_conntrack_max", max)
+	klog.InfoS("Setting nf_conntrack_max", "nfConntrackMax", max)

 	// Linux does not support writing to /sys/module/nf_conntrack/parameters/hashsize
 	// when the writer process is not in the initial network namespace
@@ -80,7 +80,7 @@ func (rct realConntracker) SetMax(max int) error {
 		return errReadOnlySysFS
 	}
 	// TODO: generify this and sysctl to a new sysfs.WriteInt()
-	klog.InfoS("Setting conntrack hashsize", "conntrack hashsize", max/4)
+	klog.InfoS("Setting conntrack hashsize", "conntrackHashsize", max/4)
 	return writeIntStringFile("/sys/module/nf_conntrack/parameters/hashsize", max/4)
 }
@@ -365,7 +365,7 @@ func (o *Options) writeConfigFile() (err error) {
 		return err
 	}

-	klog.InfoS("Wrote configuration", "WriteConfigTo", o.WriteConfigTo)
+	klog.InfoS("Wrote configuration", "file", o.WriteConfigTo)

 	return nil
 }
@@ -141,10 +141,10 @@ func newProxyServer(
 		if err != nil {
 			return nil, err
 		}
-		klog.InfoS("NodeInfo", "PodCIDR", nodeInfo.Spec.PodCIDR, "PodCIDRs", nodeInfo.Spec.PodCIDRs)
+		klog.InfoS("NodeInfo", "podCIDR", nodeInfo.Spec.PodCIDR, "podCIDRs", nodeInfo.Spec.PodCIDRs)
 	}

-	klog.V(2).InfoS("DetectLocalMode", "LocalMode", string(detectLocalMode))
+	klog.V(2).InfoS("DetectLocalMode", "localMode", string(detectLocalMode))

 	primaryFamily := v1.IPv4Protocol
 	primaryProtocol := utiliptables.ProtocolIPv4
@@ -422,7 +422,7 @@ func getDetectLocalMode(config *proxyconfigapi.KubeProxyConfiguration) (proxycon
 		if strings.TrimSpace(mode.String()) != "" {
 			return mode, fmt.Errorf("unknown detect-local-mode: %v", mode)
 		}
-		klog.V(4).InfoS("Defaulting detect-local-mode", "LocalModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR))
+		klog.V(4).InfoS("Defaulting detect-local-mode", "localModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR))
 		return proxyconfigapi.LocalModeClusterCIDR, nil
 	}
 }
@@ -452,7 +452,7 @@ func getLocalDetector(mode proxyconfigapi.LocalMode, config *proxyconfigapi.Kube
 		}
 		return proxyutiliptables.NewDetectLocalByInterfaceNamePrefix(config.DetectLocal.InterfaceNamePrefix)
 	}
-	klog.InfoS("Defaulting to no-op detect-local", "detect-local-mode", string(mode))
+	klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
 	return proxyutiliptables.NewNoOpLocalDetector(), nil
 }
@@ -516,9 +516,9 @@ func getDualStackLocalDetectorTuple(mode proxyconfigapi.LocalMode, config *proxy
 		}
 		return localDetectors, err
 	default:
-		klog.InfoS("Unknown detect-local-mode", "detect-local-mode", mode)
+		klog.InfoS("Unknown detect-local-mode", "detectLocalMode", mode)
 	}
-	klog.InfoS("Defaulting to no-op detect-local", "detect-local-mode", string(mode))
+	klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
 	return localDetectors, nil
 }
@@ -11,7 +11,7 @@ require (
 	github.com/google/go-flow-levee v0.1.5
 	gotest.tools/gotestsum v1.6.4
 	honnef.co/go/tools v0.4.0-0.dev.0.20221209223220-58c4d7e4b720
-	sigs.k8s.io/logtools v0.1.0
+	sigs.k8s.io/logtools v0.4.1
 	sigs.k8s.io/zeitgeist v0.2.0
 )
@@ -1194,8 +1194,8 @@ mvdan.cc/unparam v0.0.0-20220706161116-678bad134442/go.mod h1:F/Cxw/6mVrNKqrR2Yj
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-sigs.k8s.io/logtools v0.1.0 h1:BWdoZu2PK6/lj5QhSXBGlHFu3d/ST1HlINXwE/CkJRQ=
-sigs.k8s.io/logtools v0.1.0/go.mod h1:/Rp/yzQWyUgsNCRb1HKRnrVujUV9CmzksOlsQR2OVvw=
+sigs.k8s.io/logtools v0.4.1 h1:b1SFUhj3Iu1af7pJSf02EyNGlbdCkwGOuAq12dW9c9g=
+sigs.k8s.io/logtools v0.4.1/go.mod h1:2HGcCK1vi9YvsBoUDMXvrf588j890iRtsymJX2biE0Q=
 sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
 sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
 sigs.k8s.io/zeitgeist v0.2.0 h1:gM+B8dB/3GB/UIqll1Y7g5HTjsXI8frqpUGK40w5Aus=
@@ -161,7 +161,7 @@ func nextScheduleTime(cj *batchv1.CronJob, now time.Time, schedule cron.Schedule
 		// I've somewhat arbitrarily picked 100, as more than 80,
 		// but less than "lots".
 		recorder.Eventf(cj, corev1.EventTypeWarning, "TooManyMissedTimes", "too many missed start times: %d. Set or decrease .spec.startingDeadlineSeconds or check clock skew", numberOfMissedSchedules)
-		klog.InfoS("too many missed times", "cronjob", klog.KRef(cj.GetNamespace(), cj.GetName()), "missed times", numberOfMissedSchedules)
+		klog.InfoS("too many missed times", "cronjob", klog.KRef(cj.GetNamespace(), cj.GetName()), "missedTimes", numberOfMissedSchedules)
 	}
 	return mostRecentTime, err
 }
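
A note on the helpers in the hunk above: only the value key changes, while the object reference stays klog.KRef. As a hedged sketch (the pod object below is fabricated), klog.KObj derives a namespace/name reference from an object in hand, and klog.KRef builds the same reference from bare strings:

// Sketch of the klog object-reference helpers; objects are fabricated.
package main

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/klog/v2"
)

func main() {
    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "web-0"}}

    // KObj reads namespace/name from the object and renders "default/web-0".
    klog.InfoS("syncing", "pod", klog.KObj(pod))

    // KRef builds the same reference from strings, useful when only a
    // work-queue key is available rather than the object itself.
    klog.InfoS("requeueing", "pod", klog.KRef("default", "web-0"))
}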
@@ -287,7 +287,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
 	}
 	if needUpdate {
 		if node.Spec.PodCIDR != "" {
-			klog.ErrorS(nil, "PodCIDR being reassigned!", "nodeName", node.Name, "node.Spec.PodCIDRs", node.Spec.PodCIDRs, "cidrStrings", cidrStrings)
+			klog.ErrorS(nil, "PodCIDR being reassigned!", "nodeName", node.Name, "podCIDRs", node.Spec.PodCIDRs, "cidrStrings", cidrStrings)
 			// We fall through and set the CIDR despite this error. This
 			// implements the same logic as implemented in the
 			// rangeAllocator.
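
The hunk above passes nil as the first argument to klog.ErrorS. That is deliberate klog usage, not an oversight: ErrorS logs at error severity, and a nil error simply omits the attached error value. A minimal sketch with fabricated values:

// Sketch of klog.ErrorS with and without an error value.
package main

import (
    "errors"

    "k8s.io/klog/v2"
)

func main() {
    // With a real error, ErrorS attaches it to the log line.
    klog.ErrorS(errors.New("cidr exhausted"), "allocation failed", "nodeName", "node-1")

    // With nil, only the message and key/value pairs are emitted, still
    // at error severity so the condition stands out in the logs.
    klog.ErrorS(nil, "PodCIDR being reassigned!", "nodeName", "node-1")
}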
@@ -326,20 +326,20 @@ func needPodCIDRsUpdate(node *v1.Node, podCIDRs []*net.IPNet) (bool, error) {
 	}
 	_, nodePodCIDR, err := netutils.ParseCIDRSloppy(node.Spec.PodCIDR)
 	if err != nil {
-		klog.ErrorS(err, "Found invalid node.Spec.PodCIDR", "node.Spec.PodCIDR", node.Spec.PodCIDR)
+		klog.ErrorS(err, "Found invalid node.Spec.PodCIDR", "podCIDR", node.Spec.PodCIDR)
 		// We will try to overwrite with new CIDR(s)
 		return true, nil
 	}
 	nodePodCIDRs, err := netutils.ParseCIDRs(node.Spec.PodCIDRs)
 	if err != nil {
-		klog.ErrorS(err, "Found invalid node.Spec.PodCIDRs", "node.Spec.PodCIDRs", node.Spec.PodCIDRs)
+		klog.ErrorS(err, "Found invalid node.Spec.PodCIDRs", "podCIDRs", node.Spec.PodCIDRs)
 		// We will try to overwrite with new CIDR(s)
 		return true, nil
 	}

 	if len(podCIDRs) == 1 {
 		if cmp.Equal(nodePodCIDR, podCIDRs[0]) {
-			klog.V(4).InfoS("Node already has allocated CIDR. It matches the proposed one.", "nodeName", node.Name, "podCIDRs[0]", podCIDRs[0])
+			klog.V(4).InfoS("Node already has allocated CIDR. It matches the proposed one.", "nodeName", node.Name, "podCIDR", podCIDRs[0])
 			return false, nil
 		}
 	} else if len(nodePodCIDRs) == len(podCIDRs) {
@@ -27,7 +27,7 @@ import (
 	"sync"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -34,7 +34,7 @@ import (
 	"sync"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -272,7 +272,7 @@ func (ttlc *Controller) patchNodeWithAnnotation(ctx context.Context, node *v1.No
 		klog.V(2).InfoS("Failed to change ttl annotation for node", "node", klog.KObj(node), "err", err)
 		return err
 	}
-	klog.V(2).InfoS("Changed ttl annotation", "node", klog.KObj(node), "new_ttl", time.Duration(value)*time.Second)
+	klog.V(2).InfoS("Changed ttl annotation", "node", klog.KObj(node), "TTL", time.Duration(value)*time.Second)
 	return nil
 }
@@ -111,8 +111,8 @@ func (r *draPluginClient) NodePrepareResource(
 		log("calling NodePrepareResource rpc"),
 		"namespace", namespace,
 		"claimUID", claimUID,
-		"claim name", claimName,
-		"resource handle", resourceHandle)
+		"claimName", claimName,
+		"resourceHandle", resourceHandle)

 	if r.nodeV1ClientCreator == nil {
 		return nil, errors.New("failed to call NodePrepareResource. nodeV1ClientCreator is nil")
@@ -146,7 +146,7 @@ func (r *draPluginClient) NodeUnprepareResource(
 		"namespace", namespace,
 		"claimUID", claimUID,
 		"claimname", claimName,
-		"cdi devices", cdiDevices)
+		"cdiDevices", cdiDevices)

 	if r.nodeV1ClientCreator == nil {
 		return nil, errors.New("nodeV1ClientCreate is nil")
@@ -2437,7 +2437,7 @@ func TestAllocateAndAddPodWithInitContainers(t *testing.T) {

 	for _, testCase := range testCases {
 		t.Run(testCase.description, func(t *testing.T) {
-			klog.InfoS("TestAllocateAndAddPodWithInitContainers", "test name", testCase.description)
+			klog.InfoS("TestAllocateAndAddPodWithInitContainers", "name", testCase.description)
 			mgr := &manager{
 				policy: returnPolicyByName(testCase),
 				state:  state.NewMemoryState(),
@@ -2489,7 +2489,7 @@ func TestStaticPolicyAllocateWithInitContainers(t *testing.T) {

 	for _, testCase := range testCases {
 		t.Run(testCase.description, func(t *testing.T) {
-			klog.InfoS("TestStaticPolicyAllocateWithInitContainers", "test name", testCase.description)
+			klog.InfoS("TestStaticPolicyAllocateWithInitContainers", "name", testCase.description)
 			p, s, err := initTests(t, &testCase, testCase.topologyHint, testCase.initContainersReusableMemory)
 			if err != nil {
 				t.Fatalf("Unexpected error: %v", err)
@@ -290,7 +290,7 @@ func (m *qosContainerManagerImpl) setMemoryQoS(configs map[v1.PodQOSClass]*Cgrou
 			configs[v1.PodQOSBurstable].ResourceParameters.Unified = make(map[string]string)
 		}
 		configs[v1.PodQOSBurstable].ResourceParameters.Unified[MemoryMin] = strconv.FormatInt(burstableMin, 10)
-		klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSBurstable, "memory.min", burstableMin)
+		klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSBurstable, "memoryMin", burstableMin)
 	}

 	if guaranteedMin > 0 {
@@ -298,7 +298,7 @@ func (m *qosContainerManagerImpl) setMemoryQoS(configs map[v1.PodQOSClass]*Cgrou
 			configs[v1.PodQOSGuaranteed].ResourceParameters.Unified = make(map[string]string)
 		}
 		configs[v1.PodQOSGuaranteed].ResourceParameters.Unified[MemoryMin] = strconv.FormatInt(guaranteedMin, 10)
-		klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSGuaranteed, "memory.min", guaranteedMin)
+		klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSGuaranteed, "memoryMin", guaranteedMin)
 	}
 }
@@ -1006,7 +1006,7 @@ func setTerminationGracePeriod(pod *v1.Pod, containerSpec *v1.Container, contain
 func isProbeTerminationGracePeriodSecondsSet(pod *v1.Pod, containerSpec *v1.Container, probe *v1.Probe, containerName string, containerID kubecontainer.ContainerID, probeType string) bool {
 	if probe != nil && probe.TerminationGracePeriodSeconds != nil {
 		if *probe.TerminationGracePeriodSeconds > *pod.Spec.TerminationGracePeriodSeconds {
-			klog.V(4).InfoS("Using probe-level grace period that is greater than the pod-level grace period", "pod", klog.KObj(pod), "pod-uid", pod.UID, "containerName", containerName, "containerID", containerID.String(), "probe-type", probeType, "probe-grace-period", *probe.TerminationGracePeriodSeconds, "pod-grace-period", *pod.Spec.TerminationGracePeriodSeconds)
+			klog.V(4).InfoS("Using probe-level grace period that is greater than the pod-level grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerName, "containerID", containerID.String(), "probeType", probeType, "probeGracePeriod", *probe.TerminationGracePeriodSeconds, "podGracePeriod", *pod.Spec.TerminationGracePeriodSeconds)
 		}
 		return true
 	}
@@ -102,7 +102,7 @@ func (c *criMetricsCollector) criMetricToProm(m *runtimeapi.Metric) (metrics.Met
 	desc, ok := c.descriptors[m.Name]
 	if !ok {
 		err := fmt.Errorf("error converting CRI Metric to prometheus format")
-		klog.V(5).ErrorS(err, "Descriptor not present in pre-populated list of descriptors", "descriptor name", m.Name)
+		klog.V(5).ErrorS(err, "Descriptor not present in pre-populated list of descriptors", "name", m.Name)
 		return nil, err
 	}
@@ -102,7 +102,7 @@ func (p *basicPodStartupLatencyTracker) ObservedPodOnWatch(pod *v1.Pod, when tim
 		klog.InfoS("Observed pod startup duration",
 			"pod", klog.KObj(pod),
 			"podStartSLOduration", podStartSLOduration,
-			"pod.CreationTimestamp", pod.CreationTimestamp.Time,
+			"podCreationTimestamp", pod.CreationTimestamp.Time,
 			"firstStartedPulling", state.firstStartedPulling,
 			"lastFinishedPulling", state.lastFinishedPulling,
 			"observedRunningTime", state.observedRunningTime,
@@ -43,13 +43,13 @@ func (n *NodePodCIDRHandler) OnNodeAdd(node *v1.Node) {
 	podCIDRs := node.Spec.PodCIDRs
 	// initialize podCIDRs
 	if len(n.podCIDRs) == 0 && len(podCIDRs) > 0 {
-		klog.InfoS("Setting current PodCIDRs", "PodCIDRs", podCIDRs)
+		klog.InfoS("Setting current PodCIDRs", "podCIDRs", podCIDRs)
 		n.podCIDRs = podCIDRs
 		return
 	}
 	if !reflect.DeepEqual(n.podCIDRs, podCIDRs) {
 		klog.ErrorS(nil, "Using NodeCIDR LocalDetector mode, current PodCIDRs are different than previous PodCIDRs, restarting",
-			"node", klog.KObj(node), "New Node PodCIDRs", podCIDRs, "Old Node UID", n.podCIDRs)
+			"node", klog.KObj(node), "newPodCIDRs", podCIDRs, "oldPodCIDRs", n.podCIDRs)
 		panic("Current Node PodCIDRs are different than previous PodCIDRs, restarting")
 	}
 }
@@ -61,13 +61,13 @@ func (n *NodePodCIDRHandler) OnNodeUpdate(_, node *v1.Node) {
 	podCIDRs := node.Spec.PodCIDRs
 	// initialize podCIDRs
 	if len(n.podCIDRs) == 0 && len(podCIDRs) > 0 {
-		klog.InfoS("Setting current PodCIDRs", "PodCIDRs", podCIDRs)
+		klog.InfoS("Setting current PodCIDRs", "podCIDRs", podCIDRs)
 		n.podCIDRs = podCIDRs
 		return
 	}
 	if !reflect.DeepEqual(n.podCIDRs, podCIDRs) {
 		klog.ErrorS(nil, "Using NodeCIDR LocalDetector mode, current PodCIDRs are different than previous PodCIDRs, restarting",
-			"node", klog.KObj(node), "New Node PodCIDRs", podCIDRs, "Old Node UID", n.podCIDRs)
+			"node", klog.KObj(node), "newPodCIDRs", podCIDRs, "oldPODCIDRs", n.podCIDRs)
 		panic("Current Node PodCIDRs are different than previous PodCIDRs, restarting")
 	}
 }
@@ -320,7 +320,7 @@ func (pl *dynamicResources) PreFilter(ctx context.Context, state *framework.Cycl
 	if err != nil {
 		return nil, statusUnschedulable(logger, err.Error())
 	}
-	logger.V(5).Info("pod resource claims", "pod", klog.KObj(pod), "resourceclaims", klog.KObjs(claims))
+	logger.V(5).Info("pod resource claims", "pod", klog.KObj(pod), "resourceclaims", klog.KObjSlice(claims))
 	// If the pod does not reference any claim, we don't need to do
 	// anything for it.
 	if len(claims) == 0 {
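
Besides the key renames, the hunk above swaps the deprecated klog.KObjs for klog.KObjSlice. Both render a slice of objects as namespace/name references; the difference is that KObjs builds the reference slice eagerly as a call argument even when V(5) is disabled, while KObjSlice wraps the slice and formats it only if the line is actually emitted. A sketch with fabricated objects:

// Sketch contrasting klog.KObjs and klog.KObjSlice; objects fabricated.
package main

import (
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/klog/v2"
)

func main() {
    claims := []*v1.Pod{ // stand-ins; any type with Name/Namespace works
        {ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "claim-a"}},
    }

    // Eager: the reference slice is built before the verbosity check.
    klog.V(5).InfoS("pod resource claims", "resourceclaims", klog.KObjs(claims))

    // Lazy: formatting is deferred until the message is actually logged.
    klog.V(5).InfoS("pod resource claims", "resourceclaims", klog.KObjSlice(claims))
}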
@@ -418,7 +418,7 @@ func (plugin *localVolumePlugin) NodeExpand(resizeOptions volume.NodeResizeOptio
 	case hostutil.FileTypeDirectory:
 		// if the given local volume path is of already filesystem directory, return directly because
 		// we do not want to prevent mount operation from succeeding.
-		klog.InfoS("Expansion of directory based local volumes is NO-OP", "local-volume-path", localDevicePath)
+		klog.InfoS("Expansion of directory based local volumes is NO-OP", "localVolumePath", localDevicePath)
 		return true, nil
 	default:
 		return false, fmt.Errorf("only directory and block device are supported")
@@ -70,7 +70,7 @@ func WithAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime.
 			return
 		}

-		klog.V(4).InfoS("Forbidden", "URI", req.RequestURI, "Reason", reason)
+		klog.V(4).InfoS("Forbidden", "URI", req.RequestURI, "reason", reason)
 		audit.AddAuditAnnotations(ctx,
 			decisionAnnotationKey, decisionForbid,
 			reasonAnnotationKey, reason)
@@ -109,14 +109,14 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime.
 				actingAsAttributes.Resource = "uids"

 			default:
-				klog.V(4).InfoS("unknown impersonation request type", "Request", impersonationRequest)
+				klog.V(4).InfoS("unknown impersonation request type", "request", impersonationRequest)
 				responsewriters.Forbidden(ctx, actingAsAttributes, w, req, fmt.Sprintf("unknown impersonation request type: %v", impersonationRequest), s)
 				return
 			}

 			decision, reason, err := a.Authorize(ctx, actingAsAttributes)
 			if err != nil || decision != authorizer.DecisionAllow {
-				klog.V(4).InfoS("Forbidden", "URI", req.RequestURI, "Reason", reason, "Error", err)
+				klog.V(4).InfoS("Forbidden", "URI", req.RequestURI, "reason", reason, "err", err)
 				responsewriters.Forbidden(ctx, actingAsAttributes, w, req, reason, s)
 				return
 			}
@@ -149,7 +149,7 @@ func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager st
 			name = accessor.GetName()
 		}

-		klog.ErrorS(err, "[SHOULD NOT HAPPEN] failed to update managedFields", "VersionKind",
+		klog.ErrorS(err, "[SHOULD NOT HAPPEN] failed to update managedFields", "versionKind",
 			newObj.GetObjectKind().GroupVersionKind(), "namespace", ns, "name", name)
 	})
 	// Explicitly remove managedFields on failure, so that
@@ -211,7 +211,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er
 		opts := metainternalversion.ListOptions{}
 		if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil {
 			// An error in parsing request will result in default to "list" and not setting "name" field.
-			klog.ErrorS(err, "Couldn't parse request", "Request", req.URL.Query())
+			klog.ErrorS(err, "Couldn't parse request", "request", req.URL.Query())
 			// Reset opts to not rely on partial results from parsing.
 			// However, if watch is set, let's report it.
 			opts = metainternalversion.ListOptions{}
@@ -55,7 +55,7 @@ func WithPanicRecovery(handler http.Handler, resolver request.RequestInfoResolve
 			return
 		}
 		http.Error(w, "This request caused apiserver to panic. Look in the logs for details.", http.StatusInternalServerError)
-		klog.ErrorS(nil, "apiserver panic'd", "method", req.Method, "URI", req.RequestURI, "audit-ID", audit.GetAuditIDTruncated(req.Context()))
+		klog.ErrorS(nil, "apiserver panic'd", "method", req.Method, "URI", req.RequestURI, "auditID", audit.GetAuditIDTruncated(req.Context()))
 	})
 }
@@ -522,7 +522,7 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error {
 		// net/http waits for 1s for the peer to respond to a GO_AWAY frame, so
 		// we should wait for a minimum of 2s
 		shutdownTimeout = 2 * time.Second
-		klog.V(1).InfoS("[graceful-termination] using HTTP Server shutdown timeout", "ShutdownTimeout", shutdownTimeout)
+		klog.V(1).InfoS("[graceful-termination] using HTTP Server shutdown timeout", "shutdownTimeout", shutdownTimeout)
 	}

 	notAcceptingNewRequestCh := s.lifecycleSignals.NotAcceptingNewRequest
@@ -175,7 +175,7 @@ func (t *envelopeTransformer) TransformToStorage(ctx context.Context, data []byt
 	// Check keyID freshness and write to log if key IDs are different
 	statusKeyID, err := t.keyIDGetter(ctx)
 	if err == nil && encObject.KeyID != statusKeyID {
-		klog.V(2).InfoS("observed different key IDs when encrypting content using kms v2 envelope service", "uid", uid, "encObject.KeyID", encObject.KeyID, "statusKeyID", statusKeyID)
+		klog.V(2).InfoS("observed different key IDs when encrypting content using kms v2 envelope service", "uid", uid, "objectKeyID", encObject.KeyID, "statusKeyID", statusKeyID)
 	}

 	// Serialize the EncryptedObject to a byte array.
@@ -32,8 +32,8 @@ func BenchmarkInfoLoggerInfo(b *testing.B) {
 		for pb.Next() {
 			logger.Info("test",
 				"str", "foo",
-				"int64-1", int64(1),
-				"int64-2", int64(1),
+				"int64A", int64(1),
+				"int64B", int64(1),
 				"float64", float64(1.0),
 				"string1", "\n",
 				"string2", "💩",
@@ -62,8 +62,8 @@ func BenchmarkZapLoggerError(b *testing.B) {
 			logger.Error(fmt.Errorf("test for error:%s", "default"),
 				"test",
 				"str", "foo",
-				"int64-1", int64(1),
-				"int64-2", int64(1),
+				"int64A", int64(1),
+				"int64B", int64(1),
 				"float64", float64(1.0),
 				"string1", "\n",
 				"string2", "💩",
@@ -91,8 +91,8 @@ func BenchmarkZapLoggerV(b *testing.B) {
 		for pb.Next() {
 			logger.V(1).Info("test",
 				"str", "foo",
-				"int64-1", int64(1),
-				"int64-2", int64(1),
+				"int64A", int64(1),
+				"int64B", int64(1),
 				"float64", float64(1.0),
 				"string1", "\n",
 				"string2", "💩",
@@ -194,7 +194,7 @@ func (s *Server) HandleValidate(w http.ResponseWriter, r *http.Request) {
 	// verify the content type is accurate
 	if contentType := r.Header.Get("Content-Type"); contentType != "application/json" {
 		err = fmt.Errorf("contentType=%s, expected application/json", contentType)
-		klog.ErrorS(err, "unable to process a request with an unknown content type", "content type", contentType)
+		klog.ErrorS(err, "unable to process a request with an unknown content type", "contentType", contentType)
 		http.Error(w, "unable to process a request with a non-json content type", http.StatusBadRequest)
 		return
 	}
@@ -651,7 +651,7 @@ func (s *service) DeleteSnapshot(ctx context.Context,
 	// leaks. The slice's elements may not be pointers, but the structs
 	// themselves have fields that are.
 	s.snapshots.Delete(i)
-	klog.V(5).InfoS("mock delete snapshot", "SnapshotId", req.SnapshotId)
+	klog.V(5).InfoS("mock delete snapshot", "snapshotId", req.SnapshotId)

 	if hookVal, hookMsg := s.execHook("DeleteSnapshotEnd"); hookVal != codes.OK {
 		return nil, status.Errorf(hookVal, hookMsg)