Mirror of https://github.com/k3s-io/kubernetes.git
logging: fix names of keys
Stricter checking in the upcoming logcheck v0.4.1 flagged these names, which don't comply with our recommendations in https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments.
commit bc6c7fa912
parent f21c603417
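
For context, the convention from that guide: key arguments passed to klog's structured logging calls should be constant lowerCamelCase identifiers, without spaces, dots, hyphens, or underscores, so log processors can treat each key as a single field name. A minimal before/after sketch of the pattern this commit applies, taken from the first hunk below:

	// Non-compliant: the key repeats the sysctl name verbatim, and its
	// underscores mean it is not a lowerCamelCase identifier.
	klog.InfoS("Setting nf_conntrack_max", "nf_conntrack_max", max)

	// Compliant: same value, lowerCamelCase key.
	klog.InfoS("Setting nf_conntrack_max", "nfConntrackMax", max)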
@@ -49,7 +49,7 @@ func (rct realConntracker) SetMax(max int) error {
 	if err := rct.setIntSysCtl("nf_conntrack_max", max); err != nil {
 		return err
 	}
-	klog.InfoS("Setting nf_conntrack_max", "nf_conntrack_max", max)
+	klog.InfoS("Setting nf_conntrack_max", "nfConntrackMax", max)

 	// Linux does not support writing to /sys/module/nf_conntrack/parameters/hashsize
 	// when the writer process is not in the initial network namespace
@@ -80,7 +80,7 @@ func (rct realConntracker) SetMax(max int) error {
 		return errReadOnlySysFS
 	}
 	// TODO: generify this and sysctl to a new sysfs.WriteInt()
-	klog.InfoS("Setting conntrack hashsize", "conntrack hashsize", max/4)
+	klog.InfoS("Setting conntrack hashsize", "conntrackHashsize", max/4)
 	return writeIntStringFile("/sys/module/nf_conntrack/parameters/hashsize", max/4)
 }
@@ -365,7 +365,7 @@ func (o *Options) writeConfigFile() (err error) {
 		return err
 	}

-	klog.InfoS("Wrote configuration", "WriteConfigTo", o.WriteConfigTo)
+	klog.InfoS("Wrote configuration", "file", o.WriteConfigTo)

 	return nil
 }
@@ -141,10 +141,10 @@ func newProxyServer(
 		if err != nil {
 			return nil, err
 		}
-		klog.InfoS("NodeInfo", "PodCIDR", nodeInfo.Spec.PodCIDR, "PodCIDRs", nodeInfo.Spec.PodCIDRs)
+		klog.InfoS("NodeInfo", "podCIDR", nodeInfo.Spec.PodCIDR, "podCIDRs", nodeInfo.Spec.PodCIDRs)
 	}

-	klog.V(2).InfoS("DetectLocalMode", "LocalMode", string(detectLocalMode))
+	klog.V(2).InfoS("DetectLocalMode", "localMode", string(detectLocalMode))

 	primaryFamily := v1.IPv4Protocol
 	primaryProtocol := utiliptables.ProtocolIPv4
@@ -422,7 +422,7 @@ func getDetectLocalMode(config *proxyconfigapi.KubeProxyConfiguration) (proxycon
 		if strings.TrimSpace(mode.String()) != "" {
 			return mode, fmt.Errorf("unknown detect-local-mode: %v", mode)
 		}
-		klog.V(4).InfoS("Defaulting detect-local-mode", "LocalModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR))
+		klog.V(4).InfoS("Defaulting detect-local-mode", "localModeClusterCIDR", string(proxyconfigapi.LocalModeClusterCIDR))
 		return proxyconfigapi.LocalModeClusterCIDR, nil
 	}
 }
@@ -452,7 +452,7 @@ func getLocalDetector(mode proxyconfigapi.LocalMode, config *proxyconfigapi.Kube
 		}
 		return proxyutiliptables.NewDetectLocalByInterfaceNamePrefix(config.DetectLocal.InterfaceNamePrefix)
 	}
-	klog.InfoS("Defaulting to no-op detect-local", "detect-local-mode", string(mode))
+	klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
 	return proxyutiliptables.NewNoOpLocalDetector(), nil
 }
@@ -516,9 +516,9 @@ func getDualStackLocalDetectorTuple(mode proxyconfigapi.LocalMode, config *proxy
 		}
 		return localDetectors, err
 	default:
-		klog.InfoS("Unknown detect-local-mode", "detect-local-mode", mode)
+		klog.InfoS("Unknown detect-local-mode", "detectLocalMode", mode)
 	}
-	klog.InfoS("Defaulting to no-op detect-local", "detect-local-mode", string(mode))
+	klog.InfoS("Defaulting to no-op detect-local", "detectLocalMode", string(mode))
 	return localDetectors, nil
 }
@@ -161,7 +161,7 @@ func nextScheduleTime(cj *batchv1.CronJob, now time.Time, schedule cron.Schedule
 		// I've somewhat arbitrarily picked 100, as more than 80,
 		// but less than "lots".
 		recorder.Eventf(cj, corev1.EventTypeWarning, "TooManyMissedTimes", "too many missed start times: %d. Set or decrease .spec.startingDeadlineSeconds or check clock skew", numberOfMissedSchedules)
-		klog.InfoS("too many missed times", "cronjob", klog.KRef(cj.GetNamespace(), cj.GetName()), "missed times", numberOfMissedSchedules)
+		klog.InfoS("too many missed times", "cronjob", klog.KRef(cj.GetNamespace(), cj.GetName()), "missedTimes", numberOfMissedSchedules)
 	}
 	return mostRecentTime, err
 }
@@ -287,7 +287,7 @@ func (ca *cloudCIDRAllocator) updateCIDRAllocation(nodeName string) error {
 	}
 	if needUpdate {
 		if node.Spec.PodCIDR != "" {
-			klog.ErrorS(nil, "PodCIDR being reassigned!", "nodeName", node.Name, "node.Spec.PodCIDRs", node.Spec.PodCIDRs, "cidrStrings", cidrStrings)
+			klog.ErrorS(nil, "PodCIDR being reassigned!", "nodeName", node.Name, "podCIDRs", node.Spec.PodCIDRs, "cidrStrings", cidrStrings)
 			// We fall through and set the CIDR despite this error. This
 			// implements the same logic as implemented in the
 			// rangeAllocator.
@@ -326,20 +326,20 @@ func needPodCIDRsUpdate(node *v1.Node, podCIDRs []*net.IPNet) (bool, error) {
 	}
 	_, nodePodCIDR, err := netutils.ParseCIDRSloppy(node.Spec.PodCIDR)
 	if err != nil {
-		klog.ErrorS(err, "Found invalid node.Spec.PodCIDR", "node.Spec.PodCIDR", node.Spec.PodCIDR)
+		klog.ErrorS(err, "Found invalid node.Spec.PodCIDR", "podCIDR", node.Spec.PodCIDR)
 		// We will try to overwrite with new CIDR(s)
 		return true, nil
 	}
 	nodePodCIDRs, err := netutils.ParseCIDRs(node.Spec.PodCIDRs)
 	if err != nil {
-		klog.ErrorS(err, "Found invalid node.Spec.PodCIDRs", "node.Spec.PodCIDRs", node.Spec.PodCIDRs)
+		klog.ErrorS(err, "Found invalid node.Spec.PodCIDRs", "podCIDRs", node.Spec.PodCIDRs)
 		// We will try to overwrite with new CIDR(s)
 		return true, nil
 	}

 	if len(podCIDRs) == 1 {
 		if cmp.Equal(nodePodCIDR, podCIDRs[0]) {
-			klog.V(4).InfoS("Node already has allocated CIDR. It matches the proposed one.", "nodeName", node.Name, "podCIDRs[0]", podCIDRs[0])
+			klog.V(4).InfoS("Node already has allocated CIDR. It matches the proposed one.", "nodeName", node.Name, "podCIDR", podCIDRs[0])
 			return false, nil
 		}
 	} else if len(nodePodCIDRs) == len(podCIDRs) {
@@ -27,7 +27,7 @@ import (
 	"sync"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -34,7 +34,7 @@ import (
 	"sync"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
@@ -272,7 +272,7 @@ func (ttlc *Controller) patchNodeWithAnnotation(ctx context.Context, node *v1.No
 		klog.V(2).InfoS("Failed to change ttl annotation for node", "node", klog.KObj(node), "err", err)
 		return err
 	}
-	klog.V(2).InfoS("Changed ttl annotation", "node", klog.KObj(node), "new_ttl", time.Duration(value)*time.Second)
+	klog.V(2).InfoS("Changed ttl annotation", "node", klog.KObj(node), "TTL", time.Duration(value)*time.Second)
 	return nil
 }
@@ -110,9 +110,9 @@ func (r *draPluginClient) NodePrepareResource(
 	klog.V(4).InfoS(
 		log("calling NodePrepareResource rpc"),
 		"namespace", namespace,
-		"claim UID", claimUID,
-		"claim name", claimName,
-		"resource handle", resourceHandle)
+		"claimUID", claimUID,
+		"claimName", claimName,
+		"resourceHandle", resourceHandle)

 	if r.nodeV1ClientCreator == nil {
 		return nil, errors.New("failed to call NodePrepareResource. nodeV1ClientCreator is nil")
@@ -144,9 +144,9 @@ func (r *draPluginClient) NodeUnprepareResource(
 	klog.V(4).InfoS(
 		log("calling NodeUnprepareResource rpc"),
 		"namespace", namespace,
-		"claim UID", claimUID,
-		"claim name", claimName,
-		"cdi devices", cdiDevices)
+		"claimUID", claimUID,
+		"claimname", claimName,
+		"cdiDevices", cdiDevices)

 	if r.nodeV1ClientCreator == nil {
 		return nil, errors.New("nodeV1ClientCreate is nil")
@@ -2437,7 +2437,7 @@ func TestAllocateAndAddPodWithInitContainers(t *testing.T) {

 	for _, testCase := range testCases {
 		t.Run(testCase.description, func(t *testing.T) {
-			klog.InfoS("TestAllocateAndAddPodWithInitContainers", "test name", testCase.description)
+			klog.InfoS("TestAllocateAndAddPodWithInitContainers", "name", testCase.description)
 			mgr := &manager{
 				policy: returnPolicyByName(testCase),
 				state:  state.NewMemoryState(),
@@ -2489,7 +2489,7 @@ func TestStaticPolicyAllocateWithInitContainers(t *testing.T) {

 	for _, testCase := range testCases {
 		t.Run(testCase.description, func(t *testing.T) {
-			klog.InfoS("TestStaticPolicyAllocateWithInitContainers", "test name", testCase.description)
+			klog.InfoS("TestStaticPolicyAllocateWithInitContainers", "name", testCase.description)
 			p, s, err := initTests(t, &testCase, testCase.topologyHint, testCase.initContainersReusableMemory)
 			if err != nil {
 				t.Fatalf("Unexpected error: %v", err)
@@ -290,7 +290,7 @@ func (m *qosContainerManagerImpl) setMemoryQoS(configs map[v1.PodQOSClass]*Cgrou
 			configs[v1.PodQOSBurstable].ResourceParameters.Unified = make(map[string]string)
 		}
 		configs[v1.PodQOSBurstable].ResourceParameters.Unified[MemoryMin] = strconv.FormatInt(burstableMin, 10)
-		klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSBurstable, "memory.min", burstableMin)
+		klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSBurstable, "memoryMin", burstableMin)
 	}

 	if guaranteedMin > 0 {
@@ -298,7 +298,7 @@ func (m *qosContainerManagerImpl) setMemoryQoS(configs map[v1.PodQOSClass]*Cgrou
 			configs[v1.PodQOSGuaranteed].ResourceParameters.Unified = make(map[string]string)
 		}
 		configs[v1.PodQOSGuaranteed].ResourceParameters.Unified[MemoryMin] = strconv.FormatInt(guaranteedMin, 10)
-		klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSGuaranteed, "memory.min", guaranteedMin)
+		klog.V(4).InfoS("MemoryQoS config for qos", "qos", v1.PodQOSGuaranteed, "memoryMin", guaranteedMin)
 	}
 }
@@ -1006,7 +1006,7 @@ func setTerminationGracePeriod(pod *v1.Pod, containerSpec *v1.Container, contain
 func isProbeTerminationGracePeriodSecondsSet(pod *v1.Pod, containerSpec *v1.Container, probe *v1.Probe, containerName string, containerID kubecontainer.ContainerID, probeType string) bool {
 	if probe != nil && probe.TerminationGracePeriodSeconds != nil {
 		if *probe.TerminationGracePeriodSeconds > *pod.Spec.TerminationGracePeriodSeconds {
-			klog.V(4).InfoS("Using probe-level grace period that is greater than the pod-level grace period", "pod", klog.KObj(pod), "pod-uid", pod.UID, "containerName", containerName, "containerID", containerID.String(), "probe-type", probeType, "probe-grace-period", *probe.TerminationGracePeriodSeconds, "pod-grace-period", *pod.Spec.TerminationGracePeriodSeconds)
+			klog.V(4).InfoS("Using probe-level grace period that is greater than the pod-level grace period", "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", containerName, "containerID", containerID.String(), "probeType", probeType, "probeGracePeriod", *probe.TerminationGracePeriodSeconds, "podGracePeriod", *pod.Spec.TerminationGracePeriodSeconds)
 		}
 		return true
 	}
@@ -102,7 +102,7 @@ func (c *criMetricsCollector) criMetricToProm(m *runtimeapi.Metric) (metrics.Met
 	desc, ok := c.descriptors[m.Name]
 	if !ok {
 		err := fmt.Errorf("error converting CRI Metric to prometheus format")
-		klog.V(5).ErrorS(err, "Descriptor not present in pre-populated list of descriptors", "descriptor name", m.Name)
+		klog.V(5).ErrorS(err, "Descriptor not present in pre-populated list of descriptors", "name", m.Name)
 		return nil, err
 	}
@@ -102,7 +102,7 @@ func (p *basicPodStartupLatencyTracker) ObservedPodOnWatch(pod *v1.Pod, when tim
 		klog.InfoS("Observed pod startup duration",
 			"pod", klog.KObj(pod),
 			"podStartSLOduration", podStartSLOduration,
-			"pod.CreationTimestamp", pod.CreationTimestamp.Time,
+			"podCreationTimestamp", pod.CreationTimestamp.Time,
 			"firstStartedPulling", state.firstStartedPulling,
 			"lastFinishedPulling", state.lastFinishedPulling,
 			"observedRunningTime", state.observedRunningTime,
@@ -43,13 +43,13 @@ func (n *NodePodCIDRHandler) OnNodeAdd(node *v1.Node) {
 	podCIDRs := node.Spec.PodCIDRs
 	// initialize podCIDRs
 	if len(n.podCIDRs) == 0 && len(podCIDRs) > 0 {
-		klog.InfoS("Setting current PodCIDRs", "PodCIDRs", podCIDRs)
+		klog.InfoS("Setting current PodCIDRs", "podCIDRs", podCIDRs)
 		n.podCIDRs = podCIDRs
 		return
 	}
 	if !reflect.DeepEqual(n.podCIDRs, podCIDRs) {
 		klog.ErrorS(nil, "Using NodeCIDR LocalDetector mode, current PodCIDRs are different than previous PodCIDRs, restarting",
-			"node", klog.KObj(node), "New Node PodCIDRs", podCIDRs, "Old Node UID", n.podCIDRs)
+			"node", klog.KObj(node), "newPodCIDRs", podCIDRs, "oldPodCIDRs", n.podCIDRs)
 		panic("Current Node PodCIDRs are different than previous PodCIDRs, restarting")
 	}
 }
@@ -61,13 +61,13 @@ func (n *NodePodCIDRHandler) OnNodeUpdate(_, node *v1.Node) {
 	podCIDRs := node.Spec.PodCIDRs
 	// initialize podCIDRs
 	if len(n.podCIDRs) == 0 && len(podCIDRs) > 0 {
-		klog.InfoS("Setting current PodCIDRs", "PodCIDRs", podCIDRs)
+		klog.InfoS("Setting current PodCIDRs", "podCIDRs", podCIDRs)
 		n.podCIDRs = podCIDRs
 		return
 	}
 	if !reflect.DeepEqual(n.podCIDRs, podCIDRs) {
 		klog.ErrorS(nil, "Using NodeCIDR LocalDetector mode, current PodCIDRs are different than previous PodCIDRs, restarting",
-			"node", klog.KObj(node), "New Node PodCIDRs", podCIDRs, "Old Node UID", n.podCIDRs)
+			"node", klog.KObj(node), "newPodCIDRs", podCIDRs, "oldPODCIDRs", n.podCIDRs)
 		panic("Current Node PodCIDRs are different than previous PodCIDRs, restarting")
 	}
 }
@@ -320,7 +320,7 @@ func (pl *dynamicResources) PreFilter(ctx context.Context, state *framework.Cycl
 	if err != nil {
 		return nil, statusUnschedulable(logger, err.Error())
 	}
-	logger.V(5).Info("pod resource claims", "pod", klog.KObj(pod), "resourceclaims", klog.KObjs(claims))
+	logger.V(5).Info("pod resource claims", "pod", klog.KObj(pod), "resourceclaims", klog.KObjSlice(claims))
 	// If the pod does not reference any claim, we don't need to do
 	// anything for it.
 	if len(claims) == 0 {
@@ -418,7 +418,7 @@ func (plugin *localVolumePlugin) NodeExpand(resizeOptions volume.NodeResizeOptio
 	case hostutil.FileTypeDirectory:
 		// if the given local volume path is of already filesystem directory, return directly because
 		// we do not want to prevent mount operation from succeeding.
-		klog.InfoS("Expansion of directory based local volumes is NO-OP", "local-volume-path", localDevicePath)
+		klog.InfoS("Expansion of directory based local volumes is NO-OP", "localVolumePath", localDevicePath)
 		return true, nil
 	default:
 		return false, fmt.Errorf("only directory and block device are supported")
@@ -70,7 +70,7 @@ func WithAuthorization(handler http.Handler, a authorizer.Authorizer, s runtime.
 			return
 		}

-		klog.V(4).InfoS("Forbidden", "URI", req.RequestURI, "Reason", reason)
+		klog.V(4).InfoS("Forbidden", "URI", req.RequestURI, "reason", reason)
 		audit.AddAuditAnnotations(ctx,
 			decisionAnnotationKey, decisionForbid,
 			reasonAnnotationKey, reason)
@@ -109,14 +109,14 @@ func WithImpersonation(handler http.Handler, a authorizer.Authorizer, s runtime.
 				actingAsAttributes.Resource = "uids"

 			default:
-				klog.V(4).InfoS("unknown impersonation request type", "Request", impersonationRequest)
+				klog.V(4).InfoS("unknown impersonation request type", "request", impersonationRequest)
 				responsewriters.Forbidden(ctx, actingAsAttributes, w, req, fmt.Sprintf("unknown impersonation request type: %v", impersonationRequest), s)
 				return
 			}

 			decision, reason, err := a.Authorize(ctx, actingAsAttributes)
 			if err != nil || decision != authorizer.DecisionAllow {
-				klog.V(4).InfoS("Forbidden", "URI", req.RequestURI, "Reason", reason, "Error", err)
+				klog.V(4).InfoS("Forbidden", "URI", req.RequestURI, "reason", reason, "err", err)
 				responsewriters.Forbidden(ctx, actingAsAttributes, w, req, reason, s)
 				return
 			}
@@ -149,7 +149,7 @@ func (f *FieldManager) UpdateNoErrors(liveObj, newObj runtime.Object, manager st
 			name = accessor.GetName()
 		}

-		klog.ErrorS(err, "[SHOULD NOT HAPPEN] failed to update managedFields", "VersionKind",
+		klog.ErrorS(err, "[SHOULD NOT HAPPEN] failed to update managedFields", "versionKind",
 			newObj.GetObjectKind().GroupVersionKind(), "namespace", ns, "name", name)
 	})
 	// Explicitly remove managedFields on failure, so that
@@ -211,7 +211,7 @@ func (r *RequestInfoFactory) NewRequestInfo(req *http.Request) (*RequestInfo, er
 		opts := metainternalversion.ListOptions{}
 		if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, &opts); err != nil {
 			// An error in parsing request will result in default to "list" and not setting "name" field.
-			klog.ErrorS(err, "Couldn't parse request", "Request", req.URL.Query())
+			klog.ErrorS(err, "Couldn't parse request", "request", req.URL.Query())
 			// Reset opts to not rely on partial results from parsing.
 			// However, if watch is set, let's report it.
 			opts = metainternalversion.ListOptions{}
@@ -55,7 +55,7 @@ func WithPanicRecovery(handler http.Handler, resolver request.RequestInfoResolve
 			return
 		}
 		http.Error(w, "This request caused apiserver to panic. Look in the logs for details.", http.StatusInternalServerError)
-		klog.ErrorS(nil, "apiserver panic'd", "method", req.Method, "URI", req.RequestURI, "audit-ID", audit.GetAuditIDTruncated(req.Context()))
+		klog.ErrorS(nil, "apiserver panic'd", "method", req.Method, "URI", req.RequestURI, "auditID", audit.GetAuditIDTruncated(req.Context()))
 	})
 }
@@ -522,7 +522,7 @@ func (s preparedGenericAPIServer) Run(stopCh <-chan struct{}) error {
 		// net/http waits for 1s for the peer to respond to a GO_AWAY frame, so
 		// we should wait for a minimum of 2s
 		shutdownTimeout = 2 * time.Second
-		klog.V(1).InfoS("[graceful-termination] using HTTP Server shutdown timeout", "ShutdownTimeout", shutdownTimeout)
+		klog.V(1).InfoS("[graceful-termination] using HTTP Server shutdown timeout", "shutdownTimeout", shutdownTimeout)
 	}

 	notAcceptingNewRequestCh := s.lifecycleSignals.NotAcceptingNewRequest
@@ -175,7 +175,7 @@ func (t *envelopeTransformer) TransformToStorage(ctx context.Context, data []byt
 	// Check keyID freshness and write to log if key IDs are different
 	statusKeyID, err := t.keyIDGetter(ctx)
 	if err == nil && encObject.KeyID != statusKeyID {
-		klog.V(2).InfoS("observed different key IDs when encrypting content using kms v2 envelope service", "uid", uid, "encObject.KeyID", encObject.KeyID, "statusKeyID", statusKeyID)
+		klog.V(2).InfoS("observed different key IDs when encrypting content using kms v2 envelope service", "uid", uid, "objectKeyID", encObject.KeyID, "statusKeyID", statusKeyID)
 	}

 	// Serialize the EncryptedObject to a byte array.
@@ -32,8 +32,8 @@ func BenchmarkInfoLoggerInfo(b *testing.B) {
 		for pb.Next() {
 			logger.Info("test",
 				"str", "foo",
-				"int64-1", int64(1),
-				"int64-2", int64(1),
+				"int64A", int64(1),
+				"int64B", int64(1),
 				"float64", float64(1.0),
 				"string1", "\n",
 				"string2", "💩",
@@ -62,8 +62,8 @@ func BenchmarkZapLoggerError(b *testing.B) {
 			logger.Error(fmt.Errorf("test for error:%s", "default"),
 				"test",
 				"str", "foo",
-				"int64-1", int64(1),
-				"int64-2", int64(1),
+				"int64A", int64(1),
+				"int64B", int64(1),
 				"float64", float64(1.0),
 				"string1", "\n",
 				"string2", "💩",
@@ -91,8 +91,8 @@ func BenchmarkZapLoggerV(b *testing.B) {
 		for pb.Next() {
 			logger.V(1).Info("test",
 				"str", "foo",
-				"int64-1", int64(1),
-				"int64-2", int64(1),
+				"int64A", int64(1),
+				"int64B", int64(1),
 				"float64", float64(1.0),
 				"string1", "\n",
 				"string2", "💩",
@@ -194,7 +194,7 @@ func (s *Server) HandleValidate(w http.ResponseWriter, r *http.Request) {
 	// verify the content type is accurate
 	if contentType := r.Header.Get("Content-Type"); contentType != "application/json" {
 		err = fmt.Errorf("contentType=%s, expected application/json", contentType)
-		klog.ErrorS(err, "unable to process a request with an unknown content type", "content type", contentType)
+		klog.ErrorS(err, "unable to process a request with an unknown content type", "contentType", contentType)
 		http.Error(w, "unable to process a request with a non-json content type", http.StatusBadRequest)
 		return
 	}
@@ -651,7 +651,7 @@ func (s *service) DeleteSnapshot(ctx context.Context,
 	// leaks. The slice's elements may not be pointers, but the structs
 	// themselves have fields that are.
 	s.snapshots.Delete(i)
-	klog.V(5).InfoS("mock delete snapshot", "SnapshotId", req.SnapshotId)
+	klog.V(5).InfoS("mock delete snapshot", "snapshotId", req.SnapshotId)

 	if hookVal, hookMsg := s.execHook("DeleteSnapshotEnd"); hookVal != codes.OK {
 		return nil, status.Errorf(hookVal, hookMsg)