Migrate missed log entries in kubelet

Co-Authored-By: pacoxu <paco.xu@daocloud.io>
Elana Hashman 2021-03-17 14:48:16 -07:00
parent 3aa1c58342
commit 6af7eb6d49
GPG Key ID: D37F7B2A20B48FA0
14 changed files with 31 additions and 36 deletions
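
The pattern applied throughout this commit is the klog structured-logging migration: printf-style calls such as klog.Infof, klog.Warningf, and klog.Errorf become klog.InfoS or klog.ErrorS with a constant message and alternating key/value pairs, and any verbosity level is kept. A minimal sketch of the convention follows; it is illustrative only, and the pod name and error values in it are made up rather than taken from this diff.

package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()

	podName := "example-pod" // illustrative value, not from this commit

	// Before the migration: values interpolated into a format string.
	klog.Infof("Failed to sync pod %q", podName)

	// After the migration: constant message plus key/value pairs.
	klog.InfoS("Failed to sync pod", "pod", podName)

	// Errors move to the first argument of ErrorS instead of the message string.
	klog.ErrorS(errors.New("example failure"), "Failed to sync pod", "pod", podName)

	// Verbosity levels carry over unchanged.
	klog.V(4).InfoS("Syncing pod", "pod", podName)
}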


@@ -52,7 +52,7 @@ func initForOS(windowsService bool, windowsPriorityClass string) error {
}
kubeletProcessHandle := windows.CurrentProcess()
// Set the priority of the kubelet process to given priority
klog.Infof("Setting the priority of kubelet process to %s", windowsPriorityClass)
klog.InfoS("Setting the priority of kubelet process", "windowsPriorityClass", windowsPriorityClass)
if err := windows.SetPriorityClass(kubeletProcessHandle, priority); err != nil {
return err
}


@@ -52,7 +52,7 @@ func appendPluginBasedOnFeatureFlags(plugins []volume.VolumePlugin, inTreePlugin
return plugins, nil
}
if featureGate.Enabled(pluginInfo.pluginUnregisterFeature) {
klog.Infof("Skipped registration of plugin since feature flag is enabled", "pluginName", inTreePluginName, "featureFlag", pluginInfo.pluginUnregisterFeature)
klog.InfoS("Skipped registration of plugin since feature flag is enabled", "pluginName", inTreePluginName, "featureFlag", pluginInfo.pluginUnregisterFeature)
return plugins, nil
}


@@ -285,10 +285,9 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
config.StaticPodURLHeader[k] = []string{"<masked>"}
}
// log the kubelet's config for inspection
klog.V(5).Infof("KubeletConfiguration: %#v", config)
klog.V(5).InfoS("KubeletConfiguration", "configuration", kubeletServer.KubeletConfiguration)
// run the kubelet
klog.V(5).InfoS("KubeletConfiguration", "configuration", kubeletServer.KubeletConfiguration)
if err := Run(ctx, kubeletServer, kubeletDeps, utilfeature.DefaultFeatureGate); err != nil {
klog.ErrorS(err, "Failed to run kubelet")
os.Exit(1)


@@ -73,7 +73,7 @@ func (cm *containerManagerImpl) Start(node *v1.Node,
sourcesReady config.SourcesReady,
podStatusProvider status.PodStatusProvider,
runtimeService internalapi.RuntimeService) error {
klog.V(2).Infof("Starting Windows container manager")
klog.V(2).InfoS("Starting Windows container manager")
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.LocalStorageCapacityIsolation) {
rootfs, err := cm.cadvisorInterface.RootFsInfo()
@@ -112,7 +112,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
cm.topologyManager = topologymanager.NewFakeManager()
klog.Infof("Creating device plugin manager: %t", devicePluginEnabled)
klog.InfoS("Creating device plugin manager", "devicePluginEnabled", devicePluginEnabled)
if devicePluginEnabled {
cm.deviceManager, err = devicemanager.NewManagerImpl(nil, cm.topologyManager)
cm.topologyManager.AddHintProvider(cm.deviceManager)


@@ -32,7 +32,7 @@ type fakeManager struct {
}
func (m *fakeManager) Start(activePods ActivePodsFunc, sourcesReady config.SourcesReady, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService, initialContainers containermap.ContainerMap) error {
klog.Info("Start()")
klog.InfoS("Start()")
return nil
}


@@ -187,7 +187,7 @@ func (m *Stub) Register(kubeletEndpoint, resourceName string, pluginSockDir stri
return nil
}
}
klog.Info("Deprecation file not found. Invoke registration")
klog.InfoS("Deprecation file not found. Invoke registration")
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()


@@ -582,7 +582,7 @@ func (m *ManagerImpl) writeCheckpoint() error {
err := m.checkpointManager.CreateCheckpoint(kubeletDeviceManagerCheckpoint, data)
if err != nil {
err2 := fmt.Errorf("failed to write checkpoint file %q: %v", kubeletDeviceManagerCheckpoint, err)
-klog.Warning(err2)
+klog.InfoS("Failed to write checkpoint file", "err", err)
return err2
}
return nil
@@ -1071,7 +1071,7 @@ func (m *ManagerImpl) GetAllocatableDevices() ResourceDeviceInstances {
m.mutex.Lock()
resp := m.allDevices.Clone()
m.mutex.Unlock()
klog.V(4).Infof("known devices: %d", len(resp))
klog.V(4).InfoS("known devices", "numDevices", len(resp))
return resp
}


@@ -25,7 +25,7 @@ import (
)
func (s *sourceFile) startWatch() {
klog.Errorf("Watching source file is unsupported in this build")
klog.ErrorS(nil, "Watching source file is unsupported in this build")
}
func (s *sourceFile) consumeWatchEvent(e *watchEvent) error {


@@ -1952,7 +1952,7 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
klog.V(4).InfoS("SyncLoop RECONCILE", "source", u.Source, "pods", format.Pods(u.Pods))
handler.HandlePodReconcile(u.Pods)
case kubetypes.DELETE:
klog.V(2).Infof("SyncLoop DELETE", "source", u.Source, "pods", format.Pods(u.Pods))
klog.V(2).InfoS("SyncLoop DELETE", "source", u.Source, "pods", format.Pods(u.Pods))
// DELETE is treated as a UPDATE because of graceful deletion.
handler.HandlePodUpdates(u.Pods)
case kubetypes.SET:
@@ -2023,18 +2023,14 @@ func (kl *Kubelet) syncLoopIteration(configCh <-chan kubetypes.PodUpdate, handle
}
func handleProbeSync(kl *Kubelet, update proberesults.Update, handler SyncHandler, probe, status string) {
-probeAndStatus := probe
-if len(status) > 0 {
-probeAndStatus = fmt.Sprintf("%s (container %s)", probe, status)
-}
// We should not use the pod from manager, because it is never updated after initialization.
pod, ok := kl.podManager.GetPodByUID(update.PodUID)
if !ok {
// If the pod no longer exists, ignore the update.
klog.V(4).Infof("SyncLoop %s: ignore irrelevant update: %#v", probeAndStatus, update)
klog.V(4).InfoS("SyncLoop (probe): ignore irrelevant update", "probe", probe, "status", status, "update", update)
return
}
klog.V(1).Infof("SyncLoop %s: %q", probeAndStatus, format.Pod(pod))
klog.V(1).InfoS("SyncLoop (probe)", "probe", probe, "status", status, "pod", klog.KObj(pod))
handler.HandlePodSyncs([]*v1.Pod{pod})
}
@@ -2044,7 +2040,7 @@ func (kl *Kubelet) dispatchWork(pod *v1.Pod, syncType kubetypes.SyncPodType, mir
// check whether we are ready to delete the pod from the API server (all status up to date)
containersTerminal, podWorkerTerminal := kl.podAndContainersAreTerminal(pod)
if pod.DeletionTimestamp != nil && containersTerminal {
klog.V(4).Infof("Pod has completed execution and should be deleted from the API server", "pod", klog.KObj(pod), "syncType", syncType)
klog.V(4).InfoS("Pod has completed execution and should be deleted from the API server", "pod", klog.KObj(pod), "syncType", syncType)
kl.statusManager.TerminatePod(pod)
return
}


@@ -1259,7 +1259,7 @@ func (pk *podKillerWithChannel) KillPod(podPair *kubecontainer.PodPair) {
if apiPodExists && runningPodExists {
klog.V(4).InfoS("Api pod and running pod are pending termination", "apiPodUID", podPair.APIPod.UID, "runningPodUID", podPair.RunningPod.ID)
} else if apiPodExists {
klog.V(4).Infof("Api pod is pending termination", "podUID", podPair.APIPod.UID)
klog.V(4).InfoS("Api pod is pending termination", "podUID", podPair.APIPod.UID)
} else {
klog.V(4).InfoS("Running pod is pending termination", "podUID", podPair.RunningPod.ID)
}


@@ -67,7 +67,7 @@ func (kl *Kubelet) podVolumesExist(podUID types.UID) bool {
return true
}
if len(volumePaths) > 0 {
klog.V(4).InfoS("Pod found, but volumes are still mounted on disk", "podUID", podUID, "volumePaths", volumePaths)
klog.V(4).InfoS("Pod found, but volumes are still mounted on disk", "podUID", podUID, "paths", volumePaths)
return true
}
@@ -136,7 +136,7 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but failed to rmdir() volume at path %v: %v", uid, volumePath, err))
allVolumesCleanedUp = false
} else {
klog.Warningf("Cleaned up orphaned volume from pod %q at %s", uid, volumePath)
klog.InfoS("Cleaned up orphaned volume from pod", "podUID", uid, "path", volumePath)
}
}
}
@@ -153,7 +153,7 @@ func (kl *Kubelet) cleanupOrphanedPodDirs(pods []*v1.Pod, runningPods []*kubecon
orphanVolumeErrors = append(orphanVolumeErrors, fmt.Errorf("orphaned pod %q found, but failed to rmdir() subpath at path %v: %v", uid, subpathVolumePath, err))
allVolumesCleanedUp = false
} else {
klog.Warningf("Cleaned up orphaned volume subpath from pod %q at %s", uid, subpathVolumePath)
klog.InfoS("Cleaned up orphaned volume subpath from pod", "podUID", uid, "path", subpathVolumePath)
}
}
}


@@ -81,7 +81,7 @@ func newProber(
func (pb *prober) recordContainerEvent(pod *v1.Pod, container *v1.Container, eventType, reason, message string, args ...interface{}) {
ref, err := kubecontainer.GenerateContainerRef(pod, container)
if err != nil {
klog.Errorf("Can't make a ref to pod %q, container %v: %v", format.Pod(pod), container.Name, err)
klog.ErrorS(err, "Can't make a ref to pod and container", "pod", klog.KObj(pod), "containerName", container.Name)
return
}
pb.recorder.Eventf(ref, eventType, reason, message, args...)
@@ -102,7 +102,7 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c
}
if probeSpec == nil {
klog.InfoS("Probe is nil", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "container", container.Name)
klog.InfoS("Probe is nil", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name)
return results.Success, nil
}
@@ -110,19 +110,19 @@ func (pb *prober) probe(probeType probeType, pod *v1.Pod, status v1.PodStatus, c
if err != nil || (result != probe.Success && result != probe.Warning) {
// Probe failed in one way or another.
if err != nil {
klog.V(1).ErrorS(err, "Probe errored", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "container", container.Name)
klog.V(1).ErrorS(err, "Probe errored", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name)
pb.recordContainerEvent(pod, &container, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe errored: %v", probeType, err)
} else { // result != probe.Success
klog.V(1).InfoS("Probe failed", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "container", container.Name, "probeResult", result, "output", output)
klog.V(1).InfoS("Probe failed", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name, "probeResult", result, "output", output)
pb.recordContainerEvent(pod, &container, v1.EventTypeWarning, events.ContainerUnhealthy, "%s probe failed: %s", probeType, output)
}
return results.Failure, err
}
if result == probe.Warning {
pb.recordContainerEvent(pod, &container, v1.EventTypeWarning, events.ContainerProbeWarning, "%s probe warning: %s", probeType, output)
klog.V(3).InfoS("Probe succeeded with a warning", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "container", container.Name, "output", output)
klog.V(3).InfoS("Probe succeeded with a warning", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name, "output", output)
} else {
klog.V(3).InfoS("Probe succeeded", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "container", container.Name)
klog.V(3).InfoS("Probe succeeded", "probeType", probeType, "pod", klog.KObj(pod), "podUID", pod.UID, "containerName", container.Name)
}
return results.Success, nil
}
@@ -155,7 +155,7 @@ func buildHeader(headerList []v1.HTTPHeader) http.Header {
func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status v1.PodStatus, container v1.Container, containerID kubecontainer.ContainerID) (probe.Result, string, error) {
timeout := time.Duration(p.TimeoutSeconds) * time.Second
if p.Exec != nil {
klog.V(4).Infof("Exec-Probe Pod: %v, Container: %v, Command: %v", pod.Name, container.Name, p.Exec.Command)
klog.V(4).InfoS("Exec-Probe runProbe", "pod", klog.KObj(pod), "containerName", container.Name, "execCommand", p.Exec.Command)
command := kubecontainer.ExpandContainerCommandOnlyStatic(p.Exec.Command, container.Env)
return pb.exec.Probe(pb.newExecInContainer(container, containerID, command, timeout))
}
@@ -170,10 +170,10 @@ func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status
return probe.Unknown, "", err
}
path := p.HTTPGet.Path
klog.V(4).Infof("HTTP-Probe Host: %v://%v, Port: %v, Path: %v", scheme, host, port, path)
klog.V(4).InfoS("HTTP-Probe Host", "scheme", scheme, "host", host, "port", port, "path", path)
url := formatURL(scheme, host, port, path)
headers := buildHeader(p.HTTPGet.HTTPHeaders)
klog.V(4).Infof("HTTP-Probe Headers: %v", headers)
klog.V(4).InfoS("HTTP-Probe Headers", "headers", headers)
switch probeType {
case liveness:
return pb.livenessHTTP.Probe(url, headers, timeout)
@@ -192,10 +192,10 @@ func (pb *prober) runProbe(probeType probeType, p *v1.Probe, pod *v1.Pod, status
if host == "" {
host = status.PodIP
}
klog.V(4).Infof("TCP-Probe Host: %v, Port: %v, Timeout: %v", host, port, timeout)
klog.V(4).InfoS("TCP-Probe Host", "host", host, "port", port, "timeout", timeout)
return pb.tcp.Probe(host, port, timeout)
}
klog.Warningf("Failed to find probe builder for container: %v", container)
klog.InfoS("Failed to find probe builder for container", "containerName", container.Name)
return probe.Unknown, "", fmt.Errorf("missing probe handler for %s:%s", format.Pod(pod), container.Name)
}


@@ -252,7 +252,7 @@ func (m *manager) UpdatePodStatus(podUID types.UID, podStatus *v1.PodStatus) {
select {
case w.manualTriggerCh <- struct{}{}:
default: // Non-blocking.
klog.Warningf("Failed to trigger a manual run of %s probe", w.probeType.String())
klog.InfoS("Failed to trigger a manual run", "probe", w.probeType.String())
}
}
}


@@ -555,7 +555,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
// TODO: make me easier to express from client code
pod, err := m.kubeClient.CoreV1().Pods(status.podNamespace).Get(context.TODO(), status.podName, metav1.GetOptions{})
if errors.IsNotFound(err) {
klog.V(3).Infof("Pod does not exist on the server",
klog.V(3).InfoS("Pod does not exist on the server",
"podUID", uid,
"pod", klog.KRef(status.podNamespace, status.podName))
// If the Pod is deleted the status will be cleared in