Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 11:50:44 +00:00
Merge pull request #100007 from utsavoza/ugo/issue-98976/09-03-2021
Migrate remaining pkg/kubelet/cm/ top level files to structured logging
commit 21de277402
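
The diff below applies the standard Kubernetes structured-logging migration (issue #98976, per the branch name): printf-style klog.Infof/Errorf/Warningf calls become klog.InfoS and klog.ErrorS calls that carry a constant message plus named key/value pairs. A minimal runnable sketch of the pattern using k8s.io/klog/v2; the cgroup name and error below are illustrative stand-ins, not values taken from this PR:

package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()

	// Illustrative values only.
	cgroupName := "/kubepods"
	err := errors.New("permission denied")

	// Before: klog.Errorf("Failed to create %q cgroup", cgroupName)
	// After: the error becomes a first-class argument and values become named pairs.
	klog.ErrorS(err, "Failed to create cgroup", "cgroupName", cgroupName)

	// Verbosity-gated calls migrate the same way: Infof -> InfoS
	// (this line is emitted only when running with -v=2 or higher).
	klog.V(2).InfoS("Enforcing system reserved on cgroup", "cgroupName", cgroupName)
}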
@@ -19,6 +19,7 @@ package cpuset
 import (
 	"bytes"
 	"fmt"
+	"os"
 	"reflect"
 	"sort"
 	"strconv"
@@ -278,7 +279,8 @@ func (s CPUSet) String() string {
 func MustParse(s string) CPUSet {
 	res, err := Parse(s)
 	if err != nil {
-		klog.Fatalf("unable to parse [%s] as CPUSet: %v", s, err)
+		klog.ErrorS(err, "Failed to parse input as CPUSet", "input", s)
+		os.Exit(1)
 	}
 	return res
 }
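
One special case above: klog.Fatalf has no structured counterpart, so MustParse now logs a structured error and exits explicitly, which is why the "os" import is added. A hedged sketch of the same shape, with strconv.Atoi standing in for cpuset.Parse:

package main

import (
	"os"
	"strconv"

	"k8s.io/klog/v2"
)

// mustParse mirrors the MustParse pattern in the hunk above;
// strconv.Atoi is only a stand-in for cpuset.Parse in this sketch.
func mustParse(s string) int {
	n, err := strconv.Atoi(s)
	if err != nil {
		// Replace klog.Fatalf: log with structured context, then exit.
		klog.ErrorS(err, "Failed to parse input as CPUSet", "input", s)
		os.Exit(1)
	}
	return n
}

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()
	klog.InfoS("Parsed value", "value", mustParse("42"))
}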
@@ -54,7 +54,7 @@ func (cm *containerManagerImpl) createNodeAllocatableCgroups() error {
 		return nil
 	}
 	if err := cm.cgroupManager.Create(cgroupConfig); err != nil {
-		klog.Errorf("Failed to create %q cgroup", cm.cgroupRoot)
+		klog.ErrorS(err, "Failed to create cgroup", "cgroupName", cm.cgroupRoot)
 		return err
 	}
 	return nil
@@ -72,7 +72,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 		nodeAllocatable = cm.getNodeAllocatableInternalAbsolute()
 	}

-	klog.V(4).Infof("Attempting to enforce Node Allocatable with config: %+v", nc)
+	klog.V(4).InfoS("Attempting to enforce Node Allocatable", "config", nc)

 	cgroupConfig := &CgroupConfig{
 		Name: cm.cgroupRoot,
@@ -109,7 +109,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 	}
 	// Now apply kube reserved and system reserved limits if required.
 	if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedEnforcementKey) {
-		klog.V(2).Infof("Enforcing System reserved on cgroup %q with limits: %+v", nc.SystemReservedCgroupName, nc.SystemReserved)
+		klog.V(2).InfoS("Enforcing system reserved on cgroup", "cgroupName", nc.SystemReservedCgroupName, "limits", nc.SystemReserved)
 		if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.SystemReservedCgroupName), nc.SystemReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
 			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
@@ -118,7 +118,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
 	}
 	if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedEnforcementKey) {
-		klog.V(2).Infof("Enforcing kube reserved on cgroup %q with limits: %+v", nc.KubeReservedCgroupName, nc.KubeReserved)
+		klog.V(2).InfoS("Enforcing kube reserved on cgroup", "cgroupName", nc.KubeReservedCgroupName, "limits", nc.KubeReserved)
 		if err := enforceExistingCgroup(cm.cgroupManager, cm.cgroupManager.CgroupName(nc.KubeReservedCgroupName), nc.KubeReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
 			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
@@ -138,7 +138,7 @@ func enforceExistingCgroup(cgroupManager CgroupManager, cName CgroupName, rl v1.
 	if cgroupConfig.ResourceParameters == nil {
 		return fmt.Errorf("%q cgroup is not config properly", cgroupConfig.Name)
 	}
-	klog.V(4).Infof("Enforcing limits on cgroup %q with %d cpu shares, %d bytes of memory, and %d processes", cName, cgroupConfig.ResourceParameters.CpuShares, cgroupConfig.ResourceParameters.Memory, cgroupConfig.ResourceParameters.PidsLimit)
+	klog.V(4).InfoS("Enforcing limits on cgroup", "cgroupName", cName, "cpuShares", cgroupConfig.ResourceParameters.CpuShares, "memory", cgroupConfig.ResourceParameters.Memory, "pidsLimit", cgroupConfig.ResourceParameters.PidsLimit)
 	if !cgroupManager.Exists(cgroupConfig.Name) {
 		return fmt.Errorf("%q cgroup does not exist", cgroupConfig.Name)
 	}
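
For reference, klog renders an InfoS/ErrorS entry as a constant quoted message followed by key="value" pairs (with err first for ErrorS), which is what makes the migrated lines machine-parseable. The ErrorS call above would emit roughly the following; the header fields and source location here are illustrative, not captured output:

E0309 12:00:00.000000    1234 file.go:57] "Failed to create cgroup" err="permission denied" cgroupName="/kubepods"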
@@ -135,7 +135,7 @@ func (m *podContainerManagerImpl) killOnePid(pid int) error {
 		// Hate parsing strings, but
 		// vendor/github.com/opencontainers/runc/libcontainer/
 		// also does this.
-		klog.V(3).Infof("process with pid %v no longer exists", pid)
+		klog.V(3).InfoS("Process no longer exists", "pid", pid)
 		return nil
 	}
 	return err
@@ -158,23 +158,23 @@ func (m *podContainerManagerImpl) tryKillingCgroupProcesses(podCgroup CgroupName
 	removed := map[int]bool{}
 	for i := 0; i < 5; i++ {
 		if i != 0 {
-			klog.V(3).Infof("Attempt %v failed to kill all unwanted process from cgroup: %v. Retyring", i, podCgroup)
+			klog.V(3).InfoS("Attempt failed to kill all unwanted process from cgroup, retrying", "attempt", i, "cgroupName", podCgroup)
 		}
 		errlist = []error{}
 		for _, pid := range pidsToKill {
 			if _, ok := removed[pid]; ok {
 				continue
 			}
-			klog.V(3).Infof("Attempt to kill process with pid: %v from cgroup: %v", pid, podCgroup)
+			klog.V(3).InfoS("Attempting to kill process from cgroup", "pid", pid, "cgroupName", podCgroup)
 			if err := m.killOnePid(pid); err != nil {
-				klog.V(3).Infof("failed to kill process with pid: %v from cgroup: %v", pid, podCgroup)
+				klog.V(3).InfoS("Failed to kill process from cgroup", "pid", pid, "cgroupName", podCgroup, "err", err)
 				errlist = append(errlist, err)
 			} else {
 				removed[pid] = true
 			}
 		}
 		if len(errlist) == 0 {
-			klog.V(3).Infof("successfully killed all unwanted processes from cgroup: %v", podCgroup)
+			klog.V(3).InfoS("Successfully killed all unwanted processes from cgroup", "cgroupName", podCgroup)
 			return nil
 		}
 	}
@@ -185,7 +185,7 @@ func (m *podContainerManagerImpl) tryKillingCgroupProcesses(podCgroup CgroupName
 func (m *podContainerManagerImpl) Destroy(podCgroup CgroupName) error {
 	// Try killing all the processes attached to the pod cgroup
 	if err := m.tryKillingCgroupProcesses(podCgroup); err != nil {
-		klog.Warningf("failed to kill all the processes attached to the %v cgroups", podCgroup)
+		klog.InfoS("Failed to kill all the processes attached to cgroup", "cgroupName", podCgroup, "err", err)
 		return fmt.Errorf("failed to kill all the processes attached to the %v cgroups : %v", podCgroup, err)
 	}

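Also visible in this hunk: klog has no WarningS, so the migration maps klog.Warningf to klog.InfoS with the error attached as an "err" key/value pair rather than promoting it to ErrorS. A minimal sketch of that mapping; the cgroup name and error are illustrative stand-ins:

package main

import (
	"errors"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	defer klog.Flush()

	// Illustrative stand-ins for the values logged above.
	podCgroup := "kubepods/burstable/podabc123"
	err := errors.New("process already finished")

	// Before: klog.Warningf("failed to kill all the processes attached to the %v cgroups", podCgroup)
	// After: the warning becomes InfoS, with the error carried as an "err" pair.
	klog.InfoS("Failed to kill all the processes attached to cgroup", "cgroupName", podCgroup, "err", err)
}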
@@ -195,7 +195,7 @@ func (m *podContainerManagerImpl) Destroy(podCgroup CgroupName) error {
 		ResourceParameters: &ResourceConfig{},
 	}
 	if err := m.cgroupManager.Destroy(containerConfig); err != nil {
-		klog.Warningf("failed to delete cgroup paths for %v : %v", podCgroup, err)
+		klog.InfoS("Failed to delete cgroup paths", "cgroupName", podCgroup, "err", err)
 		return fmt.Errorf("failed to delete cgroup paths for %v : %v", podCgroup, err)
 	}
 	return nil
@@ -274,7 +274,7 @@ func (m *podContainerManagerImpl) GetAllPodsFromCgroups() (map[types.UID]CgroupN
 		parts := strings.Split(basePath, podCgroupNamePrefix)
 		// the uid is missing, so we log the unexpected cgroup not of form pod<uid>
 		if len(parts) != 2 {
-			klog.Errorf("pod cgroup manager ignoring unexpected cgroup %v because it is not a pod", cgroupfsPath)
+			klog.InfoS("Pod cgroup manager ignored unexpected cgroup because it is not a pod", "path", cgroupfsPath)
 			continue
 		}
 		podUID := parts[1]
@@ -135,7 +135,7 @@ func (m *qosContainerManagerImpl) Start(getNodeAllocatable func() v1.ResourceLis
 	go wait.Until(func() {
 		err := m.UpdateCgroups()
 		if err != nil {
-			klog.Warningf("[ContainerManager] Failed to reserve QoS requests: %v", err)
+			klog.InfoS("Failed to reserve QoS requests", "err", err)
 		}
 	}, periodicQOSCgroupUpdateInterval, wait.NeverStop)

@@ -219,17 +219,17 @@ func (m *qosContainerManagerImpl) setMemoryReserve(configs map[v1.PodQOSClass]*C
 	resources := m.getNodeAllocatable()
 	allocatableResource, ok := resources[v1.ResourceMemory]
 	if !ok {
-		klog.V(2).Infof("[Container Manager] Allocatable memory value could not be determined. Not setting QOS memory limts.")
+		klog.V(2).InfoS("Allocatable memory value could not be determined, not setting QoS memory limits")
 		return
 	}
 	allocatable := allocatableResource.Value()
 	if allocatable == 0 {
-		klog.V(2).Infof("[Container Manager] Memory allocatable reported as 0, might be in standalone mode. Not setting QOS memory limts.")
+		klog.V(2).InfoS("Allocatable memory reported as 0, might be in standalone mode, not setting QoS memory limits")
 		return
 	}

 	for qos, limits := range qosMemoryRequests {
-		klog.V(2).Infof("[Container Manager] %s pod requests total %d bytes (reserve %d%%)", qos, limits, percentReserve)
+		klog.V(2).InfoS("QoS pod memory limit", "qos", qos, "limits", limits, "percentReserve", percentReserve)
 	}

 	// Calculate QOS memory limits
@@ -249,7 +249,7 @@ func (m *qosContainerManagerImpl) retrySetMemoryReserve(configs map[v1.PodQOSCla
 	for qos, config := range configs {
 		stats, err := m.cgroupManager.GetResourceStats(config.Name)
 		if err != nil {
-			klog.V(2).Infof("[Container Manager] %v", err)
+			klog.V(2).InfoS("Failed to get resource stats", "err", err)
 			return
 		}
 		usage := stats.MemoryStats.Usage
@@ -307,7 +307,7 @@ func (m *qosContainerManagerImpl) UpdateCgroups() error {
 		}
 	}
 	if updateSuccess {
-		klog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration")
+		klog.V(4).InfoS("Updated QoS cgroup configuration")
 		return nil
 	}

@@ -325,12 +325,12 @@ func (m *qosContainerManagerImpl) UpdateCgroups() error {
 	for _, config := range qosConfigs {
 		err := m.cgroupManager.Update(config)
 		if err != nil {
-			klog.Errorf("[ContainerManager]: Failed to update QoS cgroup configuration")
+			klog.ErrorS(err, "Failed to update QoS cgroup configuration")
 			return err
 		}
 	}

-	klog.V(4).Infof("[ContainerManager]: Updated QoS cgroup configuration")
+	klog.V(4).InfoS("Updated QoS cgroup configuration")
 	return nil
 }

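Note that the V(4).InfoS entries in these hunks are emitted only when klog verbosity is at least 4 (for example, running the kubelet with --v=4). A small sketch showing the same gating set programmatically:

package main

import (
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	// Raise verbosity to 4 so that V(4)-gated entries are emitted.
	_ = flag.Set("v", "4")
	defer klog.Flush()

	klog.V(4).InfoS("Updated QoS cgroup configuration")
	klog.V(5).InfoS("This line is suppressed at verbosity 4")
}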