Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-27 13:37:30 +00:00
update structured log for policy_static.go

commit 9e024e839b (parent 4cf80f160d)
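The commit replaces klog's printf-style calls (Infof/Errorf) in the CPU manager static policy with their structured counterparts (InfoS/ErrorS). Below is a minimal, self-contained sketch, not taken from the kubelet, of the two call shapes: the message string stays constant and the variable data moves into alternating key/value pairs.

// Minimal sketch, not kubelet code: the same event logged with the old
// printf-style API and with the structured API this commit adopts.
package main

import "k8s.io/klog/v2"

func main() {
	reservedSize := 2
	reserved := "0,4" // stand-in for a cpuset's String() output

	// Before: format string with positional verbs.
	klog.Infof("[cpumanager] reserved %d CPUs (%q) not available for exclusive assignment", reservedSize, reserved)

	// After: constant message plus alternating key/value pairs.
	klog.InfoS("[cpumanager] reserved CPUs not available for exclusive assignment", "reservedSize", reservedSize, "reserved", reserved)

	klog.Flush()
}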
policy_static.go
@@ -103,11 +103,11 @@ func NewStaticPolicy(topology *topology.CPUTopology, numReservedCPUs int, reserv
 	}
 
 	if reserved.Size() != numReservedCPUs {
-		err := fmt.Errorf("[cpumanager] unable to reserve the required amount of CPUs (size of %s did not equal %d)", reserved, numReservedCPUs)
+		err := fmt.ErrorS(nil, "[cpumanager] unable to reserve the required amount of CPUs (not equal)", "reserved", reserved, "numReservedCPUs", numReservedCPUs)
 		return nil, err
 	}
 
-	klog.Infof("[cpumanager] reserved %d CPUs (\"%s\") not available for exclusive assignment", reserved.Size(), reserved)
+	klog.InfoS("[cpumanager] reserved CPUs not available for exclusive assignment", "reservedSize", reserved.Size(), "reserved", reserved)
 
 	return &staticPolicy{
 		topology: topology,
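One change in this hunk looks suspect: the standard library's fmt package has no ErrorS function, and klog.ErrorS returns nothing, so `err := fmt.ErrorS(nil, ...)` cannot compile. Presumably the constructed-and-returned error should stay on fmt.Errorf, with ErrorS reserved for actual log statements. A sketch under that assumption, using a hypothetical checkReserved helper rather than the real NewStaticPolicy code:

// Sketch under the assumption above; checkReserved is a hypothetical helper,
// not the real NewStaticPolicy. The returned error is built with fmt.Errorf,
// because fmt has no ErrorS and klog.ErrorS only writes a log entry.
package main

import "fmt"

func checkReserved(reservedSize, numReservedCPUs int) error {
	if reservedSize != numReservedCPUs {
		return fmt.Errorf("[cpumanager] unable to reserve the required amount of CPUs (size of %d did not equal %d)", reservedSize, numReservedCPUs)
	}
	return nil
}

func main() {
	if err := checkReserved(1, 2); err != nil {
		fmt.Println(err)
	}
}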
@@ -123,7 +123,7 @@ func (p *staticPolicy) Name() string {
 
 func (p *staticPolicy) Start(s state.State) error {
 	if err := p.validateState(s); err != nil {
-		klog.Errorf("[cpumanager] static policy invalid state: %v, please drain node and remove policy state file", err)
+		klog.ErrorS(err, "[cpumanager] static policy invalid state, please drain node and remove policy state file")
 		return err
 	}
 	return nil
@@ -218,23 +218,23 @@ func (p *staticPolicy) updateCPUsToReuse(pod *v1.Pod, container *v1.Container, c
 
 func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Container) error {
 	if numCPUs := p.guaranteedCPUs(pod, container); numCPUs != 0 {
-		klog.Infof("[cpumanager] static policy: Allocate (pod: %s, container: %s)", format.Pod(pod), container.Name)
+		klog.InfoS("[cpumanager] static policy: Allocate", "pod", klog.KObj(pod), "containerName", container.Name)
 		// container belongs in an exclusively allocated pool
 
 		if cpuset, ok := s.GetCPUSet(string(pod.UID), container.Name); ok {
 			p.updateCPUsToReuse(pod, container, cpuset)
-			klog.Infof("[cpumanager] static policy: container already present in state, skipping (pod: %s, container: %s)", format.Pod(pod), container.Name)
+			klog.InfoS("[cpumanager] static policy: container already present in state, skipping", "pod", klog.KObj(pod), "containerName", container.Name)
 			return nil
 		}
 
 		// Call Topology Manager to get the aligned socket affinity across all hint providers.
 		hint := p.affinity.GetAffinity(string(pod.UID), container.Name)
-		klog.Infof("[cpumanager] Pod %v, Container %v Topology Affinity is: %v", format.Pod(pod), container.Name, hint)
+		klog.InfoS("[cpumanager] Topology Affinity", "pod", klog.KObj(pod), "containerName", container.Name, "affinity", hint)
 
 		// Allocate CPUs according to the NUMA affinity contained in the hint.
 		cpuset, err := p.allocateCPUs(s, numCPUs, hint.NUMANodeAffinity, p.cpusToReuse[string(pod.UID)])
 		if err != nil {
-			klog.Errorf("[cpumanager] unable to allocate %d CPUs (pod: %s, container: %s, error: %v)", numCPUs, format.Pod(pod), container.Name, err)
+			klog.ErrorS(err, "[cpumanager] unable to allocate CPUs", "numCPUs", numCPUs, "pod", klog.KObj(pod), "containerName", container.Name)
 			return err
 		}
 		s.SetCPUSet(string(pod.UID), container.Name, cpuset)
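The new log lines above pass klog.KObj(pod) instead of format.Pod(pod). KObj wraps any object carrying standard metadata into an ObjectRef, which the default text logger renders as namespace/name. A small stand-alone sketch, assuming the k8s.io/api, k8s.io/apimachinery, and k8s.io/klog/v2 modules are on the module path:

// Stand-alone sketch, not kubelet code: klog.KObj turns an API object into
// an ObjectRef, logged as pod="default/demo-pod" by the default text format.
package main

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo-pod", Namespace: "default"}}

	klog.InfoS("[cpumanager] static policy: Allocate", "pod", klog.KObj(pod), "containerName", "demo-container")
	klog.Flush()
}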
@@ -246,7 +246,7 @@ func (p *staticPolicy) Allocate(s state.State, pod *v1.Pod, container *v1.Contai
 }
 
 func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerName string) error {
-	klog.Infof("[cpumanager] static policy: RemoveContainer (pod: %s, container: %s)", podUID, containerName)
+	klog.InfoS("[cpumanager] static policy: RemoveContainer ", "podUID", podUID, "containerName", containerName)
 	if toRelease, ok := s.GetCPUSet(podUID, containerName); ok {
 		s.Delete(podUID, containerName)
 		// Mutate the shared pool, adding released cpus.
@@ -256,7 +256,7 @@ func (p *staticPolicy) RemoveContainer(s state.State, podUID string, containerNa
 }
 
 func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bitmask.BitMask, reusableCPUs cpuset.CPUSet) (cpuset.CPUSet, error) {
-	klog.Infof("[cpumanager] allocateCpus: (numCPUs: %d, socket: %v)", numCPUs, numaAffinity)
+	klog.InfoS("[cpumanager] allocateCpus", "numCPUs", numCPUs, "socket", numaAffinity)
 
 	allocatableCPUs := p.GetAllocatableCPUs(s).Union(reusableCPUs)
 
@@ -291,7 +291,7 @@ func (p *staticPolicy) allocateCPUs(s state.State, numCPUs int, numaAffinity bit
 	// Remove allocated CPUs from the shared CPUSet.
 	s.SetDefaultCPUSet(s.GetDefaultCPUSet().Difference(result))
 
-	klog.Infof("[cpumanager] allocateCPUs: returning \"%v\"", result)
+	klog.InfoS("[cpumanager] allocateCPUs", "result", result)
 	return result, nil
 }
 
@@ -353,7 +353,7 @@ func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v
 	// kubelet restart, for example.
 	if allocated, exists := s.GetCPUSet(string(pod.UID), container.Name); exists {
 		if allocated.Size() != requested {
-			klog.Errorf("[cpumanager] CPUs already allocated to (pod %v, container %v) with different number than request: requested: %d, allocated: %d", format.Pod(pod), container.Name, requested, allocated.Size())
+			klog.ErrorS(nil, "[cpumanager] CPUs already allocated to container with different number than request", "pod", klog.KObj(pod), "containerName", container.Name, "requested", requested, "allocated", allocated.Size())
 			// An empty list of hints will be treated as a preference that cannot be satisfied.
 			// In definition of hints this is equal to: TopologyHint[NUMANodeAffinity: nil, Preferred: false].
 			// For all but the best-effort policy, the Topology Manager will throw a pod-admission error.
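The klog.ErrorS(nil, ...) call introduced above relies on ErrorS accepting a nil error: there is no error value in this branch, but the mismatch should still be reported at error severity. A minimal sketch of that pattern, with made-up values for illustration:

// Minimal sketch of ErrorS with a nil error: the entry is emitted at error
// severity with only the message and key/value pairs, no attached err field.
package main

import "k8s.io/klog/v2"

func main() {
	requested, allocated := 4, 2 // illustrative values only

	klog.ErrorS(nil, "[cpumanager] CPUs already allocated to container with different number than request",
		"requested", requested, "allocated", allocated)
	klog.Flush()
}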
@@ -361,7 +361,7 @@ func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v
 				string(v1.ResourceCPU): {},
 			}
 		}
-		klog.Infof("[cpumanager] Regenerating TopologyHints for CPUs already allocated to (pod %v, container %v)", format.Pod(pod), container.Name)
+		klog.InfoS("[cpumanager] Regenerating TopologyHints for CPUs already allocated", "pod", klog.KObj(pod), "containerName", container.Name)
 		return map[string][]topologymanager.TopologyHint{
 			string(v1.ResourceCPU): p.generateCPUTopologyHints(allocated, cpuset.CPUSet{}, requested),
 		}
@@ -376,7 +376,7 @@ func (p *staticPolicy) GetTopologyHints(s state.State, pod *v1.Pod, container *v
 
 	// Generate hints.
 	cpuHints := p.generateCPUTopologyHints(available, reusable, requested)
-	klog.Infof("[cpumanager] TopologyHints generated for pod '%v', container '%v': %v", format.Pod(pod), container.Name, cpuHints)
+	klog.InfoS("[cpumanager] TopologyHints generated", "pod", klog.KObj(pod), "containerName", container.Name, "cpuHints", cpuHints)
 
 	return map[string][]topologymanager.TopologyHint{
 		string(v1.ResourceCPU): cpuHints,
@@ -403,7 +403,7 @@ func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[strin
 		// kubelet restart, for example.
 		if allocated, exists := s.GetCPUSet(string(pod.UID), container.Name); exists {
 			if allocated.Size() != requestedByContainer {
-				klog.Errorf("[cpumanager] CPUs already allocated to (pod %v, container %v) with different number than request: requested: %d, allocated: %d", format.Pod(pod), container.Name, requestedByContainer, allocated.Size())
+				klog.ErrorS(nil, "[cpumanager] CPUs already allocated to container with different number than request", "pod", klog.KObj(pod), "containerName", container.Name, "requested", requested, "requestedByContainer", requestedByContainer, "allocated", allocated.Size())
 				// An empty list of hints will be treated as a preference that cannot be satisfied.
 				// In definition of hints this is equal to: TopologyHint[NUMANodeAffinity: nil, Preferred: false].
 				// For all but the best-effort policy, the Topology Manager will throw a pod-admission error.
@@ -416,7 +416,7 @@ func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[strin
 		}
 	}
 	if assignedCPUs.Size() == requested {
-		klog.Infof("[cpumanager] Regenerating TopologyHints for CPUs already allocated to pod %v", format.Pod(pod))
+		klog.InfoS("[cpumanager] Regenerating TopologyHints for CPUs already allocated", "pod", klog.KObj(pod))
 		return map[string][]topologymanager.TopologyHint{
 			string(v1.ResourceCPU): p.generateCPUTopologyHints(assignedCPUs, cpuset.CPUSet{}, requested),
 		}
@@ -434,7 +434,7 @@ func (p *staticPolicy) GetPodTopologyHints(s state.State, pod *v1.Pod) map[strin
 
 	// Generate hints.
 	cpuHints := p.generateCPUTopologyHints(available, reusable, requested)
-	klog.Infof("[cpumanager] TopologyHints generated for pod '%v' : %v", format.Pod(pod), cpuHints)
+	klog.InfoS("[cpumanager] TopologyHints generated", "pod", klog.KObj(pod), "cpuHints", cpuHints)
 
 	return map[string][]topologymanager.TopologyHint{
 		string(v1.ResourceCPU): cpuHints,