Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-29 22:46:12 +00:00)

Merge pull request #105967 from shivanshu1333/feature2/master/105841

Migrated scheduler files `preemption.go`, `stateful.go`, `resource_allocation.go` to structured logging

Commit 6ac2d8edc8
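The hunks below all follow the same klog structured-logging conventions: printf-style `Infof`/`Error` calls become `InfoS`/`ErrorS` with a constant message plus key/value pairs, and object or node references are passed through `klog.KObj`/`klog.KRef`. A minimal, self-contained sketch of that pattern is shown here; the function, pod, and messages are illustrative and not taken from the diff.

```go
package main

import (
	"errors"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/klog/v2"
)

// logMigrationSketch illustrates the migration pattern applied in this commit:
// a fixed log message with structured key/value pairs instead of format strings,
// and klog.KObj / klog.KRef for object and node references.
func logMigrationSketch(pod *v1.Pod, nodeName string) {
	// Before: klog.Infof("pod %v/%v considered for node %v", pod.Namespace, pod.Name, nodeName)
	klog.InfoS("Considering pod for node", "pod", klog.KObj(pod), "node", klog.KRef("", nodeName))

	// Before: klog.Error("configuration cannot be empty")
	// ErrorS takes an error (or nil) first, then the message and key/value pairs.
	klog.ErrorS(errors.New("configuration cannot be empty"), "Plugin initialization failed", "pod", klog.KObj(pod))
}

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "demo-pod"}}
	logMigrationSketch(pod, "node-1")
	klog.Flush()
}
```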
@@ -98,7 +98,7 @@ func (c *CriticalPodAdmissionHandler) evictPodsToFreeRequests(admitPod *v1.Pod,
 // record that we are evicting the pod
 c.recorder.Eventf(pod, v1.EventTypeWarning, events.PreemptContainer, message)
 // this is a blocking call and should only return when the pod and its containers are killed.
-klog.V(3).InfoS("Preempting pod to free up resources", "pod", klog.KObj(pod), "podUID", pod.UID, "insufficientResources", insufficientResources.toString())
+klog.V(3).InfoS("Preempting pod to free up resources", "pod", klog.KObj(pod), "podUID", pod.UID, "insufficientResources", insufficientResources)
 err := c.killPodFunc(pod, true, nil, func(status *v1.PodStatus) {
 status.Phase = v1.PodFailed
 status.Reason = events.PreemptContainer
@@ -84,7 +84,7 @@ func (mp *MultipointExample) PreBind(ctx context.Context, state *framework.Cycle
 // New initializes a new plugin and returns it.
 func New(config *runtime.Unknown, _ framework.Handle) (framework.Plugin, error) {
 if config == nil {
-klog.Error("MultipointExample configuration cannot be empty")
+klog.ErrorS(nil, "MultipointExample configuration cannot be empty")
 return nil, fmt.Errorf("MultipointExample configuration cannot be empty")
 }
 mp := MultipointExample{}
@@ -70,10 +70,9 @@ func (r *resourceAllocationScorer) score(
 score := r.scorer(requested, allocatable)

 if klog.V(10).Enabled() {
-klog.Infof(
-"%v -> %v: %v, map of allocatable resources %v, map of requested resources %v ,score %d,",
-pod.Name, node.Name, r.Name,
-allocatable, requested, score,
+klog.InfoS("Listing internal info for allocatable resources, requested resources and score", "pod",
+klog.KObj(pod), "node", klog.KObj(node), "resourceAllocationScorer", r.Name,
+"allocatableResource", allocatable, "requestedResource", requested, "resourceScore", score,
 )
 }

@@ -109,7 +108,7 @@ func (r *resourceAllocationScorer) calculateResourceAllocatableRequest(nodeInfo
 }
 }
 if klog.V(10).Enabled() {
-klog.Infof("requested resource %v not considered for node score calculation", resource)
+klog.InfoS("Requested resource is omitted for node score calculation", "resourceName", resource)
 }
 return 0, 0
 }
@@ -132,7 +132,7 @@ func (ev *Evaluator) Preempt(ctx context.Context, pod *v1.Pod, m framework.NodeT
 podNamespace, podName := pod.Namespace, pod.Name
 pod, err := ev.PodLister.Pods(pod.Namespace).Get(pod.Name)
 if err != nil {
-klog.ErrorS(err, "getting the updated preemptor pod object", "pod", klog.KRef(podNamespace, podName))
+klog.ErrorS(err, "Getting the updated preemptor pod object", "pod", klog.KRef(podNamespace, podName))
 return nil, framework.AsStatus(err)
 }

@@ -196,7 +196,7 @@ func (ev *Evaluator) findCandidates(ctx context.Context, pod *v1.Pod, m framewor
 klog.V(3).InfoS("Preemption will not help schedule pod on any node", "pod", klog.KObj(pod))
 // In this case, we should clean-up any existing nominated node name of the pod.
 if err := util.ClearNominatedNodeName(ev.Handler.ClientSet(), pod); err != nil {
-klog.ErrorS(err, "cannot clear 'NominatedNodeName' field of pod", "pod", klog.KObj(pod))
+klog.ErrorS(err, "Cannot clear 'NominatedNodeName' field of pod", "pod", klog.KObj(pod))
 // We do not return as this error is not critical.
 }
 return nil, unschedulableNodeStatus, nil
@@ -213,7 +213,7 @@ func (ev *Evaluator) findCandidates(ctx context.Context, pod *v1.Pod, m framewor
 for i := offset; i < offset+10 && i < int32(len(potentialNodes)); i++ {
 sample = append(sample, potentialNodes[i].Node().Name)
 }
-klog.Infof("from a pool of %d nodes (offset: %d, sample %d nodes: %v), ~%d candidates will be chosen", len(potentialNodes), offset, len(sample), sample, numCandidates)
+klog.InfoS("Selecting candidates from a pool of nodes", "potentialNodesCount", len(potentialNodes), "offset", offset, "sampleLength", len(sample), "sample", sample, "candidates", numCandidates)
 }
 candidates, nodeStatuses, err := ev.DryRunPreemption(ctx, pod, potentialNodes, pdbs, offset, numCandidates)
 for node, nodeStatus := range unschedulableNodeStatus {
@@ -257,7 +257,7 @@ func (ev *Evaluator) callExtenders(pod *v1.Pod, candidates []Candidate) ([]Candi
 if victims == nil || len(victims.Pods) == 0 {
 if extender.IsIgnorable() {
 delete(nodeNameToVictims, nodeName)
-klog.InfoS("Ignoring node without victims", "node", nodeName)
+klog.InfoS("Ignoring node without victims", "node", klog.KRef("", nodeName))
 continue
 }
 return nil, framework.AsStatus(fmt.Errorf("expected at least one victim pod on node %q", nodeName))
@@ -307,7 +307,7 @@ func (ev *Evaluator) SelectCandidate(candidates []Candidate) Candidate {
 }

 // We shouldn't reach here.
-klog.ErrorS(errors.New("no candidate selected"), "should not reach here", "candidates", candidates)
+klog.ErrorS(errors.New("no candidate selected"), "Should not reach here", "candidates", candidates)
 // To not break the whole flow, return the first candidate.
 return candidates[0]
 }
@@ -339,7 +339,7 @@ func (ev *Evaluator) prepareCandidate(c Candidate, pod *v1.Pod, pluginName strin
 // lets scheduler find another place for them.
 nominatedPods := getLowerPriorityNominatedPods(fh, pod, c.Name())
 if err := util.ClearNominatedNodeName(cs, nominatedPods...); err != nil {
-klog.ErrorS(err, "cannot clear 'NominatedNodeName' field")
+klog.ErrorS(err, "Cannot clear 'NominatedNodeName' field")
 // We do not return as this error is not critical.
 }

@@ -481,7 +481,7 @@ func pickOneNodeForPreemption(nodesToVictims map[string]*extenderv1.Victims) str
 if latestStartTime == nil {
 // If the earliest start time of all pods on the 1st node is nil, just return it,
 // which is not expected to happen.
-klog.ErrorS(errors.New("earliestStartTime is nil for node"), "should not reach here", "node", minNodes2[0])
+klog.ErrorS(errors.New("earliestStartTime is nil for node"), "Should not reach here", "node", klog.KRef("", minNodes2[0]))
 return minNodes2[0]
 }
 nodeToReturn := minNodes2[0]
@@ -490,7 +490,7 @@ func pickOneNodeForPreemption(nodesToVictims map[string]*extenderv1.Victims) str
 // Get earliest start time of all pods on the current node.
 earliestStartTimeOnNode := util.GetEarliestPodStartTime(nodesToVictims[node])
 if earliestStartTimeOnNode == nil {
-klog.ErrorS(errors.New("earliestStartTime is nil for node"), "should not reach here", "node", node)
+klog.ErrorS(errors.New("earliestStartTime is nil for node"), "Should not reach here", "node", klog.KRef("", node))
 continue
 }
 if earliestStartTimeOnNode.After(latestStartTime.Time) {