Merge pull request #120933 from mengjiao-liu/contextual-logging-scheduler-remaining-part

kube-scheduler: convert the remaining part to use contextual logging
Authored by Kubernetes Prow Robot on 2023-10-27 10:30:58 +02:00; committed via GitHub.
15 changed files with 109 additions and 92 deletions
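
The conversion below follows the Kubernetes contextual logging effort (KEP-3077): instead of calling the global klog functions, code receives a logger, normally through a context.Context, so callers can attach names and key/value pairs that show up in every log line downstream. A minimal, runnable sketch of that pattern; the scheduleOne helper and the pod name are illustrative, not taken from this PR:

package main

import (
	"context"

	"k8s.io/klog/v2"
)

// scheduleOne is a hypothetical callee: instead of logging through the
// global klog functions, it pulls the logger out of the context.
func scheduleOne(ctx context.Context) {
	logger := klog.FromContext(ctx)
	logger.Info("Attempting to schedule pod", "pod", "default/nginx")
}

func main() {
	// The caller attaches a named logger to the context; every callee
	// that uses klog.FromContext inherits its name and key/value pairs.
	logger := klog.Background().WithName("scheduler")
	ctx := klog.NewContext(context.Background(), logger)
	scheduleOne(ctx)
}

The diff below applies the same idea to test helpers where no context is available yet.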


@@ -24,6 +24,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	corev1helpers "k8s.io/component-helpers/scheduling/corev1"
+	"k8s.io/klog/v2"
 	extenderv1 "k8s.io/kube-scheduler/extender/v1"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
@@ -191,10 +192,13 @@ func (f *FakeExtender) ProcessPreemption(
 		nodeNameToVictimsCopy[k] = v
 	}
 
+	// If Extender.ProcessPreemption ever gets extended with a context parameter, then the logger should be retrieved from that.
+	// Now, in order not to modify the Extender interface, we get the logger from klog.TODO().
+	logger := klog.TODO()
 	for nodeName, victims := range nodeNameToVictimsCopy {
 		// Try to do preemption on extender side.
 		nodeInfo, _ := nodeInfos.Get(nodeName)
-		extenderVictimPods, extenderPDBViolations, fits, err := f.selectVictimsOnNodeByExtender(pod, nodeInfo.Node())
+		extenderVictimPods, extenderPDBViolations, fits, err := f.selectVictimsOnNodeByExtender(logger, pod, nodeInfo.Node())
 		if err != nil {
 			return nil, err
 		}
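
The klog.TODO() fallback above is the logging analogue of context.TODO(): it returns a working logger while marking the call site as one that should eventually receive its logger from a context. A minimal sketch, assuming a hypothetical legacyHook method whose interface cannot yet grow a context parameter:

package main

import "k8s.io/klog/v2"

// legacyHook stands in for an interface method (like Extender.ProcessPreemption)
// that has no context parameter, so the logger cannot come from a ctx yet.
func legacyHook() {
	logger := klog.TODO() // placeholder until the interface gains a context
	logger.V(4).Info("Running preemption on extender side")
}

func main() { legacyHook() }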
@@ -216,7 +220,7 @@ func (f *FakeExtender) ProcessPreemption(
 // 1. More victim pods (if any) amended by the preemption phase of the extender.
 // 2. Number of violating victims (used to calculate PDB violations).
 // 3. Whether the pod fits after the preemption phase on the extender's side.
-func (f *FakeExtender) selectVictimsOnNodeByExtender(pod *v1.Pod, node *v1.Node) ([]*v1.Pod, int, bool, error) {
+func (f *FakeExtender) selectVictimsOnNodeByExtender(logger klog.Logger, pod *v1.Pod, node *v1.Node) ([]*v1.Pod, int, bool, error) {
 	// If an extender supports preemption but has no cached node info, run the filter to make sure
 	// the default scheduler's decision still stands for the given pod and node.
 	if !f.NodeCacheCapable {
@@ -236,8 +240,8 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(pod *v1.Pod, node *v1.Node)
 	var potentialVictims []*v1.Pod
-	removePod := func(rp *v1.Pod) {
-		nodeInfoCopy.RemovePod(rp)
+	removePod := func(rp *v1.Pod) error {
+		return nodeInfoCopy.RemovePod(logger, rp)
 	}
 	addPod := func(ap *v1.Pod) {
 		nodeInfoCopy.AddPod(ap)
@@ -248,7 +252,9 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(pod *v1.Pod, node *v1.Node)
 	for _, p := range nodeInfoCopy.Pods {
 		if corev1helpers.PodPriority(p.Pod) < podPriority {
 			potentialVictims = append(potentialVictims, p.Pod)
-			removePod(p.Pod)
+			if err := removePod(p.Pod); err != nil {
+				return nil, 0, false, err
+			}
 		}
 	}
 	sort.Slice(potentialVictims, func(i, j int) bool { return util.MoreImportantPod(potentialVictims[i], potentialVictims[j]) })
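
The removePod change above is standard Go plumbing: once the wrapped call (NodeInfo.RemovePod, which now takes a logger and can fail) returns an error, the closure's signature grows an error so each call site decides how to propagate it. A self-contained sketch of that pattern, using an illustrative fakeStore in place of the scheduler's NodeInfo:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("pod not found")

// fakeStore stands in for the scheduler's NodeInfo in this sketch.
type fakeStore struct{ pods map[string]bool }

// remove mimics a call that can fail, like NodeInfo.RemovePod.
func (s *fakeStore) remove(name string) error {
	if !s.pods[name] {
		return errNotFound
	}
	delete(s.pods, name)
	return nil
}

func main() {
	store := &fakeStore{pods: map[string]bool{"pod-a": true}}

	// Like removePod in the diff: forward the error instead of dropping it.
	removePod := func(name string) error { return store.remove(name) }

	for _, name := range []string{"pod-a", "pod-b"} {
		if err := removePod(name); err != nil {
			fmt.Println("remove failed:", err)
			continue
		}
		fmt.Println("removed", name)
	}
}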
@@ -275,7 +281,9 @@ func (f *FakeExtender) selectVictimsOnNodeByExtender(pod *v1.Pod, node *v1.Node)
 			addPod(p)
 			status := f.runPredicate(pod, nodeInfoCopy.Node())
 			if !status.IsSuccess() {
-				removePod(p)
+				if err := removePod(p); err != nil {
+					return false
+				}
 				victims = append(victims, p)
 			}
 			return status.IsSuccess()
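
A note on the last hunk: it sits inside a boolean helper closure, so a failed removePod cannot be propagated as an error from that scope; surfacing it as return false is the closest the closure's signature allows.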