Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-06 10:43:56 +00:00)
Merge pull request #127256 from dom4ha/scheduler_test_logging
Enable testing logger in the remaining scheduler tests.
Commit: 38f68d59a7
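The change applies one mechanical pattern throughout: instead of deriving the context from context.Background() (or context.TODO()) and obtaining a logger via klog.FromContext, each test takes both the logger and the context from ktesting.NewTestContext, so log output from the code under test is routed through a per-test logger. A minimal sketch of the before/after shape, for illustration only (the package and test names below are hypothetical, not taken from this diff):

package example

import (
	"context"
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

// Old pattern: the context carries no test-specific logger, so klog.FromContext
// falls back to the program's global klog logger and output is not tied to this test.
func TestWithBackgroundContext(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	logger := klog.FromContext(ctx)
	logger.Info("logged through the global klog configuration")
}

// New pattern: ktesting.NewTestContext returns a logger bound to t and a context
// carrying that logger, so code that calls klog.FromContext(ctx) logs via the test.
func TestWithTestContext(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	logger.Info("captured and attributed to this test")
}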
@@ -1425,8 +1425,8 @@ func TestPriorityQueue_addToActiveQ(t *testing.T) {

 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
-logger := klog.FromContext(ctx)
+logger, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()

 m := map[string][]framework.PreEnqueuePlugin{"": tt.plugins}
@@ -1507,7 +1507,7 @@ func BenchmarkMoveAllToActiveOrBackoffQueue(b *testing.B) {
 for _, tt := range tests {
 for _, podsInUnschedulablePods := range []int{1000, 5000} {
 b.Run(fmt.Sprintf("%v-%v", tt.name, podsInUnschedulablePods), func(b *testing.B) {
-logger, _ := ktesting.NewTestContext(b)
+logger, ctx := ktesting.NewTestContext(b)
 for i := 0; i < b.N; i++ {
 b.StopTimer()
 c := testingclock.NewFakeClock(time.Now())
@@ -1528,7 +1528,7 @@ func BenchmarkMoveAllToActiveOrBackoffQueue(b *testing.B) {
 }
 }

-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 q := NewTestQueue(ctx, newDefaultQueueSort(), WithClock(c), WithQueueingHintMapPerProfile(m))

@@ -2103,14 +2103,14 @@ func TestPriorityQueue_NominatedPodDeleted(t *testing.T) {

 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-logger, _ := ktesting.NewTestContext(t)
+logger, ctx := ktesting.NewTestContext(t)
 cs := fake.NewClientset(tt.podInfo.Pod)
 informerFactory := informers.NewSharedInformerFactory(cs, 0)
 podLister := informerFactory.Core().V1().Pods().Lister()

 // Build a PriorityQueue.
 q := NewPriorityQueue(newDefaultQueueSort(), informerFactory, WithPodLister(podLister))
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 informerFactory.Start(ctx.Done())
 informerFactory.WaitForCacheSync(ctx.Done())
@@ -2269,7 +2269,8 @@ func TestPriorityQueue_UpdateNominatedPodForNode(t *testing.T) {
 }

 func TestPriorityQueue_NewWithOptions(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 q := NewTestQueue(ctx,
 newDefaultQueueSort(),
@@ -3631,7 +3632,8 @@ func TestPriorityQueue_calculateBackoffDuration(t *testing.T) {
 }
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 q := NewTestQueue(ctx, newDefaultQueueSort(), WithPodInitialBackoffDuration(tt.initialBackoffDuration), WithPodMaxBackoffDuration(tt.maxBackoffDuration))
 if got := q.calculateBackoffDuration(tt.podInfo); got != tt.want {
@@ -20,6 +20,8 @@ import (
 "context"
 "errors"
 "testing"
+
+"k8s.io/klog/v2/ktesting"
 )

 func TestErrorChannel(t *testing.T) {
@@ -35,7 +37,8 @@ func TestErrorChannel(t *testing.T) {
 t.Errorf("expect %v from err channel, but got %v", err, actualErr)
 }

-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 errCh.SendErrorWithCancel(err, cancel)
 if actualErr := errCh.ReceiveError(); actualErr != err {
 t.Errorf("expect %v from err channel, but got %v", err, actualErr)
@@ -982,11 +982,12 @@ func TestPreFilterDisabled(t *testing.T) {
 nodeInfo := framework.NewNodeInfo()
 node := v1.Node{}
 nodeInfo.SetNode(&node)
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 p := plugintesting.SetupPluginWithInformers(ctx, t, New, &config.InterPodAffinityArgs{}, cache.NewEmptySnapshot(), nil)
 cycleState := framework.NewCycleState()
-gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), cycleState, pod, nodeInfo)
+gotStatus := p.(framework.FilterPlugin).Filter(ctx, cycleState, pod, nodeInfo)
 wantStatus := framework.AsStatus(fmt.Errorf(`error reading "PreFilterInterPodAffinity" from cycleState: %w`, framework.ErrNotFound))
 if !reflect.DeepEqual(gotStatus, wantStatus) {
 t.Errorf("status does not match: %v, want: %v", gotStatus, wantStatus)
@@ -1244,7 +1245,7 @@ func TestPreFilterStateAddRemovePod(t *testing.T) {
 return p.(*InterPodAffinity), cycleState, state, snapshot
 }

-ctx := context.Background()
+_, ctx := ktesting.NewTestContext(t)
 // allPodsState is the state produced when all pods, including test.addedPod are given to prefilter.
 _, _, allPodsState, _ := getState(append(test.existingPods, test.addedPod))

@@ -1277,7 +1278,7 @@ func TestPreFilterStateAddRemovePod(t *testing.T) {
 }

 // Remove the added pod pod and make sure it is equal to the original state.
-if err := ipa.RemovePod(context.Background(), cycleState, test.pendingPod, mustNewPodInfo(t, test.addedPod), nodeInfo); err != nil {
+if err := ipa.RemovePod(ctx, cycleState, test.pendingPod, mustNewPodInfo(t, test.addedPod), nodeInfo); err != nil {
 t.Errorf("error removing pod from meta: %v", err)
 }
 if !reflect.DeepEqual(originalState, state) {
@@ -1429,7 +1430,8 @@ func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) {
 t.Run(tt.name, func(t *testing.T) {
 snapshot := cache.NewSnapshot(tt.existingPods, tt.nodes)
 l, _ := snapshot.NodeInfos().List()
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 p := plugintesting.SetupPluginWithInformers(ctx, t, New, &config.InterPodAffinityArgs{}, snapshot, nil)
 gotAffinityPodsMap, gotAntiAffinityPodsMap := p.(*InterPodAffinity).getIncomingAffinityAntiAffinityCounts(ctx, mustNewPodInfo(t, tt.pod), l)
@@ -915,7 +915,7 @@ func TestNodeAffinity(t *testing.T) {
 state := framework.NewCycleState()
 var gotStatus *framework.Status
 if test.runPreFilter {
-gotPreFilterResult, gotStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), state, test.pod)
+gotPreFilterResult, gotStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, state, test.pod)
 if diff := cmp.Diff(test.wantPreFilterStatus, gotStatus); diff != "" {
 t.Errorf("unexpected PreFilter Status (-want,+got):\n%s", diff)
 }
@@ -923,7 +923,7 @@ func TestNodeAffinity(t *testing.T) {
 t.Errorf("unexpected PreFilterResult (-want,+got):\n%s", diff)
 }
 }
-gotStatus = p.(framework.FilterPlugin).Filter(context.Background(), state, test.pod, nodeInfo)
+gotStatus = p.(framework.FilterPlugin).Filter(ctx, state, test.pod, nodeInfo)
 if diff := cmp.Diff(test.wantStatus, gotStatus); diff != "" {
 t.Errorf("unexpected Filter Status (-want,+got):\n%s", diff)
 }
@@ -732,7 +732,7 @@ func TestRestartableInitContainers(t *testing.T) {
 t.Fatal(err)
 }
 cycleState := framework.NewCycleState()
-_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(context.Background(), cycleState, test.pod)
+_, preFilterStatus := p.(framework.PreFilterPlugin).PreFilter(ctx, cycleState, test.pod)
 if diff := cmp.Diff(test.wantPreFilterStatus, preFilterStatus); diff != "" {
 t.Error("status does not match (-expected +actual):\n", diff)
 }
@@ -1112,7 +1112,8 @@ func TestEventsToRegister(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 fp := &Fit{enableInPlacePodVerticalScaling: test.inPlacePodVerticalScalingEnabled}
-actualClusterEvents, err := fp.EventsToRegister(context.TODO())
+_, ctx := ktesting.NewTestContext(t)
+actualClusterEvents, err := fp.EventsToRegister(ctx)
 if err != nil {
 t.Fatal(err)
 }
@@ -328,11 +328,11 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {

 var gotList framework.NodeScoreList
 for _, n := range test.nodes {
-status := p.(framework.PreScorePlugin).PreScore(context.Background(), state, test.pod, tf.BuildNodeInfos(test.nodes))
+status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
 if !status.IsSuccess() {
 t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
 }
-score, status := p.(framework.ScorePlugin).Score(context.Background(), state, test.pod, n.Name)
+score, status := p.(framework.ScorePlugin).Score(ctx, state, test.pod, n.Name)
 if !status.IsSuccess() {
 t.Errorf("Score is expected to return success, but didn't. Got status: %v", status)
 }
@@ -554,14 +554,14 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
 t.Fatalf("unexpected error: %v", err)
 }

-status := p.(framework.PreScorePlugin).PreScore(context.Background(), state, test.pod, tf.BuildNodeInfos(test.nodes))
+status := p.(framework.PreScorePlugin).PreScore(ctx, state, test.pod, tf.BuildNodeInfos(test.nodes))
 if !status.IsSuccess() {
 t.Errorf("PreScore is expected to return success, but didn't. Got status: %v", status)
 }

 var gotScores framework.NodeScoreList
 for _, n := range test.nodes {
-score, status := p.(framework.ScorePlugin).Score(context.Background(), state, test.pod, n.Name)
+score, status := p.(framework.ScorePlugin).Score(ctx, state, test.pod, n.Name)
 if !status.IsSuccess() {
 t.Errorf("Score is expected to return success, but didn't. Got status: %v", status)
 }
@@ -3064,7 +3064,7 @@ func TestSingleConstraint(t *testing.T) {

 for _, node := range tt.nodes {
 nodeInfo, _ := snapshot.NodeInfos().Get(node.Name)
-status := p.Filter(context.Background(), state, tt.pod, nodeInfo)
+status := p.Filter(ctx, state, tt.pod, nodeInfo)
 if len(tt.wantStatusCode) != 0 && status.Code() != tt.wantStatusCode[node.Name] {
 t.Errorf("[%s]: expected status code %v got %v", node.Name, tt.wantStatusCode[node.Name], status.Code())
 }
@@ -3408,7 +3408,7 @@ func TestMultipleConstraints(t *testing.T) {

 for _, node := range tt.nodes {
 nodeInfo, _ := snapshot.NodeInfos().Get(node.Name)
-status := p.Filter(context.Background(), state, tt.pod, nodeInfo)
+status := p.Filter(ctx, state, tt.pod, nodeInfo)
 if len(tt.wantStatusCode) != 0 && status.Code() != tt.wantStatusCode[node.Name] {
 t.Errorf("[%s]: expected error code %v got %v", node.Name, tt.wantStatusCode[node.Name], status.Code())
 }
@@ -3425,7 +3425,7 @@ func TestPreFilterDisabled(t *testing.T) {
 _, ctx := ktesting.NewTestContext(t)
 p := plugintesting.SetupPlugin(ctx, t, topologySpreadFunc, &config.PodTopologySpreadArgs{DefaultingType: config.ListDefaulting}, cache.NewEmptySnapshot())
 cycleState := framework.NewCycleState()
-gotStatus := p.(*PodTopologySpread).Filter(context.Background(), cycleState, pod, nodeInfo)
+gotStatus := p.(*PodTopologySpread).Filter(ctx, cycleState, pod, nodeInfo)
 wantStatus := framework.AsStatus(fmt.Errorf(`reading "PreFilterPodTopologySpread" from cycleState: %w`, framework.ErrNotFound))
 if !reflect.DeepEqual(gotStatus, wantStatus) {
 t.Errorf("status does not match: %v, want: %v", gotStatus, wantStatus)
@@ -1429,14 +1429,14 @@ func BenchmarkTestPodTopologySpreadScore(b *testing.B) {
 var gotList framework.NodeScoreList
 for _, n := range filteredNodes {
 nodeName := n.Name
-score, status := p.Score(context.Background(), state, tt.pod, nodeName)
+score, status := p.Score(ctx, state, tt.pod, nodeName)
 if !status.IsSuccess() {
 b.Fatalf("unexpected error: %v", status)
 }
 gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
 }

-status = p.NormalizeScore(context.Background(), state, tt.pod, gotList)
+status = p.NormalizeScore(ctx, state, tt.pod, gotList)
 if !status.IsSuccess() {
 b.Fatal(status)
 }
@@ -99,7 +99,8 @@ func TestGCEDiskConflicts(t *testing.T) {

 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 p := newPlugin(ctx, t)
 cycleState := framework.NewCycleState()
@@ -173,7 +174,8 @@ func TestAWSDiskConflicts(t *testing.T) {

 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 p := newPlugin(ctx, t)
 cycleState := framework.NewCycleState()
@@ -253,7 +255,8 @@ func TestRBDDiskConflicts(t *testing.T) {

 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 p := newPlugin(ctx, t)
 cycleState := framework.NewCycleState()
@@ -333,7 +336,8 @@ func TestISCSIDiskConflicts(t *testing.T) {

 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 p := newPlugin(ctx, t)
 cycleState := framework.NewCycleState()
@@ -460,7 +464,8 @@ func TestAccessModeConflicts(t *testing.T) {

 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 p := newPluginWithListers(ctx, t, test.existingPods, test.existingNodes, test.existingPVCs)
 cycleState := framework.NewCycleState()
@@ -660,8 +665,8 @@ func Test_isSchedulableAfterPodDeleted(t *testing.T) {

 for name, tc := range testcases {
 t.Run(name, func(t *testing.T) {
-logger, _ := ktesting.NewTestContext(t)
-ctx, cancel := context.WithCancel(context.Background())
+logger, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 p := newPluginWithListers(ctx, t, tc.existingPods, nil, []*v1.PersistentVolumeClaim{tc.existingPVC})

@@ -754,8 +759,8 @@ func Test_isSchedulableAfterPersistentVolumeClaimChange(t *testing.T) {

 for name, tc := range testcases {
 t.Run(name, func(t *testing.T) {
-logger, _ := ktesting.NewTestContext(t)
-ctx, cancel := context.WithCancel(context.Background())
+logger, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 p := newPluginWithListers(ctx, t, tc.existingPods, nil, []*v1.PersistentVolumeClaim{tc.newObj.(*v1.PersistentVolumeClaim)})
@@ -803,7 +803,8 @@ func BenchmarkVolumeZone(b *testing.B) {

 for _, tt := range tests {
 b.Run(tt.Name, func(b *testing.B) {
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(b)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 nodes := makeNodesWithTopologyZone(tt.NumNodes)
 pl := newPluginWithListers(ctx, b, []*v1.Pod{tt.Pod}, nodes, makePVCsWithPV(tt.NumPVC), makePVsWithZoneLabel(tt.NumPV))
@@ -188,7 +188,7 @@ func TestDryRunPreemption(t *testing.T) {

 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-logger, _ := ktesting.NewTestContext(t)
+logger, ctx := ktesting.NewTestContext(t)
 registeredPlugins := append([]tf.RegisterPluginFunc{
 tf.RegisterQueueSortPlugin(queuesort.Name, queuesort.New)},
 tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
@@ -202,7 +202,6 @@ func TestDryRunPreemption(t *testing.T) {
 }
 informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewClientset(objs...), 0)
 parallelism := parallelize.DefaultParallelism
-_, ctx := ktesting.NewTestContext(t)
 ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 fwk, err := tf.NewFramework(
@@ -239,7 +238,7 @@ func TestDryRunPreemption(t *testing.T) {
 Interface: fakePostPlugin,
 State: state,
 }
-got, _, _ := pe.DryRunPreemption(context.Background(), pod, nodeInfos, nil, 0, int32(len(nodeInfos)))
+got, _, _ := pe.DryRunPreemption(ctx, pod, nodeInfos, nil, 0, int32(len(nodeInfos)))
 // Sort the values (inner victims) and the candidate itself (by its NominatedNodeName).
 for i := range got {
 victims := got[i].Victims().Pods
@@ -290,7 +289,7 @@ func TestSelectCandidate(t *testing.T) {

 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-logger, _ := ktesting.NewTestContext(t)
+logger, ctx := ktesting.NewTestContext(t)
 nodes := make([]*v1.Node, len(tt.nodeNames))
 for i, nodeName := range tt.nodeNames {
 nodes[i] = st.MakeNode().Name(nodeName).Capacity(veryLargeRes).Obj()
@@ -306,7 +305,6 @@ func TestSelectCandidate(t *testing.T) {
 }
 informerFactory := informers.NewSharedInformerFactory(clientsetfake.NewClientset(objs...), 0)
 snapshot := internalcache.NewSnapshot(tt.testPods, nodes)
-_, ctx := ktesting.NewTestContext(t)
 ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 fwk, err := tf.NewFramework(
@@ -341,7 +339,7 @@ func TestSelectCandidate(t *testing.T) {
 Interface: fakePreemptionScorePostFilterPlugin,
 State: state,
 }
-candidates, _, _ := pe.DryRunPreemption(context.Background(), pod, nodeInfos, nil, 0, int32(len(nodeInfos)))
+candidates, _, _ := pe.DryRunPreemption(ctx, pod, nodeInfos, nil, 0, int32(len(nodeInfos)))
 s := pe.SelectCandidate(ctx, candidates)
 if s == nil || len(s.Name()) == 0 {
 t.Errorf("expect any node in %v, but no candidate selected", tt.expected)
@@ -951,6 +951,7 @@ func TestPreEnqueuePlugins(t *testing.T) {

 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
+_, ctx := ktesting.NewTestContext(t)
 registry := Registry{}
 cfgPls := &config.Plugins{}
 for _, pl := range tt.plugins {
@@ -969,7 +970,7 @@ func TestPreEnqueuePlugins(t *testing.T) {
 )
 }
 profile := config.KubeSchedulerProfile{Plugins: cfgPls}
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 f, err := newFrameworkWithQueueSortAndBind(ctx, registry, profile)
 if err != nil {
@@ -1089,7 +1090,8 @@ func TestRunPreScorePlugins(t *testing.T) {
 }
 }

-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()

 f, err := newFrameworkWithQueueSortAndBind(
@@ -1490,7 +1492,8 @@ func TestRunScorePlugins(t *testing.T) {
 Plugins: tt.plugins,
 PluginConfig: tt.pluginConfigs,
 }
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 f, err := newFrameworkWithQueueSortAndBind(ctx, registry, profile)
 if err != nil {
@@ -1536,7 +1539,8 @@ func TestPreFilterPlugins(t *testing.T) {
 plugins := &config.Plugins{PreFilter: config.PluginSet{Enabled: []config.Plugin{{Name: preFilterWithExtensionsPluginName}, {Name: preFilterPluginName}}}}
 t.Run("TestPreFilterPlugin", func(t *testing.T) {
 profile := config.KubeSchedulerProfile{Plugins: plugins}
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()

 f, err := newFrameworkWithQueueSortAndBind(ctx, r, profile)
@@ -1723,7 +1727,8 @@ func TestRunPreFilterPlugins(t *testing.T) {
 }
 }

-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()

 f, err := newFrameworkWithQueueSortAndBind(
@@ -1816,7 +1821,8 @@ func TestRunPreFilterExtensionRemovePod(t *testing.T) {
 }
 }

-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()

 f, err := newFrameworkWithQueueSortAndBind(
@@ -1903,7 +1909,8 @@ func TestRunPreFilterExtensionAddPod(t *testing.T) {
 }
 }

-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()

 f, err := newFrameworkWithQueueSortAndBind(
@@ -2096,6 +2103,7 @@ func TestFilterPlugins(t *testing.T) {

 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
+_, ctx := ktesting.NewTestContext(t)
 registry := Registry{}
 cfgPls := &config.Plugins{}
 for _, pl := range tt.plugins {
@@ -2113,7 +2121,7 @@ func TestFilterPlugins(t *testing.T) {
 config.Plugin{Name: pl.name})
 }
 profile := config.KubeSchedulerProfile{Plugins: cfgPls}
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()

 f, err := newFrameworkWithQueueSortAndBind(ctx, registry, profile)
@@ -2223,6 +2231,7 @@ func TestPostFilterPlugins(t *testing.T) {

 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
+_, ctx := ktesting.NewTestContext(t)
 registry := Registry{}
 cfgPls := &config.Plugins{}
 for _, pl := range tt.plugins {
@@ -2241,7 +2250,7 @@ func TestPostFilterPlugins(t *testing.T) {
 )
 }
 profile := config.KubeSchedulerProfile{Plugins: cfgPls}
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 f, err := newFrameworkWithQueueSortAndBind(ctx, registry, profile)
 if err != nil {
@@ -2358,7 +2367,7 @@ func TestFilterPluginsWithNominatedPods(t *testing.T) {

 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-logger, _ := ktesting.NewTestContext(t)
+logger, ctx := ktesting.NewTestContext(t)
 registry := Registry{}
 cfgPls := &config.Plugins{}

@@ -2408,7 +2417,7 @@ func TestFilterPluginsWithNominatedPods(t *testing.T) {
 &framework.NominatingInfo{NominatingMode: framework.ModeOverride, NominatedNodeName: nodeName})
 }
 profile := config.KubeSchedulerProfile{Plugins: cfgPls}
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 f, err := newFrameworkWithQueueSortAndBind(ctx, registry, profile, WithPodNominator(podNominator))
 if err != nil {
@@ -2551,6 +2560,7 @@ func TestPreBindPlugins(t *testing.T) {

 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
+_, ctx := ktesting.NewTestContext(t)
 registry := Registry{}
 configPlugins := &config.Plugins{}

@@ -2568,7 +2578,7 @@ func TestPreBindPlugins(t *testing.T) {
 )
 }
 profile := config.KubeSchedulerProfile{Plugins: configPlugins}
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 f, err := newFrameworkWithQueueSortAndBind(ctx, registry, profile)
 if err != nil {
@@ -2712,6 +2722,7 @@ func TestReservePlugins(t *testing.T) {

 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
+_, ctx := ktesting.NewTestContext(t)
 registry := Registry{}
 configPlugins := &config.Plugins{}

@@ -2729,7 +2740,7 @@ func TestReservePlugins(t *testing.T) {
 )
 }
 profile := config.KubeSchedulerProfile{Plugins: configPlugins}
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 f, err := newFrameworkWithQueueSortAndBind(ctx, registry, profile)
 defer func() {
@@ -2841,6 +2852,7 @@ func TestPermitPlugins(t *testing.T) {

 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
+_, ctx := ktesting.NewTestContext(t)
 registry := Registry{}
 configPlugins := &config.Plugins{}

@@ -2858,7 +2870,7 @@ func TestPermitPlugins(t *testing.T) {
 )
 }
 profile := config.KubeSchedulerProfile{Plugins: configPlugins}
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 f, err := newFrameworkWithQueueSortAndBind(ctx, registry, profile,
 WithWaitingPods(NewWaitingPodsMap()),
@@ -2891,86 +2903,86 @@ func TestRecordingMetrics(t *testing.T) {

 tests := []struct {
 name string
-action func(f framework.Framework)
+action func(ctx context.Context, f framework.Framework)
 inject injectedResult
 wantExtensionPoint string
 wantStatus framework.Code
 }{
 {
 name: "PreFilter - Success",
-action: func(f framework.Framework) { f.RunPreFilterPlugins(context.Background(), state, pod) },
+action: func(ctx context.Context, f framework.Framework) { f.RunPreFilterPlugins(ctx, state, pod) },
 wantExtensionPoint: "PreFilter",
 wantStatus: framework.Success,
 },
 {
 name: "PreScore - Success",
-action: func(f framework.Framework) { f.RunPreScorePlugins(context.Background(), state, pod, nil) },
+action: func(ctx context.Context, f framework.Framework) { f.RunPreScorePlugins(ctx, state, pod, nil) },
 wantExtensionPoint: "PreScore",
 wantStatus: framework.Success,
 },
 {
 name: "Score - Success",
-action: func(f framework.Framework) {
-f.RunScorePlugins(context.Background(), state, pod, BuildNodeInfos(nodes))
+action: func(ctx context.Context, f framework.Framework) {
+f.RunScorePlugins(ctx, state, pod, BuildNodeInfos(nodes))
 },
 wantExtensionPoint: "Score",
 wantStatus: framework.Success,
 },
 {
 name: "Reserve - Success",
-action: func(f framework.Framework) { f.RunReservePluginsReserve(context.Background(), state, pod, "") },
+action: func(ctx context.Context, f framework.Framework) { f.RunReservePluginsReserve(ctx, state, pod, "") },
 wantExtensionPoint: "Reserve",
 wantStatus: framework.Success,
 },
 {
 name: "Unreserve - Success",
-action: func(f framework.Framework) { f.RunReservePluginsUnreserve(context.Background(), state, pod, "") },
+action: func(ctx context.Context, f framework.Framework) { f.RunReservePluginsUnreserve(ctx, state, pod, "") },
 wantExtensionPoint: "Unreserve",
 wantStatus: framework.Success,
 },
 {
 name: "PreBind - Success",
-action: func(f framework.Framework) { f.RunPreBindPlugins(context.Background(), state, pod, "") },
+action: func(ctx context.Context, f framework.Framework) { f.RunPreBindPlugins(ctx, state, pod, "") },
 wantExtensionPoint: "PreBind",
 wantStatus: framework.Success,
 },
 {
 name: "Bind - Success",
-action: func(f framework.Framework) { f.RunBindPlugins(context.Background(), state, pod, "") },
+action: func(ctx context.Context, f framework.Framework) { f.RunBindPlugins(ctx, state, pod, "") },
 wantExtensionPoint: "Bind",
 wantStatus: framework.Success,
 },
 {
 name: "PostBind - Success",
-action: func(f framework.Framework) { f.RunPostBindPlugins(context.Background(), state, pod, "") },
+action: func(ctx context.Context, f framework.Framework) { f.RunPostBindPlugins(ctx, state, pod, "") },
 wantExtensionPoint: "PostBind",
 wantStatus: framework.Success,
 },
 {
 name: "Permit - Success",
-action: func(f framework.Framework) { f.RunPermitPlugins(context.Background(), state, pod, "") },
+action: func(ctx context.Context, f framework.Framework) { f.RunPermitPlugins(ctx, state, pod, "") },
 wantExtensionPoint: "Permit",
 wantStatus: framework.Success,
 },

 {
 name: "PreFilter - Error",
-action: func(f framework.Framework) { f.RunPreFilterPlugins(context.Background(), state, pod) },
+action: func(ctx context.Context, f framework.Framework) { f.RunPreFilterPlugins(ctx, state, pod) },
 inject: injectedResult{PreFilterStatus: int(framework.Error)},
 wantExtensionPoint: "PreFilter",
 wantStatus: framework.Error,
 },
 {
 name: "PreScore - Error",
-action: func(f framework.Framework) { f.RunPreScorePlugins(context.Background(), state, pod, nil) },
+action: func(ctx context.Context, f framework.Framework) { f.RunPreScorePlugins(ctx, state, pod, nil) },
 inject: injectedResult{PreScoreStatus: int(framework.Error)},
 wantExtensionPoint: "PreScore",
 wantStatus: framework.Error,
 },
 {
 name: "Score - Error",
-action: func(f framework.Framework) {
-f.RunScorePlugins(context.Background(), state, pod, BuildNodeInfos(nodes))
+action: func(ctx context.Context, f framework.Framework) {
+f.RunScorePlugins(ctx, state, pod, BuildNodeInfos(nodes))
 },
 inject: injectedResult{ScoreStatus: int(framework.Error)},
 wantExtensionPoint: "Score",
@@ -2978,35 +2990,35 @@ func TestRecordingMetrics(t *testing.T) {
 },
 {
 name: "Reserve - Error",
-action: func(f framework.Framework) { f.RunReservePluginsReserve(context.Background(), state, pod, "") },
+action: func(ctx context.Context, f framework.Framework) { f.RunReservePluginsReserve(ctx, state, pod, "") },
 inject: injectedResult{ReserveStatus: int(framework.Error)},
 wantExtensionPoint: "Reserve",
 wantStatus: framework.Error,
 },
 {
 name: "PreBind - Error",
-action: func(f framework.Framework) { f.RunPreBindPlugins(context.Background(), state, pod, "") },
+action: func(ctx context.Context, f framework.Framework) { f.RunPreBindPlugins(ctx, state, pod, "") },
 inject: injectedResult{PreBindStatus: int(framework.Error)},
 wantExtensionPoint: "PreBind",
 wantStatus: framework.Error,
 },
 {
 name: "Bind - Error",
-action: func(f framework.Framework) { f.RunBindPlugins(context.Background(), state, pod, "") },
+action: func(ctx context.Context, f framework.Framework) { f.RunBindPlugins(ctx, state, pod, "") },
 inject: injectedResult{BindStatus: int(framework.Error)},
 wantExtensionPoint: "Bind",
 wantStatus: framework.Error,
 },
 {
 name: "Permit - Error",
-action: func(f framework.Framework) { f.RunPermitPlugins(context.Background(), state, pod, "") },
+action: func(ctx context.Context, f framework.Framework) { f.RunPermitPlugins(ctx, state, pod, "") },
 inject: injectedResult{PermitStatus: int(framework.Error)},
 wantExtensionPoint: "Permit",
 wantStatus: framework.Error,
 },
 {
 name: "Permit - Wait",
-action: func(f framework.Framework) { f.RunPermitPlugins(context.Background(), state, pod, "") },
+action: func(ctx context.Context, f framework.Framework) { f.RunPermitPlugins(ctx, state, pod, "") },
 inject: injectedResult{PermitStatus: int(framework.Wait)},
 wantExtensionPoint: "Permit",
 wantStatus: framework.Wait,
@@ -3059,7 +3071,7 @@ func TestRecordingMetrics(t *testing.T) {
 _ = f.Close()
 }()

-tt.action(f)
+tt.action(ctx, f)

 // Stop the goroutine which records metrics and ensure it's stopped.
 cancel()
@@ -3175,7 +3187,7 @@ func TestRunBindPlugins(t *testing.T) {
 _ = fwk.Close()
 }()

-st := fwk.RunBindPlugins(context.Background(), state, pod, "")
+st := fwk.RunBindPlugins(ctx, state, pod, "")
 if st.Code() != tt.wantStatus {
 t.Errorf("got status code %s, want %s", st.Code(), tt.wantStatus)
 }
@@ -3208,6 +3220,7 @@ func TestPermitWaitDurationMetric(t *testing.T) {

 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
+_, ctx := ktesting.NewTestContext(t)
 metrics.Register()
 metrics.PermitWaitDuration.Reset()

@@ -3224,7 +3237,7 @@ func TestPermitWaitDurationMetric(t *testing.T) {
 Permit: config.PluginSet{Enabled: []config.Plugin{{Name: testPlugin, Weight: 1}}},
 }
 profile := config.KubeSchedulerProfile{Plugins: plugins}
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 f, err := newFrameworkWithQueueSortAndBind(ctx, r, profile,
 WithWaitingPods(NewWaitingPodsMap()),
@@ -3275,6 +3288,7 @@ func TestWaitOnPermit(t *testing.T) {

 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
+_, ctx := ktesting.NewTestContext(t)
 testPermitPlugin := &TestPermitPlugin{}
 r := make(Registry)
 r.Register(permitPlugin,
@@ -3285,7 +3299,7 @@ func TestWaitOnPermit(t *testing.T) {
 Permit: config.PluginSet{Enabled: []config.Plugin{{Name: permitPlugin, Weight: 1}}},
 }
 profile := config.KubeSchedulerProfile{Plugins: plugins}
-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 f, err := newFrameworkWithQueueSortAndBind(ctx, r, profile,
 WithWaitingPods(NewWaitingPodsMap()),
@@ -421,7 +421,8 @@ func TestSchedulerMultipleProfilesScheduling(t *testing.T) {
 &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ""}}}, nodes...)
 client := clientsetfake.NewClientset(objs...)
 broadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()

 informerFactory := informers.NewSharedInformerFactory(client, 0)
@@ -528,7 +529,8 @@ func TestSchedulerGuaranteeNonNilNodeInSchedulingCycle(t *testing.T) {
 t.Skip("Skip failing test on Windows.")
 }
 random := rand.New(rand.NewSource(time.Now().UnixNano()))
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()

 var (
@@ -1170,7 +1172,8 @@ func TestSchedulerWithVolumeBinding(t *testing.T) {

 for _, item := range table {
 t.Run(item.name, func(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 fakeVolumeBinder := volumebinding.NewFakeVolumeBinder(item.volumeBinderConfig)
 s, bindingChan, errChan := setupTestSchedulerWithVolumeBinding(ctx, t, fakeVolumeBinder, eventBroadcaster)
@@ -1476,7 +1479,8 @@ func TestUpdatePod(t *testing.T) {

 pod := st.MakePod().Name("foo").NominatedNodeName(test.currentNominatedNodeName).Conditions(test.currentPodConditions).Obj()

-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 if err := updatePod(ctx, cs, pod, test.newPodCondition, test.newNominatingInfo); err != nil {
 t.Fatalf("Error calling update: %v", err)
@@ -2535,7 +2539,8 @@ func TestSchedulerSchedulePod(t *testing.T) {
 }

 func TestFindFitAllError(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()

 nodes := makeNodeList([]string{"3", "2", "1"})
@@ -2578,7 +2583,8 @@ func TestFindFitAllError(t *testing.T) {
 }

 func TestFindFitSomeError(t *testing.T) {
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()

 nodes := makeNodeList([]string{"3", "2", "1"})
@@ -2812,7 +2818,8 @@ func TestZeroRequest(t *testing.T) {
 tf.RegisterScorePlugin(noderesources.BalancedAllocationName, frameworkruntime.FactoryAdapter(fts, noderesources.NewBalancedAllocation), 1),
 tf.RegisterBindPlugin(defaultbinder.Name, defaultbinder.New),
 }
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 fwk, err := tf.NewFramework(
 ctx,
@@ -3207,7 +3214,8 @@ func Test_prioritizeNodes(t *testing.T) {
 client := clientsetfake.NewClientset()
 informerFactory := informers.NewSharedInformerFactory(client, 0)

-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 cache := internalcache.New(ctx, time.Duration(0))
 for _, node := range test.nodes {
@@ -3328,7 +3336,8 @@ func TestFairEvaluationForNodes(t *testing.T) {
 nodeNames = append(nodeNames, strconv.Itoa(i))
 }
 nodes := makeNodeList(nodeNames)
-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 sched := makeScheduler(ctx, nodes)
@@ -32,6 +32,7 @@ import (
 clientsetfake "k8s.io/client-go/kubernetes/fake"
 clienttesting "k8s.io/client-go/testing"
 "k8s.io/klog/v2"
+"k8s.io/klog/v2/ktesting"
 extenderv1 "k8s.io/kube-scheduler/extender/v1"
 )

@@ -180,6 +181,7 @@ func TestRemoveNominatedNodeName(t *testing.T) {
 }
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
+_, ctx := ktesting.NewTestContext(t)
 actualPatchRequests := 0
 var actualPatchData string
 cs := &clientsetfake.Clientset{}
@@ -197,7 +199,7 @@ func TestRemoveNominatedNodeName(t *testing.T) {
 Status: v1.PodStatus{NominatedNodeName: test.currentNominatedNodeName},
 }

-ctx, cancel := context.WithCancel(context.Background())
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 if err := ClearNominatedNodeName(ctx, cs, pod); err != nil {
 t.Fatalf("Error calling removeNominatedNodeName: %v", err)
@@ -359,7 +361,8 @@ func TestPatchPodStatus(t *testing.T) {
 t.Fatal(err)
 }

-ctx, cancel := context.WithCancel(context.Background())
+_, ctx := ktesting.NewTestContext(t)
+ctx, cancel := context.WithCancel(ctx)
 defer cancel()
 err = PatchPodStatus(ctx, client, &tc.pod, &tc.statusToUpdate)
 if err != nil && tc.validateErr == nil {