Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-09-01 09:18:45 +00:00
Merge pull request #87298 from mikedanese/prectx
rename some declarations named context in tests
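The rename matters in Go because a local variable named context shadows the standard context package for the rest of the enclosing scope, so a test that later needs a real context.Context cannot refer to the package by name. A minimal sketch of that shadowing effect, using hypothetical names rather than code from this commit:

package main

import (
	"context" // the standard package that a variable named "context" would shadow
	"fmt"
	"time"
)

// testContext stands in for the integration tests' fixture struct (hypothetical here).
type testContext struct{ name string }

func main() {
	// If this variable were named "context", the package import of the same name
	// would be shadowed and context.WithTimeout below would not compile in this scope.
	testCtx := testContext{name: "scheduler-extender"}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	fmt.Println(testCtx.name, ctx.Err())
}

With the variable renamed to testCtx, the fixture and the standard package stay usable side by side.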
@@ -278,8 +278,8 @@ func machine3Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*extenderv1.HostPrior
}

func TestSchedulerExtender(t *testing.T) {
- context := initTestMaster(t, "scheduler-extender", nil)
- clientSet := context.clientSet
+ testCtx := initTestMaster(t, "scheduler-extender", nil)
+ clientSet := testCtx.clientSet

extender1 := &Extender{
name: "extender1",
@@ -348,10 +348,10 @@ func TestSchedulerExtender(t *testing.T) {
}
policy.APIVersion = "v1"

- context = initTestScheduler(t, context, false, &policy)
- defer cleanupTest(t, context)
+ testCtx = initTestScheduler(t, testCtx, false, &policy)
+ defer cleanupTest(t, testCtx)

- DoTestPodScheduling(context.ns, t, clientSet)
+ DoTestPodScheduling(testCtx.ns, t, clientSet)
}

func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) {
@@ -480,10 +480,10 @@ func TestPreFilterPlugin(t *testing.T) {
}

// Create the master and the scheduler with the test plugin set.
- context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "prefilter-plugin", nil), 2,
+ testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "prefilter-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
- defer cleanupTest(t, context)
+ defer cleanupTest(t, testCtx)

tests := []struct {
fail bool
@@ -507,18 +507,18 @@ func TestPreFilterPlugin(t *testing.T) {
preFilterPlugin.failPreFilter = test.fail
preFilterPlugin.rejectPreFilter = test.reject
// Create a best effort pod.
- pod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name}))
+ pod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil {
t.Errorf("Error while creating a test pod: %v", err)
}

if test.reject || test.fail {
- if err = waitForPodUnschedulable(context.clientSet, pod); err != nil {
+ if err = waitForPodUnschedulable(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Didn't expect the pod to be scheduled. error: %v", i, err)
}
} else {
- if err = waitForPodToSchedule(context.clientSet, pod); err != nil {
+ if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
}
}
@@ -528,7 +528,7 @@ func TestPreFilterPlugin(t *testing.T) {
}

preFilterPlugin.reset()
- cleanupPods(context.clientSet, t, []*v1.Pod{pod})
+ cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
}
}

@@ -551,29 +551,29 @@ func TestScorePlugin(t *testing.T) {
},
}

- context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "score-plugin", nil), 10,
+ testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "score-plugin", nil), 10,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
- defer cleanupTest(t, context)
+ defer cleanupTest(t, testCtx)

for i, fail := range []bool{false, true} {
scorePlugin.failScore = fail
// Create a best effort pod.
- pod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name}))
+ pod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil {
t.Fatalf("Error while creating a test pod: %v", err)
}

if fail {
- if err = waitForPodUnschedulable(context.clientSet, pod); err != nil {
+ if err = waitForPodUnschedulable(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Didn't expect the pod to be scheduled. error: %v", i, err)
}
} else {
- if err = waitForPodToSchedule(context.clientSet, pod); err != nil {
+ if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("Expected the pod to be scheduled. error: %v", err)
} else {
- p, err := getPod(context.clientSet, pod.Name, pod.Namespace)
+ p, err := getPod(testCtx.clientSet, pod.Name, pod.Namespace)
if err != nil {
t.Errorf("Failed to retrieve the pod. error: %v", err)
} else if p.Spec.NodeName != scorePlugin.highScoreNode {
@@ -587,7 +587,7 @@ func TestScorePlugin(t *testing.T) {
}

scorePlugin.reset()
- cleanupPods(context.clientSet, t, []*v1.Pod{pod})
+ cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
}
}

@@ -609,20 +609,20 @@ func TestNormalizeScorePlugin(t *testing.T) {
},
},
}
- context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "score-plugin", nil), 10,
+ testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "score-plugin", nil), 10,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))

- defer cleanupTest(t, context)
+ defer cleanupTest(t, testCtx)

// Create a best effort pod.
- pod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name}))
+ pod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil {
t.Fatalf("Error while creating a test pod: %v", err)
}

- if err = waitForPodToSchedule(context.clientSet, pod); err != nil {
+ if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("Expected the pod to be scheduled. error: %v", err)
}

@@ -654,27 +654,27 @@ func TestReservePlugin(t *testing.T) {
}

// Create the master and the scheduler with the test plugin set.
- context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "reserve-plugin", nil), 2,
+ testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "reserve-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
- defer cleanupTest(t, context)
+ defer cleanupTest(t, testCtx)

for _, fail := range []bool{false, true} {
reservePlugin.failReserve = fail
// Create a best effort pod.
- pod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name}))
+ pod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil {
t.Errorf("Error while creating a test pod: %v", err)
}

if fail {
if err = wait.Poll(10*time.Millisecond, 30*time.Second,
- podSchedulingError(context.clientSet, pod.Namespace, pod.Name)); err != nil {
+ podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
}
} else {
- if err = waitForPodToSchedule(context.clientSet, pod); err != nil {
+ if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("Expected the pod to be scheduled. error: %v", err)
}
}
@@ -684,7 +684,7 @@ func TestReservePlugin(t *testing.T) {
}

reservePlugin.reset()
- cleanupPods(context.clientSet, t, []*v1.Pod{pod})
+ cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
}
}

@@ -706,10 +706,10 @@ func TestPrebindPlugin(t *testing.T) {
}

// Create the master and the scheduler with the test plugin set.
- context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "prebind-plugin", nil), 2,
+ testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "prebind-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
- defer cleanupTest(t, context)
+ defer cleanupTest(t, testCtx)

tests := []struct {
fail bool
@@ -737,17 +737,17 @@ func TestPrebindPlugin(t *testing.T) {
preBindPlugin.failPreBind = test.fail
preBindPlugin.rejectPreBind = test.reject
// Create a best effort pod.
- pod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name}))
+ pod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil {
t.Errorf("Error while creating a test pod: %v", err)
}

if test.fail || test.reject {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(context.clientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err)
}
- } else if err = waitForPodToSchedule(context.clientSet, pod); err != nil {
+ } else if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
}

@@ -756,7 +756,7 @@ func TestPrebindPlugin(t *testing.T) {
}

preBindPlugin.reset()
- cleanupPods(context.clientSet, t, []*v1.Pod{pod})
+ cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
}
}

@@ -789,10 +789,10 @@ func TestUnreservePlugin(t *testing.T) {
}

// Create the master and the scheduler with the test plugin set.
- context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "unreserve-plugin", nil), 2,
+ testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "unreserve-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
- defer cleanupTest(t, context)
+ defer cleanupTest(t, testCtx)

tests := []struct {
preBindFail bool
@@ -809,21 +809,21 @@ func TestUnreservePlugin(t *testing.T) {
preBindPlugin.failPreBind = test.preBindFail

// Create a best effort pod.
- pod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name}))
+ pod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil {
t.Errorf("Error while creating a test pod: %v", err)
}

if test.preBindFail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(context.clientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err)
}
if unreservePlugin.numUnreserveCalled == 0 || unreservePlugin.numUnreserveCalled != preBindPlugin.numPreBindCalled {
t.Errorf("test #%v: Expected the unreserve plugin to be called %d times, was called %d times.", i, preBindPlugin.numPreBindCalled, unreservePlugin.numUnreserveCalled)
}
} else {
- if err = waitForPodToSchedule(context.clientSet, pod); err != nil {
+ if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
}
if unreservePlugin.numUnreserveCalled > 0 {
@@ -833,7 +833,7 @@ func TestUnreservePlugin(t *testing.T) {

unreservePlugin.reset()
preBindPlugin.reset()
- cleanupPods(context.clientSet, t, []*v1.Pod{pod})
+ cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
}
}

@@ -879,13 +879,13 @@ func TestBindPlugin(t *testing.T) {
}

// Create the master and the scheduler with the test plugin set.
- context := initTestSchedulerWithOptions(t, testContext, false, nil, time.Second,
+ testCtx := initTestSchedulerWithOptions(t, testContext, false, nil, time.Second,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
- defer cleanupTest(t, context)
+ defer cleanupTest(t, testCtx)

// Add a few nodes.
- _, err := createNodes(context.clientSet, "test-node", nil, 2)
+ _, err := createNodes(testCtx.clientSet, "test-node", nil, 2)
if err != nil {
t.Fatalf("Cannot create nodes: %v", err)
}
@@ -936,19 +936,19 @@ func TestBindPlugin(t *testing.T) {
postBindPlugin.pluginInvokeEventChan = pluginInvokeEventChan

// Create a best effort pod.
- pod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name}))
+ pod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil {
t.Errorf("Error while creating a test pod: %v", err)
}

if test.expectBoundByScheduler || test.expectBoundByPlugin {
// bind plugins skipped to bind the pod
- if err = waitForPodToSchedule(context.clientSet, pod); err != nil {
+ if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
continue
}
- pod, err = context.clientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
+ pod, err = testCtx.clientSet.CoreV1().Pods(pod.Namespace).Get(pod.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("can't get pod: %v", err)
}
@@ -981,7 +981,7 @@ func TestBindPlugin(t *testing.T) {
}
} else {
// bind plugin fails to bind the pod
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(context.clientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err)
}
if postBindPlugin.numPostBindCalled > 0 {
@@ -1006,7 +1006,7 @@ func TestBindPlugin(t *testing.T) {
bindPlugin1.reset()
bindPlugin2.reset()
unreservePlugin.reset()
- cleanupPods(context.clientSet, t, []*v1.Pod{pod})
+ cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
}
}

@@ -1039,10 +1039,10 @@ func TestPostBindPlugin(t *testing.T) {
}

// Create the master and the scheduler with the test plugin set.
- context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "postbind-plugin", nil), 2,
+ testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "postbind-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
- defer cleanupTest(t, context)
+ defer cleanupTest(t, testCtx)

tests := []struct {
preBindFail bool
@@ -1060,21 +1060,21 @@ func TestPostBindPlugin(t *testing.T) {
preBindPlugin.failPreBind = test.preBindFail

// Create a best effort pod.
- pod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name}))
+ pod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil {
t.Errorf("Error while creating a test pod: %v", err)
}

if test.preBindFail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(context.clientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err)
}
if postBindPlugin.numPostBindCalled > 0 {
t.Errorf("test #%v: Didn't expected the postbind plugin to be called %d times.", i, postBindPlugin.numPostBindCalled)
}
} else {
- if err = waitForPodToSchedule(context.clientSet, pod); err != nil {
+ if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
}
if postBindPlugin.numPostBindCalled == 0 {
@@ -1084,7 +1084,7 @@ func TestPostBindPlugin(t *testing.T) {

postBindPlugin.reset()
preBindPlugin.reset()
- cleanupPods(context.clientSet, t, []*v1.Pod{pod})
+ cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
}
}

@@ -1095,10 +1095,10 @@ func TestPermitPlugin(t *testing.T) {
registry, plugins := initRegistryAndConfig(perPlugin)

// Create the master and the scheduler with the test plugin set.
- context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "permit-plugin", nil), 2,
+ testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "permit-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
- defer cleanupTest(t, context)
+ defer cleanupTest(t, testCtx)

tests := []struct {
fail bool
@@ -1145,22 +1145,22 @@ func TestPermitPlugin(t *testing.T) {
perPlugin.waitAndAllowPermit = false

// Create a best effort pod.
- pod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name}))
+ pod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil {
t.Errorf("Error while creating a test pod: %v", err)
}
if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(context.clientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err)
}
} else {
if test.reject || test.timeout {
- if err = waitForPodUnschedulable(context.clientSet, pod); err != nil {
+ if err = waitForPodUnschedulable(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Didn't expect the pod to be scheduled. error: %v", i, err)
}
} else {
- if err = waitForPodToSchedule(context.clientSet, pod); err != nil {
+ if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
}
}
@@ -1171,7 +1171,7 @@ func TestPermitPlugin(t *testing.T) {
}

perPlugin.reset()
- cleanupPods(context.clientSet, t, []*v1.Pod{pod})
+ cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
}
}

@@ -1183,10 +1183,10 @@ func TestMultiplePermitPlugins(t *testing.T) {
registry, plugins := initRegistryAndConfig(perPlugin1, perPlugin2)

// Create the master and the scheduler with the test plugin set.
- context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "multi-permit-plugin", nil), 2,
+ testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "multi-permit-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
- defer cleanupTest(t, context)
+ defer cleanupTest(t, testCtx)

// Both permit plugins will return Wait for permitting
perPlugin1.timeoutPermit = true
@@ -1194,8 +1194,8 @@ func TestMultiplePermitPlugins(t *testing.T) {

// Create a test pod.
podName := "test-pod"
- pod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: podName, Namespace: context.ns.Name}))
+ pod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: podName, Namespace: testCtx.ns.Name}))
if err != nil {
t.Errorf("Error while creating a test pod: %v", err)
}
@@ -1219,7 +1219,7 @@ func TestMultiplePermitPlugins(t *testing.T) {
}

perPlugin2.allowAllPods()
- if err = waitForPodToSchedule(context.clientSet, pod); err != nil {
+ if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("Expected the pod to be scheduled. error: %v", err)
}

@@ -1227,7 +1227,7 @@ func TestMultiplePermitPlugins(t *testing.T) {
t.Errorf("Expected the permit plugin to be called.")
}

- cleanupPods(context.clientSet, t, []*v1.Pod{pod})
+ cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
}

// TestPermitPluginsCancelled tests whether all permit plugins are cancelled when pod is rejected.
@@ -1238,10 +1238,10 @@ func TestPermitPluginsCancelled(t *testing.T) {
registry, plugins := initRegistryAndConfig(perPlugin1, perPlugin2)

// Create the master and the scheduler with the test plugin set.
- context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "permit-plugins", nil), 2,
+ testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "permit-plugins", nil), 2,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
- defer cleanupTest(t, context)
+ defer cleanupTest(t, testCtx)

// Both permit plugins will return Wait for permitting
perPlugin1.timeoutPermit = true
@@ -1249,8 +1249,8 @@ func TestPermitPluginsCancelled(t *testing.T) {

// Create a test pod.
podName := "test-pod"
- pod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: podName, Namespace: context.ns.Name}))
+ pod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: podName, Namespace: testCtx.ns.Name}))
if err != nil {
t.Errorf("Error while creating a test pod: %v", err)
}
@@ -1279,10 +1279,10 @@ func TestCoSchedulingWithPermitPlugin(t *testing.T) {
registry, plugins := initRegistryAndConfig(permitPlugin)

// Create the master and the scheduler with the test plugin set.
- context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "permit-plugin", nil), 2,
+ testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "permit-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
- defer cleanupTest(t, context)
+ defer cleanupTest(t, testCtx)

tests := []struct {
waitReject bool
@@ -1306,29 +1306,29 @@ func TestCoSchedulingWithPermitPlugin(t *testing.T) {
permitPlugin.waitAndAllowPermit = test.waitAllow

// Create two pods.
- waitingPod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: "waiting-pod", Namespace: context.ns.Name}))
+ waitingPod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: "waiting-pod", Namespace: testCtx.ns.Name}))
if err != nil {
t.Errorf("Error while creating the waiting pod: %v", err)
}
- signallingPod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: "signalling-pod", Namespace: context.ns.Name}))
+ signallingPod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: "signalling-pod", Namespace: testCtx.ns.Name}))
if err != nil {
t.Errorf("Error while creating the signalling pod: %v", err)
}

if test.waitReject {
- if err = waitForPodUnschedulable(context.clientSet, waitingPod); err != nil {
+ if err = waitForPodUnschedulable(testCtx.clientSet, waitingPod); err != nil {
t.Errorf("test #%v: Didn't expect the waiting pod to be scheduled. error: %v", i, err)
}
- if err = waitForPodUnschedulable(context.clientSet, signallingPod); err != nil {
+ if err = waitForPodUnschedulable(testCtx.clientSet, signallingPod); err != nil {
t.Errorf("test #%v: Didn't expect the signalling pod to be scheduled. error: %v", i, err)
}
} else {
- if err = waitForPodToSchedule(context.clientSet, waitingPod); err != nil {
+ if err = waitForPodToSchedule(testCtx.clientSet, waitingPod); err != nil {
t.Errorf("test #%v: Expected the waiting pod to be scheduled. error: %v", i, err)
}
- if err = waitForPodToSchedule(context.clientSet, signallingPod); err != nil {
+ if err = waitForPodToSchedule(testCtx.clientSet, signallingPod); err != nil {
t.Errorf("test #%v: Expected the signalling pod to be scheduled. error: %v", i, err)
}
}
@@ -1338,7 +1338,7 @@ func TestCoSchedulingWithPermitPlugin(t *testing.T) {
}

permitPlugin.reset()
- cleanupPods(context.clientSet, t, []*v1.Pod{waitingPod, signallingPod})
+ cleanupPods(testCtx.clientSet, t, []*v1.Pod{waitingPod, signallingPod})
}
}

@@ -1360,26 +1360,26 @@ func TestFilterPlugin(t *testing.T) {
}

// Create the master and the scheduler with the test plugin set.
- context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "filter-plugin", nil), 2,
+ testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "filter-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
- defer cleanupTest(t, context)
+ defer cleanupTest(t, testCtx)

for _, fail := range []bool{false, true} {
filterPlugin.failFilter = fail
// Create a best effort pod.
- pod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name}))
+ pod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil {
t.Errorf("Error while creating a test pod: %v", err)
}

if fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, podUnschedulable(context.clientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.Poll(10*time.Millisecond, 30*time.Second, podUnschedulable(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Didn't expect the pod to be scheduled.")
}
} else {
- if err = waitForPodToSchedule(context.clientSet, pod); err != nil {
+ if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("Expected the pod to be scheduled. error: %v", err)
}
}
@@ -1389,7 +1389,7 @@ func TestFilterPlugin(t *testing.T) {
}

filterPlugin.reset()
- cleanupPods(context.clientSet, t, []*v1.Pod{pod})
+ cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
}
}

@@ -1411,26 +1411,26 @@ func TestPostFilterPlugin(t *testing.T) {
}

// Create the master and the scheduler with the test plugin set.
- context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "post-filter-plugin", nil), 2,
+ testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "post-filter-plugin", nil), 2,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
- defer cleanupTest(t, context)
+ defer cleanupTest(t, testCtx)

for _, fail := range []bool{false, true} {
postFilterPlugin.failPostFilter = fail
// Create a best effort pod.
- pod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: "test-pod", Namespace: context.ns.Name}))
+ pod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
if err != nil {
t.Errorf("Error while creating a test pod: %v", err)
}

if fail {
- if err = waitForPodUnschedulable(context.clientSet, pod); err != nil {
+ if err = waitForPodUnschedulable(testCtx.clientSet, pod); err != nil {
t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
}
} else {
- if err = waitForPodToSchedule(context.clientSet, pod); err != nil {
+ if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
t.Errorf("Expected the pod to be scheduled. error: %v", err)
}
}
@@ -1440,7 +1440,7 @@ func TestPostFilterPlugin(t *testing.T) {
}

postFilterPlugin.reset()
- cleanupPods(context.clientSet, t, []*v1.Pod{pod})
+ cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
}
}

@@ -1451,10 +1451,10 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
registry, plugins := initRegistryAndConfig(permitPlugin)

// Create the master and the scheduler with the test plugin set.
- context := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "preempt-with-permit-plugin", nil), 0,
+ testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "preempt-with-permit-plugin", nil), 0,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
- defer cleanupTest(t, context)
+ defer cleanupTest(t, testCtx)

// Add one node.
nodeRes := &v1.ResourceList{
@@ -1462,7 +1462,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
}
- _, err := createNodes(context.clientSet, "test-node", nodeRes, 1)
+ _, err := createNodes(testCtx.clientSet, "test-node", nodeRes, 1)
if err != nil {
t.Fatalf("Cannot create nodes: %v", err)
}
@@ -1481,9 +1481,9 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
}

// Create two pods.
- waitingPod := initPausePod(context.clientSet, &pausePodConfig{Name: "waiting-pod", Namespace: context.ns.Name, Priority: &lowPriority, Resources: &resourceRequest})
+ waitingPod := initPausePod(testCtx.clientSet, &pausePodConfig{Name: "waiting-pod", Namespace: testCtx.ns.Name, Priority: &lowPriority, Resources: &resourceRequest})
waitingPod.Spec.TerminationGracePeriodSeconds = new(int64)
- waitingPod, err = createPausePod(context.clientSet, waitingPod)
+ waitingPod, err = createPausePod(testCtx.clientSet, waitingPod)
if err != nil {
t.Errorf("Error while creating the waiting pod: %v", err)
}
@@ -1494,17 +1494,17 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
return w, nil
})

- preemptorPod, err := createPausePod(context.clientSet,
- initPausePod(context.clientSet, &pausePodConfig{Name: "preemptor-pod", Namespace: context.ns.Name, Priority: &highPriority, Resources: &resourceRequest}))
+ preemptorPod, err := createPausePod(testCtx.clientSet,
+ initPausePod(testCtx.clientSet, &pausePodConfig{Name: "preemptor-pod", Namespace: testCtx.ns.Name, Priority: &highPriority, Resources: &resourceRequest}))
if err != nil {
t.Errorf("Error while creating the preemptor pod: %v", err)
}

- if err = waitForPodToSchedule(context.clientSet, preemptorPod); err != nil {
+ if err = waitForPodToSchedule(testCtx.clientSet, preemptorPod); err != nil {
t.Errorf("Expected the preemptor pod to be scheduled. error: %v", err)
}

- if _, err := getPod(context.clientSet, waitingPod.Name, waitingPod.Namespace); err == nil {
+ if _, err := getPod(testCtx.clientSet, waitingPod.Name, waitingPod.Namespace); err == nil {
t.Error("Expected the waiting pod to get preempted and deleted")
}

@@ -1513,11 +1513,11 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
}

permitPlugin.reset()
- cleanupPods(context.clientSet, t, []*v1.Pod{waitingPod, preemptorPod})
+ cleanupPods(testCtx.clientSet, t, []*v1.Pod{waitingPod, preemptorPod})
}

- func initTestSchedulerForFrameworkTest(t *testing.T, context *testContext, nodeCount int, opts ...scheduler.Option) *testContext {
- c := initTestSchedulerWithOptions(t, context, false, nil, time.Second, opts...)
+ func initTestSchedulerForFrameworkTest(t *testing.T, testCtx *testContext, nodeCount int, opts ...scheduler.Option) *testContext {
+ c := initTestSchedulerWithOptions(t, testCtx, false, nil, time.Second, opts...)
if nodeCount > 0 {
_, err := createNodes(c.clientSet, "test-node", nil, nodeCount)
if err != nil {
@@ -29,11 +29,11 @@ import (
func TestNodeResourceLimits(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ResourceLimitsPriorityFunction, true)()

- context := initTest(t, "node-resource-limits")
- defer cleanupTest(t, context)
+ testCtx := initTest(t, "node-resource-limits")
+ defer cleanupTest(t, testCtx)

// Add one node
- expectedNode, err := createNode(context.clientSet, "test-node1", &v1.ResourceList{
+ expectedNode, err := createNode(testCtx.clientSet, "test-node1", &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(2000, resource.DecimalSI),
@@ -43,7 +43,7 @@ func TestNodeResourceLimits(t *testing.T) {
}

// Add another node with less resource
- _, err = createNode(context.clientSet, "test-node2", &v1.ResourceList{
+ _, err = createNode(testCtx.clientSet, "test-node2", &v1.ResourceList{
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
v1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(1000, resource.DecimalSI),
@@ -53,9 +53,9 @@ func TestNodeResourceLimits(t *testing.T) {
}

podName := "pod-with-resource-limits"
- pod, err := runPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{
+ pod, err := runPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{
Name: podName,
- Namespace: context.ns.Name,
+ Namespace: testCtx.ns.Name,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI)},
@@ -40,10 +40,10 @@ const pollInterval = 100 * time.Millisecond
// TestInterPodAffinity verifies that scheduler's inter pod affinity and
// anti-affinity predicate functions works correctly.
func TestInterPodAffinity(t *testing.T) {
- context := initTest(t, "inter-pod-affinity")
- defer cleanupTest(t, context)
+ testCtx := initTest(t, "inter-pod-affinity")
+ defer cleanupTest(t, testCtx)
// Add a few nodes.
- nodes, err := createNodes(context.clientSet, "testnode", nil, 2)
+ nodes, err := createNodes(testCtx.clientSet, "testnode", nil, 2)
if err != nil {
t.Fatalf("Cannot create nodes: %v", err)
}
@@ -53,15 +53,15 @@ func TestInterPodAffinity(t *testing.T) {
"zone": "z11",
}
for _, node := range nodes {
- if err = testutils.AddLabelsToNode(context.clientSet, node.Name, labels1); err != nil {
+ if err = testutils.AddLabelsToNode(testCtx.clientSet, node.Name, labels1); err != nil {
t.Fatalf("Cannot add labels to node: %v", err)
}
- if err = waitForNodeLabels(context.clientSet, node.Name, labels1); err != nil {
+ if err = waitForNodeLabels(testCtx.clientSet, node.Name, labels1); err != nil {
t.Fatalf("Adding labels to node didn't succeed: %v", err)
}
}

- cs := context.clientSet
+ cs := testCtx.clientSet
podLabel := map[string]string{"service": "securityscan"}
podLabel2 := map[string]string{"security": "S1"}

@@ -821,7 +821,7 @@ func TestInterPodAffinity(t *testing.T) {
if pod.Namespace != "" {
nsName = pod.Namespace
} else {
- nsName = context.ns.Name
+ nsName = testCtx.ns.Name
}
createdPod, err := cs.CoreV1().Pods(nsName).Create(pod)
if err != nil {
@@ -832,7 +832,7 @@ func TestInterPodAffinity(t *testing.T) {
t.Errorf("Test Failed: error, %v, while waiting for pod during test, %v", err, test)
}
}
- testPod, err := cs.CoreV1().Pods(context.ns.Name).Create(test.pod)
+ testPod, err := cs.CoreV1().Pods(testCtx.ns.Name).Create(test.pod)
if err != nil {
if !(test.errorType == "invalidPod" && apierrors.IsInvalid(err)) {
t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test)
@@ -848,11 +848,11 @@ func TestInterPodAffinity(t *testing.T) {
t.Errorf("Test Failed: %v, err %v, test.fits %v", test.test, err, test.fits)
}

- err = cs.CoreV1().Pods(context.ns.Name).Delete(test.pod.Name, metav1.NewDeleteOptions(0))
+ err = cs.CoreV1().Pods(testCtx.ns.Name).Delete(test.pod.Name, metav1.NewDeleteOptions(0))
if err != nil {
t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
}
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podDeleted(cs, context.ns.Name, test.pod.Name))
+ err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podDeleted(cs, testCtx.ns.Name, test.pod.Name))
if err != nil {
t.Errorf("Test Failed: error, %v, while waiting for pod to get deleted, %v", err, test.test)
}
@@ -861,7 +861,7 @@ func TestInterPodAffinity(t *testing.T) {
if pod.Namespace != "" {
nsName = pod.Namespace
} else {
- nsName = context.ns.Name
+ nsName = testCtx.ns.Name
}
err = cs.CoreV1().Pods(nsName).Delete(pod.Name, metav1.NewDeleteOptions(0))
if err != nil {
@@ -879,10 +879,10 @@ func TestInterPodAffinity(t *testing.T) {
func TestEvenPodsSpreadPredicate(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EvenPodsSpread, true)()

- context := initTest(t, "eps-predicate")
- cs := context.clientSet
- ns := context.ns.Name
- defer cleanupTest(t, context)
+ testCtx := initTest(t, "eps-predicate")
+ cs := testCtx.clientSet
+ ns := testCtx.ns.Name
+ defer cleanupTest(t, testCtx)
// Add 4 nodes.
nodes, err := createNodes(cs, "node", nil, 4)
if err != nil {
@@ -144,14 +144,14 @@ func TestPreemption(t *testing.T) {
},
},
}
- context := initTestSchedulerWithOptions(t,
+ testCtx := initTestSchedulerWithOptions(t,
initTestMaster(t, "preemptiom", nil),
false, nil, time.Second,
scheduler.WithFrameworkPlugins(plugins),
scheduler.WithFrameworkOutOfTreeRegistry(registry))

- defer cleanupTest(t, context)
- cs := context.clientSet
+ defer cleanupTest(t, testCtx)
+ cs := testCtx.clientSet

defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
@@ -171,9 +171,9 @@ func TestPreemption(t *testing.T) {
description: "basic pod preemption",
initTokens: maxTokens,
existingPods: []*v1.Pod{
- initPausePod(context.clientSet, &pausePodConfig{
+ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "victim-pod",
- Namespace: context.ns.Name,
+ Namespace: testCtx.ns.Name,
Priority: &lowPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
@@ -183,7 +183,7 @@ func TestPreemption(t *testing.T) {
},
pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod",
- Namespace: context.ns.Name,
+ Namespace: testCtx.ns.Name,
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
@@ -196,9 +196,9 @@ func TestPreemption(t *testing.T) {
description: "basic pod preemption with filter",
initTokens: 1,
existingPods: []*v1.Pod{
- initPausePod(context.clientSet, &pausePodConfig{
+ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "victim-pod",
- Namespace: context.ns.Name,
+ Namespace: testCtx.ns.Name,
Priority: &lowPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
@@ -208,7 +208,7 @@ func TestPreemption(t *testing.T) {
},
pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod",
- Namespace: context.ns.Name,
+ Namespace: testCtx.ns.Name,
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
@@ -223,9 +223,9 @@ func TestPreemption(t *testing.T) {
initTokens: 1,
unresolvable: true,
existingPods: []*v1.Pod{
- initPausePod(context.clientSet, &pausePodConfig{
+ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "victim-pod",
- Namespace: context.ns.Name,
+ Namespace: testCtx.ns.Name,
Priority: &lowPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
@@ -235,7 +235,7 @@ func TestPreemption(t *testing.T) {
},
pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod",
- Namespace: context.ns.Name,
+ Namespace: testCtx.ns.Name,
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
@@ -249,13 +249,13 @@ func TestPreemption(t *testing.T) {
initTokens: maxTokens,
existingPods: []*v1.Pod{
initPausePod(cs, &pausePodConfig{
- Name: "pod-0", Namespace: context.ns.Name,
+ Name: "pod-0", Namespace: testCtx.ns.Name,
Priority: &mediumPriority,
Labels: map[string]string{"pod": "p0"},
Resources: defaultPodRes,
}),
initPausePod(cs, &pausePodConfig{
- Name: "pod-1", Namespace: context.ns.Name,
+ Name: "pod-1", Namespace: testCtx.ns.Name,
Priority: &lowPriority,
Labels: map[string]string{"pod": "p1"},
Resources: defaultPodRes,
@@ -282,7 +282,7 @@ func TestPreemption(t *testing.T) {
// A higher priority pod with anti-affinity.
pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod",
- Namespace: context.ns.Name,
+ Namespace: testCtx.ns.Name,
Priority: &highPriority,
Labels: map[string]string{"pod": "preemptor"},
Resources: defaultPodRes,
@@ -313,13 +313,13 @@ func TestPreemption(t *testing.T) {
initTokens: maxTokens,
existingPods: []*v1.Pod{
initPausePod(cs, &pausePodConfig{
- Name: "pod-0", Namespace: context.ns.Name,
+ Name: "pod-0", Namespace: testCtx.ns.Name,
Priority: &mediumPriority,
Labels: map[string]string{"pod": "p0"},
Resources: defaultPodRes,
}),
initPausePod(cs, &pausePodConfig{
- Name: "pod-1", Namespace: context.ns.Name,
+ Name: "pod-1", Namespace: testCtx.ns.Name,
Priority: &highPriority,
Labels: map[string]string{"pod": "p1"},
Resources: defaultPodRes,
@@ -346,7 +346,7 @@ func TestPreemption(t *testing.T) {
// A higher priority pod with anti-affinity.
pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod",
- Namespace: context.ns.Name,
+ Namespace: testCtx.ns.Name,
Priority: &highPriority,
Labels: map[string]string{"pod": "preemptor"},
Resources: defaultPodRes,
@@ -379,15 +379,15 @@ func TestPreemption(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
}
- node, err := createNode(context.clientSet, "node1", nodeRes)
+ node, err := createNode(testCtx.clientSet, "node1", nodeRes)
if err != nil {
t.Fatalf("Error creating nodes: %v", err)
}
nodeLabels := map[string]string{"node": node.Name}
- if err = testutils.AddLabelsToNode(context.clientSet, node.Name, nodeLabels); err != nil {
+ if err = testutils.AddLabelsToNode(testCtx.clientSet, node.Name, nodeLabels); err != nil {
t.Fatalf("Cannot add labels to node: %v", err)
}
- if err = waitForNodeLabels(context.clientSet, node.Name, nodeLabels); err != nil {
+ if err = waitForNodeLabels(testCtx.clientSet, node.Name, nodeLabels); err != nil {
t.Fatalf("Adding labels to node didn't succeed: %v", err)
}

@@ -436,9 +436,9 @@ func TestPreemption(t *testing.T) {
// TestDisablePreemption tests disable pod preemption of scheduler works as expected.
func TestDisablePreemption(t *testing.T) {
// Initialize scheduler, and disable preemption.
- context := initTestDisablePreemption(t, "disable-preemption")
- defer cleanupTest(t, context)
- cs := context.clientSet
+ testCtx := initTestDisablePreemption(t, "disable-preemption")
+ defer cleanupTest(t, testCtx)
+ cs := testCtx.clientSet

tests := []struct {
description string
@@ -448,9 +448,9 @@ func TestDisablePreemption(t *testing.T) {
{
description: "pod preemption will not happen",
existingPods: []*v1.Pod{
- initPausePod(context.clientSet, &pausePodConfig{
+ initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "victim-pod",
- Namespace: context.ns.Name,
+ Namespace: testCtx.ns.Name,
Priority: &lowPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
@@ -460,7 +460,7 @@ func TestDisablePreemption(t *testing.T) {
},
pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod",
- Namespace: context.ns.Name,
+ Namespace: testCtx.ns.Name,
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
@@ -476,7 +476,7 @@ func TestDisablePreemption(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
}
- _, err := createNode(context.clientSet, "node1", nodeRes)
+ _, err := createNode(testCtx.clientSet, "node1", nodeRes)
if err != nil {
t.Fatalf("Error creating nodes: %v", err)
}
@@ -516,20 +516,20 @@ func TestDisablePreemption(t *testing.T) {
// This test verifies that system critical priorities are created automatically and resolved properly.
func TestPodPriorityResolution(t *testing.T) {
admission := priority.NewPlugin()
- context := initTestScheduler(t, initTestMaster(t, "preemption", admission), true, nil)
- defer cleanupTest(t, context)
- cs := context.clientSet
+ testCtx := initTestScheduler(t, initTestMaster(t, "preemption", admission), true, nil)
+ defer cleanupTest(t, testCtx)
+ cs := testCtx.clientSet

// Build clientset and informers for controllers.
externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{
QPS: -1,
- Host: context.httpServer.URL,
+ Host: testCtx.httpServer.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second)
admission.SetExternalKubeClientSet(externalClientset)
admission.SetExternalKubeInformerFactory(externalInformers)
- externalInformers.Start(context.ctx.Done())
- externalInformers.WaitForCacheSync(context.ctx.Done())
+ externalInformers.Start(testCtx.ctx.Done())
+ externalInformers.WaitForCacheSync(testCtx.ctx.Done())

tests := []struct {
Name string
@@ -577,7 +577,7 @@ func TestPodPriorityResolution(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
}
- _, err := createNode(context.clientSet, "node1", nodeRes)
+ _, err := createNode(testCtx.clientSet, "node1", nodeRes)
if err != nil {
t.Fatalf("Error creating nodes: %v", err)
}
@@ -633,9 +633,9 @@ func mkPriorityPodWithGrace(tc *testContext, name string, priority int32, grace
// after preemption and while the higher priority pods is not scheduled yet.
func TestPreemptionStarvation(t *testing.T) {
// Initialize scheduler.
- context := initTest(t, "preemption")
- defer cleanupTest(t, context)
- cs := context.clientSet
+ testCtx := initTest(t, "preemption")
+ defer cleanupTest(t, testCtx)
+ cs := testCtx.clientSet

tests := []struct {
description string
@@ -652,7 +652,7 @@ func TestPreemptionStarvation(t *testing.T) {
numExpectedPending: 5,
preemptor: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod",
- Namespace: context.ns.Name,
+ Namespace: testCtx.ns.Name,
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
@@ -668,7 +668,7 @@ func TestPreemptionStarvation(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
}
- _, err := createNode(context.clientSet, "node1", nodeRes)
+ _, err := createNode(testCtx.clientSet, "node1", nodeRes)
if err != nil {
t.Fatalf("Error creating nodes: %v", err)
}
@@ -679,7 +679,7 @@ func TestPreemptionStarvation(t *testing.T) {
runningPods := make([]*v1.Pod, numRunningPods)
// Create and run existingPods.
for i := 0; i < numRunningPods; i++ {
- runningPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(context, fmt.Sprintf("rpod-%v", i), mediumPriority, 0))
+ runningPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("rpod-%v", i), mediumPriority, 0))
if err != nil {
t.Fatalf("Test [%v]: Error creating pause pod: %v", test.description, err)
}
@@ -692,7 +692,7 @@ func TestPreemptionStarvation(t *testing.T) {
}
// Create pending pods.
for i := 0; i < test.numExpectedPending; i++ {
- pendingPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(context, fmt.Sprintf("ppod-%v", i), mediumPriority, 0))
+ pendingPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("ppod-%v", i), mediumPriority, 0))
if err != nil {
t.Fatalf("Test [%v]: Error creating pending pod: %v", test.description, err)
}
@@ -730,9 +730,9 @@ func TestPreemptionStarvation(t *testing.T) {
// race with the preemption process.
func TestPreemptionRaces(t *testing.T) {
// Initialize scheduler.
- context := initTest(t, "preemption-race")
- defer cleanupTest(t, context)
- cs := context.clientSet
+ testCtx := initTest(t, "preemption-race")
+ defer cleanupTest(t, testCtx)
+ cs := testCtx.clientSet

tests := []struct {
description string
@@ -751,7 +751,7 @@ func TestPreemptionRaces(t *testing.T) {
numRepetitions: 10,
preemptor: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod",
- Namespace: context.ns.Name,
+ Namespace: testCtx.ns.Name,
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(4900, resource.DecimalSI),
@@ -767,7 +767,7 @@ func TestPreemptionRaces(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(5000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(5000, resource.DecimalSI),
}
- _, err := createNode(context.clientSet, "node1", nodeRes)
+ _, err := createNode(testCtx.clientSet, "node1", nodeRes)
if err != nil {
t.Fatalf("Error creating nodes: %v", err)
}
@@ -781,7 +781,7 @@ func TestPreemptionRaces(t *testing.T) {
additionalPods := make([]*v1.Pod, test.numAdditionalPods)
// Create and run existingPods.
for i := 0; i < test.numInitialPods; i++ {
- initialPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(context, fmt.Sprintf("rpod-%v", i), mediumPriority, 0))
+ initialPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("rpod-%v", i), mediumPriority, 0))
if err != nil {
t.Fatalf("Test [%v]: Error creating pause pod: %v", test.description, err)
}
@@ -801,7 +801,7 @@ func TestPreemptionRaces(t *testing.T) {

klog.Info("Creating additional pods...")
for i := 0; i < test.numAdditionalPods; i++ {
- additionalPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(context, fmt.Sprintf("ppod-%v", i), mediumPriority, 0))
+ additionalPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("ppod-%v", i), mediumPriority, 0))
if err != nil {
t.Fatalf("Test [%v]: Error creating pending pod: %v", test.description, err)
}
@@ -851,12 +851,12 @@ func TestPreemptionRaces(t *testing.T) {
// node name of the medium priority pod is cleared.
func TestNominatedNodeCleanUp(t *testing.T) {
// Initialize scheduler.
context := initTest(t, "preemption")
defer cleanupTest(t, context)
testCtx := initTest(t, "preemption")
defer cleanupTest(t, testCtx)

cs := context.clientSet
cs := testCtx.clientSet

defer cleanupPodsInNamespace(cs, t, context.ns.Name)
defer cleanupPodsInNamespace(cs, t, testCtx.ns.Name)

// Create a node with some resources and a label.
nodeRes := &v1.ResourceList{
@@ -864,7 +864,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
}
_, err := createNode(context.clientSet, "node1", nodeRes)
_, err := createNode(testCtx.clientSet, "node1", nodeRes)
if err != nil {
t.Fatalf("Error creating nodes: %v", err)
}
@@ -872,7 +872,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
// Step 1. Create a few low priority pods.
lowPriPods := make([]*v1.Pod, 4)
for i := 0; i < len(lowPriPods); i++ {
lowPriPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(context, fmt.Sprintf("lpod-%v", i), lowPriority, 60))
lowPriPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("lpod-%v", i), lowPriority, 60))
if err != nil {
t.Fatalf("Error creating pause pod: %v", err)
}
@@ -886,7 +886,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
// Step 2. Create a medium priority pod.
podConf := initPausePod(cs, &pausePodConfig{
Name: "medium-priority",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &mediumPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
@@ -904,7 +904,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
// Step 4. Create a high priority pod.
podConf = initPausePod(cs, &pausePodConfig{
Name: "high-priority",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
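The behavior under test surfaces on the medium-priority pod's status: the scheduler records a nominated node for a pod that preempts, and must clear it again once that pod is pushed back to the queue. A minimal sketch of the kind of assertion involved (the pod name mirrors the test, the check itself is an illustration, not the test's actual code):

p, err := cs.CoreV1().Pods(ns).Get("medium-priority", metav1.GetOptions{})
if err != nil {
	t.Fatal(err)
}
// NominatedNodeName is set while a preemptor waits for its victims to exit.
if p.Status.NominatedNodeName != "" {
	t.Errorf("expected nominated node name to be cleared, got %q", p.Status.NominatedNodeName)
}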
@@ -963,11 +963,11 @@ func addPodConditionReady(pod *v1.Pod) {
// TestPDBInPreemption tests PodDisruptionBudget support in preemption.
func TestPDBInPreemption(t *testing.T) {
// Initialize scheduler.
context := initTest(t, "preemption-pdb")
defer cleanupTest(t, context)
cs := context.clientSet
testCtx := initTest(t, "preemption-pdb")
defer cleanupTest(t, testCtx)
cs := testCtx.clientSet

initDisruptionController(t, context)
initDisruptionController(t, testCtx)

defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
@@ -997,34 +997,34 @@ func TestPDBInPreemption(t *testing.T) {
description: "A non-PDB violating pod is preempted despite its higher priority",
nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
pdbs: []*policy.PodDisruptionBudget{
mkMinAvailablePDB("pdb-1", context.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
mkMinAvailablePDB("pdb-1", testCtx.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
},
pdbPodNum: []int32{2},
existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{
initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod1",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &lowPriority,
Resources: defaultPodRes,
Labels: map[string]string{"foo": "bar"},
}),
initPausePod(context.clientSet, &pausePodConfig{
initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod2",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &lowPriority,
Resources: defaultPodRes,
Labels: map[string]string{"foo": "bar"},
}),
initPausePod(context.clientSet, &pausePodConfig{
initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "mid-pod3",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &mediumPriority,
Resources: defaultPodRes,
}),
},
pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
@@ -1040,21 +1040,21 @@ func TestPDBInPreemption(t *testing.T) {
{name: "node-2", res: defaultNodeRes},
},
pdbs: []*policy.PodDisruptionBudget{
mkMinAvailablePDB("pdb-1", context.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
mkMinAvailablePDB("pdb-1", testCtx.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
},
pdbPodNum: []int32{1},
existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{
initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod1",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &lowPriority,
Resources: defaultPodRes,
NodeName: "node-1",
Labels: map[string]string{"foo": "bar"},
}),
initPausePod(context.clientSet, &pausePodConfig{
initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "mid-pod2",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &mediumPriority,
NodeName: "node-2",
Resources: defaultPodRes,
@@ -1062,7 +1062,7 @@ func TestPDBInPreemption(t *testing.T) {
},
pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
@@ -1079,61 +1079,61 @@ func TestPDBInPreemption(t *testing.T) {
{name: "node-3", res: defaultNodeRes},
},
pdbs: []*policy.PodDisruptionBudget{
mkMinAvailablePDB("pdb-1", context.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo1": "bar"}),
mkMinAvailablePDB("pdb-2", context.ns.Name, types.UID("pdb-2-uid"), 2, map[string]string{"foo2": "bar"}),
mkMinAvailablePDB("pdb-1", testCtx.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo1": "bar"}),
mkMinAvailablePDB("pdb-2", testCtx.ns.Name, types.UID("pdb-2-uid"), 2, map[string]string{"foo2": "bar"}),
},
pdbPodNum: []int32{1, 5},
existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{
initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod1",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &lowPriority,
Resources: defaultPodRes,
NodeName: "node-1",
Labels: map[string]string{"foo1": "bar"},
}),
initPausePod(context.clientSet, &pausePodConfig{
initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "mid-pod1",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &mediumPriority,
Resources: defaultPodRes,
NodeName: "node-1",
}),
initPausePod(context.clientSet, &pausePodConfig{
initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod2",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &lowPriority,
Resources: defaultPodRes,
NodeName: "node-2",
Labels: map[string]string{"foo2": "bar"},
}),
initPausePod(context.clientSet, &pausePodConfig{
initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "mid-pod2",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &mediumPriority,
Resources: defaultPodRes,
NodeName: "node-2",
Labels: map[string]string{"foo2": "bar"},
}),
initPausePod(context.clientSet, &pausePodConfig{
initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod4",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &lowPriority,
Resources: defaultPodRes,
NodeName: "node-3",
Labels: map[string]string{"foo2": "bar"},
}),
initPausePod(context.clientSet, &pausePodConfig{
initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod5",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &lowPriority,
Resources: defaultPodRes,
NodeName: "node-3",
Labels: map[string]string{"foo2": "bar"},
}),
initPausePod(context.clientSet, &pausePodConfig{
initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "low-pod6",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &lowPriority,
Resources: defaultPodRes,
NodeName: "node-3",
@@ -1142,7 +1142,7 @@ func TestPDBInPreemption(t *testing.T) {
},
pod: initPausePod(cs, &pausePodConfig{
Name: "preemptor-pod",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Priority: &highPriority,
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
@@ -1172,24 +1172,24 @@ func TestPDBInPreemption(t *testing.T) {
}
// Add pod condition ready so that PDB is updated.
addPodConditionReady(p)
if _, err := context.clientSet.CoreV1().Pods(context.ns.Name).UpdateStatus(p); err != nil {
if _, err := testCtx.clientSet.CoreV1().Pods(testCtx.ns.Name).UpdateStatus(p); err != nil {
t.Fatal(err)
}
}
// Wait for Pods to be stable in scheduler cache.
if err := waitCachedPodsStable(context, test.existingPods); err != nil {
if err := waitCachedPodsStable(testCtx, test.existingPods); err != nil {
t.Fatalf("Not all pods are stable in the cache: %v", err)
}

// Create PDBs.
for _, pdb := range test.pdbs {
_, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).Create(pdb)
_, err := testCtx.clientSet.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).Create(pdb)
if err != nil {
t.Fatalf("Failed to create PDB: %v", err)
}
}
// Wait for PDBs to become stable.
if err := waitForPDBsStable(context, test.pdbs, test.pdbPodNum); err != nil {
if err := waitForPDBsStable(testCtx, test.pdbs, test.pdbPodNum); err != nil {
t.Fatalf("Not all pdbs are stable in the cache: %v", err)
}

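mkMinAvailablePDB is a helper from these tests; a plausible shape for the object it builds, assuming a minAvailable-style budget keyed off the pod labels used above (a sketch of the API object, not the helper's actual body; intstr is k8s.io/apimachinery/pkg/util/intstr):

minAvailable := intstr.FromInt(2)
pdb := &policy.PodDisruptionBudget{
	ObjectMeta: metav1.ObjectMeta{Name: "pdb-1", Namespace: ns, UID: types.UID("pdb-1-uid")},
	Spec: policy.PodDisruptionBudgetSpec{
		MinAvailable: &minAvailable, // preemption must leave at least 2 matching pods running
		Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
	},
}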
@@ -1220,7 +1220,7 @@ func TestPDBInPreemption(t *testing.T) {
// Cleanup
pods = append(pods, preemptor)
cleanupPods(cs, t, pods)
cs.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).DeleteCollection(nil, metav1.ListOptions{})
cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).DeleteCollection(nil, metav1.ListOptions{})
cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
}
}

@@ -38,10 +38,10 @@ import (
// TestNodeAffinity verifies that scheduler's node affinity priority function
// works correctly.
func TestNodeAffinity(t *testing.T) {
context := initTest(t, "node-affinity")
defer cleanupTest(t, context)
testCtx := initTest(t, "node-affinity")
defer cleanupTest(t, testCtx)
// Add a few nodes.
nodes, err := createNodes(context.clientSet, "testnode", nil, 5)
nodes, err := createNodes(testCtx.clientSet, "testnode", nil, 5)
if err != nil {
t.Fatalf("Cannot create nodes: %v", err)
}
@@ -52,17 +52,17 @@ func TestNodeAffinity(t *testing.T) {
labels := map[string]string{
labelKey: labelValue,
}
if err = testutils.AddLabelsToNode(context.clientSet, labeledNode.Name, labels); err != nil {
if err = testutils.AddLabelsToNode(testCtx.clientSet, labeledNode.Name, labels); err != nil {
t.Fatalf("Cannot add labels to node: %v", err)
}
if err = waitForNodeLabels(context.clientSet, labeledNode.Name, labels); err != nil {
if err = waitForNodeLabels(testCtx.clientSet, labeledNode.Name, labels); err != nil {
t.Fatalf("Adding labels to node didn't succeed: %v", err)
}
// Create a pod with node affinity.
podName := "pod-with-node-affinity"
pod, err := runPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{
pod, err := runPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{
Name: podName,
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Affinity: &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
@@ -95,10 +95,10 @@ func TestNodeAffinity(t *testing.T) {
// TestPodAffinity verifies that scheduler's pod affinity priority function
// works correctly.
func TestPodAffinity(t *testing.T) {
context := initTest(t, "pod-affinity")
defer cleanupTest(t, context)
testCtx := initTest(t, "pod-affinity")
defer cleanupTest(t, testCtx)
// Add a few nodes.
nodesInTopology, err := createNodes(context.clientSet, "in-topology", nil, 5)
nodesInTopology, err := createNodes(testCtx.clientSet, "in-topology", nil, 5)
if err != nil {
t.Fatalf("Cannot create nodes: %v", err)
}
@@ -109,34 +109,34 @@ func TestPodAffinity(t *testing.T) {
}
for _, node := range nodesInTopology {
// Add topology key to all the nodes.
if err = testutils.AddLabelsToNode(context.clientSet, node.Name, nodeLabels); err != nil {
if err = testutils.AddLabelsToNode(testCtx.clientSet, node.Name, nodeLabels); err != nil {
t.Fatalf("Cannot add labels to node %v: %v", node.Name, err)
}
if err = waitForNodeLabels(context.clientSet, node.Name, nodeLabels); err != nil {
if err = waitForNodeLabels(testCtx.clientSet, node.Name, nodeLabels); err != nil {
t.Fatalf("Adding labels to node %v didn't succeed: %v", node.Name, err)
}
}
// Add a pod with a label and wait for it to schedule.
labelKey := "service"
labelValue := "S1"
_, err = runPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{
_, err = runPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "attractor-pod",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Labels: map[string]string{labelKey: labelValue},
}))
if err != nil {
t.Fatalf("Error running the attractor pod: %v", err)
}
// Add a few more nodes without the topology label.
_, err = createNodes(context.clientSet, "other-node", nil, 5)
_, err = createNodes(testCtx.clientSet, "other-node", nil, 5)
if err != nil {
t.Fatalf("Cannot create the second set of nodes: %v", err)
}
// Add a new pod with affinity to the attractor pod.
podName := "pod-with-podaffinity"
pod, err := runPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{
pod, err := runPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{
Name: podName,
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Affinity: &v1.Affinity{
PodAffinity: &v1.PodAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
@@ -160,7 +160,7 @@ func TestPodAffinity(t *testing.T) {
},
},
TopologyKey: topologyKey,
Namespaces: []string{context.ns.Name},
Namespaces: []string{testCtx.ns.Name},
},
Weight: 50,
},
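The affinity literal being assembled above is a preferred (soft) pod-affinity term; written out on its own it has roughly this shape (the weight, label, and namespace mirror the surrounding test, the topology key is an assumed example rather than the test's topologyKey value):

affinity := &v1.Affinity{
	PodAffinity: &v1.PodAffinity{
		PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
			{
				Weight: 50,
				PodAffinityTerm: v1.PodAffinityTerm{
					LabelSelector: &metav1.LabelSelector{
						MatchLabels: map[string]string{"service": "S1"},
					},
					TopologyKey: "topology.kubernetes.io/zone", // assumed example key
					Namespaces:  []string{ns},
				},
			},
		},
	},
}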
@@ -185,8 +185,8 @@ func TestPodAffinity(t *testing.T) {
// TestImageLocality verifies that the scheduler's image locality priority function
// works correctly, i.e., the pod gets scheduled to the node where its container images are ready.
func TestImageLocality(t *testing.T) {
context := initTest(t, "image-locality")
defer cleanupTest(t, context)
testCtx := initTest(t, "image-locality")
defer cleanupTest(t, testCtx)

// We use a fake large image as the test image used by the pod, which has relatively large image size.
image := v1.ContainerImage{
@@ -197,22 +197,22 @@ func TestImageLocality(t *testing.T) {
}

// Create a node with the large image.
nodeWithLargeImage, err := createNodeWithImages(context.clientSet, "testnode-large-image", nil, []v1.ContainerImage{image})
nodeWithLargeImage, err := createNodeWithImages(testCtx.clientSet, "testnode-large-image", nil, []v1.ContainerImage{image})
if err != nil {
t.Fatalf("cannot create node with a large image: %v", err)
}

// Add a few nodes.
_, err = createNodes(context.clientSet, "testnode", nil, 10)
_, err = createNodes(testCtx.clientSet, "testnode", nil, 10)
if err != nil {
t.Fatalf("cannot create nodes: %v", err)
}

// Create a pod with containers each having the specified image.
podName := "pod-using-large-image"
pod, err := runPodWithContainers(context.clientSet, initPodWithContainers(context.clientSet, &podWithContainersConfig{
pod, err := runPodWithContainers(testCtx.clientSet, initPodWithContainers(testCtx.clientSet, &podWithContainersConfig{
Name: podName,
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Containers: makeContainersWithImages(image.Names),
}))
if err != nil {
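The fake large image is advertised through the node's status, and v1.ContainerImage is all the image-locality scoring sees: a list of names plus a byte size. A sketch (both values are illustrative, not the test's actual constants):

image := v1.ContainerImage{
	Names:     []string{"example.com/fake-large-image:v1"}, // assumed image name
	SizeBytes: 3 * 1024 * 1024 * 1024,                      // ~3GiB, large enough to dominate scoring
}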
@@ -247,10 +247,10 @@ func makeContainersWithImages(images []string) []v1.Container {
func TestEvenPodsSpreadPriority(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EvenPodsSpread, true)()

context := initTest(t, "eps-priority")
cs := context.clientSet
ns := context.ns.Name
defer cleanupTest(t, context)
testCtx := initTest(t, "eps-priority")
cs := testCtx.clientSet
ns := testCtx.ns.Name
defer cleanupTest(t, testCtx)
// Add 4 nodes.
nodes, err := createNodes(cs, "node", nil, 4)
if err != nil {

@@ -323,13 +323,13 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {
}

func TestUnschedulableNodes(t *testing.T) {
context := initTest(t, "unschedulable-nodes")
defer cleanupTest(t, context)
testCtx := initTest(t, "unschedulable-nodes")
defer cleanupTest(t, testCtx)

nodeLister := context.informerFactory.Core().V1().Nodes().Lister()
nodeLister := testCtx.informerFactory.Core().V1().Nodes().Lister()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
defer context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
defer testCtx.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})

goodCondition := v1.NodeCondition{
Type: v1.NodeReady,
@@ -398,23 +398,23 @@ func TestUnschedulableNodes(t *testing.T) {
}

for i, mod := range nodeModifications {
unSchedNode, err := context.clientSet.CoreV1().Nodes().Create(node)
unSchedNode, err := testCtx.clientSet.CoreV1().Nodes().Create(node)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}

// Apply the unschedulable modification to the node, and wait for the reflection
mod.makeUnSchedulable(t, unSchedNode, nodeLister, context.clientSet)
mod.makeUnSchedulable(t, unSchedNode, nodeLister, testCtx.clientSet)

// Create the new pod, note that this needs to happen post unschedulable
// modification or we have a race in the test.
myPod, err := createPausePodWithResource(context.clientSet, "node-scheduling-test-pod", context.ns.Name, nil)
myPod, err := createPausePodWithResource(testCtx.clientSet, "node-scheduling-test-pod", testCtx.ns.Name, nil)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}

// There are no schedulable nodes - the pod shouldn't be scheduled.
err = waitForPodToScheduleWithTimeout(context.clientSet, myPod, 2*time.Second)
err = waitForPodToScheduleWithTimeout(testCtx.clientSet, myPod, 2*time.Second)
if err == nil {
t.Errorf("Test %d: Pod scheduled successfully on unschedulable nodes", i)
}
@@ -425,23 +425,23 @@ func TestUnschedulableNodes(t *testing.T) {
}

// Apply the schedulable modification to the node, and wait for the reflection
schedNode, err := context.clientSet.CoreV1().Nodes().Get(unSchedNode.Name, metav1.GetOptions{})
schedNode, err := testCtx.clientSet.CoreV1().Nodes().Get(unSchedNode.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get node: %v", err)
}
mod.makeSchedulable(t, schedNode, nodeLister, context.clientSet)
mod.makeSchedulable(t, schedNode, nodeLister, testCtx.clientSet)

// Wait until the pod is scheduled.
if err := waitForPodToSchedule(context.clientSet, myPod); err != nil {
if err := waitForPodToSchedule(testCtx.clientSet, myPod); err != nil {
t.Errorf("Test %d: failed to schedule a pod: %v", i, err)
} else {
t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
}
// Clean up.
if err := deletePod(context.clientSet, myPod.Name, myPod.Namespace); err != nil {
if err := deletePod(testCtx.clientSet, myPod.Name, myPod.Namespace); err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
err = context.clientSet.CoreV1().Nodes().Delete(schedNode.Name, nil)
err = testCtx.clientSet.CoreV1().Nodes().Delete(schedNode.Name, nil)
if err != nil {
t.Errorf("Failed to delete node: %v", err)
}
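One of the nodeModifications exercised here is the plain spec.unschedulable flag; stripped of the table machinery, that modification is just (a sketch of a single case, not the test's full modification table):

node.Spec.Unschedulable = true // cordon: the scheduler skips this node entirely
if _, err := cs.CoreV1().Nodes().Update(node); err != nil {
	t.Fatalf("Failed to update node: %v", err)
}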
@@ -468,8 +468,8 @@ func TestMultiScheduler(t *testing.T) {
// - testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled

// 1. create and start default-scheduler
context := initTest(t, "multi-scheduler")
defer cleanupTest(t, context)
testCtx := initTest(t, "multi-scheduler")
defer cleanupTest(t, testCtx)

// 2. create a node
node := &v1.Node{
@@ -481,23 +481,23 @@ func TestMultiScheduler(t *testing.T) {
},
},
}
context.clientSet.CoreV1().Nodes().Create(node)
testCtx.clientSet.CoreV1().Nodes().Create(node)

// 3. create 3 pods for testing
t.Logf("create 3 pods for testing")
testPod, err := createPausePodWithResource(context.clientSet, "pod-without-scheduler-name", context.ns.Name, nil)
testPod, err := createPausePodWithResource(testCtx.clientSet, "pod-without-scheduler-name", testCtx.ns.Name, nil)
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}

defaultScheduler := "default-scheduler"
testPodFitsDefault, err := createPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{Name: "pod-fits-default", Namespace: context.ns.Name, SchedulerName: defaultScheduler}))
testPodFitsDefault, err := createPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{Name: "pod-fits-default", Namespace: testCtx.ns.Name, SchedulerName: defaultScheduler}))
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}

fooScheduler := "foo-scheduler"
testPodFitsFoo, err := createPausePod(context.clientSet, initPausePod(context.clientSet, &pausePodConfig{Name: "pod-fits-foo", Namespace: context.ns.Name, SchedulerName: fooScheduler}))
testPodFitsFoo, err := createPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{Name: "pod-fits-foo", Namespace: testCtx.ns.Name, SchedulerName: fooScheduler}))
if err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
@@ -506,30 +506,30 @@ func TestMultiScheduler(t *testing.T) {
// - testPod, testPodFitsDefault should be scheduled
// - testPodFitsFoo should NOT be scheduled
t.Logf("wait for pods scheduled")
if err := waitForPodToSchedule(context.clientSet, testPod); err != nil {
if err := waitForPodToSchedule(testCtx.clientSet, testPod); err != nil {
t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPod.Name, err)
} else {
t.Logf("Test MultiScheduler: %s Pod scheduled", testPod.Name)
}

if err := waitForPodToSchedule(context.clientSet, testPodFitsDefault); err != nil {
if err := waitForPodToSchedule(testCtx.clientSet, testPodFitsDefault); err != nil {
t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodFitsDefault.Name, err)
} else {
t.Logf("Test MultiScheduler: %s Pod scheduled", testPodFitsDefault.Name)
}

if err := waitForPodToScheduleWithTimeout(context.clientSet, testPodFitsFoo, time.Second*5); err == nil {
if err := waitForPodToScheduleWithTimeout(testCtx.clientSet, testPodFitsFoo, time.Second*5); err == nil {
t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodFitsFoo.Name, err)
} else {
t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodFitsFoo.Name)
}

// 5. create and start a scheduler with name "foo-scheduler"
context = initTestSchedulerWithOptions(t, context, true, nil, time.Second, scheduler.WithName(fooScheduler))
testCtx = initTestSchedulerWithOptions(t, testCtx, true, nil, time.Second, scheduler.WithName(fooScheduler))

// 6. **check point-2**:
// - testPodWithAnnotationFitsFoo should be scheduled
err = waitForPodToSchedule(context.clientSet, testPodFitsFoo)
err = waitForPodToSchedule(testCtx.clientSet, testPodFitsFoo)
if err != nil {
t.Errorf("Test MultiScheduler: %s Pod not scheduled, %v", testPodFitsFoo.Name, err)
} else {
@@ -537,10 +537,10 @@ func TestMultiScheduler(t *testing.T) {
}

// 7. delete the pods that were scheduled by the default scheduler, and stop the default scheduler
if err := deletePod(context.clientSet, testPod.Name, context.ns.Name); err != nil {
if err := deletePod(testCtx.clientSet, testPod.Name, testCtx.ns.Name); err != nil {
t.Errorf("Failed to delete pod: %v", err)
}
if err := deletePod(context.clientSet, testPodFitsDefault.Name, context.ns.Name); err != nil {
if err := deletePod(testCtx.clientSet, testPodFitsDefault.Name, testCtx.ns.Name); err != nil {
t.Errorf("Failed to delete pod: %v", err)
}

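Dispatch between the two schedulers is driven entirely by spec.schedulerName on the pod; a pod that only the foo scheduler will pick up looks roughly like this (the container image is an assumed placeholder):

pod := &v1.Pod{
	ObjectMeta: metav1.ObjectMeta{Name: "pod-fits-foo", Namespace: ns},
	Spec: v1.PodSpec{
		SchedulerName: "foo-scheduler", // ignored by default-scheduler, claimed once foo-scheduler runs
		Containers:    []v1.Container{{Name: "pause", Image: "k8s.gcr.io/pause:3.1"}},
	},
}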
@@ -585,8 +585,8 @@ func TestMultiScheduler(t *testing.T) {

// This test will verify scheduler can work well regardless of whether kubelet is allocatable aware or not.
func TestAllocatable(t *testing.T) {
context := initTest(t, "allocatable")
defer cleanupTest(t, context)
testCtx := initTest(t, "allocatable")
defer cleanupTest(t, testCtx)

// 2. create a node without allocatable awareness
nodeRes := &v1.ResourceList{
@@ -594,7 +594,7 @@ func TestAllocatable(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
}
allocNode, err := createNode(context.clientSet, "node-allocatable-scheduler-test-node", nodeRes)
allocNode, err := createNode(testCtx.clientSet, "node-allocatable-scheduler-test-node", nodeRes)
if err != nil {
t.Fatalf("Failed to create node: %v", err)
}
@@ -605,13 +605,13 @@ func TestAllocatable(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
}
testAllocPod, err := createPausePodWithResource(context.clientSet, podName, context.ns.Name, podRes)
testAllocPod, err := createPausePodWithResource(testCtx.clientSet, podName, testCtx.ns.Name, podRes)
if err != nil {
t.Fatalf("Test allocatable unawareness failed to create pod: %v", err)
}

// 4. Test: this test pod should be scheduled since api-server will use Capacity as Allocatable
err = waitForPodToScheduleWithTimeout(context.clientSet, testAllocPod, time.Second*5)
err = waitForPodToScheduleWithTimeout(testCtx.clientSet, testAllocPod, time.Second*5)
if err != nil {
t.Errorf("Test allocatable unawareness: %s Pod not scheduled: %v", testAllocPod.Name, err)
} else {
@@ -632,23 +632,23 @@ func TestAllocatable(t *testing.T) {
},
}

if _, err := context.clientSet.CoreV1().Nodes().UpdateStatus(allocNode); err != nil {
if _, err := testCtx.clientSet.CoreV1().Nodes().UpdateStatus(allocNode); err != nil {
t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
}

if err := deletePod(context.clientSet, testAllocPod.Name, context.ns.Name); err != nil {
if err := deletePod(testCtx.clientSet, testAllocPod.Name, testCtx.ns.Name); err != nil {
t.Fatalf("Failed to remove the first pod: %v", err)
}

// 6. Make another pod with different name, same resource request
podName2 := "pod-test-allocatable2"
testAllocPod2, err := createPausePodWithResource(context.clientSet, podName2, context.ns.Name, podRes)
testAllocPod2, err := createPausePodWithResource(testCtx.clientSet, podName2, testCtx.ns.Name, podRes)
if err != nil {
t.Fatalf("Test allocatable awareness failed to create pod: %v", err)
}

// 7. Test: this test pod should not be scheduled since it request more than Allocatable
if err := waitForPodToScheduleWithTimeout(context.clientSet, testAllocPod2, time.Second*5); err == nil {
if err := waitForPodToScheduleWithTimeout(testCtx.clientSet, testAllocPod2, time.Second*5); err == nil {
t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectedly, %v", testAllocPod2.Name, err)
} else {
t.Logf("Test allocatable awareness: %s Pod not scheduled as expected", testAllocPod2.Name)
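The flip in step 5 is a node status update that sets Allocatable below Capacity, which is what an allocatable-aware kubelet would report. Roughly (the values are illustrative; the hunk above elides the test's actual numbers):

allocNode.Status.Allocatable = v1.ResourceList{
	// capacity stays at 30 while allocatable drops below the pod's request of 20
	v1.ResourceCPU:    *resource.NewMilliQuantity(10, resource.DecimalSI),
	v1.ResourceMemory: *resource.NewQuantity(10, resource.BinarySI),
}
if _, err := cs.CoreV1().Nodes().UpdateStatus(allocNode); err != nil {
	t.Fatalf("Failed to update node status: %v", err)
}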
@@ -659,9 +659,9 @@ func TestAllocatable(t *testing.T) {
// pods are scheduled by other schedulers.
func TestSchedulerInformers(t *testing.T) {
// Initialize scheduler.
context := initTest(t, "scheduler-informer")
defer cleanupTest(t, context)
cs := context.clientSet
testCtx := initTest(t, "scheduler-informer")
defer cleanupTest(t, testCtx)
cs := testCtx.clientSet

defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
@@ -689,17 +689,17 @@ func TestSchedulerInformers(t *testing.T) {
description: "Pod cannot be scheduled when node is occupied by pods scheduled by other schedulers",
nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
existingPods: []*v1.Pod{
initPausePod(context.clientSet, &pausePodConfig{
initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "pod1",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Resources: defaultPodRes,
Labels: map[string]string{"foo": "bar"},
NodeName: "node-1",
SchedulerName: "foo-scheduler",
}),
initPausePod(context.clientSet, &pausePodConfig{
initPausePod(testCtx.clientSet, &pausePodConfig{
Name: "pod2",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Resources: defaultPodRes,
Labels: map[string]string{"foo": "bar"},
NodeName: "node-1",
@@ -708,7 +708,7 @@ func TestSchedulerInformers(t *testing.T) {
},
pod: initPausePod(cs, &pausePodConfig{
Name: "unschedulable-pod",
Namespace: context.ns.Name,
Namespace: testCtx.ns.Name,
Resources: defaultPodRes,
}),
preemptedPodIndexes: map[int]struct{}{2: {}},
@@ -743,7 +743,7 @@ func TestSchedulerInformers(t *testing.T) {
// Cleanup
pods = append(pods, unschedulable)
cleanupPods(cs, t, pods)
cs.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).DeleteCollection(nil, metav1.ListOptions{})
cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).DeleteCollection(nil, metav1.ListOptions{})
cs.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
}
}

@@ -70,24 +70,24 @@ func TestTaintNodeByCondition(t *testing.T) {
// Build PodToleration Admission.
admission := podtolerationrestriction.NewPodTolerationsPlugin(&pluginapi.Configuration{})

context := initTestMaster(t, "default", admission)
testCtx := initTestMaster(t, "default", admission)

// Build clientset and informers for controllers.
externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{
QPS: -1,
Host: context.httpServer.URL,
Host: testCtx.httpServer.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second)

admission.SetExternalKubeClientSet(externalClientset)
admission.SetExternalKubeInformerFactory(externalInformers)

context = initTestScheduler(t, context, false, nil)
defer cleanupTest(t, context)
testCtx = initTestScheduler(t, testCtx, false, nil)
defer cleanupTest(t, testCtx)

cs := context.clientSet
informers := context.informerFactory
nsName := context.ns.Name
cs := testCtx.clientSet
informers := testCtx.informerFactory
nsName := testCtx.ns.Name

// Start NodeLifecycleController for taint.
nc, err := nodelifecycle.NewNodeLifecycleController(
@@ -111,13 +111,13 @@ func TestTaintNodeByCondition(t *testing.T) {
t.Errorf("Failed to create node controller: %v", err)
return
}
go nc.Run(context.ctx.Done())
go nc.Run(testCtx.ctx.Done())

// Waiting for all controller sync.
externalInformers.Start(context.ctx.Done())
externalInformers.WaitForCacheSync(context.ctx.Done())
informers.Start(context.ctx.Done())
informers.WaitForCacheSync(context.ctx.Done())
externalInformers.Start(testCtx.ctx.Done())
externalInformers.WaitForCacheSync(testCtx.ctx.Done())
informers.Start(testCtx.ctx.Done())
informers.WaitForCacheSync(testCtx.ctx.Done())

// -------------------------------------------
// Test TaintNodeByCondition feature.
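TaintNodeByCondition mirrors node conditions into scheduler-visible taints; the NotReady condition, for instance, corresponds to a taint of roughly this shape (the key spelling is believed to match upstream Kubernetes, but treat it as an assumption here):

taint := v1.Taint{
	Key:    "node.kubernetes.io/not-ready", // mirrored from the NodeReady=False condition
	Effect: v1.TaintEffectNoSchedule,
}
node.Spec.Taints = append(node.Spec.Taints, taint)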
@@ -571,7 +571,7 @@ func TestTaintNodeByCondition(t *testing.T) {

cleanupPods(cs, t, pods)
cleanupNodes(cs, t)
waitForSchedulerCacheCleanup(context.scheduler, t)
waitForSchedulerCacheCleanup(testCtx.scheduler, t)
})
}
}
@@ -653,22 +653,22 @@ func TestTaintBasedEvictions(t *testing.T) {
)
for i, test := range tests {
t.Run(test.name, func(t *testing.T) {
context := initTestMaster(t, "taint-based-evictions", admission)
testCtx := initTestMaster(t, "taint-based-evictions", admission)

// Build clientset and informers for controllers.
externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{
QPS: -1,
Host: context.httpServer.URL,
Host: testCtx.httpServer.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second)
podTolerations.SetExternalKubeClientSet(externalClientset)
podTolerations.SetExternalKubeInformerFactory(externalInformers)

context = initTestScheduler(t, context, true, nil)
defer cleanupTest(t, context)
cs := context.clientSet
informers := context.informerFactory
_, err := cs.CoreV1().Namespaces().Create(context.ns)
testCtx = initTestScheduler(t, testCtx, true, nil)
defer cleanupTest(t, testCtx)
cs := testCtx.clientSet
informers := testCtx.informerFactory
_, err := cs.CoreV1().Namespaces().Create(testCtx.ns)
if err != nil {
t.Errorf("Failed to create namespace %+v", err)
}
@@ -696,13 +696,13 @@ func TestTaintBasedEvictions(t *testing.T) {
return
}

go nc.Run(context.ctx.Done())
go nc.Run(testCtx.ctx.Done())

// Waiting for all controller sync.
externalInformers.Start(context.ctx.Done())
externalInformers.WaitForCacheSync(context.ctx.Done())
informers.Start(context.ctx.Done())
informers.WaitForCacheSync(context.ctx.Done())
externalInformers.Start(testCtx.ctx.Done())
externalInformers.WaitForCacheSync(testCtx.ctx.Done())
informers.Start(testCtx.ctx.Done())
informers.WaitForCacheSync(testCtx.ctx.Done())

nodeRes := v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4000m"),
@@ -742,7 +742,7 @@ func TestTaintBasedEvictions(t *testing.T) {
test.pod.Spec.Tolerations[0].TolerationSeconds = &tolerationSeconds[i]
}

test.pod, err = cs.CoreV1().Pods(context.ns.Name).Create(test.pod)
test.pod, err = cs.CoreV1().Pods(testCtx.ns.Name).Create(test.pod)
if err != nil {
t.Fatalf("Test Failed: error: %v, while creating pod", err)
}
@@ -751,7 +751,7 @@ func TestTaintBasedEvictions(t *testing.T) {
t.Errorf("Failed to schedule pod %s/%s on the node, err: %v",
test.pod.Namespace, test.pod.Name, err)
}
test.pod, err = cs.CoreV1().Pods(context.ns.Name).Get(test.pod.Name, metav1.GetOptions{})
test.pod, err = cs.CoreV1().Pods(testCtx.ns.Name).Get(test.pod.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Test Failed: error: %v, while creating pod", err)
}
@@ -798,7 +798,7 @@ func TestTaintBasedEvictions(t *testing.T) {
go func(i int) {
for {
select {
case <-context.ctx.Done():
case <-testCtx.ctx.Done():
return
case <-time.Tick(heartbeatInternal):
nodeCopy := nodeCopyWithConditions(nodes[i], conditions)
@@ -815,7 +815,7 @@ func TestTaintBasedEvictions(t *testing.T) {
}

if test.pod != nil {
err = pod.WaitForPodCondition(cs, context.ns.Name, test.pod.Name, test.waitForPodCondition, time.Second*15, func(pod *v1.Pod) (bool, error) {
err = pod.WaitForPodCondition(cs, testCtx.ns.Name, test.pod.Name, test.waitForPodCondition, time.Second*15, func(pod *v1.Pod) (bool, error) {
// as node is unreachable, pod0 is expected to be in Terminating status
// rather than getting deleted
if tolerationSeconds[i] == 0 {
@@ -827,13 +827,13 @@ func TestTaintBasedEvictions(t *testing.T) {
return false, nil
})
if err != nil {
pod, _ := cs.CoreV1().Pods(context.ns.Name).Get(test.pod.Name, metav1.GetOptions{})
pod, _ := cs.CoreV1().Pods(testCtx.ns.Name).Get(test.pod.Name, metav1.GetOptions{})
t.Fatalf("Error: %v, Expected test pod to be %s but it's %v", err, test.waitForPodCondition, pod)
}
cleanupPods(cs, t, []*v1.Pod{test.pod})
}
cleanupNodes(cs, t)
waitForSchedulerCacheCleanup(context.scheduler, t)
waitForSchedulerCacheCleanup(testCtx.scheduler, t)
})
}
}

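The tolerationSeconds table above controls how long each pod rides out the unreachable taint before eviction; on the pod this is a toleration of roughly the following form (a sketch; the key is believed to match what TaintBasedEvictions applies upstream, and the value is illustrative):

seconds := int64(300) // the test's tolerationSeconds table supplies the real values
pod.Spec.Tolerations = []v1.Toleration{{
	Key:               "node.kubernetes.io/unreachable",
	Operator:          v1.TolerationOpExists,
	Effect:            v1.TaintEffectNoExecute,
	TolerationSeconds: &seconds, // evict this pod 300s after the taint lands
}}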
@@ -99,7 +99,7 @@ func createAlgorithmSourceFromPolicy(policy *schedulerapi.Policy, clientSet clie
// configuration.
func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *testContext {
ctx, cancelFunc := context.WithCancel(context.Background())
context := testContext{
testCtx := testContext{
ctx: ctx,
cancelFn: cancelFunc,
}
@@ -117,16 +117,16 @@ func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface
masterConfig.GenericConfig.AdmissionControl = admission
}

_, context.httpServer, context.closeFn = framework.RunAMasterUsingServer(masterConfig, s, h)
_, testCtx.httpServer, testCtx.closeFn = framework.RunAMasterUsingServer(masterConfig, s, h)

if nsPrefix != "default" {
context.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), s, t)
testCtx.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), s, t)
} else {
context.ns = framework.CreateTestingNamespace("default", s, t)
testCtx.ns = framework.CreateTestingNamespace("default", s, t)
}

// 2. Create kubeclient
context.clientSet = clientset.NewForConfigOrDie(
testCtx.clientSet = clientset.NewForConfigOrDie(
&restclient.Config{
QPS: -1, Host: s.URL,
ContentConfig: restclient.ContentConfig{
@@ -134,60 +134,60 @@ func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface
},
},
)
return &context
return &testCtx
}

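This hunk is the motivation for the whole rename: the old local variable shadowed the imported context package, so any later use of the package inside the same scope would fail to compile. A minimal reproduction of the hazard (illustrative only, not code from this repository):

package main

import "context"

func demo() {
	ctx, cancel := context.WithCancel(context.Background()) // package still visible here
	defer cancel()
	context := struct{ name string }{name: "test"} // from here on, "context" names a value
	_, _ = ctx, context
	// context.TODO() // would no longer compile: context is now a struct, not the package
}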
// initTestScheduler initializes a test environment and creates a scheduler with default
// configuration.
func initTestScheduler(
t *testing.T,
context *testContext,
testCtx *testContext,
setPodInformer bool,
policy *schedulerapi.Policy,
) *testContext {
// Pod preemption is enabled by default scheduler configuration.
return initTestSchedulerWithOptions(t, context, setPodInformer, policy, time.Second)
return initTestSchedulerWithOptions(t, testCtx, setPodInformer, policy, time.Second)
}

// initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
// configuration and other options.
func initTestSchedulerWithOptions(
t *testing.T,
context *testContext,
testCtx *testContext,
setPodInformer bool,
policy *schedulerapi.Policy,
resyncPeriod time.Duration,
opts ...scheduler.Option,
) *testContext {
// 1. Create scheduler
context.informerFactory = informers.NewSharedInformerFactory(context.clientSet, resyncPeriod)
testCtx.informerFactory = informers.NewSharedInformerFactory(testCtx.clientSet, resyncPeriod)

var podInformer coreinformers.PodInformer

// create independent pod informer if required
if setPodInformer {
podInformer = scheduler.NewPodInformer(context.clientSet, 12*time.Hour)
podInformer = scheduler.NewPodInformer(testCtx.clientSet, 12*time.Hour)
} else {
podInformer = context.informerFactory.Core().V1().Pods()
podInformer = testCtx.informerFactory.Core().V1().Pods()
}
var err error
eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
Interface: context.clientSet.EventsV1beta1().Events(""),
Interface: testCtx.clientSet.EventsV1beta1().Events(""),
})
recorder := eventBroadcaster.NewRecorder(
legacyscheme.Scheme,
v1.DefaultSchedulerName,
)
if policy != nil {
opts = append(opts, scheduler.WithAlgorithmSource(createAlgorithmSourceFromPolicy(policy, context.clientSet)))
opts = append(opts, scheduler.WithAlgorithmSource(createAlgorithmSourceFromPolicy(policy, testCtx.clientSet)))
}
opts = append([]scheduler.Option{scheduler.WithBindTimeoutSeconds(600)}, opts...)
context.scheduler, err = scheduler.New(
context.clientSet,
context.informerFactory,
testCtx.scheduler, err = scheduler.New(
testCtx.clientSet,
testCtx.informerFactory,
podInformer,
recorder,
context.ctx.Done(),
testCtx.ctx.Done(),
opts...,
)

@@ -197,31 +197,31 @@ func initTestSchedulerWithOptions(

// set setPodInformer if provided.
if setPodInformer {
go podInformer.Informer().Run(context.scheduler.StopEverything)
cache.WaitForNamedCacheSync("scheduler", context.scheduler.StopEverything, podInformer.Informer().HasSynced)
go podInformer.Informer().Run(testCtx.scheduler.StopEverything)
cache.WaitForNamedCacheSync("scheduler", testCtx.scheduler.StopEverything, podInformer.Informer().HasSynced)
}

stopCh := make(chan struct{})
eventBroadcaster.StartRecordingToSink(stopCh)

context.informerFactory.Start(context.scheduler.StopEverything)
context.informerFactory.WaitForCacheSync(context.scheduler.StopEverything)
testCtx.informerFactory.Start(testCtx.scheduler.StopEverything)
testCtx.informerFactory.WaitForCacheSync(testCtx.scheduler.StopEverything)

go context.scheduler.Run(context.ctx)
go testCtx.scheduler.Run(testCtx.ctx)

return context
return testCtx
}

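initTestSchedulerWithOptions leans on the scheduler's functional options; prepending the bind-timeout default means a caller-supplied option can still override it, since options apply in order. The pattern in miniature (a generic sketch, not the scheduler's actual option type):

type config struct{ bindTimeoutSeconds int64 }

type Option func(*config)

func WithBindTimeoutSeconds(s int64) Option {
	return func(c *config) { c.bindTimeoutSeconds = s }
}

func newConfig(opts ...Option) *config {
	c := &config{}
	for _, o := range opts {
		o(c) // applied in order, so later options win over earlier defaults
	}
	return c
}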
// initDisruptionController initializes and runs a Disruption Controller to properly
// update PodDisruptionBudget objects.
func initDisruptionController(t *testing.T, context *testContext) *disruption.DisruptionController {
informers := informers.NewSharedInformerFactory(context.clientSet, 12*time.Hour)
func initDisruptionController(t *testing.T, testCtx *testContext) *disruption.DisruptionController {
informers := informers.NewSharedInformerFactory(testCtx.clientSet, 12*time.Hour)

discoveryClient := cacheddiscovery.NewMemCacheClient(context.clientSet.Discovery())
discoveryClient := cacheddiscovery.NewMemCacheClient(testCtx.clientSet.Discovery())
mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)

config := restclient.Config{Host: context.httpServer.URL}
scaleKindResolver := scale.NewDiscoveryScaleKindResolver(context.clientSet.Discovery())
config := restclient.Config{Host: testCtx.httpServer.URL}
scaleKindResolver := scale.NewDiscoveryScaleKindResolver(testCtx.clientSet.Discovery())
scaleClient, err := scale.NewForConfig(&config, mapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver)
if err != nil {
t.Fatalf("Error in create scaleClient: %v", err)
@@ -234,13 +234,13 @@ func initDisruptionController(t *testing.T, context *testContext) *disruption.Di
informers.Apps().V1().ReplicaSets(),
informers.Apps().V1().Deployments(),
informers.Apps().V1().StatefulSets(),
context.clientSet,
testCtx.clientSet,
mapper,
scaleClient)

informers.Start(context.scheduler.StopEverything)
informers.WaitForCacheSync(context.scheduler.StopEverything)
go dc.Run(context.scheduler.StopEverything)
informers.Start(testCtx.scheduler.StopEverything)
informers.WaitForCacheSync(testCtx.scheduler.StopEverything)
go dc.Run(testCtx.scheduler.StopEverything)
return dc
}

@@ -260,13 +260,13 @@ func initTestDisablePreemption(t *testing.T, nsPrefix string) *testContext {

// cleanupTest deletes the scheduler and the test namespace. It should be called
// at the end of a test.
func cleanupTest(t *testing.T, context *testContext) {
func cleanupTest(t *testing.T, testCtx *testContext) {
// Kill the scheduler.
context.cancelFn()
testCtx.cancelFn()
// Cleanup nodes.
context.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
framework.DeleteTestingNamespace(context.ns, context.httpServer, t)
context.closeFn()
testCtx.clientSet.CoreV1().Nodes().DeleteCollection(nil, metav1.ListOptions{})
framework.DeleteTestingNamespace(testCtx.ns, testCtx.httpServer, t)
testCtx.closeFn()
}

// waitForReflection waits till the passFunc confirms that the object it expects
@@ -670,9 +670,9 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {

// waitForPDBsStable waits for PDBs to have "CurrentHealthy" status equal to
// the expected values.
func waitForPDBsStable(context *testContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
func waitForPDBsStable(testCtx *testContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
return wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
pdbList, err := context.clientSet.PolicyV1beta1().PodDisruptionBudgets(context.ns.Name).List(metav1.ListOptions{})
pdbList, err := testCtx.clientSet.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).List(metav1.ListOptions{})
if err != nil {
return false, err
}
@@ -698,9 +698,9 @@ func waitForPDBsStable(context *testContext, pdbs []*policy.PodDisruptionBudget,
}

// waitCachedPodsStable waits until scheduler cache has the given pods.
func waitCachedPodsStable(context *testContext, pods []*v1.Pod) error {
func waitCachedPodsStable(testCtx *testContext, pods []*v1.Pod) error {
return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
cachedPods, err := context.scheduler.SchedulerCache.List(labels.Everything())
cachedPods, err := testCtx.scheduler.SchedulerCache.List(labels.Everything())
if err != nil {
return false, err
}
@@ -708,11 +708,11 @@ func waitCachedPodsStable(context *testContext, pods []*v1.Pod) error {
return false, nil
}
for _, p := range pods {
actualPod, err1 := context.clientSet.CoreV1().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{})
actualPod, err1 := testCtx.clientSet.CoreV1().Pods(p.Namespace).Get(p.Name, metav1.GetOptions{})
if err1 != nil {
return false, err1
}
cachedPod, err2 := context.scheduler.SchedulerCache.GetPod(actualPod)
cachedPod, err2 := testCtx.scheduler.SchedulerCache.GetPod(actualPod)
if err2 != nil || cachedPod == nil {
return false, err2
}
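Both wait helpers above are thin wrappers over wait.Poll from k8s.io/apimachinery/pkg/util/wait, which invokes a condition function on an interval until it returns true, returns an error, or the timeout expires. The skeleton (the condition body here is a hypothetical placeholder):

err := wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
	done, err := checkSomething() // hypothetical condition; a non-nil error aborts the poll
	if err != nil {
		return false, err
	}
	return done, nil // false keeps polling until the 30s timeout
})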