Refactor: move generic functions of integration test to util directory
This commit is contained in:
parent b378b17560
commit b67a033de2
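The hunks below are mechanical: helpers that were unexported and local to the scheduler integration-test package (initTestMaster, initTestScheduler, cleanupTest, cleanupPods, waitForPodToSchedule, podScheduled, ...) now live in test/integration/util as exported functions, and the test-context fields they return (clientSet, ns, httpServer, ctx) become exported (ClientSet, NS, HTTPServer, Ctx) so that other test packages can reach them. A minimal sketch of that pattern, with hypothetical names standing in for the real Kubernetes helpers:

// util/util.go - shared helper package (hypothetical sketch, not the real API)
package util

import "testing"

// TestContext exports its fields; before the refactor the equivalent struct
// and fields were unexported and therefore invisible to other test packages.
type TestContext struct {
	Name string // stands in for fields like ClientSet, NS, HTTPServer, Ctx
}

// InitTestContext stands in for exported setup helpers such as InitTestMaster.
func InitTestContext(t *testing.T, name string) *TestContext {
	t.Helper()
	return &TestContext{Name: name}
}

// CleanupTest stands in for the exported teardown helper of the same name.
func CleanupTest(t *testing.T, tc *TestContext) {
	t.Helper()
	// real teardown (namespace deletion, server shutdown) would go here
}

A caller in another package then imports the util package (aliased testutils in the diff) and writes testCtx := testutils.InitTestContext(t, "example") followed by defer testutils.CleanupTest(t, testCtx), which is exactly the shape of the call-site edits below.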
@@ -61,6 +61,7 @@ go_test(
"//staging/src/k8s.io/kube-scheduler/extender/v1:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/integration/framework:go_default_library",
"//test/integration/util:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/google/go-cmp/cmp:go_default_library",
@@ -89,34 +90,22 @@ go_library(
"//pkg/api/v1/pod:go_default_library",
"//pkg/controller/disruption:go_default_library",
"//pkg/scheduler:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/apis/config/scheme:go_default_library",
"//pkg/scheduler/apis/config/v1:go_default_library",
"//pkg/scheduler/profile:go_default_library",
"//pkg/util/taints:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached/memory:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/events:go_default_library",
"//test/integration/framework:go_default_library",
"//test/integration/util:go_default_library",
"//test/utils/image:go_default_library",
],
)
@@ -35,6 +35,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
testutils "k8s.io/kubernetes/test/integration/util"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@@ -279,8 +280,8 @@ func machine3Prioritizer(pod *v1.Pod, nodes *v1.NodeList) (*extenderv1.HostPrior
}

func TestSchedulerExtender(t *testing.T) {
testCtx := initTestMaster(t, "scheduler-extender", nil)
clientSet := testCtx.clientSet
testCtx := testutils.InitTestMaster(t, "scheduler-extender", nil)
clientSet := testCtx.ClientSet

extender1 := &Extender{
name: "extender1",
@@ -349,10 +350,10 @@ func TestSchedulerExtender(t *testing.T) {
}
policy.APIVersion = "v1"

testCtx = initTestScheduler(t, testCtx, false, &policy)
defer cleanupTest(t, testCtx)
testCtx = testutils.InitTestScheduler(t, testCtx, false, &policy)
defer testutils.CleanupTest(t, testCtx)

DoTestPodScheduling(testCtx.ns, t, clientSet)
DoTestPodScheduling(testCtx.NS, t, clientSet)
}

func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface) {
@@ -405,7 +406,7 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
t.Fatalf("Failed to create pod: %v", err)
}

err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, myPod.Namespace, myPod.Name))
err = wait.Poll(time.Second, wait.ForeverTestTimeout, testutils.PodScheduled(cs, myPod.Namespace, myPod.Name))
if err != nil {
t.Fatalf("Failed to schedule pod: %v", err)
}
@@ -33,6 +33,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
testutils "k8s.io/kubernetes/test/integration/util"
)

type PreFilterPlugin struct {
@@ -482,10 +483,10 @@ func TestPreFilterPlugin(t *testing.T) {
}

// Create the master and the scheduler with the test plugin set.
testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "prefilter-plugin", nil), 2,
testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestMaster(t, "prefilter-plugin", nil), 2,
scheduler.WithProfiles(prof),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, testCtx)
defer testutils.CleanupTest(t, testCtx)

tests := []struct {
fail bool
@@ -509,18 +510,18 @@ func TestPreFilterPlugin(t *testing.T) {
preFilterPlugin.failPreFilter = test.fail
preFilterPlugin.rejectPreFilter = test.reject
// Create a best effort pod.
pod, err := createPausePod(testCtx.clientSet,
initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
pod, err := createPausePod(testCtx.ClientSet,
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
if err != nil {
t.Errorf("Error while creating a test pod: %v", err)
}

if test.reject || test.fail {
if err = waitForPodUnschedulable(testCtx.clientSet, pod); err != nil {
if err = waitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
t.Errorf("test #%v: Didn't expect the pod to be scheduled. error: %v", i, err)
}
} else {
if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, pod); err != nil {
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
}
}
@@ -530,7 +531,7 @@ func TestPreFilterPlugin(t *testing.T) {
}

preFilterPlugin.reset()
cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod})
}
}
@@ -553,29 +554,29 @@ func TestScorePlugin(t *testing.T) {
},
}

testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "score-plugin", nil), 10,
testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestMaster(t, "score-plugin", nil), 10,
scheduler.WithProfiles(prof),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, testCtx)
defer testutils.CleanupTest(t, testCtx)

for i, fail := range []bool{false, true} {
scorePlugin.failScore = fail
// Create a best effort pod.
pod, err := createPausePod(testCtx.clientSet,
initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
pod, err := createPausePod(testCtx.ClientSet,
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
if err != nil {
t.Fatalf("Error while creating a test pod: %v", err)
}

if fail {
if err = waitForPodUnschedulable(testCtx.clientSet, pod); err != nil {
if err = waitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
t.Errorf("test #%v: Didn't expect the pod to be scheduled. error: %v", i, err)
}
} else {
if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, pod); err != nil {
t.Errorf("Expected the pod to be scheduled. error: %v", err)
} else {
p, err := getPod(testCtx.clientSet, pod.Name, pod.Namespace)
p, err := getPod(testCtx.ClientSet, pod.Name, pod.Namespace)
if err != nil {
t.Errorf("Failed to retrieve the pod. error: %v", err)
} else if p.Spec.NodeName != scorePlugin.highScoreNode {
@@ -589,7 +590,7 @@ func TestScorePlugin(t *testing.T) {
}

scorePlugin.reset()
cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod})
}
}
@@ -612,20 +613,20 @@ func TestNormalizeScorePlugin(t *testing.T) {
},
},
}
testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "score-plugin", nil), 10,
testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestMaster(t, "score-plugin", nil), 10,
scheduler.WithProfiles(prof),
scheduler.WithFrameworkOutOfTreeRegistry(registry))

defer cleanupTest(t, testCtx)
defer testutils.CleanupTest(t, testCtx)

// Create a best effort pod.
pod, err := createPausePod(testCtx.clientSet,
initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
pod, err := createPausePod(testCtx.ClientSet,
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
if err != nil {
t.Fatalf("Error while creating a test pod: %v", err)
}

if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, pod); err != nil {
t.Errorf("Expected the pod to be scheduled. error: %v", err)
}
@@ -660,27 +661,27 @@ func TestReservePlugin(t *testing.T) {
}

// Create the master and the scheduler with the test plugin set.
testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "reserve-plugin", nil), 2,
testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestMaster(t, "reserve-plugin", nil), 2,
scheduler.WithProfiles(prof),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, testCtx)
defer testutils.CleanupTest(t, testCtx)

for _, fail := range []bool{false, true} {
reservePlugin.failReserve = fail
// Create a best effort pod.
pod, err := createPausePod(testCtx.clientSet,
initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
pod, err := createPausePod(testCtx.ClientSet,
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
if err != nil {
t.Errorf("Error while creating a test pod: %v", err)
}

if fail {
if err = wait.Poll(10*time.Millisecond, 30*time.Second,
podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
}
} else {
if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, pod); err != nil {
t.Errorf("Expected the pod to be scheduled. error: %v", err)
}
}
@@ -690,7 +691,7 @@ func TestReservePlugin(t *testing.T) {
}

reservePlugin.reset()
cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod})
}
}
@@ -715,10 +716,10 @@ func TestPrebindPlugin(t *testing.T) {
}

// Create the master and the scheduler with the test plugin set.
testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "prebind-plugin", nil), 2,
testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestMaster(t, "prebind-plugin", nil), 2,
scheduler.WithProfiles(prof),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, testCtx)
defer testutils.CleanupTest(t, testCtx)

tests := []struct {
fail bool
@@ -746,17 +747,17 @@ func TestPrebindPlugin(t *testing.T) {
preBindPlugin.failPreBind = test.fail
preBindPlugin.rejectPreBind = test.reject
// Create a best effort pod.
pod, err := createPausePod(testCtx.clientSet,
initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
pod, err := createPausePod(testCtx.ClientSet,
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
if err != nil {
t.Errorf("Error while creating a test pod: %v", err)
}

if test.fail || test.reject {
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err)
}
} else if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
} else if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, pod); err != nil {
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
}

@@ -765,7 +766,7 @@ func TestPrebindPlugin(t *testing.T) {
}

preBindPlugin.reset()
cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod})
}
}
@@ -801,10 +802,10 @@ func TestUnreservePlugin(t *testing.T) {
}

// Create the master and the scheduler with the test plugin set.
testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "unreserve-plugin", nil), 2,
testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestMaster(t, "unreserve-plugin", nil), 2,
scheduler.WithProfiles(prof),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
defer cleanupTest(t, testCtx)
defer testutils.CleanupTest(t, testCtx)

tests := []struct {
preBindFail bool
@@ -821,21 +822,21 @@ func TestUnreservePlugin(t *testing.T) {
preBindPlugin.failPreBind = test.preBindFail

// Create a best effort pod.
pod, err := createPausePod(testCtx.clientSet,
initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
pod, err := createPausePod(testCtx.ClientSet,
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
if err != nil {
t.Errorf("Error while creating a test pod: %v", err)
}

if test.preBindFail {
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err)
}
if unreservePlugin.numUnreserveCalled == 0 || unreservePlugin.numUnreserveCalled != preBindPlugin.numPreBindCalled {
t.Errorf("test #%v: Expected the unreserve plugin to be called %d times, was called %d times.", i, preBindPlugin.numPreBindCalled, unreservePlugin.numUnreserveCalled)
}
} else {
if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, pod); err != nil {
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
}
if unreservePlugin.numUnreserveCalled > 0 {
@@ -845,7 +846,7 @@ func TestUnreservePlugin(t *testing.T) {

unreservePlugin.reset()
preBindPlugin.reset()
cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod})
}
}
@ -856,9 +857,9 @@ type pluginInvokeEvent struct {
|
||||
|
||||
// TestBindPlugin tests invocation of bind plugins.
|
||||
func TestBindPlugin(t *testing.T) {
|
||||
testContext := initTestMaster(t, "bind-plugin", nil)
|
||||
bindPlugin1 := &BindPlugin{PluginName: "bind-plugin-1", client: testContext.clientSet}
|
||||
bindPlugin2 := &BindPlugin{PluginName: "bind-plugin-2", client: testContext.clientSet}
|
||||
testContext := testutils.InitTestMaster(t, "bind-plugin", nil)
|
||||
bindPlugin1 := &BindPlugin{PluginName: "bind-plugin-1", client: testContext.ClientSet}
|
||||
bindPlugin2 := &BindPlugin{PluginName: "bind-plugin-2", client: testContext.ClientSet}
|
||||
unreservePlugin := &UnreservePlugin{name: "mock-unreserve-plugin"}
|
||||
postBindPlugin := &PostBindPlugin{name: "mock-post-bind-plugin"}
|
||||
// Create a plugin registry for testing. Register an unreserve, a bind plugin and a postBind plugin.
|
||||
@ -896,13 +897,13 @@ func TestBindPlugin(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create the master and the scheduler with the test plugin set.
|
||||
testCtx := initTestSchedulerWithOptions(t, testContext, false, nil, time.Second,
|
||||
testCtx := testutils.InitTestSchedulerWithOptions(t, testContext, false, nil, time.Second,
|
||||
scheduler.WithProfiles(prof),
|
||||
scheduler.WithFrameworkOutOfTreeRegistry(registry))
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
|
||||
// Add a few nodes.
|
||||
_, err := createNodes(testCtx.clientSet, "test-node", nil, 2)
|
||||
_, err := createNodes(testCtx.ClientSet, "test-node", nil, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot create nodes: %v", err)
|
||||
}
|
||||
@ -953,19 +954,19 @@ func TestBindPlugin(t *testing.T) {
|
||||
postBindPlugin.pluginInvokeEventChan = pluginInvokeEventChan
|
||||
|
||||
// Create a best effort pod.
|
||||
pod, err := createPausePod(testCtx.clientSet,
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
|
||||
pod, err := createPausePod(testCtx.ClientSet,
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
|
||||
if err != nil {
|
||||
t.Errorf("Error while creating a test pod: %v", err)
|
||||
}
|
||||
|
||||
if test.expectBoundByScheduler || test.expectBoundByPlugin {
|
||||
// bind plugins skipped to bind the pod
|
||||
if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
|
||||
if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, pod); err != nil {
|
||||
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
|
||||
continue
|
||||
}
|
||||
pod, err = testCtx.clientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||
pod, err = testCtx.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("can't get pod: %v", err)
|
||||
}
|
||||
@ -998,7 +999,7 @@ func TestBindPlugin(t *testing.T) {
|
||||
}
|
||||
} else {
|
||||
// bind plugin fails to bind the pod
|
||||
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
|
||||
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
|
||||
t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err)
|
||||
}
|
||||
if postBindPlugin.numPostBindCalled > 0 {
|
||||
@ -1023,7 +1024,7 @@ func TestBindPlugin(t *testing.T) {
|
||||
bindPlugin1.reset()
|
||||
bindPlugin2.reset()
|
||||
unreservePlugin.reset()
|
||||
cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
|
||||
testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod})
|
||||
}
|
||||
}
|
||||
|
||||
@ -1059,10 +1060,10 @@ func TestPostBindPlugin(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create the master and the scheduler with the test plugin set.
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "postbind-plugin", nil), 2,
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestMaster(t, "postbind-plugin", nil), 2,
|
||||
scheduler.WithProfiles(prof),
|
||||
scheduler.WithFrameworkOutOfTreeRegistry(registry))
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
|
||||
tests := []struct {
|
||||
preBindFail bool
|
||||
@ -1080,21 +1081,21 @@ func TestPostBindPlugin(t *testing.T) {
|
||||
preBindPlugin.failPreBind = test.preBindFail
|
||||
|
||||
// Create a best effort pod.
|
||||
pod, err := createPausePod(testCtx.clientSet,
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
|
||||
pod, err := createPausePod(testCtx.ClientSet,
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
|
||||
if err != nil {
|
||||
t.Errorf("Error while creating a test pod: %v", err)
|
||||
}
|
||||
|
||||
if test.preBindFail {
|
||||
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
|
||||
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
|
||||
t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err)
|
||||
}
|
||||
if postBindPlugin.numPostBindCalled > 0 {
|
||||
t.Errorf("test #%v: Didn't expected the postbind plugin to be called %d times.", i, postBindPlugin.numPostBindCalled)
|
||||
}
|
||||
} else {
|
||||
if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
|
||||
if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, pod); err != nil {
|
||||
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
|
||||
}
|
||||
if postBindPlugin.numPostBindCalled == 0 {
|
||||
@ -1104,7 +1105,7 @@ func TestPostBindPlugin(t *testing.T) {
|
||||
|
||||
postBindPlugin.reset()
|
||||
preBindPlugin.reset()
|
||||
cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
|
||||
testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod})
|
||||
}
|
||||
}
|
||||
|
||||
@ -1115,10 +1116,10 @@ func TestPermitPlugin(t *testing.T) {
|
||||
registry, prof := initRegistryAndConfig(perPlugin)
|
||||
|
||||
// Create the master and the scheduler with the test plugin set.
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "permit-plugin", nil), 2,
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestMaster(t, "permit-plugin", nil), 2,
|
||||
scheduler.WithProfiles(prof),
|
||||
scheduler.WithFrameworkOutOfTreeRegistry(registry))
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
|
||||
tests := []struct {
|
||||
fail bool
|
||||
@ -1165,22 +1166,22 @@ func TestPermitPlugin(t *testing.T) {
|
||||
perPlugin.waitAndAllowPermit = false
|
||||
|
||||
// Create a best effort pod.
|
||||
pod, err := createPausePod(testCtx.clientSet,
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
|
||||
pod, err := createPausePod(testCtx.ClientSet,
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
|
||||
if err != nil {
|
||||
t.Errorf("Error while creating a test pod: %v", err)
|
||||
}
|
||||
if test.fail {
|
||||
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
|
||||
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
|
||||
t.Errorf("test #%v: Expected a scheduling error, but didn't get it. error: %v", i, err)
|
||||
}
|
||||
} else {
|
||||
if test.reject || test.timeout {
|
||||
if err = waitForPodUnschedulable(testCtx.clientSet, pod); err != nil {
|
||||
if err = waitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
|
||||
t.Errorf("test #%v: Didn't expect the pod to be scheduled. error: %v", i, err)
|
||||
}
|
||||
} else {
|
||||
if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
|
||||
if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, pod); err != nil {
|
||||
t.Errorf("test #%v: Expected the pod to be scheduled. error: %v", i, err)
|
||||
}
|
||||
}
|
||||
@ -1191,7 +1192,7 @@ func TestPermitPlugin(t *testing.T) {
|
||||
}
|
||||
|
||||
perPlugin.reset()
|
||||
cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
|
||||
testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod})
|
||||
}
|
||||
}
|
||||
|
||||
@ -1203,10 +1204,10 @@ func TestMultiplePermitPlugins(t *testing.T) {
|
||||
registry, prof := initRegistryAndConfig(perPlugin1, perPlugin2)
|
||||
|
||||
// Create the master and the scheduler with the test plugin set.
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "multi-permit-plugin", nil), 2,
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestMaster(t, "multi-permit-plugin", nil), 2,
|
||||
scheduler.WithProfiles(prof),
|
||||
scheduler.WithFrameworkOutOfTreeRegistry(registry))
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
|
||||
// Both permit plugins will return Wait for permitting
|
||||
perPlugin1.timeoutPermit = true
|
||||
@ -1214,8 +1215,8 @@ func TestMultiplePermitPlugins(t *testing.T) {
|
||||
|
||||
// Create a test pod.
|
||||
podName := "test-pod"
|
||||
pod, err := createPausePod(testCtx.clientSet,
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{Name: podName, Namespace: testCtx.ns.Name}))
|
||||
pod, err := createPausePod(testCtx.ClientSet,
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: podName, Namespace: testCtx.NS.Name}))
|
||||
if err != nil {
|
||||
t.Errorf("Error while creating a test pod: %v", err)
|
||||
}
|
||||
@ -1239,7 +1240,7 @@ func TestMultiplePermitPlugins(t *testing.T) {
|
||||
}
|
||||
|
||||
perPlugin2.allowAllPods()
|
||||
if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
|
||||
if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, pod); err != nil {
|
||||
t.Errorf("Expected the pod to be scheduled. error: %v", err)
|
||||
}
|
||||
|
||||
@ -1247,7 +1248,7 @@ func TestMultiplePermitPlugins(t *testing.T) {
|
||||
t.Errorf("Expected the permit plugin to be called.")
|
||||
}
|
||||
|
||||
cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
|
||||
testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod})
|
||||
}
|
||||
|
||||
// TestPermitPluginsCancelled tests whether all permit plugins are cancelled when pod is rejected.
|
||||
@ -1258,10 +1259,10 @@ func TestPermitPluginsCancelled(t *testing.T) {
|
||||
registry, prof := initRegistryAndConfig(perPlugin1, perPlugin2)
|
||||
|
||||
// Create the master and the scheduler with the test plugin set.
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "permit-plugins", nil), 2,
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestMaster(t, "permit-plugins", nil), 2,
|
||||
scheduler.WithProfiles(prof),
|
||||
scheduler.WithFrameworkOutOfTreeRegistry(registry))
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
|
||||
// Both permit plugins will return Wait for permitting
|
||||
perPlugin1.timeoutPermit = true
|
||||
@ -1269,8 +1270,8 @@ func TestPermitPluginsCancelled(t *testing.T) {
|
||||
|
||||
// Create a test pod.
|
||||
podName := "test-pod"
|
||||
pod, err := createPausePod(testCtx.clientSet,
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{Name: podName, Namespace: testCtx.ns.Name}))
|
||||
pod, err := createPausePod(testCtx.ClientSet,
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: podName, Namespace: testCtx.NS.Name}))
|
||||
if err != nil {
|
||||
t.Errorf("Error while creating a test pod: %v", err)
|
||||
}
|
||||
@ -1299,10 +1300,10 @@ func TestCoSchedulingWithPermitPlugin(t *testing.T) {
|
||||
registry, prof := initRegistryAndConfig(permitPlugin)
|
||||
|
||||
// Create the master and the scheduler with the test plugin set.
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "permit-plugin", nil), 2,
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestMaster(t, "permit-plugin", nil), 2,
|
||||
scheduler.WithProfiles(prof),
|
||||
scheduler.WithFrameworkOutOfTreeRegistry(registry))
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
|
||||
tests := []struct {
|
||||
waitReject bool
|
||||
@ -1326,29 +1327,29 @@ func TestCoSchedulingWithPermitPlugin(t *testing.T) {
|
||||
permitPlugin.waitAndAllowPermit = test.waitAllow
|
||||
|
||||
// Create two pods.
|
||||
waitingPod, err := createPausePod(testCtx.clientSet,
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{Name: "waiting-pod", Namespace: testCtx.ns.Name}))
|
||||
waitingPod, err := createPausePod(testCtx.ClientSet,
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "waiting-pod", Namespace: testCtx.NS.Name}))
|
||||
if err != nil {
|
||||
t.Errorf("Error while creating the waiting pod: %v", err)
|
||||
}
|
||||
signallingPod, err := createPausePod(testCtx.clientSet,
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{Name: "signalling-pod", Namespace: testCtx.ns.Name}))
|
||||
signallingPod, err := createPausePod(testCtx.ClientSet,
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "signalling-pod", Namespace: testCtx.NS.Name}))
|
||||
if err != nil {
|
||||
t.Errorf("Error while creating the signalling pod: %v", err)
|
||||
}
|
||||
|
||||
if test.waitReject {
|
||||
if err = waitForPodUnschedulable(testCtx.clientSet, waitingPod); err != nil {
|
||||
if err = waitForPodUnschedulable(testCtx.ClientSet, waitingPod); err != nil {
|
||||
t.Errorf("test #%v: Didn't expect the waiting pod to be scheduled. error: %v", i, err)
|
||||
}
|
||||
if err = waitForPodUnschedulable(testCtx.clientSet, signallingPod); err != nil {
|
||||
if err = waitForPodUnschedulable(testCtx.ClientSet, signallingPod); err != nil {
|
||||
t.Errorf("test #%v: Didn't expect the signalling pod to be scheduled. error: %v", i, err)
|
||||
}
|
||||
} else {
|
||||
if err = waitForPodToSchedule(testCtx.clientSet, waitingPod); err != nil {
|
||||
if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, waitingPod); err != nil {
|
||||
t.Errorf("test #%v: Expected the waiting pod to be scheduled. error: %v", i, err)
|
||||
}
|
||||
if err = waitForPodToSchedule(testCtx.clientSet, signallingPod); err != nil {
|
||||
if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, signallingPod); err != nil {
|
||||
t.Errorf("test #%v: Expected the signalling pod to be scheduled. error: %v", i, err)
|
||||
}
|
||||
}
|
||||
@ -1358,7 +1359,7 @@ func TestCoSchedulingWithPermitPlugin(t *testing.T) {
|
||||
}
|
||||
|
||||
permitPlugin.reset()
|
||||
cleanupPods(testCtx.clientSet, t, []*v1.Pod{waitingPod, signallingPod})
|
||||
testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{waitingPod, signallingPod})
|
||||
}
|
||||
}
|
||||
|
||||
@ -1383,26 +1384,26 @@ func TestFilterPlugin(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create the master and the scheduler with the test plugin set.
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "filter-plugin", nil), 2,
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestMaster(t, "filter-plugin", nil), 2,
|
||||
scheduler.WithProfiles(prof),
|
||||
scheduler.WithFrameworkOutOfTreeRegistry(registry))
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
|
||||
for _, fail := range []bool{false, true} {
|
||||
filterPlugin.failFilter = fail
|
||||
// Create a best effort pod.
|
||||
pod, err := createPausePod(testCtx.clientSet,
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
|
||||
pod, err := createPausePod(testCtx.ClientSet,
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
|
||||
if err != nil {
|
||||
t.Errorf("Error while creating a test pod: %v", err)
|
||||
}
|
||||
|
||||
if fail {
|
||||
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podUnschedulable(testCtx.clientSet, pod.Namespace, pod.Name)); err != nil {
|
||||
if err = wait.Poll(10*time.Millisecond, 30*time.Second, podUnschedulable(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
|
||||
t.Errorf("Didn't expect the pod to be scheduled.")
|
||||
}
|
||||
} else {
|
||||
if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
|
||||
if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, pod); err != nil {
|
||||
t.Errorf("Expected the pod to be scheduled. error: %v", err)
|
||||
}
|
||||
}
|
||||
@ -1412,7 +1413,7 @@ func TestFilterPlugin(t *testing.T) {
|
||||
}
|
||||
|
||||
filterPlugin.reset()
|
||||
cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
|
||||
testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod})
|
||||
}
|
||||
}
|
||||
|
||||
@ -1437,26 +1438,26 @@ func TestPreScorePlugin(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create the master and the scheduler with the test plugin set.
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "pre-score-plugin", nil), 2,
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestMaster(t, "pre-score-plugin", nil), 2,
|
||||
scheduler.WithProfiles(prof),
|
||||
scheduler.WithFrameworkOutOfTreeRegistry(registry))
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
|
||||
for _, fail := range []bool{false, true} {
|
||||
preScorePlugin.failPreScore = fail
|
||||
// Create a best effort pod.
|
||||
pod, err := createPausePod(testCtx.clientSet,
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.ns.Name}))
|
||||
pod, err := createPausePod(testCtx.ClientSet,
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "test-pod", Namespace: testCtx.NS.Name}))
|
||||
if err != nil {
|
||||
t.Errorf("Error while creating a test pod: %v", err)
|
||||
}
|
||||
|
||||
if fail {
|
||||
if err = waitForPodUnschedulable(testCtx.clientSet, pod); err != nil {
|
||||
if err = waitForPodUnschedulable(testCtx.ClientSet, pod); err != nil {
|
||||
t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
|
||||
}
|
||||
} else {
|
||||
if err = waitForPodToSchedule(testCtx.clientSet, pod); err != nil {
|
||||
if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, pod); err != nil {
|
||||
t.Errorf("Expected the pod to be scheduled. error: %v", err)
|
||||
}
|
||||
}
|
||||
@ -1466,7 +1467,7 @@ func TestPreScorePlugin(t *testing.T) {
|
||||
}
|
||||
|
||||
preScorePlugin.reset()
|
||||
cleanupPods(testCtx.clientSet, t, []*v1.Pod{pod})
|
||||
testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{pod})
|
||||
}
|
||||
}
|
||||
|
||||
@ -1477,10 +1478,10 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
|
||||
registry, prof := initRegistryAndConfig(permitPlugin)
|
||||
|
||||
// Create the master and the scheduler with the test plugin set.
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, initTestMaster(t, "preempt-with-permit-plugin", nil), 0,
|
||||
testCtx := initTestSchedulerForFrameworkTest(t, testutils.InitTestMaster(t, "preempt-with-permit-plugin", nil), 0,
|
||||
scheduler.WithProfiles(prof),
|
||||
scheduler.WithFrameworkOutOfTreeRegistry(registry))
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
|
||||
// Add one node.
|
||||
nodeRes := &v1.ResourceList{
|
||||
@ -1488,7 +1489,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
|
||||
}
|
||||
_, err := createNodes(testCtx.clientSet, "test-node", nodeRes, 1)
|
||||
_, err := createNodes(testCtx.ClientSet, "test-node", nodeRes, 1)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot create nodes: %v", err)
|
||||
}
|
||||
@ -1507,9 +1508,9 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create two pods.
|
||||
waitingPod := initPausePod(testCtx.clientSet, &pausePodConfig{Name: "waiting-pod", Namespace: testCtx.ns.Name, Priority: &lowPriority, Resources: &resourceRequest})
|
||||
waitingPod := initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "waiting-pod", Namespace: testCtx.NS.Name, Priority: &lowPriority, Resources: &resourceRequest})
|
||||
waitingPod.Spec.TerminationGracePeriodSeconds = new(int64)
|
||||
waitingPod, err = createPausePod(testCtx.clientSet, waitingPod)
|
||||
waitingPod, err = createPausePod(testCtx.ClientSet, waitingPod)
|
||||
if err != nil {
|
||||
t.Errorf("Error while creating the waiting pod: %v", err)
|
||||
}
|
||||
@ -1520,17 +1521,17 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
|
||||
return w, nil
|
||||
})
|
||||
|
||||
preemptorPod, err := createPausePod(testCtx.clientSet,
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{Name: "preemptor-pod", Namespace: testCtx.ns.Name, Priority: &highPriority, Resources: &resourceRequest}))
|
||||
preemptorPod, err := createPausePod(testCtx.ClientSet,
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "preemptor-pod", Namespace: testCtx.NS.Name, Priority: &highPriority, Resources: &resourceRequest}))
|
||||
if err != nil {
|
||||
t.Errorf("Error while creating the preemptor pod: %v", err)
|
||||
}
|
||||
|
||||
if err = waitForPodToSchedule(testCtx.clientSet, preemptorPod); err != nil {
|
||||
if err = testutils.WaitForPodToSchedule(testCtx.ClientSet, preemptorPod); err != nil {
|
||||
t.Errorf("Expected the preemptor pod to be scheduled. error: %v", err)
|
||||
}
|
||||
|
||||
if _, err := getPod(testCtx.clientSet, waitingPod.Name, waitingPod.Namespace); err == nil {
|
||||
if _, err := getPod(testCtx.ClientSet, waitingPod.Name, waitingPod.Namespace); err == nil {
|
||||
t.Error("Expected the waiting pod to get preempted and deleted")
|
||||
}
|
||||
|
||||
@ -1539,13 +1540,13 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
|
||||
}
|
||||
|
||||
permitPlugin.reset()
|
||||
cleanupPods(testCtx.clientSet, t, []*v1.Pod{waitingPod, preemptorPod})
|
||||
testutils.CleanupPods(testCtx.ClientSet, t, []*v1.Pod{waitingPod, preemptorPod})
|
||||
}
|
||||
|
||||
func initTestSchedulerForFrameworkTest(t *testing.T, testCtx *testContext, nodeCount int, opts ...scheduler.Option) *testContext {
|
||||
c := initTestSchedulerWithOptions(t, testCtx, false, nil, time.Second, opts...)
|
||||
func initTestSchedulerForFrameworkTest(t *testing.T, testCtx *testutils.TestContext, nodeCount int, opts ...scheduler.Option) *testutils.TestContext {
|
||||
c := testutils.InitTestSchedulerWithOptions(t, testCtx, false, nil, time.Second, opts...)
|
||||
if nodeCount > 0 {
|
||||
_, err := createNodes(c.clientSet, "test-node", nil, nodeCount)
|
||||
_, err := createNodes(c.ClientSet, "test-node", nil, nodeCount)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot create nodes: %v", err)
|
||||
}
|
||||
|
@ -19,21 +19,22 @@ package scheduler
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
featuregatetesting "k8s.io/component-base/featuregate/testing"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
testutils "k8s.io/kubernetes/test/integration/util"
|
||||
)
|
||||
|
||||
func TestNodeResourceLimits(t *testing.T) {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ResourceLimitsPriorityFunction, true)()
|
||||
|
||||
testCtx := initTest(t, "node-resource-limits")
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
|
||||
// Add one node
|
||||
expectedNode, err := createNode(testCtx.clientSet, "test-node1", &v1.ResourceList{
|
||||
expectedNode, err := createNode(testCtx.ClientSet, "test-node1", &v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(2000, resource.DecimalSI),
|
||||
@ -43,7 +44,7 @@ func TestNodeResourceLimits(t *testing.T) {
|
||||
}
|
||||
|
||||
// Add another node with less resource
|
||||
_, err = createNode(testCtx.clientSet, "test-node2", &v1.ResourceList{
|
||||
_, err = createNode(testCtx.ClientSet, "test-node2", &v1.ResourceList{
|
||||
v1.ResourcePods: *resource.NewQuantity(32, resource.DecimalSI),
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(1000, resource.DecimalSI),
|
||||
@ -53,9 +54,9 @@ func TestNodeResourceLimits(t *testing.T) {
|
||||
}
|
||||
|
||||
podName := "pod-with-resource-limits"
|
||||
pod, err := runPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
pod, err := runPausePod(testCtx.ClientSet, initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: podName,
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI)},
|
||||
|
@ -30,7 +30,8 @@ import (
|
||||
featuregatetesting "k8s.io/component-base/featuregate/testing"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
st "k8s.io/kubernetes/pkg/scheduler/testing"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
testutils "k8s.io/kubernetes/test/integration/util"
|
||||
"k8s.io/kubernetes/test/utils"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
@ -42,9 +43,9 @@ const pollInterval = 100 * time.Millisecond
|
||||
// anti-affinity predicate functions works correctly.
|
||||
func TestInterPodAffinity(t *testing.T) {
|
||||
testCtx := initTest(t, "inter-pod-affinity")
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
// Add a few nodes.
|
||||
nodes, err := createNodes(testCtx.clientSet, "testnode", nil, 2)
|
||||
nodes, err := createNodes(testCtx.ClientSet, "testnode", nil, 2)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot create nodes: %v", err)
|
||||
}
|
||||
@ -54,15 +55,15 @@ func TestInterPodAffinity(t *testing.T) {
|
||||
"zone": "z11",
|
||||
}
|
||||
for _, node := range nodes {
|
||||
if err = testutils.AddLabelsToNode(testCtx.clientSet, node.Name, labels1); err != nil {
|
||||
if err = utils.AddLabelsToNode(testCtx.ClientSet, node.Name, labels1); err != nil {
|
||||
t.Fatalf("Cannot add labels to node: %v", err)
|
||||
}
|
||||
if err = waitForNodeLabels(testCtx.clientSet, node.Name, labels1); err != nil {
|
||||
if err = waitForNodeLabels(testCtx.ClientSet, node.Name, labels1); err != nil {
|
||||
t.Fatalf("Adding labels to node didn't succeed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
cs := testCtx.clientSet
|
||||
cs := testCtx.ClientSet
|
||||
podLabel := map[string]string{"service": "securityscan"}
|
||||
podLabel2 := map[string]string{"security": "S1"}
|
||||
|
||||
@ -822,18 +823,18 @@ func TestInterPodAffinity(t *testing.T) {
|
||||
if pod.Namespace != "" {
|
||||
nsName = pod.Namespace
|
||||
} else {
|
||||
nsName = testCtx.ns.Name
|
||||
nsName = testCtx.NS.Name
|
||||
}
|
||||
createdPod, err := cs.CoreV1().Pods(nsName).Create(context.TODO(), pod, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test)
|
||||
}
|
||||
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduled(cs, createdPod.Namespace, createdPod.Name))
|
||||
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
|
||||
if err != nil {
|
||||
t.Errorf("Test Failed: error, %v, while waiting for pod during test, %v", err, test)
|
||||
}
|
||||
}
|
||||
testPod, err := cs.CoreV1().Pods(testCtx.ns.Name).Create(context.TODO(), test.pod, metav1.CreateOptions{})
|
||||
testPod, err := cs.CoreV1().Pods(testCtx.NS.Name).Create(context.TODO(), test.pod, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
if !(test.errorType == "invalidPod" && apierrors.IsInvalid(err)) {
|
||||
t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test)
|
||||
@ -841,7 +842,7 @@ func TestInterPodAffinity(t *testing.T) {
|
||||
}
|
||||
|
||||
if test.fits {
|
||||
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduled(cs, testPod.Namespace, testPod.Name))
|
||||
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, testPod.Namespace, testPod.Name))
|
||||
} else {
|
||||
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
|
||||
}
|
||||
@ -849,11 +850,11 @@ func TestInterPodAffinity(t *testing.T) {
|
||||
t.Errorf("Test Failed: %v, err %v, test.fits %v", test.test, err, test.fits)
|
||||
}
|
||||
|
||||
err = cs.CoreV1().Pods(testCtx.ns.Name).Delete(context.TODO(), test.pod.Name, metav1.NewDeleteOptions(0))
|
||||
err = cs.CoreV1().Pods(testCtx.NS.Name).Delete(context.TODO(), test.pod.Name, metav1.NewDeleteOptions(0))
|
||||
if err != nil {
|
||||
t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
|
||||
}
|
||||
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podDeleted(cs, testCtx.ns.Name, test.pod.Name))
|
||||
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodDeleted(cs, testCtx.NS.Name, test.pod.Name))
|
||||
if err != nil {
|
||||
t.Errorf("Test Failed: error, %v, while waiting for pod to get deleted, %v", err, test.test)
|
||||
}
|
||||
@ -862,13 +863,13 @@ func TestInterPodAffinity(t *testing.T) {
|
||||
if pod.Namespace != "" {
|
||||
nsName = pod.Namespace
|
||||
} else {
|
||||
nsName = testCtx.ns.Name
|
||||
nsName = testCtx.NS.Name
|
||||
}
|
||||
err = cs.CoreV1().Pods(nsName).Delete(context.TODO(), pod.Name, metav1.NewDeleteOptions(0))
|
||||
if err != nil {
|
||||
t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
|
||||
}
|
||||
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podDeleted(cs, nsName, pod.Name))
|
||||
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodDeleted(cs, nsName, pod.Name))
|
||||
if err != nil {
|
||||
t.Errorf("Test Failed: error, %v, while waiting for pod to get deleted, %v", err, test.test)
|
||||
}
|
||||
@ -881,9 +882,9 @@ func TestEvenPodsSpreadPredicate(t *testing.T) {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EvenPodsSpread, true)()
|
||||
|
||||
testCtx := initTest(t, "eps-predicate")
|
||||
cs := testCtx.clientSet
|
||||
ns := testCtx.ns.Name
|
||||
defer cleanupTest(t, testCtx)
|
||||
cs := testCtx.ClientSet
|
||||
ns := testCtx.NS.Name
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
// Add 4 nodes.
|
||||
nodes, err := createNodes(cs, "node", nil, 4)
|
||||
if err != nil {
|
||||
@ -895,7 +896,7 @@ func TestEvenPodsSpreadPredicate(t *testing.T) {
|
||||
"zone": fmt.Sprintf("zone-%d", i/2),
|
||||
"node": node.Name,
|
||||
}
|
||||
if err = testutils.AddLabelsToNode(cs, node.Name, labels); err != nil {
|
||||
if err = utils.AddLabelsToNode(cs, node.Name, labels); err != nil {
|
||||
t.Fatalf("Cannot add labels to node: %v", err)
|
||||
}
|
||||
if err = waitForNodeLabels(cs, node.Name, labels); err != nil {
|
||||
@ -1003,13 +1004,13 @@ func TestEvenPodsSpreadPredicate(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
allPods := append(tt.existingPods, tt.incomingPod)
|
||||
defer cleanupPods(cs, t, allPods)
|
||||
defer testutils.CleanupPods(cs, t, allPods)
|
||||
for _, pod := range tt.existingPods {
|
||||
createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Test Failed: error while creating pod during test: %v", err)
|
||||
}
|
||||
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduled(cs, createdPod.Namespace, createdPod.Name))
|
||||
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
|
||||
if err != nil {
|
||||
t.Errorf("Test Failed: error while waiting for pod during test: %v", err)
|
||||
}
|
||||
|
@ -45,7 +45,8 @@ import (
|
||||
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
"k8s.io/kubernetes/plugin/pkg/admission/priority"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
testutils "k8s.io/kubernetes/test/integration/util"
|
||||
utils "k8s.io/kubernetes/test/utils"
|
||||
)
|
||||
|
||||
var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(300)
|
||||
@ -143,14 +144,14 @@ func TestPreemption(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
testCtx := initTestSchedulerWithOptions(t,
|
||||
initTestMaster(t, "preemptiom", nil),
|
||||
testCtx := testutils.InitTestSchedulerWithOptions(t,
|
||||
testutils.InitTestMaster(t, "preemptiom", nil),
|
||||
false, nil, time.Second,
|
||||
scheduler.WithProfiles(prof),
|
||||
scheduler.WithFrameworkOutOfTreeRegistry(registry))
|
||||
|
||||
defer cleanupTest(t, testCtx)
|
||||
cs := testCtx.clientSet
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
cs := testCtx.ClientSet
|
||||
|
||||
defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
|
||||
@ -170,9 +171,9 @@ func TestPreemption(t *testing.T) {
|
||||
description: "basic pod preemption",
|
||||
initTokens: maxTokens,
|
||||
existingPods: []*v1.Pod{
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "victim-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &lowPriority,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
|
||||
@ -182,7 +183,7 @@ func TestPreemption(t *testing.T) {
|
||||
},
|
||||
pod: initPausePod(cs, &pausePodConfig{
|
||||
Name: "preemptor-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &highPriority,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
|
||||
@ -195,9 +196,9 @@ func TestPreemption(t *testing.T) {
|
||||
description: "basic pod preemption with filter",
|
||||
initTokens: 1,
|
||||
existingPods: []*v1.Pod{
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "victim-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &lowPriority,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
|
||||
@ -207,7 +208,7 @@ func TestPreemption(t *testing.T) {
|
||||
},
|
||||
pod: initPausePod(cs, &pausePodConfig{
|
||||
Name: "preemptor-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &highPriority,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
|
||||
@ -222,9 +223,9 @@ func TestPreemption(t *testing.T) {
|
||||
initTokens: 1,
|
||||
unresolvable: true,
|
||||
existingPods: []*v1.Pod{
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "victim-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &lowPriority,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
|
||||
@ -234,7 +235,7 @@ func TestPreemption(t *testing.T) {
|
||||
},
|
||||
pod: initPausePod(cs, &pausePodConfig{
|
||||
Name: "preemptor-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &highPriority,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
|
||||
@ -248,13 +249,13 @@ func TestPreemption(t *testing.T) {
|
||||
initTokens: maxTokens,
|
||||
existingPods: []*v1.Pod{
|
||||
initPausePod(cs, &pausePodConfig{
|
||||
Name: "pod-0", Namespace: testCtx.ns.Name,
|
||||
Name: "pod-0", Namespace: testCtx.NS.Name,
|
||||
Priority: &mediumPriority,
|
||||
Labels: map[string]string{"pod": "p0"},
|
||||
Resources: defaultPodRes,
|
||||
}),
|
||||
initPausePod(cs, &pausePodConfig{
|
||||
Name: "pod-1", Namespace: testCtx.ns.Name,
|
||||
Name: "pod-1", Namespace: testCtx.NS.Name,
|
||||
Priority: &lowPriority,
|
||||
Labels: map[string]string{"pod": "p1"},
|
||||
Resources: defaultPodRes,
|
||||
@ -281,7 +282,7 @@ func TestPreemption(t *testing.T) {
|
||||
// A higher priority pod with anti-affinity.
|
||||
pod: initPausePod(cs, &pausePodConfig{
|
||||
Name: "preemptor-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &highPriority,
|
||||
Labels: map[string]string{"pod": "preemptor"},
|
||||
Resources: defaultPodRes,
|
||||
@ -312,13 +313,13 @@ func TestPreemption(t *testing.T) {
|
||||
initTokens: maxTokens,
|
||||
existingPods: []*v1.Pod{
|
||||
initPausePod(cs, &pausePodConfig{
|
||||
Name: "pod-0", Namespace: testCtx.ns.Name,
|
||||
Name: "pod-0", Namespace: testCtx.NS.Name,
|
||||
Priority: &mediumPriority,
|
||||
Labels: map[string]string{"pod": "p0"},
|
||||
Resources: defaultPodRes,
|
||||
}),
|
||||
initPausePod(cs, &pausePodConfig{
|
||||
Name: "pod-1", Namespace: testCtx.ns.Name,
|
||||
Name: "pod-1", Namespace: testCtx.NS.Name,
|
||||
Priority: &highPriority,
|
||||
Labels: map[string]string{"pod": "p1"},
|
||||
Resources: defaultPodRes,
|
||||
@ -345,7 +346,7 @@ func TestPreemption(t *testing.T) {
|
||||
// A higher priority pod with anti-affinity.
|
||||
pod: initPausePod(cs, &pausePodConfig{
|
||||
Name: "preemptor-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &highPriority,
|
||||
Labels: map[string]string{"pod": "preemptor"},
|
||||
Resources: defaultPodRes,
|
||||
@ -378,15 +379,15 @@ func TestPreemption(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
}
node, err := createNode(testCtx.clientSet, "node1", nodeRes)
node, err := createNode(testCtx.ClientSet, "node1", nodeRes)
if err != nil {
t.Fatalf("Error creating nodes: %v", err)
}
nodeLabels := map[string]string{"node": node.Name}
if err = testutils.AddLabelsToNode(testCtx.clientSet, node.Name, nodeLabels); err != nil {
if err = utils.AddLabelsToNode(testCtx.ClientSet, node.Name, nodeLabels); err != nil {
t.Fatalf("Cannot add labels to node: %v", err)
}
if err = waitForNodeLabels(testCtx.clientSet, node.Name, nodeLabels); err != nil {
if err = waitForNodeLabels(testCtx.ClientSet, node.Name, nodeLabels); err != nil {
t.Fatalf("Adding labels to node didn't succeed: %v", err)
}

@ -428,7 +429,7 @@ func TestPreemption(t *testing.T) {

// Cleanup
pods = append(pods, preemptor)
cleanupPods(cs, t, pods)
testutils.CleanupPods(cs, t, pods)
}
}

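
To orient readers, the change above is representative of the whole commit: the package-local fixture and cleanup helpers give way to exported ones in test/integration/util. Below is a minimal sketch of the resulting pattern, using only helpers and TestContext fields that appear in this diff (initTest and the pod builders stay local to this package); the wrapper function itself is illustrative, not repository code.

package scheduler

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	testutils "k8s.io/kubernetes/test/integration/util"
)

// runPreemptionCase is a sketch: it shows the init/cleanup shape the tests in
// this file converge on after the refactor.
func runPreemptionCase(t *testing.T, existingPods []*v1.Pod, preemptor *v1.Pod) {
	testCtx := initTest(t, "preemption-example") // package-local fixture helper, unchanged by this diff
	defer testutils.CleanupTest(t, testCtx)      // was cleanupTest
	cs := testCtx.ClientSet                      // exported field, was testCtx.clientSet

	// ... create existingPods and the preemptor with the local pod helpers ...

	// Cleanup mirrors the hunk above: every pod the case created is removed.
	pods := append(existingPods, preemptor)
	testutils.CleanupPods(cs, t, pods) // was cleanupPods
}
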
@ -436,8 +437,8 @@ func TestPreemption(t *testing.T) {
func TestDisablePreemption(t *testing.T) {
// Initialize scheduler, and disable preemption.
testCtx := initTestDisablePreemption(t, "disable-preemption")
defer cleanupTest(t, testCtx)
cs := testCtx.clientSet
defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet

tests := []struct {
description string
@ -447,9 +448,9 @@ func TestDisablePreemption(t *testing.T) {
|
||||
{
|
||||
description: "pod preemption will not happen",
|
||||
existingPods: []*v1.Pod{
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "victim-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &lowPriority,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
|
||||
@ -459,7 +460,7 @@ func TestDisablePreemption(t *testing.T) {
|
||||
},
|
||||
pod: initPausePod(cs, &pausePodConfig{
|
||||
Name: "preemptor-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &highPriority,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
|
||||
@ -475,7 +476,7 @@ func TestDisablePreemption(t *testing.T) {
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
|
||||
}
|
||||
_, err := createNode(testCtx.clientSet, "node1", nodeRes)
|
||||
_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating nodes: %v", err)
|
||||
}
|
||||
@ -508,27 +509,27 @@ func TestDisablePreemption(t *testing.T) {

// Cleanup
pods = append(pods, preemptor)
cleanupPods(cs, t, pods)
testutils.CleanupPods(cs, t, pods)
}
}

// This test verifies that system critical priorities are created automatically and resolved properly.
func TestPodPriorityResolution(t *testing.T) {
admission := priority.NewPlugin()
testCtx := initTestScheduler(t, initTestMaster(t, "preemption", admission), true, nil)
defer cleanupTest(t, testCtx)
cs := testCtx.clientSet
testCtx := testutils.InitTestScheduler(t, testutils.InitTestMaster(t, "preemption", admission), true, nil)
defer testutils.CleanupTest(t, testCtx)
cs := testCtx.ClientSet

// Build clientset and informers for controllers.
externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{
QPS: -1,
Host: testCtx.httpServer.URL,
Host: testCtx.HTTPServer.URL,
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second)
admission.SetExternalKubeClientSet(externalClientset)
admission.SetExternalKubeInformerFactory(externalInformers)
externalInformers.Start(testCtx.ctx.Done())
externalInformers.WaitForCacheSync(testCtx.ctx.Done())
externalInformers.Start(testCtx.Ctx.Done())
externalInformers.WaitForCacheSync(testCtx.Ctx.Done())

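
The TestPodPriorityResolution hunk above also shows the renamed TestContext fields (HTTPServer, Ctx) that admission-plugin wiring now goes through. A condensed sketch of that wiring follows, assuming only the field names and setter calls visible in the diff; the interface and helper function are illustrative additions, not repository code.

package scheduler

import (
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	testutils "k8s.io/kubernetes/test/integration/util"
)

// externalAdmission is the subset of the admission-plugin surface these tests
// rely on; both setters are called in the hunk above.
type externalAdmission interface {
	SetExternalKubeClientSet(kubernetes.Interface)
	SetExternalKubeInformerFactory(informers.SharedInformerFactory)
}

func wireAdmission(testCtx *testutils.TestContext, plugin externalAdmission) informers.SharedInformerFactory {
	// Client against the test API server exposed through the exported
	// HTTPServer field (was testCtx.httpServer).
	externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{
		QPS:           -1,
		Host:          testCtx.HTTPServer.URL,
		ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}},
	})
	externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second)
	plugin.SetExternalKubeClientSet(externalClientset)
	plugin.SetExternalKubeInformerFactory(externalInformers)

	// Informers run on the fixture-owned context (was testCtx.ctx).
	externalInformers.Start(testCtx.Ctx.Done())
	externalInformers.WaitForCacheSync(testCtx.Ctx.Done())
	return externalInformers
}
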
tests := []struct {
|
||||
Name string
|
||||
@ -576,7 +577,7 @@ func TestPodPriorityResolution(t *testing.T) {
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
|
||||
}
|
||||
_, err := createNode(testCtx.clientSet, "node1", nodeRes)
|
||||
_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating nodes: %v", err)
|
||||
}
|
||||
@ -605,18 +606,18 @@ func TestPodPriorityResolution(t *testing.T) {
}
})
}
cleanupPods(cs, t, pods)
cleanupNodes(cs, t)
testutils.CleanupPods(cs, t, pods)
testutils.CleanupNodes(cs, t)
}

func mkPriorityPodWithGrace(tc *testContext, name string, priority int32, grace int64) *v1.Pod {
func mkPriorityPodWithGrace(tc *testutils.TestContext, name string, priority int32, grace int64) *v1.Pod {
defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100, resource.DecimalSI)},
}
pod := initPausePod(tc.clientSet, &pausePodConfig{
pod := initPausePod(tc.ClientSet, &pausePodConfig{
Name: name,
Namespace: tc.ns.Name,
Namespace: tc.NS.Name,
Priority: &priority,
Labels: map[string]string{"pod": name},
Resources: defaultPodRes,
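
With mkPriorityPodWithGrace now taking *testutils.TestContext, helper code can be written against the exported type directly. A minimal sketch follows, assuming the exported fields and wait helper shown in this diff; the inline pod spec (including the pause image tag) is purely illustrative, since the repository builds pods through initPausePod and imageutils instead.

package scheduler

import (
	"context"
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	testutils "k8s.io/kubernetes/test/integration/util"
)

// createAndWait is a sketch of a helper built on the exported TestContext.
func createAndWait(t *testing.T, testCtx *testutils.TestContext, name string) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: testCtx.NS.Name}, // was testCtx.ns
		Spec: v1.PodSpec{
			// Illustrative image; the tests use imageutils.GetPauseImageName().
			Containers: []v1.Container{{Name: "pause", Image: "k8s.gcr.io/pause:3.2"}},
		},
	}
	created, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
	if err != nil {
		t.Fatalf("failed to create pod: %v", err)
	}
	if err := testutils.WaitForPodToSchedule(testCtx.ClientSet, created); err != nil {
		t.Fatalf("pod %s/%s did not get scheduled: %v", created.Namespace, created.Name, err)
	}
	return created
}
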
@ -633,8 +634,8 @@ func mkPriorityPodWithGrace(tc *testContext, name string, priority int32, grace
|
||||
func TestPreemptionStarvation(t *testing.T) {
|
||||
// Initialize scheduler.
|
||||
testCtx := initTest(t, "preemption")
|
||||
defer cleanupTest(t, testCtx)
|
||||
cs := testCtx.clientSet
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
cs := testCtx.ClientSet
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
@ -651,7 +652,7 @@ func TestPreemptionStarvation(t *testing.T) {
|
||||
numExpectedPending: 5,
|
||||
preemptor: initPausePod(cs, &pausePodConfig{
|
||||
Name: "preemptor-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &highPriority,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
|
||||
@ -667,7 +668,7 @@ func TestPreemptionStarvation(t *testing.T) {
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
|
||||
}
|
||||
_, err := createNode(testCtx.clientSet, "node1", nodeRes)
|
||||
_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating nodes: %v", err)
|
||||
}
|
||||
@ -685,7 +686,7 @@ func TestPreemptionStarvation(t *testing.T) {
|
||||
}
|
||||
// make sure that runningPods are all scheduled.
|
||||
for _, p := range runningPods {
|
||||
if err := waitForPodToSchedule(cs, p); err != nil {
|
||||
if err := testutils.WaitForPodToSchedule(cs, p); err != nil {
|
||||
t.Fatalf("Pod %v/%v didn't get scheduled: %v", p.Namespace, p.Name, err)
|
||||
}
|
||||
}
|
||||
@ -713,7 +714,7 @@ func TestPreemptionStarvation(t *testing.T) {
|
||||
t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.description, preemptor.Namespace, preemptor.Name, err)
|
||||
}
|
||||
// Make sure that preemptor is scheduled after preemptions.
|
||||
if err := waitForPodToScheduleWithTimeout(cs, preemptor, 60*time.Second); err != nil {
|
||||
if err := testutils.WaitForPodToScheduleWithTimeout(cs, preemptor, 60*time.Second); err != nil {
|
||||
t.Errorf("Preemptor pod %v didn't get scheduled: %v", preemptor.Name, err)
|
||||
}
|
||||
// Cleanup
|
||||
@ -721,7 +722,7 @@ func TestPreemptionStarvation(t *testing.T) {
|
||||
allPods := pendingPods
|
||||
allPods = append(allPods, runningPods...)
|
||||
allPods = append(allPods, preemptor)
|
||||
cleanupPods(cs, t, allPods)
|
||||
testutils.CleanupPods(cs, t, allPods)
|
||||
}
|
||||
}
|
||||
|
||||
@ -730,8 +731,8 @@ func TestPreemptionStarvation(t *testing.T) {
|
||||
func TestPreemptionRaces(t *testing.T) {
|
||||
// Initialize scheduler.
|
||||
testCtx := initTest(t, "preemption-race")
|
||||
defer cleanupTest(t, testCtx)
|
||||
cs := testCtx.clientSet
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
cs := testCtx.ClientSet
|
||||
|
||||
tests := []struct {
|
||||
description string
|
||||
@ -750,7 +751,7 @@ func TestPreemptionRaces(t *testing.T) {
|
||||
numRepetitions: 10,
|
||||
preemptor: initPausePod(cs, &pausePodConfig{
|
||||
Name: "preemptor-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &highPriority,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(4900, resource.DecimalSI),
|
||||
@ -766,7 +767,7 @@ func TestPreemptionRaces(t *testing.T) {
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(5000, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(5000, resource.DecimalSI),
|
||||
}
|
||||
_, err := createNode(testCtx.clientSet, "node1", nodeRes)
|
||||
_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating nodes: %v", err)
|
||||
}
|
||||
@ -787,7 +788,7 @@ func TestPreemptionRaces(t *testing.T) {
|
||||
}
|
||||
// make sure that initial Pods are all scheduled.
|
||||
for _, p := range initialPods {
|
||||
if err := waitForPodToSchedule(cs, p); err != nil {
|
||||
if err := testutils.WaitForPodToSchedule(cs, p); err != nil {
|
||||
t.Fatalf("Pod %v/%v didn't get scheduled: %v", p.Namespace, p.Name, err)
|
||||
}
|
||||
}
|
||||
@ -810,7 +811,7 @@ func TestPreemptionRaces(t *testing.T) {
|
||||
t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.description, preemptor.Namespace, preemptor.Name, err)
|
||||
}
|
||||
// Make sure that preemptor is scheduled after preemptions.
|
||||
if err := waitForPodToScheduleWithTimeout(cs, preemptor, 60*time.Second); err != nil {
|
||||
if err := testutils.WaitForPodToScheduleWithTimeout(cs, preemptor, 60*time.Second); err != nil {
|
||||
t.Errorf("Preemptor pod %v didn't get scheduled: %v", preemptor.Name, err)
|
||||
}
|
||||
|
||||
@ -833,7 +834,7 @@ func TestPreemptionRaces(t *testing.T) {
|
||||
allPods := additionalPods
|
||||
allPods = append(allPods, initialPods...)
|
||||
allPods = append(allPods, preemptor)
|
||||
cleanupPods(cs, t, allPods)
|
||||
testutils.CleanupPods(cs, t, allPods)
|
||||
}
|
||||
}
|
||||
}
@ -851,11 +852,11 @@ func TestPreemptionRaces(t *testing.T) {
func TestNominatedNodeCleanUp(t *testing.T) {
// Initialize scheduler.
testCtx := initTest(t, "preemption")
defer cleanupTest(t, testCtx)
defer testutils.CleanupTest(t, testCtx)

cs := testCtx.clientSet
cs := testCtx.ClientSet

defer cleanupPodsInNamespace(cs, t, testCtx.ns.Name)
defer cleanupPodsInNamespace(cs, t, testCtx.NS.Name)

// Create a node with some resources and a label.
nodeRes := &v1.ResourceList{
@ -863,7 +864,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(500, resource.DecimalSI),
}
_, err := createNode(testCtx.clientSet, "node1", nodeRes)
_, err := createNode(testCtx.ClientSet, "node1", nodeRes)
if err != nil {
t.Fatalf("Error creating nodes: %v", err)
}
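
The hunks around this test replace the package-local wait helpers with their exported counterparts. A short sketch of how the two are typically combined; the wrapper is illustrative, while the helper names and signatures follow the diff.

package scheduler

import (
	"testing"
	"time"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	testutils "k8s.io/kubernetes/test/integration/util"
)

func expectScheduled(t *testing.T, cs clientset.Interface, pods []*v1.Pod, preemptor *v1.Pod) {
	// Existing pods should schedule promptly.
	for _, p := range pods {
		if err := testutils.WaitForPodToSchedule(cs, p); err != nil { // was waitForPodToSchedule
			t.Fatalf("pod %v/%v didn't get scheduled: %v", p.Namespace, p.Name, err)
		}
	}
	// The preemptor may need preemption to complete first, so give it a longer window.
	if err := testutils.WaitForPodToScheduleWithTimeout(cs, preemptor, 60*time.Second); err != nil {
		t.Errorf("preemptor pod %v didn't get scheduled: %v", preemptor.Name, err)
	}
}
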
@ -878,14 +879,14 @@ func TestNominatedNodeCleanUp(t *testing.T) {
|
||||
}
|
||||
// make sure that the pods are all scheduled.
|
||||
for _, p := range lowPriPods {
|
||||
if err := waitForPodToSchedule(cs, p); err != nil {
|
||||
if err := testutils.WaitForPodToSchedule(cs, p); err != nil {
|
||||
t.Fatalf("Pod %v/%v didn't get scheduled: %v", p.Namespace, p.Name, err)
|
||||
}
|
||||
}
|
||||
// Step 2. Create a medium priority pod.
|
||||
podConf := initPausePod(cs, &pausePodConfig{
|
||||
Name: "medium-priority",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &mediumPriority,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI),
|
||||
@ -903,7 +904,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
|
||||
// Step 4. Create a high priority pod.
|
||||
podConf = initPausePod(cs, &pausePodConfig{
|
||||
Name: "high-priority",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &highPriority,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
|
||||
@ -963,8 +964,8 @@ func addPodConditionReady(pod *v1.Pod) {
|
||||
func TestPDBInPreemption(t *testing.T) {
|
||||
// Initialize scheduler.
|
||||
testCtx := initTest(t, "preemption-pdb")
|
||||
defer cleanupTest(t, testCtx)
|
||||
cs := testCtx.clientSet
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
cs := testCtx.ClientSet
|
||||
|
||||
initDisruptionController(t, testCtx)
|
||||
|
||||
@ -996,34 +997,34 @@ func TestPDBInPreemption(t *testing.T) {
|
||||
description: "A non-PDB violating pod is preempted despite its higher priority",
|
||||
nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
|
||||
pdbs: []*policy.PodDisruptionBudget{
|
||||
mkMinAvailablePDB("pdb-1", testCtx.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
|
||||
mkMinAvailablePDB("pdb-1", testCtx.NS.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
|
||||
},
|
||||
pdbPodNum: []int32{2},
|
||||
existingPods: []*v1.Pod{
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "low-pod1",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &lowPriority,
|
||||
Resources: defaultPodRes,
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
}),
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "low-pod2",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &lowPriority,
|
||||
Resources: defaultPodRes,
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
}),
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "mid-pod3",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &mediumPriority,
|
||||
Resources: defaultPodRes,
|
||||
}),
|
||||
},
|
||||
pod: initPausePod(cs, &pausePodConfig{
|
||||
Name: "preemptor-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &highPriority,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(300, resource.DecimalSI),
|
||||
@ -1039,21 +1040,21 @@ func TestPDBInPreemption(t *testing.T) {
|
||||
{name: "node-2", res: defaultNodeRes},
|
||||
},
|
||||
pdbs: []*policy.PodDisruptionBudget{
|
||||
mkMinAvailablePDB("pdb-1", testCtx.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
|
||||
mkMinAvailablePDB("pdb-1", testCtx.NS.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
|
||||
},
|
||||
pdbPodNum: []int32{1},
|
||||
existingPods: []*v1.Pod{
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "low-pod1",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &lowPriority,
|
||||
Resources: defaultPodRes,
|
||||
NodeName: "node-1",
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
}),
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "mid-pod2",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &mediumPriority,
|
||||
NodeName: "node-2",
|
||||
Resources: defaultPodRes,
|
||||
@ -1061,7 +1062,7 @@ func TestPDBInPreemption(t *testing.T) {
|
||||
},
|
||||
pod: initPausePod(cs, &pausePodConfig{
|
||||
Name: "preemptor-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &highPriority,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
|
||||
@ -1078,61 +1079,61 @@ func TestPDBInPreemption(t *testing.T) {
|
||||
{name: "node-3", res: defaultNodeRes},
|
||||
},
|
||||
pdbs: []*policy.PodDisruptionBudget{
|
||||
mkMinAvailablePDB("pdb-1", testCtx.ns.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo1": "bar"}),
|
||||
mkMinAvailablePDB("pdb-2", testCtx.ns.Name, types.UID("pdb-2-uid"), 2, map[string]string{"foo2": "bar"}),
|
||||
mkMinAvailablePDB("pdb-1", testCtx.NS.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo1": "bar"}),
|
||||
mkMinAvailablePDB("pdb-2", testCtx.NS.Name, types.UID("pdb-2-uid"), 2, map[string]string{"foo2": "bar"}),
|
||||
},
|
||||
pdbPodNum: []int32{1, 5},
|
||||
existingPods: []*v1.Pod{
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "low-pod1",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &lowPriority,
|
||||
Resources: defaultPodRes,
|
||||
NodeName: "node-1",
|
||||
Labels: map[string]string{"foo1": "bar"},
|
||||
}),
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "mid-pod1",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &mediumPriority,
|
||||
Resources: defaultPodRes,
|
||||
NodeName: "node-1",
|
||||
}),
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "low-pod2",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &lowPriority,
|
||||
Resources: defaultPodRes,
|
||||
NodeName: "node-2",
|
||||
Labels: map[string]string{"foo2": "bar"},
|
||||
}),
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "mid-pod2",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &mediumPriority,
|
||||
Resources: defaultPodRes,
|
||||
NodeName: "node-2",
|
||||
Labels: map[string]string{"foo2": "bar"},
|
||||
}),
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "low-pod4",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &lowPriority,
|
||||
Resources: defaultPodRes,
|
||||
NodeName: "node-3",
|
||||
Labels: map[string]string{"foo2": "bar"},
|
||||
}),
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "low-pod5",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &lowPriority,
|
||||
Resources: defaultPodRes,
|
||||
NodeName: "node-3",
|
||||
Labels: map[string]string{"foo2": "bar"},
|
||||
}),
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "low-pod6",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &lowPriority,
|
||||
Resources: defaultPodRes,
|
||||
NodeName: "node-3",
|
||||
@ -1141,7 +1142,7 @@ func TestPDBInPreemption(t *testing.T) {
|
||||
},
|
||||
pod: initPausePod(cs, &pausePodConfig{
|
||||
Name: "preemptor-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Priority: &highPriority,
|
||||
Resources: &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
|
||||
@ -1171,7 +1172,7 @@ func TestPDBInPreemption(t *testing.T) {
|
||||
}
|
||||
// Add pod condition ready so that PDB is updated.
|
||||
addPodConditionReady(p)
|
||||
if _, err := testCtx.clientSet.CoreV1().Pods(testCtx.ns.Name).UpdateStatus(context.TODO(), p, metav1.UpdateOptions{}); err != nil {
|
||||
if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).UpdateStatus(context.TODO(), p, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@ -1182,7 +1183,7 @@ func TestPDBInPreemption(t *testing.T) {
|
||||
|
||||
// Create PDBs.
|
||||
for _, pdb := range test.pdbs {
|
||||
_, err := testCtx.clientSet.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).Create(context.TODO(), pdb, metav1.CreateOptions{})
|
||||
_, err := testCtx.ClientSet.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).Create(context.TODO(), pdb, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create PDB: %v", err)
|
||||
}
|
||||
@ -1218,8 +1219,8 @@ func TestPDBInPreemption(t *testing.T) {
|
||||
|
||||
// Cleanup
|
||||
pods = append(pods, preemptor)
|
||||
cleanupPods(cs, t, pods)
|
||||
cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
|
||||
testutils.CleanupPods(cs, t, pods)
|
||||
cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
|
||||
cs.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
|
||||
}
}
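
TestPDBInPreemption above creates PodDisruptionBudgets directly through the exported ClientSet and deletes them in bulk afterwards. A compact sketch of that pattern; the wrapper function is illustrative, and the API arguments mirror the calls shown in this diff.

package scheduler

import (
	"context"
	"testing"

	policy "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	testutils "k8s.io/kubernetes/test/integration/util"
)

// withPDB creates a PDB in the per-test namespace, runs the body, and removes
// all PDBs again, following the cleanup used by TestPDBInPreemption.
func withPDB(t *testing.T, testCtx *testutils.TestContext, pdb *policy.PodDisruptionBudget, run func()) {
	cs := testCtx.ClientSet
	if _, err := cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).Create(context.TODO(), pdb, metav1.CreateOptions{}); err != nil {
		t.Fatalf("failed to create PDB: %v", err)
	}
	// Arguments mirror the DeleteCollection calls in this diff.
	defer cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
	run()
}
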
@ -22,7 +22,7 @@ import (
"strings"
"testing"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
@ -30,7 +30,8 @@ import (
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
st "k8s.io/kubernetes/pkg/scheduler/testing"
testutils "k8s.io/kubernetes/test/utils"
testutils "k8s.io/kubernetes/test/integration/util"
"k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@ -40,9 +41,9 @@ import (
// works correctly.
func TestNodeAffinity(t *testing.T) {
testCtx := initTest(t, "node-affinity")
defer cleanupTest(t, testCtx)
defer testutils.CleanupTest(t, testCtx)
// Add a few nodes.
nodes, err := createNodes(testCtx.clientSet, "testnode", nil, 5)
nodes, err := createNodes(testCtx.ClientSet, "testnode", nil, 5)
if err != nil {
t.Fatalf("Cannot create nodes: %v", err)
}
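
The import hunk above carries the core of the refactor for this file: testutils now points at test/integration/util, while the node helpers keep coming from test/utils (referenced as utils in the hunks that follow). Below is a sketch of the node-labelling pattern those hunks use; the explicit utils alias and the wrapper function are additions for clarity, and waitForNodeLabels remains a package-local helper per this diff.

package scheduler

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	utils "k8s.io/kubernetes/test/utils" // aliased here for clarity; the diff imports it unaliased
)

func labelNodes(t *testing.T, cs clientset.Interface, nodes []*v1.Node, labels map[string]string) {
	for _, node := range nodes {
		// utils.AddLabelsToNode applies the labels through the API server.
		if err := utils.AddLabelsToNode(cs, node.Name, labels); err != nil {
			t.Fatalf("cannot add labels to node %v: %v", node.Name, err)
		}
		// waitForNodeLabels (still local to this package) blocks until the
		// labels are observable on the node object.
		if err := waitForNodeLabels(cs, node.Name, labels); err != nil {
			t.Fatalf("adding labels to node %v didn't succeed: %v", node.Name, err)
		}
	}
}
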
@ -53,17 +54,17 @@ func TestNodeAffinity(t *testing.T) {
|
||||
labels := map[string]string{
|
||||
labelKey: labelValue,
|
||||
}
|
||||
if err = testutils.AddLabelsToNode(testCtx.clientSet, labeledNode.Name, labels); err != nil {
|
||||
if err = utils.AddLabelsToNode(testCtx.ClientSet, labeledNode.Name, labels); err != nil {
|
||||
t.Fatalf("Cannot add labels to node: %v", err)
|
||||
}
|
||||
if err = waitForNodeLabels(testCtx.clientSet, labeledNode.Name, labels); err != nil {
|
||||
if err = waitForNodeLabels(testCtx.ClientSet, labeledNode.Name, labels); err != nil {
|
||||
t.Fatalf("Adding labels to node didn't succeed: %v", err)
|
||||
}
|
||||
// Create a pod with node affinity.
|
||||
podName := "pod-with-node-affinity"
|
||||
pod, err := runPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
pod, err := runPausePod(testCtx.ClientSet, initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: podName,
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Affinity: &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{
|
||||
@ -97,9 +98,9 @@ func TestNodeAffinity(t *testing.T) {
|
||||
// works correctly.
|
||||
func TestPodAffinity(t *testing.T) {
|
||||
testCtx := initTest(t, "pod-affinity")
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
// Add a few nodes.
|
||||
nodesInTopology, err := createNodes(testCtx.clientSet, "in-topology", nil, 5)
|
||||
nodesInTopology, err := createNodes(testCtx.ClientSet, "in-topology", nil, 5)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot create nodes: %v", err)
|
||||
}
|
||||
@ -110,34 +111,34 @@ func TestPodAffinity(t *testing.T) {
|
||||
}
|
||||
for _, node := range nodesInTopology {
|
||||
// Add topology key to all the nodes.
|
||||
if err = testutils.AddLabelsToNode(testCtx.clientSet, node.Name, nodeLabels); err != nil {
|
||||
if err = utils.AddLabelsToNode(testCtx.ClientSet, node.Name, nodeLabels); err != nil {
|
||||
t.Fatalf("Cannot add labels to node %v: %v", node.Name, err)
|
||||
}
|
||||
if err = waitForNodeLabels(testCtx.clientSet, node.Name, nodeLabels); err != nil {
|
||||
if err = waitForNodeLabels(testCtx.ClientSet, node.Name, nodeLabels); err != nil {
|
||||
t.Fatalf("Adding labels to node %v didn't succeed: %v", node.Name, err)
|
||||
}
|
||||
}
|
||||
// Add a pod with a label and wait for it to schedule.
|
||||
labelKey := "service"
|
||||
labelValue := "S1"
|
||||
_, err = runPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
_, err = runPausePod(testCtx.ClientSet, initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "attractor-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Labels: map[string]string{labelKey: labelValue},
|
||||
}))
|
||||
if err != nil {
|
||||
t.Fatalf("Error running the attractor pod: %v", err)
|
||||
}
|
||||
// Add a few more nodes without the topology label.
|
||||
_, err = createNodes(testCtx.clientSet, "other-node", nil, 5)
|
||||
_, err = createNodes(testCtx.ClientSet, "other-node", nil, 5)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot create the second set of nodes: %v", err)
|
||||
}
|
||||
// Add a new pod with affinity to the attractor pod.
|
||||
podName := "pod-with-podaffinity"
|
||||
pod, err := runPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
pod, err := runPausePod(testCtx.ClientSet, initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: podName,
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Affinity: &v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
|
||||
@ -161,7 +162,7 @@ func TestPodAffinity(t *testing.T) {
|
||||
},
|
||||
},
|
||||
TopologyKey: topologyKey,
|
||||
Namespaces: []string{testCtx.ns.Name},
|
||||
Namespaces: []string{testCtx.NS.Name},
|
||||
},
|
||||
Weight: 50,
|
||||
},
|
||||
@ -187,7 +188,7 @@ func TestPodAffinity(t *testing.T) {
|
||||
// works correctly, i.e., the pod gets scheduled to the node where its container images are ready.
|
||||
func TestImageLocality(t *testing.T) {
|
||||
testCtx := initTest(t, "image-locality")
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
|
||||
// We use a fake large image as the test image used by the pod, which has relatively large image size.
|
||||
image := v1.ContainerImage{
|
||||
@ -198,22 +199,22 @@ func TestImageLocality(t *testing.T) {
|
||||
}
|
||||
|
||||
// Create a node with the large image.
|
||||
nodeWithLargeImage, err := createNodeWithImages(testCtx.clientSet, "testnode-large-image", nil, []v1.ContainerImage{image})
|
||||
nodeWithLargeImage, err := createNodeWithImages(testCtx.ClientSet, "testnode-large-image", nil, []v1.ContainerImage{image})
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create node with a large image: %v", err)
|
||||
}
|
||||
|
||||
// Add a few nodes.
|
||||
_, err = createNodes(testCtx.clientSet, "testnode", nil, 10)
|
||||
_, err = createNodes(testCtx.ClientSet, "testnode", nil, 10)
|
||||
if err != nil {
|
||||
t.Fatalf("cannot create nodes: %v", err)
|
||||
}
|
||||
|
||||
// Create a pod with containers each having the specified image.
|
||||
podName := "pod-using-large-image"
|
||||
pod, err := runPodWithContainers(testCtx.clientSet, initPodWithContainers(testCtx.clientSet, &podWithContainersConfig{
|
||||
pod, err := runPodWithContainers(testCtx.ClientSet, initPodWithContainers(testCtx.ClientSet, &podWithContainersConfig{
|
||||
Name: podName,
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Containers: makeContainersWithImages(image.Names),
|
||||
}))
|
||||
if err != nil {
|
||||
@ -249,9 +250,9 @@ func TestEvenPodsSpreadPriority(t *testing.T) {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EvenPodsSpread, true)()
|
||||
|
||||
testCtx := initTest(t, "eps-priority")
|
||||
cs := testCtx.clientSet
|
||||
ns := testCtx.ns.Name
|
||||
defer cleanupTest(t, testCtx)
|
||||
cs := testCtx.ClientSet
|
||||
ns := testCtx.NS.Name
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
// Add 4 nodes.
|
||||
nodes, err := createNodes(cs, "node", nil, 4)
|
||||
if err != nil {
|
||||
@ -263,7 +264,7 @@ func TestEvenPodsSpreadPriority(t *testing.T) {
|
||||
"zone": fmt.Sprintf("zone-%d", i/2),
|
||||
"node": node.Name,
|
||||
}
|
||||
if err = testutils.AddLabelsToNode(cs, node.Name, labels); err != nil {
|
||||
if err = utils.AddLabelsToNode(cs, node.Name, labels); err != nil {
|
||||
t.Fatalf("Cannot add labels to node: %v", err)
|
||||
}
|
||||
if err = waitForNodeLabels(cs, node.Name, labels); err != nil {
|
||||
@ -277,10 +278,10 @@ func TestEvenPodsSpreadPriority(t *testing.T) {
|
||||
Value: "v1",
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}
|
||||
if err = addTaintToNode(cs, nodes[0].Name, taint); err != nil {
|
||||
if err = testutils.AddTaintToNode(cs, nodes[0].Name, taint); err != nil {
|
||||
t.Fatalf("Adding taint to node failed: %v", err)
|
||||
}
|
||||
if err = waitForNodeTaints(cs, nodes[0], []v1.Taint{taint}); err != nil {
|
||||
if err = testutils.WaitForNodeTaints(cs, nodes[0], []v1.Taint{taint}); err != nil {
|
||||
t.Fatalf("Taint not seen on node: %v", err)
|
||||
}
|
||||
|
||||
@ -332,13 +333,13 @@ func TestEvenPodsSpreadPriority(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
allPods := append(tt.existingPods, tt.incomingPod)
|
||||
defer cleanupPods(cs, t, allPods)
|
||||
defer testutils.CleanupPods(cs, t, allPods)
|
||||
for _, pod := range tt.existingPods {
|
||||
createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Test Failed: error while creating pod during test: %v", err)
|
||||
}
|
||||
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduled(cs, createdPod.Namespace, createdPod.Name))
|
||||
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
|
||||
if err != nil {
|
||||
t.Errorf("Test Failed: error while waiting for pod during test: %v", err)
}
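
TestEvenPodsSpreadPriority above polls with the exported PodScheduled condition instead of the old package-local one. A minimal sketch of that call shape; the wrapper is illustrative, and pollInterval is assumed to be the package-local constant these tests already use.

package scheduler

import (
	"testing"

	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	testutils "k8s.io/kubernetes/test/integration/util"
)

func waitScheduled(t *testing.T, cs clientset.Interface, namespace, name string) {
	// testutils.PodScheduled returns a wait condition that reports true once
	// the pod has been assigned a node.
	if err := wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, namespace, name)); err != nil {
		t.Errorf("error while waiting for pod %s/%s to be scheduled: %v", namespace, name, err)
	}
}
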
@ -41,6 +41,7 @@ import (
kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/profile"
"k8s.io/kubernetes/test/integration/framework"
testutils "k8s.io/kubernetes/test/integration/util"
)

type nodeMutationFunc func(t *testing.T, n *v1.Node, nodeLister corelisters.NodeLister, c clientset.Interface)
@ -335,12 +336,12 @@ func TestSchedulerCreationFromNonExistentConfigMap(t *testing.T) {

func TestUnschedulableNodes(t *testing.T) {
testCtx := initTest(t, "unschedulable-nodes")
defer cleanupTest(t, testCtx)
defer testutils.CleanupTest(t, testCtx)

nodeLister := testCtx.informerFactory.Core().V1().Nodes().Lister()
nodeLister := testCtx.InformerFactory.Core().V1().Nodes().Lister()
// NOTE: This test cannot run in parallel, because it is creating and deleting
// non-namespaced objects (Nodes).
defer testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
defer testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{})

goodCondition := v1.NodeCondition{
Type: v1.NodeReady,
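
TestUnschedulableNodes reads node state through the exported InformerFactory field rather than the old testCtx.informerFactory. A small sketch of that access pattern; the helper functions are illustrative.

package scheduler

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	corelisters "k8s.io/client-go/listers/core/v1"
	testutils "k8s.io/kubernetes/test/integration/util"
)

// nodeListerFor returns the node lister backed by the test fixture's shared
// informer factory (was testCtx.informerFactory).
func nodeListerFor(testCtx *testutils.TestContext) corelisters.NodeLister {
	return testCtx.InformerFactory.Core().V1().Nodes().Lister()
}

// listAllNodes lists every node currently in the informer cache.
func listAllNodes(testCtx *testutils.TestContext) ([]*v1.Node, error) {
	return nodeListerFor(testCtx).List(labels.Everything())
}
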
@ -409,23 +410,23 @@ func TestUnschedulableNodes(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, mod := range nodeModifications {
|
||||
unSchedNode, err := testCtx.clientSet.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{})
|
||||
unSchedNode, err := testCtx.ClientSet.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create node: %v", err)
|
||||
}
|
||||
|
||||
// Apply the unschedulable modification to the node, and wait for the reflection
|
||||
mod.makeUnSchedulable(t, unSchedNode, nodeLister, testCtx.clientSet)
|
||||
mod.makeUnSchedulable(t, unSchedNode, nodeLister, testCtx.ClientSet)
|
||||
|
||||
// Create the new pod, note that this needs to happen post unschedulable
|
||||
// modification or we have a race in the test.
|
||||
myPod, err := createPausePodWithResource(testCtx.clientSet, "node-scheduling-test-pod", testCtx.ns.Name, nil)
|
||||
myPod, err := createPausePodWithResource(testCtx.ClientSet, "node-scheduling-test-pod", testCtx.NS.Name, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
// There are no schedulable nodes - the pod shouldn't be scheduled.
|
||||
err = waitForPodToScheduleWithTimeout(testCtx.clientSet, myPod, 2*time.Second)
|
||||
err = testutils.WaitForPodToScheduleWithTimeout(testCtx.ClientSet, myPod, 2*time.Second)
|
||||
if err == nil {
|
||||
t.Errorf("Test %d: Pod scheduled successfully on unschedulable nodes", i)
|
||||
}
|
||||
@ -436,23 +437,23 @@ func TestUnschedulableNodes(t *testing.T) {
|
||||
}
|
||||
|
||||
// Apply the schedulable modification to the node, and wait for the reflection
|
||||
schedNode, err := testCtx.clientSet.CoreV1().Nodes().Get(context.TODO(), unSchedNode.Name, metav1.GetOptions{})
|
||||
schedNode, err := testCtx.ClientSet.CoreV1().Nodes().Get(context.TODO(), unSchedNode.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to get node: %v", err)
|
||||
}
|
||||
mod.makeSchedulable(t, schedNode, nodeLister, testCtx.clientSet)
|
||||
mod.makeSchedulable(t, schedNode, nodeLister, testCtx.ClientSet)
|
||||
|
||||
// Wait until the pod is scheduled.
|
||||
if err := waitForPodToSchedule(testCtx.clientSet, myPod); err != nil {
|
||||
if err := testutils.WaitForPodToSchedule(testCtx.ClientSet, myPod); err != nil {
|
||||
t.Errorf("Test %d: failed to schedule a pod: %v", i, err)
|
||||
} else {
|
||||
t.Logf("Test %d: Pod got scheduled on a schedulable node", i)
|
||||
}
|
||||
// Clean up.
|
||||
if err := deletePod(testCtx.clientSet, myPod.Name, myPod.Namespace); err != nil {
|
||||
if err := deletePod(testCtx.ClientSet, myPod.Name, myPod.Namespace); err != nil {
|
||||
t.Errorf("Failed to delete pod: %v", err)
|
||||
}
|
||||
err = testCtx.clientSet.CoreV1().Nodes().Delete(context.TODO(), schedNode.Name, nil)
|
||||
err = testCtx.ClientSet.CoreV1().Nodes().Delete(context.TODO(), schedNode.Name, nil)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to delete node: %v", err)
|
||||
}
|
||||
@ -480,7 +481,7 @@ func TestMultipleSchedulers(t *testing.T) {
|
||||
|
||||
// 1. create and start default-scheduler
|
||||
testCtx := initTest(t, "multi-scheduler")
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
|
||||
// 2. create a node
|
||||
node := &v1.Node{
|
||||
@ -492,23 +493,23 @@ func TestMultipleSchedulers(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
testCtx.clientSet.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{})
|
||||
testCtx.ClientSet.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{})
|
||||
|
||||
// 3. create 3 pods for testing
|
||||
t.Logf("create 3 pods for testing")
|
||||
testPod, err := createPausePodWithResource(testCtx.clientSet, "pod-without-scheduler-name", testCtx.ns.Name, nil)
|
||||
testPod, err := createPausePodWithResource(testCtx.ClientSet, "pod-without-scheduler-name", testCtx.NS.Name, nil)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
defaultScheduler := "default-scheduler"
|
||||
testPodFitsDefault, err := createPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{Name: "pod-fits-default", Namespace: testCtx.ns.Name, SchedulerName: defaultScheduler}))
|
||||
testPodFitsDefault, err := createPausePod(testCtx.ClientSet, initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "pod-fits-default", Namespace: testCtx.NS.Name, SchedulerName: defaultScheduler}))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
fooScheduler := "foo-scheduler"
|
||||
testPodFitsFoo, err := createPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, &pausePodConfig{Name: "pod-fits-foo", Namespace: testCtx.ns.Name, SchedulerName: fooScheduler}))
|
||||
testPodFitsFoo, err := createPausePod(testCtx.ClientSet, initPausePod(testCtx.ClientSet, &pausePodConfig{Name: "pod-fits-foo", Namespace: testCtx.NS.Name, SchedulerName: fooScheduler}))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create pod: %v", err)
|
||||
}
|
||||
@ -517,19 +518,19 @@ func TestMultipleSchedulers(t *testing.T) {
|
||||
// - testPod, testPodFitsDefault should be scheduled
|
||||
// - testPodFitsFoo should NOT be scheduled
|
||||
t.Logf("wait for pods scheduled")
|
||||
if err := waitForPodToSchedule(testCtx.clientSet, testPod); err != nil {
|
||||
if err := testutils.WaitForPodToSchedule(testCtx.ClientSet, testPod); err != nil {
|
||||
t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPod.Name, err)
|
||||
} else {
|
||||
t.Logf("Test MultiScheduler: %s Pod scheduled", testPod.Name)
|
||||
}
|
||||
|
||||
if err := waitForPodToSchedule(testCtx.clientSet, testPodFitsDefault); err != nil {
|
||||
if err := testutils.WaitForPodToSchedule(testCtx.ClientSet, testPodFitsDefault); err != nil {
|
||||
t.Errorf("Test MultiScheduler: %s Pod not scheduled: %v", testPodFitsDefault.Name, err)
|
||||
} else {
|
||||
t.Logf("Test MultiScheduler: %s Pod scheduled", testPodFitsDefault.Name)
|
||||
}
|
||||
|
||||
if err := waitForPodToScheduleWithTimeout(testCtx.clientSet, testPodFitsFoo, time.Second*5); err == nil {
|
||||
if err := testutils.WaitForPodToScheduleWithTimeout(testCtx.ClientSet, testPodFitsFoo, time.Second*5); err == nil {
|
||||
t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodFitsFoo.Name, err)
|
||||
} else {
|
||||
t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodFitsFoo.Name)
|
||||
@ -537,11 +538,11 @@ func TestMultipleSchedulers(t *testing.T) {
|
||||
|
||||
// 5. create and start a scheduler with name "foo-scheduler"
|
||||
fooProf := kubeschedulerconfig.KubeSchedulerProfile{SchedulerName: fooScheduler}
|
||||
testCtx = initTestSchedulerWithOptions(t, testCtx, true, nil, time.Second, scheduler.WithProfiles(fooProf))
|
||||
testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, true, nil, time.Second, scheduler.WithProfiles(fooProf))
|
||||
|
||||
// 6. **check point-2**:
|
||||
// - testPodWithAnnotationFitsFoo should be scheduled
|
||||
err = waitForPodToSchedule(testCtx.clientSet, testPodFitsFoo)
|
||||
err = testutils.WaitForPodToSchedule(testCtx.ClientSet, testPodFitsFoo)
|
||||
if err != nil {
|
||||
t.Errorf("Test MultiScheduler: %s Pod not scheduled, %v", testPodFitsFoo.Name, err)
|
||||
} else {
|
||||
@ -549,10 +550,10 @@ func TestMultipleSchedulers(t *testing.T) {
|
||||
}
|
||||
|
||||
// 7. delete the pods that were scheduled by the default scheduler, and stop the default scheduler
|
||||
if err := deletePod(testCtx.clientSet, testPod.Name, testCtx.ns.Name); err != nil {
|
||||
if err := deletePod(testCtx.ClientSet, testPod.Name, testCtx.NS.Name); err != nil {
|
||||
t.Errorf("Failed to delete pod: %v", err)
|
||||
}
|
||||
if err := deletePod(testCtx.clientSet, testPodFitsDefault.Name, testCtx.ns.Name); err != nil {
|
||||
if err := deletePod(testCtx.ClientSet, testPodFitsDefault.Name, testCtx.NS.Name); err != nil {
|
||||
t.Errorf("Failed to delete pod: %v", err)
|
||||
}
|
||||
|
||||
@ -580,13 +581,13 @@ func TestMultipleSchedulers(t *testing.T) {
|
||||
|
||||
// 9. **check point-3**:
|
||||
// - testPodNoAnnotation2 and testPodWithAnnotationFitsDefault2 should NOT be scheduled
|
||||
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodNoAnnotation2.Namespace, testPodNoAnnotation2.Name))
|
||||
err = wait.Poll(time.Second, time.Second*5, testutils.PodScheduled(clientSet, testPodNoAnnotation2.Namespace, testPodNoAnnotation2.Name))
|
||||
if err == nil {
|
||||
t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodNoAnnotation2.Name, err)
|
||||
} else {
|
||||
t.Logf("Test MultiScheduler: %s Pod not scheduled", testPodNoAnnotation2.Name)
|
||||
}
|
||||
err = wait.Poll(time.Second, time.Second*5, podScheduled(clientSet, testPodWithAnnotationFitsDefault2.Namespace, testPodWithAnnotationFitsDefault2.Name))
|
||||
err = wait.Poll(time.Second, time.Second*5, testutils.PodScheduled(clientSet, testPodWithAnnotationFitsDefault2.Namespace, testPodWithAnnotationFitsDefault2.Name))
|
||||
if err == nil {
|
||||
t.Errorf("Test MultiScheduler: %s Pod got scheduled, %v", testPodWithAnnotationFitsDefault2.Name, err)
|
||||
} else {
|
||||
@ -604,7 +605,7 @@ func TestMultipleSchedulingProfiles(t *testing.T) {
|
||||
SchedulerName: "custom-scheduler",
|
||||
},
|
||||
))
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
|
||||
node := &v1.Node{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "node-multi-scheduler-test-node"},
|
||||
@ -615,23 +616,23 @@ func TestMultipleSchedulingProfiles(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
if _, err := testCtx.clientSet.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil {
|
||||
if _, err := testCtx.ClientSet.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
evs, err := testCtx.clientSet.CoreV1().Events(testCtx.ns.Name).Watch(testCtx.ctx, metav1.ListOptions{})
|
||||
evs, err := testCtx.ClientSet.CoreV1().Events(testCtx.NS.Name).Watch(testCtx.Ctx, metav1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer evs.Stop()
|
||||
|
||||
for _, pc := range []*pausePodConfig{
|
||||
{Name: "foo", Namespace: testCtx.ns.Name},
|
||||
{Name: "bar", Namespace: testCtx.ns.Name, SchedulerName: "unknown-scheduler"},
|
||||
{Name: "baz", Namespace: testCtx.ns.Name, SchedulerName: "default-scheduler"},
|
||||
{Name: "zet", Namespace: testCtx.ns.Name, SchedulerName: "custom-scheduler"},
|
||||
{Name: "foo", Namespace: testCtx.NS.Name},
|
||||
{Name: "bar", Namespace: testCtx.NS.Name, SchedulerName: "unknown-scheduler"},
|
||||
{Name: "baz", Namespace: testCtx.NS.Name, SchedulerName: "default-scheduler"},
|
||||
{Name: "zet", Namespace: testCtx.NS.Name, SchedulerName: "custom-scheduler"},
|
||||
} {
|
||||
if _, err := createPausePod(testCtx.clientSet, initPausePod(testCtx.clientSet, pc)); err != nil {
|
||||
if _, err := createPausePod(testCtx.ClientSet, initPausePod(testCtx.ClientSet, pc)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
@ -668,7 +669,7 @@ func TestMultipleSchedulingProfiles(t *testing.T) {
|
||||
// This test will verify scheduler can work well regardless of whether kubelet is allocatable aware or not.
|
||||
func TestAllocatable(t *testing.T) {
|
||||
testCtx := initTest(t, "allocatable")
|
||||
defer cleanupTest(t, testCtx)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
|
||||
// 2. create a node without allocatable awareness
|
||||
nodeRes := &v1.ResourceList{
|
||||
@ -676,7 +677,7 @@ func TestAllocatable(t *testing.T) {
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(30, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(30, resource.BinarySI),
|
||||
}
|
||||
allocNode, err := createNode(testCtx.clientSet, "node-allocatable-scheduler-test-node", nodeRes)
|
||||
allocNode, err := createNode(testCtx.ClientSet, "node-allocatable-scheduler-test-node", nodeRes)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create node: %v", err)
|
||||
}
|
||||
@ -687,13 +688,13 @@ func TestAllocatable(t *testing.T) {
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(20, resource.BinarySI),
|
||||
}
|
||||
testAllocPod, err := createPausePodWithResource(testCtx.clientSet, podName, testCtx.ns.Name, podRes)
|
||||
testAllocPod, err := createPausePodWithResource(testCtx.ClientSet, podName, testCtx.NS.Name, podRes)
|
||||
if err != nil {
|
||||
t.Fatalf("Test allocatable unawareness failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
// 4. Test: this test pod should be scheduled since api-server will use Capacity as Allocatable
|
||||
err = waitForPodToScheduleWithTimeout(testCtx.clientSet, testAllocPod, time.Second*5)
|
||||
err = testutils.WaitForPodToScheduleWithTimeout(testCtx.ClientSet, testAllocPod, time.Second*5)
|
||||
if err != nil {
|
||||
t.Errorf("Test allocatable unawareness: %s Pod not scheduled: %v", testAllocPod.Name, err)
|
||||
} else {
|
||||
@ -714,23 +715,23 @@ func TestAllocatable(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
if _, err := testCtx.clientSet.CoreV1().Nodes().UpdateStatus(context.TODO(), allocNode, metav1.UpdateOptions{}); err != nil {
|
||||
if _, err := testCtx.ClientSet.CoreV1().Nodes().UpdateStatus(context.TODO(), allocNode, metav1.UpdateOptions{}); err != nil {
|
||||
t.Fatalf("Failed to update node with Status.Allocatable: %v", err)
|
||||
}
|
||||
|
||||
if err := deletePod(testCtx.clientSet, testAllocPod.Name, testCtx.ns.Name); err != nil {
|
||||
if err := deletePod(testCtx.ClientSet, testAllocPod.Name, testCtx.NS.Name); err != nil {
|
||||
t.Fatalf("Failed to remove the first pod: %v", err)
|
||||
}
|
||||
|
||||
// 6. Make another pod with different name, same resource request
|
||||
podName2 := "pod-test-allocatable2"
|
||||
testAllocPod2, err := createPausePodWithResource(testCtx.clientSet, podName2, testCtx.ns.Name, podRes)
|
||||
testAllocPod2, err := createPausePodWithResource(testCtx.ClientSet, podName2, testCtx.NS.Name, podRes)
|
||||
if err != nil {
|
||||
t.Fatalf("Test allocatable awareness failed to create pod: %v", err)
|
||||
}
|
||||
|
||||
// 7. Test: this test pod should not be scheduled since it request more than Allocatable
|
||||
if err := waitForPodToScheduleWithTimeout(testCtx.clientSet, testAllocPod2, time.Second*5); err == nil {
|
||||
if err := testutils.WaitForPodToScheduleWithTimeout(testCtx.ClientSet, testAllocPod2, time.Second*5); err == nil {
|
||||
t.Errorf("Test allocatable awareness: %s Pod got scheduled unexpectedly, %v", testAllocPod2.Name, err)
|
||||
} else {
|
||||
t.Logf("Test allocatable awareness: %s Pod not scheduled as expected", testAllocPod2.Name)
|
||||
@ -742,8 +743,8 @@ func TestAllocatable(t *testing.T) {
|
||||
func TestSchedulerInformers(t *testing.T) {
|
||||
// Initialize scheduler.
|
||||
testCtx := initTest(t, "scheduler-informer")
|
||||
defer cleanupTest(t, testCtx)
|
||||
cs := testCtx.clientSet
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
cs := testCtx.ClientSet
|
||||
|
||||
defaultPodRes := &v1.ResourceRequirements{Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
|
||||
@ -771,17 +772,17 @@ func TestSchedulerInformers(t *testing.T) {
|
||||
description: "Pod cannot be scheduled when node is occupied by pods scheduled by other schedulers",
|
||||
nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
|
||||
existingPods: []*v1.Pod{
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "pod1",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Resources: defaultPodRes,
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
NodeName: "node-1",
|
||||
SchedulerName: "foo-scheduler",
|
||||
}),
|
||||
initPausePod(testCtx.clientSet, &pausePodConfig{
|
||||
initPausePod(testCtx.ClientSet, &pausePodConfig{
|
||||
Name: "pod2",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Resources: defaultPodRes,
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
NodeName: "node-1",
|
||||
@ -790,7 +791,7 @@ func TestSchedulerInformers(t *testing.T) {
|
||||
},
|
||||
pod: initPausePod(cs, &pausePodConfig{
|
||||
Name: "unschedulable-pod",
|
||||
Namespace: testCtx.ns.Name,
|
||||
Namespace: testCtx.NS.Name,
|
||||
Resources: defaultPodRes,
|
||||
}),
|
||||
preemptedPodIndexes: map[int]struct{}{2: {}},
|
||||
@ -824,8 +825,8 @@ func TestSchedulerInformers(t *testing.T) {
|
||||
|
||||
// Cleanup
|
||||
pods = append(pods, unschedulable)
|
||||
cleanupPods(cs, t, pods)
|
||||
cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
|
||||
testutils.CleanupPods(cs, t, pods)
|
||||
cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
|
||||
cs.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
|
||||
}
|
||||
}
@ -42,6 +42,7 @@ import (
"k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
"k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/integration/util"
imageutils "k8s.io/kubernetes/test/utils/image"
)

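
The taint tests below follow a two-step setup: build the test master with an admission plugin, then attach a scheduler to it. A condensed sketch of that sequence using the exported helpers; the function is illustrative, and the boolean and nil arguments simply mirror the call in TestTaintNodeByCondition.

package scheduler

import (
	"testing"

	"k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction"
	pluginapi "k8s.io/kubernetes/plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction"
	testutils "k8s.io/kubernetes/test/integration/util"
)

func setUpTaintTest(t *testing.T) *testutils.TestContext {
	admission := podtolerationrestriction.NewPodTolerationsPlugin(&pluginapi.Configuration{})

	// Start the API server with the admission plugin wired in (was initTestMaster).
	testCtx := testutils.InitTestMaster(t, "default", admission)

	// Attach the scheduler; the remaining arguments match the call used by
	// TestTaintNodeByCondition below (was initTestScheduler).
	return testutils.InitTestScheduler(t, testCtx, false, nil)
}
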
@ -71,24 +72,24 @@ func TestTaintNodeByCondition(t *testing.T) {
|
||||
// Build PodToleration Admission.
|
||||
admission := podtolerationrestriction.NewPodTolerationsPlugin(&pluginapi.Configuration{})
|
||||
|
||||
testCtx := initTestMaster(t, "default", admission)
|
||||
testCtx := testutils.InitTestMaster(t, "default", admission)
|
||||
|
||||
// Build clientset and informers for controllers.
|
||||
externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{
|
||||
QPS: -1,
|
||||
Host: testCtx.httpServer.URL,
|
||||
Host: testCtx.HTTPServer.URL,
|
||||
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second)
|
||||
|
||||
admission.SetExternalKubeClientSet(externalClientset)
|
||||
admission.SetExternalKubeInformerFactory(externalInformers)
|
||||
|
||||
testCtx = initTestScheduler(t, testCtx, false, nil)
|
||||
defer cleanupTest(t, testCtx)
|
||||
testCtx = testutils.InitTestScheduler(t, testCtx, false, nil)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
|
||||
cs := testCtx.clientSet
|
||||
informers := testCtx.informerFactory
|
||||
nsName := testCtx.ns.Name
|
||||
cs := testCtx.ClientSet
|
||||
informers := testCtx.InformerFactory
|
||||
nsName := testCtx.NS.Name
|
||||
|
||||
// Start NodeLifecycleController for taint.
|
||||
nc, err := nodelifecycle.NewNodeLifecycleController(
|
||||
@ -112,13 +113,13 @@ func TestTaintNodeByCondition(t *testing.T) {
|
||||
t.Errorf("Failed to create node controller: %v", err)
|
||||
return
|
||||
}
|
||||
go nc.Run(testCtx.ctx.Done())
|
||||
go nc.Run(testCtx.Ctx.Done())
|
||||
|
||||
// Waiting for all controller sync.
|
||||
externalInformers.Start(testCtx.ctx.Done())
|
||||
externalInformers.WaitForCacheSync(testCtx.ctx.Done())
|
||||
informers.Start(testCtx.ctx.Done())
|
||||
informers.WaitForCacheSync(testCtx.ctx.Done())
|
||||
externalInformers.Start(testCtx.Ctx.Done())
|
||||
externalInformers.WaitForCacheSync(testCtx.Ctx.Done())
|
||||
informers.Start(testCtx.Ctx.Done())
|
||||
informers.WaitForCacheSync(testCtx.Ctx.Done())
|
||||
|
||||
// -------------------------------------------
|
||||
// Test TaintNodeByCondition feature.
|
||||
@ -534,7 +535,7 @@ func TestTaintNodeByCondition(t *testing.T) {
|
||||
if _, err := cs.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil {
|
||||
t.Errorf("Failed to create node, err: %v", err)
|
||||
}
|
||||
if err := waitForNodeTaints(cs, node, test.expectedTaints); err != nil {
|
||||
if err := testutils.WaitForNodeTaints(cs, node, test.expectedTaints); err != nil {
|
||||
node, err = cs.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("Failed to get node <%s>", node.Name)
|
||||
@ -558,7 +559,7 @@ func TestTaintNodeByCondition(t *testing.T) {
|
||||
pods = append(pods, createdPod)
|
||||
|
||||
if p.fits {
|
||||
if err := waitForPodToSchedule(cs, createdPod); err != nil {
|
||||
if err := testutils.WaitForPodToSchedule(cs, createdPod); err != nil {
|
||||
t.Errorf("Failed to schedule pod %s/%s on the node, err: %v",
|
||||
pod.Namespace, pod.Name, err)
|
||||
}
|
||||
@ -570,9 +571,9 @@ func TestTaintNodeByCondition(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
cleanupPods(cs, t, pods)
|
||||
cleanupNodes(cs, t)
|
||||
waitForSchedulerCacheCleanup(testCtx.scheduler, t)
|
||||
testutils.CleanupPods(cs, t, pods)
|
||||
testutils.CleanupNodes(cs, t)
|
||||
testutils.WaitForSchedulerCacheCleanup(testCtx.Scheduler, t)
|
||||
})
|
||||
}
|
||||
}
|
||||
@ -654,22 +655,22 @@ func TestTaintBasedEvictions(t *testing.T) {
|
||||
)
|
||||
for i, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
testCtx := initTestMaster(t, "taint-based-evictions", admission)
|
||||
testCtx := testutils.InitTestMaster(t, "taint-based-evictions", admission)
|
||||
|
||||
// Build clientset and informers for controllers.
|
||||
externalClientset := kubernetes.NewForConfigOrDie(&restclient.Config{
|
||||
QPS: -1,
|
||||
Host: testCtx.httpServer.URL,
|
||||
Host: testCtx.HTTPServer.URL,
|
||||
ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
|
||||
externalInformers := informers.NewSharedInformerFactory(externalClientset, time.Second)
|
||||
podTolerations.SetExternalKubeClientSet(externalClientset)
|
||||
podTolerations.SetExternalKubeInformerFactory(externalInformers)
|
||||
|
||||
testCtx = initTestScheduler(t, testCtx, true, nil)
|
||||
defer cleanupTest(t, testCtx)
|
||||
cs := testCtx.clientSet
|
||||
informers := testCtx.informerFactory
|
||||
_, err := cs.CoreV1().Namespaces().Create(context.TODO(), testCtx.ns, metav1.CreateOptions{})
|
||||
testCtx = testutils.InitTestScheduler(t, testCtx, true, nil)
|
||||
defer testutils.CleanupTest(t, testCtx)
|
||||
cs := testCtx.ClientSet
|
||||
informers := testCtx.InformerFactory
|
||||
_, err := cs.CoreV1().Namespaces().Create(context.TODO(), testCtx.NS, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("Failed to create namespace %+v", err)
|
||||
}
|
||||
@ -697,13 +698,13 @@ func TestTaintBasedEvictions(t *testing.T) {
|
||||
return
|
||||
}
|
||||
|
||||
go nc.Run(testCtx.ctx.Done())
|
||||
go nc.Run(testCtx.Ctx.Done())
|
||||
|
||||
// Waiting for all controller sync.
|
||||
externalInformers.Start(testCtx.ctx.Done())
|
||||
externalInformers.WaitForCacheSync(testCtx.ctx.Done())
|
||||
informers.Start(testCtx.ctx.Done())
|
||||
informers.WaitForCacheSync(testCtx.ctx.Done())
|
||||
externalInformers.Start(testCtx.Ctx.Done())
|
||||
externalInformers.WaitForCacheSync(testCtx.Ctx.Done())
|
||||
informers.Start(testCtx.Ctx.Done())
|
||||
informers.WaitForCacheSync(testCtx.Ctx.Done())
|
||||
|
||||
nodeRes := v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("4000m"),
|
||||
@ -743,16 +744,16 @@ func TestTaintBasedEvictions(t *testing.T) {
|
||||
test.pod.Spec.Tolerations[0].TolerationSeconds = &tolerationSeconds[i]
|
||||
}
|
||||
|
||||
test.pod, err = cs.CoreV1().Pods(testCtx.ns.Name).Create(context.TODO(), test.pod, metav1.CreateOptions{})
|
||||
test.pod, err = cs.CoreV1().Pods(testCtx.NS.Name).Create(context.TODO(), test.pod, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Test Failed: error: %v, while creating pod", err)
|
||||
}
|
||||
|
||||
if err := waitForPodToSchedule(cs, test.pod); err != nil {
|
||||
if err := testutils.WaitForPodToSchedule(cs, test.pod); err != nil {
|
||||
t.Errorf("Failed to schedule pod %s/%s on the node, err: %v",
|
||||
test.pod.Namespace, test.pod.Name, err)
|
||||
}
|
||||
test.pod, err = cs.CoreV1().Pods(testCtx.ns.Name).Get(context.TODO(), test.pod.Name, metav1.GetOptions{})
|
||||
test.pod, err = cs.CoreV1().Pods(testCtx.NS.Name).Get(context.TODO(), test.pod.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("Test Failed: error: %v, while creating pod", err)
|
||||
}
|
||||
@ -789,7 +790,7 @@ func TestTaintBasedEvictions(t *testing.T) {
|
||||
// i.e. expect a Unreachable:NoExecute taint,
|
||||
// we need to only send the update event once to simulate the network unreachable scenario.
|
||||
nodeCopy := nodeCopyWithConditions(nodes[i], test.nodeConditions)
|
||||
if err := updateNodeStatus(cs, nodeCopy); err != nil && !apierrors.IsNotFound(err) {
|
||||
if err := testutils.UpdateNodeStatus(cs, nodeCopy); err != nil && !apierrors.IsNotFound(err) {
|
||||
t.Errorf("Cannot update node: %v", err)
|
||||
}
|
||||
continue
|
||||
@ -799,11 +800,11 @@ func TestTaintBasedEvictions(t *testing.T) {
|
||||
go func(i int) {
|
||||
for {
|
||||
select {
|
||||
case <-testCtx.ctx.Done():
|
||||
case <-testCtx.Ctx.Done():
|
||||
return
|
||||
case <-time.Tick(heartbeatInternal):
|
||||
nodeCopy := nodeCopyWithConditions(nodes[i], conditions)
|
||||
if err := updateNodeStatus(cs, nodeCopy); err != nil && !apierrors.IsNotFound(err) {
|
||||
if err := testutils.UpdateNodeStatus(cs, nodeCopy); err != nil && !apierrors.IsNotFound(err) {
|
||||
t.Errorf("Cannot update node: %v", err)
|
||||
}
|
||||
}
|
||||
@ -811,12 +812,12 @@ func TestTaintBasedEvictions(t *testing.T) {
|
||||
}(i)
|
||||
}
|
||||
|
||||
if err := waitForNodeTaints(cs, neededNode, test.nodeTaints); err != nil {
|
||||
if err := testutils.WaitForNodeTaints(cs, neededNode, test.nodeTaints); err != nil {
|
||||
t.Errorf("Failed to taint node in test %d <%s>, err: %v", i, neededNode.Name, err)
|
||||
}
|
||||
|
||||
if test.pod != nil {
|
||||
err = pod.WaitForPodCondition(cs, testCtx.ns.Name, test.pod.Name, test.waitForPodCondition, time.Second*15, func(pod *v1.Pod) (bool, error) {
|
||||
err = pod.WaitForPodCondition(cs, testCtx.NS.Name, test.pod.Name, test.waitForPodCondition, time.Second*15, func(pod *v1.Pod) (bool, error) {
|
||||
// as node is unreachable, pod0 is expected to be in Terminating status
|
||||
// rather than getting deleted
|
||||
if tolerationSeconds[i] == 0 {
|
||||
@ -828,13 +829,13 @@ func TestTaintBasedEvictions(t *testing.T) {
|
||||
return false, nil
|
||||
})
|
||||
if err != nil {
|
||||
pod, _ := cs.CoreV1().Pods(testCtx.ns.Name).Get(context.TODO(), test.pod.Name, metav1.GetOptions{})
|
||||
pod, _ := cs.CoreV1().Pods(testCtx.NS.Name).Get(context.TODO(), test.pod.Name, metav1.GetOptions{})
|
||||
t.Fatalf("Error: %v, Expected test pod to be %s but it's %v", err, test.waitForPodCondition, pod)
|
||||
}
|
||||
cleanupPods(cs, t, []*v1.Pod{test.pod})
|
||||
testutils.CleanupPods(cs, t, []*v1.Pod{test.pod})
|
||||
}
|
||||
cleanupNodes(cs, t)
|
||||
waitForSchedulerCacheCleanup(testCtx.scheduler, t)
|
||||
testutils.CleanupNodes(cs, t)
|
||||
testutils.WaitForSchedulerCacheCleanup(testCtx.Scheduler, t)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
@ -19,8 +19,6 @@ package scheduler
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -30,194 +28,32 @@ import (
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/informers"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/restmapper"
|
||||
"k8s.io/client-go/scale"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/events"
|
||||
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
|
||||
"k8s.io/kubernetes/pkg/controller/disruption"
|
||||
"k8s.io/kubernetes/pkg/scheduler"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
|
||||
"k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
|
||||
schedulerapiv1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1"
|
||||
"k8s.io/kubernetes/pkg/scheduler/profile"
|
||||
taintutils "k8s.io/kubernetes/pkg/util/taints"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
testutils "k8s.io/kubernetes/test/integration/util"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
type testContext struct {
|
||||
closeFn framework.CloseFunc
|
||||
httpServer *httptest.Server
|
||||
ns *v1.Namespace
|
||||
clientSet *clientset.Clientset
|
||||
informerFactory informers.SharedInformerFactory
|
||||
scheduler *scheduler.Scheduler
|
||||
ctx context.Context
|
||||
cancelFn context.CancelFunc
|
||||
}
|
||||
|
||||
func createAlgorithmSourceFromPolicy(policy *schedulerapi.Policy, clientSet clientset.Interface) schedulerapi.SchedulerAlgorithmSource {
|
||||
// Serialize the Policy object into a ConfigMap later.
|
||||
info, ok := runtime.SerializerInfoForMediaType(scheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
|
||||
if !ok {
|
||||
panic("could not find json serializer")
|
||||
}
|
||||
encoder := scheme.Codecs.EncoderForVersion(info.Serializer, schedulerapiv1.SchemeGroupVersion)
|
||||
policyString := runtime.EncodeOrDie(encoder, policy)
|
||||
configPolicyName := "scheduler-custom-policy-config"
|
||||
policyConfigMap := v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: configPolicyName},
|
||||
Data: map[string]string{schedulerapi.SchedulerPolicyConfigMapKey: policyString},
|
||||
}
|
||||
policyConfigMap.APIVersion = "v1"
|
||||
clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &policyConfigMap, metav1.CreateOptions{})
|
||||
|
||||
return schedulerapi.SchedulerAlgorithmSource{
|
||||
Policy: &schedulerapi.SchedulerPolicySource{
|
||||
ConfigMap: &schedulerapi.SchedulerPolicyConfigMapSource{
|
||||
Namespace: policyConfigMap.Namespace,
|
||||
Name: policyConfigMap.Name,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// initTestMasterAndScheduler initializes a test environment and creates a master with default
|
||||
// configuration.
|
||||
func initTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *testContext {
|
||||
ctx, cancelFunc := context.WithCancel(context.Background())
|
||||
testCtx := testContext{
|
||||
ctx: ctx,
|
||||
cancelFn: cancelFunc,
|
||||
}
|
||||
|
||||
// 1. Create master
|
||||
h := &framework.MasterHolder{Initialized: make(chan struct{})}
|
||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
<-h.Initialized
|
||||
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
|
||||
}))
|
||||
|
||||
masterConfig := framework.NewIntegrationTestMasterConfig()
|
||||
|
||||
if admission != nil {
|
||||
masterConfig.GenericConfig.AdmissionControl = admission
|
||||
}
|
||||
|
||||
_, testCtx.httpServer, testCtx.closeFn = framework.RunAMasterUsingServer(masterConfig, s, h)
|
||||
|
||||
if nsPrefix != "default" {
|
||||
testCtx.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), s, t)
|
||||
} else {
|
||||
testCtx.ns = framework.CreateTestingNamespace("default", s, t)
|
||||
}
|
||||
|
||||
// 2. Create kubeclient
|
||||
testCtx.clientSet = clientset.NewForConfigOrDie(
|
||||
&restclient.Config{
|
||||
QPS: -1, Host: s.URL,
|
||||
ContentConfig: restclient.ContentConfig{
|
||||
GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"},
|
||||
},
|
||||
},
|
||||
)
|
||||
return &testCtx
|
||||
}
|
||||
|
||||
// initTestScheduler initializes a test environment and creates a scheduler with default
|
||||
// configuration.
|
||||
func initTestScheduler(
|
||||
t *testing.T,
|
||||
testCtx *testContext,
|
||||
setPodInformer bool,
|
||||
policy *schedulerapi.Policy,
|
||||
) *testContext {
|
||||
// Pod preemption is enabled by default scheduler configuration.
|
||||
return initTestSchedulerWithOptions(t, testCtx, setPodInformer, policy, time.Second)
|
||||
}
|
||||
|
||||
// initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
|
||||
// configuration and other options.
|
||||
func initTestSchedulerWithOptions(
|
||||
t *testing.T,
|
||||
testCtx *testContext,
|
||||
setPodInformer bool,
|
||||
policy *schedulerapi.Policy,
|
||||
resyncPeriod time.Duration,
|
||||
opts ...scheduler.Option,
|
||||
) *testContext {
|
||||
// 1. Create scheduler
|
||||
testCtx.informerFactory = informers.NewSharedInformerFactory(testCtx.clientSet, resyncPeriod)
|
||||
|
||||
var podInformer coreinformers.PodInformer
|
||||
|
||||
// create independent pod informer if required
|
||||
if setPodInformer {
|
||||
podInformer = scheduler.NewPodInformer(testCtx.clientSet, 12*time.Hour)
|
||||
} else {
|
||||
podInformer = testCtx.informerFactory.Core().V1().Pods()
|
||||
}
|
||||
var err error
|
||||
eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
|
||||
Interface: testCtx.clientSet.EventsV1beta1().Events(""),
|
||||
})
|
||||
if policy != nil {
|
||||
opts = append(opts, scheduler.WithAlgorithmSource(createAlgorithmSourceFromPolicy(policy, testCtx.clientSet)))
|
||||
}
|
||||
opts = append([]scheduler.Option{scheduler.WithBindTimeoutSeconds(600)}, opts...)
|
||||
testCtx.scheduler, err = scheduler.New(
|
||||
testCtx.clientSet,
|
||||
testCtx.informerFactory,
|
||||
podInformer,
|
||||
profile.NewRecorderFactory(eventBroadcaster),
|
||||
testCtx.ctx.Done(),
|
||||
opts...,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create scheduler: %v", err)
|
||||
}
|
||||
|
||||
// set setPodInformer if provided.
|
||||
if setPodInformer {
|
||||
go podInformer.Informer().Run(testCtx.scheduler.StopEverything)
|
||||
cache.WaitForNamedCacheSync("scheduler", testCtx.scheduler.StopEverything, podInformer.Informer().HasSynced)
|
||||
}
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
eventBroadcaster.StartRecordingToSink(stopCh)
|
||||
|
||||
testCtx.informerFactory.Start(testCtx.scheduler.StopEverything)
|
||||
testCtx.informerFactory.WaitForCacheSync(testCtx.scheduler.StopEverything)
|
||||
|
||||
go testCtx.scheduler.Run(testCtx.ctx)
|
||||
|
||||
return testCtx
|
||||
}
|
||||
|
||||
// initDisruptionController initializes and runs a Disruption Controller to properly
|
||||
// update PodDisruptionBudget objects.
|
||||
func initDisruptionController(t *testing.T, testCtx *testContext) *disruption.DisruptionController {
|
||||
informers := informers.NewSharedInformerFactory(testCtx.clientSet, 12*time.Hour)
|
||||
func initDisruptionController(t *testing.T, testCtx *testutils.TestContext) *disruption.DisruptionController {
|
||||
informers := informers.NewSharedInformerFactory(testCtx.ClientSet, 12*time.Hour)
|
||||
|
||||
discoveryClient := cacheddiscovery.NewMemCacheClient(testCtx.clientSet.Discovery())
|
||||
discoveryClient := cacheddiscovery.NewMemCacheClient(testCtx.ClientSet.Discovery())
|
||||
mapper := restmapper.NewDeferredDiscoveryRESTMapper(discoveryClient)
|
||||
|
||||
config := restclient.Config{Host: testCtx.httpServer.URL}
|
||||
scaleKindResolver := scale.NewDiscoveryScaleKindResolver(testCtx.clientSet.Discovery())
|
||||
config := restclient.Config{Host: testCtx.HTTPServer.URL}
|
||||
scaleKindResolver := scale.NewDiscoveryScaleKindResolver(testCtx.ClientSet.Discovery())
|
||||
scaleClient, err := scale.NewForConfig(&config, mapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver)
|
||||
if err != nil {
|
||||
t.Fatalf("Error in create scaleClient: %v", err)
|
||||
@ -230,41 +66,30 @@ func initDisruptionController(t *testing.T, testCtx *testContext) *disruption.Di
|
||||
informers.Apps().V1().ReplicaSets(),
|
||||
informers.Apps().V1().Deployments(),
|
||||
informers.Apps().V1().StatefulSets(),
|
||||
testCtx.clientSet,
|
||||
testCtx.ClientSet,
|
||||
mapper,
|
||||
scaleClient)
|
||||
|
||||
informers.Start(testCtx.scheduler.StopEverything)
|
||||
informers.WaitForCacheSync(testCtx.scheduler.StopEverything)
|
||||
go dc.Run(testCtx.scheduler.StopEverything)
|
||||
informers.Start(testCtx.Scheduler.StopEverything)
|
||||
informers.WaitForCacheSync(testCtx.Scheduler.StopEverything)
|
||||
go dc.Run(testCtx.Scheduler.StopEverything)
|
||||
return dc
|
||||
}
|
||||
|
||||
// initTest initializes a test environment and creates master and scheduler with default
|
||||
// configuration.
|
||||
func initTest(t *testing.T, nsPrefix string, opts ...scheduler.Option) *testContext {
|
||||
return initTestSchedulerWithOptions(t, initTestMaster(t, nsPrefix, nil), true, nil, time.Second, opts...)
|
||||
func initTest(t *testing.T, nsPrefix string, opts ...scheduler.Option) *testutils.TestContext {
|
||||
return testutils.InitTestSchedulerWithOptions(t, testutils.InitTestMaster(t, nsPrefix, nil), true, nil, time.Second, opts...)
|
||||
}
|
||||
|
||||
// initTestDisablePreemption initializes a test environment and creates master and scheduler with default
|
||||
// configuration but with pod preemption disabled.
|
||||
func initTestDisablePreemption(t *testing.T, nsPrefix string) *testContext {
|
||||
return initTestSchedulerWithOptions(
|
||||
t, initTestMaster(t, nsPrefix, nil), true, nil,
|
||||
func initTestDisablePreemption(t *testing.T, nsPrefix string) *testutils.TestContext {
|
||||
return testutils.InitTestSchedulerWithOptions(
|
||||
t, testutils.InitTestMaster(t, nsPrefix, nil), true, nil,
|
||||
time.Second, scheduler.WithPreemptionDisabled(true))
|
||||
}
|
||||
|
||||
// cleanupTest deletes the scheduler and the test namespace. It should be called
|
||||
// at the end of a test.
|
||||
func cleanupTest(t *testing.T, testCtx *testContext) {
|
||||
// Kill the scheduler.
|
||||
testCtx.cancelFn()
|
||||
// Cleanup nodes.
|
||||
testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
|
||||
framework.DeleteTestingNamespace(testCtx.ns, testCtx.httpServer, t)
|
||||
testCtx.closeFn()
|
||||
}
|
||||
|
||||
// waitForReflection waits till the passFunc confirms that the object it expects
|
||||
// to see is in the store. Used to observe reflected events.
|
||||
func waitForReflection(t *testing.T, nodeLister corelisters.NodeLister, key string,
|
||||
@ -348,12 +173,6 @@ func createNodeWithImages(cs clientset.Interface, name string, res *v1.ResourceL
|
||||
return cs.CoreV1().Nodes().Create(context.TODO(), initNode(name, res, images), metav1.CreateOptions{})
|
||||
}
|
||||
|
||||
// updateNodeStatus updates the status of node.
|
||||
func updateNodeStatus(cs clientset.Interface, node *v1.Node) error {
|
||||
_, err := cs.CoreV1().Nodes().UpdateStatus(context.TODO(), node, metav1.UpdateOptions{})
|
||||
return err
|
||||
}
|
||||
|
||||
// createNodes creates `numNodes` nodes. The created node names will be in the
|
||||
// form of "`prefix`-X" where X is an ordinal.
|
||||
func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, numNodes int) ([]*v1.Node, error) {
|
||||
@ -369,55 +188,6 @@ func createNodes(cs clientset.Interface, prefix string, res *v1.ResourceList, nu
|
||||
return nodes[:], nil
|
||||
}
|
||||
|
||||
// nodeTainted return a condition function that returns true if the given node contains
|
||||
// the taints.
|
||||
func nodeTainted(cs clientset.Interface, nodeName string, taints []v1.Taint) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// node.Spec.Taints may have more taints
|
||||
if len(taints) > len(node.Spec.Taints) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
for _, taint := range taints {
|
||||
if !taintutils.TaintExists(node.Spec.Taints, &taint) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
func addTaintToNode(cs clientset.Interface, nodeName string, taint v1.Taint) error {
|
||||
node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
copy := node.DeepCopy()
|
||||
copy.Spec.Taints = append(copy.Spec.Taints, taint)
|
||||
_, err = cs.CoreV1().Nodes().Update(context.TODO(), copy, metav1.UpdateOptions{})
|
||||
return err
|
||||
}
|
||||
|
||||
// waitForNodeTaints waits for a node to have the target taints and returns
|
||||
// an error if it does not have taints within the given timeout.
|
||||
func waitForNodeTaints(cs clientset.Interface, node *v1.Node, taints []v1.Taint) error {
|
||||
return wait.Poll(100*time.Millisecond, 30*time.Second, nodeTainted(cs, node.Name, taints))
|
||||
}
|
||||
|
||||
// cleanupNodes deletes all nodes.
|
||||
func cleanupNodes(cs clientset.Interface, t *testing.T) {
|
||||
err := cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.NewDeleteOptions(0), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("error while deleting all nodes: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
type pausePodConfig struct {
|
||||
Name string
|
||||
Namespace string
|
||||
@ -499,7 +269,7 @@ func runPausePod(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error) {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error creating pause pod: %v", err)
|
||||
}
|
||||
if err = waitForPodToSchedule(cs, pod); err != nil {
|
||||
if err = testutils.WaitForPodToSchedule(cs, pod); err != nil {
|
||||
return pod, fmt.Errorf("Pod %v/%v didn't schedule successfully. Error: %v", pod.Namespace, pod.Name, err)
|
||||
}
|
||||
if pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}); err != nil {
|
||||
@ -536,7 +306,7 @@ func runPodWithContainers(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error creating pod-with-containers: %v", err)
|
||||
}
|
||||
if err = waitForPodToSchedule(cs, pod); err != nil {
|
||||
if err = testutils.WaitForPodToSchedule(cs, pod); err != nil {
|
||||
return pod, fmt.Errorf("Pod %v didn't schedule successfully. Error: %v", pod.Name, err)
|
||||
}
|
||||
if pod, err = cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}); err != nil {
|
||||
@ -545,20 +315,6 @@ func runPodWithContainers(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error)
|
||||
return pod, nil
|
||||
}
|
||||
|
||||
// podDeleted returns true if a pod is not found in the given namespace.
|
||||
func podDeleted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
if pod.DeletionTimestamp != nil {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// podIsGettingEvicted returns true if the pod's deletion timestamp is set.
|
||||
func podIsGettingEvicted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
@ -573,21 +329,6 @@ func podIsGettingEvicted(c clientset.Interface, podNamespace, podName string) wa
|
||||
}
|
||||
}
|
||||
|
||||
// podScheduled returns true if a node is assigned to the given pod.
|
||||
func podScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
// This could be a connection error so we want to retry.
|
||||
return false, nil
|
||||
}
|
||||
if pod.Spec.NodeName == "" {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// podScheduledIn returns true if a given pod is placed onto one of the expected nodes.
|
||||
func podScheduledIn(c clientset.Interface, podNamespace, podName string, nodeNames []string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
@ -639,18 +380,6 @@ func podSchedulingError(c clientset.Interface, podNamespace, podName string) wai
|
||||
}
|
||||
}
|
||||
|
||||
// waitForPodToScheduleWithTimeout waits for a pod to get scheduled and returns
|
||||
// an error if it does not scheduled within the given timeout.
|
||||
func waitForPodToScheduleWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
|
||||
return wait.Poll(100*time.Millisecond, timeout, podScheduled(cs, pod.Namespace, pod.Name))
|
||||
}
|
||||
|
||||
// waitForPodToSchedule waits for a pod to get scheduled and returns an error if
|
||||
// it does not get scheduled within the timeout duration (30 seconds).
|
||||
func waitForPodToSchedule(cs clientset.Interface, pod *v1.Pod) error {
|
||||
return waitForPodToScheduleWithTimeout(cs, pod, 30*time.Second)
|
||||
}
|
||||
|
||||
// waitForPodUnschedulableWithTimeout waits for a pod to fail scheduling and returns
|
||||
// an error if it does not become unschedulable within the given timeout.
|
||||
func waitForPodUnschedulableWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
|
||||
@ -665,9 +394,9 @@ func waitForPodUnschedulable(cs clientset.Interface, pod *v1.Pod) error {
|
||||
|
||||
// waitForPDBsStable waits for PDBs to have "CurrentHealthy" status equal to
|
||||
// the expected values.
|
||||
func waitForPDBsStable(testCtx *testContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
|
||||
func waitForPDBsStable(testCtx *testutils.TestContext, pdbs []*policy.PodDisruptionBudget, pdbPodNum []int32) error {
|
||||
return wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
|
||||
pdbList, err := testCtx.clientSet.PolicyV1beta1().PodDisruptionBudgets(testCtx.ns.Name).List(context.TODO(), metav1.ListOptions{})
|
||||
pdbList, err := testCtx.ClientSet.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -693,9 +422,9 @@ func waitForPDBsStable(testCtx *testContext, pdbs []*policy.PodDisruptionBudget,
|
||||
}
|
||||
|
||||
// waitCachedPodsStable waits until scheduler cache has the given pods.
|
||||
func waitCachedPodsStable(testCtx *testContext, pods []*v1.Pod) error {
|
||||
func waitCachedPodsStable(testCtx *testutils.TestContext, pods []*v1.Pod) error {
|
||||
return wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
|
||||
cachedPods, err := testCtx.scheduler.SchedulerCache.List(labels.Everything())
|
||||
cachedPods, err := testCtx.Scheduler.SchedulerCache.List(labels.Everything())
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -703,11 +432,11 @@ func waitCachedPodsStable(testCtx *testContext, pods []*v1.Pod) error {
|
||||
return false, nil
|
||||
}
|
||||
for _, p := range pods {
|
||||
actualPod, err1 := testCtx.clientSet.CoreV1().Pods(p.Namespace).Get(context.TODO(), p.Name, metav1.GetOptions{})
|
||||
actualPod, err1 := testCtx.ClientSet.CoreV1().Pods(p.Namespace).Get(context.TODO(), p.Name, metav1.GetOptions{})
|
||||
if err1 != nil {
|
||||
return false, err1
|
||||
}
|
||||
cachedPod, err2 := testCtx.scheduler.SchedulerCache.GetPod(actualPod)
|
||||
cachedPod, err2 := testCtx.Scheduler.SchedulerCache.GetPod(actualPod)
|
||||
if err2 != nil || cachedPod == nil {
|
||||
return false, err2
|
||||
}
|
||||
@ -725,22 +454,6 @@ func getPod(cs clientset.Interface, podName string, podNamespace string) (*v1.Po
|
||||
return cs.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
}
|
||||
|
||||
// cleanupPods deletes the given pods and waits for them to be actually deleted.
|
||||
func cleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
|
||||
for _, p := range pods {
|
||||
err := cs.CoreV1().Pods(p.Namespace).Delete(context.TODO(), p.Name, metav1.NewDeleteOptions(0))
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
t.Errorf("error while deleting pod %v/%v: %v", p.Namespace, p.Name, err)
|
||||
}
|
||||
}
|
||||
for _, p := range pods {
|
||||
if err := wait.Poll(time.Millisecond, wait.ForeverTestTimeout,
|
||||
podDeleted(cs, p.Namespace, p.Name)); err != nil {
|
||||
t.Errorf("error while waiting for pod %v/%v to get deleted: %v", p.Namespace, p.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// noPodsInNamespace returns true if no pods in the given namespace.
|
||||
func noPodsInNamespace(c clientset.Interface, podNamespace string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
@ -766,15 +479,3 @@ func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) {
|
||||
t.Errorf("error while waiting for pods in namespace %v: %v", ns, err)
|
||||
}
|
||||
}
|
||||
|
||||
func waitForSchedulerCacheCleanup(sched *scheduler.Scheduler, t *testing.T) {
|
||||
schedulerCacheIsEmpty := func() (bool, error) {
|
||||
dump := sched.Cache().Dump()
|
||||
|
||||
return len(dump.Nodes) == 0 && len(dump.AssumedPods) == 0, nil
|
||||
}
|
||||
|
||||
if err := wait.Poll(time.Second, wait.ForeverTestTimeout, schedulerCacheIsEmpty); err != nil {
|
||||
t.Errorf("Failed to wait for scheduler cache cleanup: %v", err)
|
||||
}
|
||||
}
|
||||
|
@ -15,12 +15,23 @@ go_library(
|
||||
deps = [
|
||||
"//pkg/controller/volume/persistentvolume/util:go_default_library",
|
||||
"//pkg/scheduler:go_default_library",
|
||||
"//pkg/scheduler/apis/config:go_default_library",
|
||||
"//pkg/scheduler/apis/config/scheme:go_default_library",
|
||||
"//pkg/scheduler/apis/config/v1:go_default_library",
|
||||
"//pkg/scheduler/profile:go_default_library",
|
||||
"//pkg/util/taints:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/informers:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/rest:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
|
||||
"//staging/src/k8s.io/client-go/tools/events:go_default_library",
|
||||
"//staging/src/k8s.io/legacy-cloud-providers/gce:go_default_library",
|
||||
|
@ -18,20 +18,35 @@ package util
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
"k8s.io/client-go/informers"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/events"
|
||||
"k8s.io/klog"
|
||||
pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
|
||||
"k8s.io/kubernetes/pkg/scheduler"
|
||||
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
|
||||
"k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
|
||||
schedulerapiv1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1"
|
||||
"k8s.io/kubernetes/pkg/scheduler/profile"
|
||||
taintutils "k8s.io/kubernetes/pkg/util/taints"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
@ -131,3 +146,326 @@ func StartFakePVController(clientSet clientset.Interface) ShutdownFunc {
|
||||
informerFactory.Start(ctx.Done())
|
||||
return ShutdownFunc(cancel)
|
||||
}
|
||||
|
||||
// TestContext stores the necessary context info for an integration test.
|
||||
type TestContext struct {
|
||||
CloseFn framework.CloseFunc
|
||||
HTTPServer *httptest.Server
|
||||
NS *v1.Namespace
|
||||
ClientSet *clientset.Clientset
|
||||
InformerFactory informers.SharedInformerFactory
|
||||
Scheduler *scheduler.Scheduler
|
||||
Ctx context.Context
|
||||
CancelFn context.CancelFunc
|
||||
}
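With the context struct and its fields exported, call sites switch from testCtx.clientSet and testCtx.ns to testCtx.ClientSet and testCtx.NS. A minimal caller-side sketch, assuming the scheduler test package's existing imports; the helper name is illustrative and not part of this commit:

// createPodInTestNamespace is an illustrative helper (not in this commit): it
// creates a pod in the test namespace through the exported TestContext fields.
func createPodInTestNamespace(testCtx *testutils.TestContext, pod *v1.Pod) (*v1.Pod, error) {
	return testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
}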
|
||||
|
||||
// CleanupNodes cleans up all nodes created during the integration test.
|
||||
func CleanupNodes(cs clientset.Interface, t *testing.T) {
|
||||
err := cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.NewDeleteOptions(0), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Errorf("error while deleting all nodes: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// PodDeleted returns true if a pod is not found in the given namespace.
|
||||
func PodDeleted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
if apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
if pod.DeletionTimestamp != nil {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
// CleanupTest cleans up the resources created during the integration test.
|
||||
func CleanupTest(t *testing.T, testCtx *TestContext) {
|
||||
// Kill the scheduler.
|
||||
testCtx.CancelFn()
|
||||
// Cleanup nodes.
|
||||
testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), nil, metav1.ListOptions{})
|
||||
framework.DeleteTestingNamespace(testCtx.NS, testCtx.HTTPServer, t)
|
||||
testCtx.CloseFn()
|
||||
}
|
||||
|
||||
// CleanupPods deletes the given pods and waits for them to be actually deleted.
|
||||
func CleanupPods(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
|
||||
for _, p := range pods {
|
||||
err := cs.CoreV1().Pods(p.Namespace).Delete(context.TODO(), p.Name, metav1.NewDeleteOptions(0))
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
t.Errorf("error while deleting pod %v/%v: %v", p.Namespace, p.Name, err)
|
||||
}
|
||||
}
|
||||
for _, p := range pods {
|
||||
if err := wait.Poll(time.Millisecond, wait.ForeverTestTimeout,
|
||||
PodDeleted(cs, p.Namespace, p.Name)); err != nil {
|
||||
t.Errorf("error while waiting for pod %v/%v to get deleted: %v", p.Namespace, p.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// AddTaintToNode adds the given taint to the specified node.
|
||||
func AddTaintToNode(cs clientset.Interface, nodeName string, taint v1.Taint) error {
|
||||
node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
copy := node.DeepCopy()
|
||||
copy.Spec.Taints = append(copy.Spec.Taints, taint)
|
||||
_, err = cs.CoreV1().Nodes().Update(context.TODO(), copy, metav1.UpdateOptions{})
|
||||
return err
|
||||
}
|
||||
|
||||
// WaitForNodeTaints waits for a node to have the target taints and returns
|
||||
// an error if it does not have taints within the given timeout.
|
||||
func WaitForNodeTaints(cs clientset.Interface, node *v1.Node, taints []v1.Taint) error {
|
||||
return wait.Poll(100*time.Millisecond, 30*time.Second, NodeTainted(cs, node.Name, taints))
|
||||
}
|
||||
|
||||
// NodeTainted returns a condition function that returns true if the given node contains
|
||||
// the taints.
|
||||
func NodeTainted(cs clientset.Interface, nodeName string, taints []v1.Taint) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
node, err := cs.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
// node.Spec.Taints may have more taints
|
||||
if len(taints) > len(node.Spec.Taints) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
for _, taint := range taints {
|
||||
if !taintutils.TaintExists(node.Spec.Taints, &taint) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// NodeReadyStatus returns the status of the first condition with type NodeReady.
|
||||
// If none of the conditions is of type NodeReady, it returns an error.
|
||||
func NodeReadyStatus(conditions []v1.NodeCondition) (v1.ConditionStatus, error) {
|
||||
for _, c := range conditions {
|
||||
if c.Type != v1.NodeReady {
|
||||
continue
|
||||
}
|
||||
// Just return the first condition with type NodeReady
|
||||
return c.Status, nil
|
||||
}
|
||||
return v1.ConditionFalse, errors.New("None of the conditions is of type NodeReady")
|
||||
}
|
||||
|
||||
// GetTolerationSeconds returns the TolerationSeconds of the toleration that matches the NotReady:NoExecute taint, if present.
|
||||
func GetTolerationSeconds(tolerations []v1.Toleration) (int64, error) {
|
||||
for _, t := range tolerations {
|
||||
if t.Key == v1.TaintNodeNotReady && t.Effect == v1.TaintEffectNoExecute && t.Operator == v1.TolerationOpExists {
|
||||
return *t.TolerationSeconds, nil
|
||||
}
|
||||
}
|
||||
return 0, fmt.Errorf("cannot find toleration")
|
||||
}
|
||||
|
||||
// NodeCopyWithConditions duplicates the node object with the given conditions.
|
||||
func NodeCopyWithConditions(node *v1.Node, conditions []v1.NodeCondition) *v1.Node {
|
||||
copy := node.DeepCopy()
|
||||
copy.ResourceVersion = "0"
|
||||
copy.Status.Conditions = conditions
|
||||
for i := range copy.Status.Conditions {
|
||||
copy.Status.Conditions[i].LastHeartbeatTime = metav1.Now()
|
||||
}
|
||||
return copy
|
||||
}
|
||||
|
||||
// UpdateNodeStatus updates the status of node.
|
||||
func UpdateNodeStatus(cs clientset.Interface, node *v1.Node) error {
|
||||
_, err := cs.CoreV1().Nodes().UpdateStatus(context.TODO(), node, metav1.UpdateOptions{})
|
||||
return err
|
||||
}
|
||||
|
||||
// InitTestMaster initializes a test environment and creates a master with default
|
||||
// configuration.
|
||||
func InitTestMaster(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
|
||||
ctx, cancelFunc := context.WithCancel(context.Background())
|
||||
testCtx := TestContext{
|
||||
Ctx: ctx,
|
||||
CancelFn: cancelFunc,
|
||||
}
|
||||
|
||||
// 1. Create master
|
||||
h := &framework.MasterHolder{Initialized: make(chan struct{})}
|
||||
s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
|
||||
<-h.Initialized
|
||||
h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
|
||||
}))
|
||||
|
||||
masterConfig := framework.NewIntegrationTestMasterConfig()
|
||||
|
||||
if admission != nil {
|
||||
masterConfig.GenericConfig.AdmissionControl = admission
|
||||
}
|
||||
|
||||
_, testCtx.HTTPServer, testCtx.CloseFn = framework.RunAMasterUsingServer(masterConfig, s, h)
|
||||
|
||||
if nsPrefix != "default" {
|
||||
testCtx.NS = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), s, t)
|
||||
} else {
|
||||
testCtx.NS = framework.CreateTestingNamespace("default", s, t)
|
||||
}
|
||||
|
||||
// 2. Create kubeclient
|
||||
testCtx.ClientSet = clientset.NewForConfigOrDie(
|
||||
&restclient.Config{
|
||||
QPS: -1, Host: s.URL,
|
||||
ContentConfig: restclient.ContentConfig{
|
||||
GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"},
|
||||
},
|
||||
},
|
||||
)
|
||||
return &testCtx
|
||||
}
|
||||
|
||||
// WaitForSchedulerCacheCleanup waits for cleanup of scheduler's cache to complete
|
||||
func WaitForSchedulerCacheCleanup(sched *scheduler.Scheduler, t *testing.T) {
|
||||
schedulerCacheIsEmpty := func() (bool, error) {
|
||||
dump := sched.Cache().Dump()
|
||||
|
||||
return len(dump.Nodes) == 0 && len(dump.AssumedPods) == 0, nil
|
||||
}
|
||||
|
||||
if err := wait.Poll(time.Second, wait.ForeverTestTimeout, schedulerCacheIsEmpty); err != nil {
|
||||
t.Errorf("Failed to wait for scheduler cache cleanup: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// InitTestScheduler initializes a test environment and creates a scheduler with default
|
||||
// configuration.
|
||||
func InitTestScheduler(
|
||||
t *testing.T,
|
||||
testCtx *TestContext,
|
||||
setPodInformer bool,
|
||||
policy *schedulerapi.Policy,
|
||||
) *TestContext {
|
||||
// Pod preemption is enabled by default scheduler configuration.
|
||||
return InitTestSchedulerWithOptions(t, testCtx, setPodInformer, policy, time.Second)
|
||||
}
|
||||
|
||||
// InitTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
|
||||
// configuration and other options.
|
||||
func InitTestSchedulerWithOptions(
|
||||
t *testing.T,
|
||||
testCtx *TestContext,
|
||||
setPodInformer bool,
|
||||
policy *schedulerapi.Policy,
|
||||
resyncPeriod time.Duration,
|
||||
opts ...scheduler.Option,
|
||||
) *TestContext {
|
||||
// 1. Create scheduler
|
||||
testCtx.InformerFactory = informers.NewSharedInformerFactory(testCtx.ClientSet, resyncPeriod)
|
||||
|
||||
var podInformer coreinformers.PodInformer
|
||||
|
||||
// create independent pod informer if required
|
||||
if setPodInformer {
|
||||
podInformer = scheduler.NewPodInformer(testCtx.ClientSet, 12*time.Hour)
|
||||
} else {
|
||||
podInformer = testCtx.InformerFactory.Core().V1().Pods()
|
||||
}
|
||||
var err error
|
||||
eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
|
||||
Interface: testCtx.ClientSet.EventsV1beta1().Events(""),
|
||||
})
|
||||
|
||||
if policy != nil {
|
||||
opts = append(opts, scheduler.WithAlgorithmSource(CreateAlgorithmSourceFromPolicy(policy, testCtx.ClientSet)))
|
||||
}
|
||||
opts = append([]scheduler.Option{scheduler.WithBindTimeoutSeconds(600)}, opts...)
|
||||
testCtx.Scheduler, err = scheduler.New(
|
||||
testCtx.ClientSet,
|
||||
testCtx.InformerFactory,
|
||||
podInformer,
|
||||
profile.NewRecorderFactory(eventBroadcaster),
|
||||
testCtx.Ctx.Done(),
|
||||
opts...,
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create scheduler: %v", err)
|
||||
}
|
||||
|
||||
// Run the independent pod informer if one was requested.
|
||||
if setPodInformer {
|
||||
go podInformer.Informer().Run(testCtx.Scheduler.StopEverything)
|
||||
cache.WaitForNamedCacheSync("scheduler", testCtx.Scheduler.StopEverything, podInformer.Informer().HasSynced)
|
||||
}
|
||||
|
||||
stopCh := make(chan struct{})
|
||||
eventBroadcaster.StartRecordingToSink(stopCh)
|
||||
|
||||
testCtx.InformerFactory.Start(testCtx.Scheduler.StopEverything)
|
||||
testCtx.InformerFactory.WaitForCacheSync(testCtx.Scheduler.StopEverything)
|
||||
|
||||
go testCtx.Scheduler.Run(testCtx.Ctx)
|
||||
|
||||
return testCtx
|
||||
}
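Taken together, InitTestMaster, InitTestScheduler and CleanupTest give callers the same setup/teardown lifecycle the scheduler integration tests used before the move. A minimal sketch of a typical caller; the test name and namespace prefix are placeholders, not taken from this commit:

// Illustrative only: the usual flow a test in test/integration/scheduler follows.
func TestExampleLifecycle(t *testing.T) {
	// Start an API server and a dedicated test namespace.
	testCtx := testutils.InitTestMaster(t, "example", nil)
	// Start a scheduler with the default configuration and an independent pod informer.
	testCtx = testutils.InitTestScheduler(t, testCtx, true, nil)
	defer testutils.CleanupTest(t, testCtx)

	cs := testCtx.ClientSet
	// ... create nodes and pods against cs inside testCtx.NS.Name ...

	testutils.CleanupNodes(cs, t)
	testutils.WaitForSchedulerCacheCleanup(testCtx.Scheduler, t)
}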
|
||||
|
||||
// CreateAlgorithmSourceFromPolicy creates the schedulerAlgorithmSource from the policy parameter
|
||||
func CreateAlgorithmSourceFromPolicy(policy *schedulerapi.Policy, clientSet clientset.Interface) schedulerapi.SchedulerAlgorithmSource {
|
||||
// Serialize the Policy object into a ConfigMap later.
|
||||
info, ok := runtime.SerializerInfoForMediaType(scheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
|
||||
if !ok {
|
||||
panic("could not find json serializer")
|
||||
}
|
||||
encoder := scheme.Codecs.EncoderForVersion(info.Serializer, schedulerapiv1.SchemeGroupVersion)
|
||||
policyString := runtime.EncodeOrDie(encoder, policy)
|
||||
configPolicyName := "scheduler-custom-policy-config"
|
||||
policyConfigMap := v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceSystem, Name: configPolicyName},
|
||||
Data: map[string]string{schedulerapi.SchedulerPolicyConfigMapKey: policyString},
|
||||
}
|
||||
policyConfigMap.APIVersion = "v1"
|
||||
clientSet.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(context.TODO(), &policyConfigMap, metav1.CreateOptions{})
|
||||
|
||||
return schedulerapi.SchedulerAlgorithmSource{
|
||||
Policy: &schedulerapi.SchedulerPolicySource{
|
||||
ConfigMap: &schedulerapi.SchedulerPolicyConfigMapSource{
|
||||
Namespace: policyConfigMap.Namespace,
|
||||
Name: policyConfigMap.Name,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
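When a test needs a non-default algorithm source, it hands a Policy to InitTestSchedulerWithOptions, which routes it through CreateAlgorithmSourceFromPolicy above. A minimal sketch; the predicate name and namespace prefix are placeholders, not taken from this commit:

// Illustrative only: run the test scheduler from a custom Policy.
policy := &schedulerapi.Policy{
	Predicates: []schedulerapi.PredicatePolicy{{Name: "PodFitsResources"}},
}
testCtx := testutils.InitTestMaster(t, "custom-policy", nil)
testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, true, policy, time.Second)
defer testutils.CleanupTest(t, testCtx)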
|
||||
|
||||
// WaitForPodToScheduleWithTimeout waits for a pod to get scheduled and returns
|
||||
// an error if it does not get scheduled within the given timeout.
|
||||
func WaitForPodToScheduleWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
|
||||
return wait.Poll(100*time.Millisecond, timeout, PodScheduled(cs, pod.Namespace, pod.Name))
|
||||
}
|
||||
|
||||
// WaitForPodToSchedule waits for a pod to get scheduled and returns an error if
|
||||
// it does not get scheduled within the timeout duration (30 seconds).
|
||||
func WaitForPodToSchedule(cs clientset.Interface, pod *v1.Pod) error {
|
||||
return WaitForPodToScheduleWithTimeout(cs, pod, 30*time.Second)
|
||||
}
|
||||
|
||||
// PodScheduled checks if the pod has been scheduled
|
||||
func PodScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
|
||||
return func() (bool, error) {
|
||||
pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
// This could be a connection error so we want to retry.
|
||||
return false, nil
|
||||
}
|
||||
if pod.Spec.NodeName == "" {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
}
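Call sites combine these helpers with CleanupPods, as in the refactored hunks above. Roughly, assuming cs is testCtx.ClientSet and pausePod is an already-constructed *v1.Pod (both placeholders here, not from this commit):

// Illustrative only: create a pod, wait for it to be bound, then clean it up.
created, err := cs.CoreV1().Pods(testCtx.NS.Name).Create(context.TODO(), pausePod, metav1.CreateOptions{})
if err != nil {
	t.Fatalf("failed to create pod: %v", err)
}
if err := testutils.WaitForPodToSchedule(cs, created); err != nil {
	t.Errorf("pod %s/%s was not scheduled: %v", created.Namespace, created.Name, err)
}
testutils.CleanupPods(cs, t, []*v1.Pod{created})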