Make scheduler integration test faster
Avoid waiting 30 seconds for every negative test case. This cuts the test time from 450s to 125s.
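The speedup comes from two knobs visible in the hunks below: polling every 100ms instead of every second, and capping the wait at 2 seconds when a pod is expected *not* to schedule, rather than burning the full 30-second test timeout. Here is a stdlib-only sketch of that pattern; the `poll` helper stands in for `wait.Poll` from k8s.io/apimachinery, and all other names are illustrative, not the test's actual code:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

var errWaitTimeout = errors.New("timed out waiting for the condition")

// poll mirrors the shape of wait.Poll: it checks condition every
// interval until the condition returns true, returns an error, or
// the timeout elapses.
func poll(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errWaitTimeout
		}
		time.Sleep(interval)
	}
}

func main() {
	const pollInterval = 100 * time.Millisecond // was 1s before this commit

	expectScheduled := false          // a negative test case
	waitTime := 30 * time.Second      // full timeout for positive cases
	if !expectScheduled {
		waitTime = 2 * time.Second // short wait for expected failures
	}

	err := poll(pollInterval, waitTime, func() (bool, error) {
		return false, nil // stand-in for a podScheduled(...) check
	})
	if expectScheduled && err != nil {
		fmt.Println("FAIL: pod should have been scheduled:", err)
	}
	if !expectScheduled && !errors.Is(err, errWaitTimeout) {
		fmt.Println("FAIL: pod should not have been scheduled:", err)
	}
}
```

The asymmetry is the point: positive cases keep the full timeout so slow scheduling doesn't flake, while negative cases only pay the short wait needed to be reasonably confident nothing was scheduled.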
parent df072ca97e
commit dc15e9cf90
@@ -49,7 +49,7 @@ const (
 
 // IMPORTANT NOTE for predicate developers:
 // We are using cached predicate result for pods belonging to the same equivalence class.
-// So when updating a existing predicate, you should consider whether your change will introduce new
+// So when updating an existing predicate, you should consider whether your change will introduce new
 // dependency to attributes of any API object like Pod, Node, Service etc.
 // If yes, you are expected to invalidate the cached predicate result for related API object change.
 // For example:
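As background for the note above, a hypothetical sketch of per-equivalence-class predicate caching and why a new attribute dependency forces invalidation. The cache layout and names here are illustrative only, not the scheduler's actual API:

```go
package main

import "fmt"

// Pods with identical scheduling requirements share an equivalence
// class, so a predicate's result can be cached once per class.
type cacheKey struct {
	equivClass string
	predicate  string
}

type equivCache map[cacheKey]bool // cached "fits" results

// invalidatePredicate evicts every cached result for one predicate,
// e.g. after a change to an API object attribute the predicate now reads.
func (c equivCache) invalidatePredicate(predicate string) {
	for k := range c {
		if k.predicate == predicate {
			delete(c, k)
		}
	}
}

func main() {
	c := equivCache{
		{equivClass: "web-pods", predicate: "PodFitsHostPorts"}:  true,
		{equivClass: "db-pods", predicate: "PodFitsHostPorts"}:   false,
		{equivClass: "web-pods", predicate: "MatchNodeSelector"}: true,
	}
	// Suppose PodFitsHostPorts gains a dependency on a Service attribute:
	// a Service update must now evict its stale cached results.
	c.invalidatePredicate("PodFitsHostPorts")
	fmt.Println(len(c)) // 1: only the MatchNodeSelector entry survives
}
```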
@@ -30,6 +30,8 @@ import (
 
 // This file tests the scheduler predicates functionality.
 
+const pollInterval = 100 * time.Millisecond
+
 // TestInterPodAffinity verifies that scheduler's inter pod affinity and
 // anti-affinity predicate functions works correctly.
 func TestInterPodAffinity(t *testing.T) {
@@ -808,6 +810,7 @@ func TestInterPodAffinity(t *testing.T) {
 			test: "nodes[0] and nodes[1] have same topologyKey and label value. nodes[0] has an existing pod that matches the inter pod affinity rule. The new pod can not be scheduled onto either of the two nodes.",
 		},
 	}
+
 	for _, test := range tests {
 		for _, pod := range test.pods {
 			var nsName string
@@ -820,7 +823,7 @@ func TestInterPodAffinity(t *testing.T) {
 			if err != nil {
 				t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test)
 			}
-			err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, createdPod.Namespace, createdPod.Name))
+			err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduled(cs, createdPod.Namespace, createdPod.Name))
 			if err != nil {
 				t.Errorf("Test Failed: error, %v, while waiting for pod during test, %v", err, test)
 			}
@@ -831,12 +834,20 @@ func TestInterPodAffinity(t *testing.T) {
 				t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test)
 			}
 		} else {
-			err = wait.Poll(time.Second, wait.ForeverTestTimeout, podScheduled(cs, testPod.Namespace, testPod.Name))
-			if err != nil && err != wait.ErrWaitTimeout {
-				t.Errorf("Test Failed: error, %v, while waiting for pod to get scheduled, %v", err, test.test)
-			}
-			if (err == nil) != test.fits {
-				t.Errorf("Test Failed: %v, err %v, test.fits %v", test.test, err, test.fits)
-			}
+			waitTime := wait.ForeverTestTimeout
+			if !test.fits {
+				waitTime = 2 * time.Second
+			}
+			err = wait.Poll(pollInterval, waitTime, podScheduled(cs, testPod.Namespace, testPod.Name))
+			if test.fits {
+				if err != nil {
+					t.Errorf("Test Failed: %v, err %v, test.fits %v", test.test, err, test.fits)
+				}
+			} else {
+				if err != wait.ErrWaitTimeout {
+					t.Errorf("Test Failed: error, %v, while waiting for pod to get scheduled, %v", err, test.test)
+				}
+			}
 		}
 
 		for _, pod := range test.pods {
@@ -850,7 +861,7 @@ func TestInterPodAffinity(t *testing.T) {
 			if err != nil {
 				t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
 			}
-			err = wait.Poll(time.Second, wait.ForeverTestTimeout, podDeleted(cs, nsName, pod.Name))
+			err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podDeleted(cs, nsName, pod.Name))
 			if err != nil {
 				t.Errorf("Test Failed: error, %v, while waiting for pod to get deleted, %v", err, test.test)
 			}
@@ -859,7 +870,7 @@ func TestInterPodAffinity(t *testing.T) {
 		if err != nil {
 			t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
 		}
-		err = wait.Poll(time.Second, wait.ForeverTestTimeout, podDeleted(cs, context.ns.Name, test.pod.Name))
+		err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podDeleted(cs, context.ns.Name, test.pod.Name))
 		if err != nil {
 			t.Errorf("Test Failed: error, %v, while waiting for pod to get deleted, %v", err, test.test)
 		}
@@ -384,7 +384,7 @@ func TestUnschedulableNodes(t *testing.T) {
 		}
 
 		// There are no schedulable nodes - the pod shouldn't be scheduled.
-		err = waitForPodToSchedule(context.clientSet, myPod)
+		err = waitForPodToScheduleWithTimeout(context.clientSet, myPod, 2*time.Second)
 		if err == nil {
 			t.Errorf("Pod scheduled successfully on unschedulable nodes")
 		}
@@ -332,13 +332,13 @@ func podScheduled(c clientset.Interface, podNamespace, podName string) wait.Cond
 // waitForPodToScheduleWithTimeout waits for a pod to get scheduled and returns
 // an error if it does not scheduled within the given timeout.
 func waitForPodToScheduleWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
-	return wait.Poll(time.Second, timeout, podScheduled(cs, pod.Namespace, pod.Name))
+	return wait.Poll(100*time.Millisecond, timeout, podScheduled(cs, pod.Namespace, pod.Name))
 }
 
 // waitForPodToSchedule waits for a pod to get scheduled and returns an error if
-// it does not scheduled within the timeout duration (30 seconds).
+// it does not get scheduled within the timeout duration (30 seconds).
 func waitForPodToSchedule(cs clientset.Interface, pod *v1.Pod) error {
-	return waitForPodToScheduleWithTimeout(cs, pod, wait.ForeverTestTimeout)
+	return waitForPodToScheduleWithTimeout(cs, pod, 30*time.Second)
 }
 
 // deletePod deletes the given pod in the given namespace.