Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-24 04:06:03 +00:00)

Scheduler integration tests standardization

This commit is contained in:
parent: b8b4186a14
commit: 5e679cf51f
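The standardization is the same across every function touched by this commit: the table-driven test struct gains a name field (replacing the free-form description/test string), each case carries that name, and the loop body is wrapped in t.Run so each case runs as a named subtest whose failures are attributed automatically, letting error messages drop the hand-written case description. A minimal sketch of the resulting shape, with illustrative fields rather than the real scheduler fixtures:

package scheduler_test

import "testing"

func TestExample(t *testing.T) {
    tests := []struct {
        name string // identifies the case; doubles as the subtest name
        fits bool
    }{
        {name: "pod fits on the node", fits: true},
    }
    for _, test := range tests {
        // t.Run attributes failures to the named case, so messages no longer
        // need the old "Test Failed: ..." suffix carrying test.test/test.description.
        t.Run(test.name, func(t *testing.T) {
            if !test.fits {
                t.Errorf("expected pod to fit")
            }
        })
    }
}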
@@ -67,14 +67,15 @@ func TestInterPodAffinity(t *testing.T) {
 	podLabel2 := map[string]string{"security": "S1"}

 	tests := []struct {
+		name      string
 		pod       *v1.Pod
 		pods      []*v1.Pod
 		node      *v1.Node
 		fits      bool
 		errorType string
-		test      string
 	}{
 		{
+			name: "validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -104,9 +105,9 @@ func TestInterPodAffinity(t *testing.T) {
 			node:      nodes[0],
 			fits:      false,
 			errorType: "invalidPod",
-			test:      "validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid",
 		},
 		{
+			name: "validates that Inter-pod-Affinity is respected if not matching",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -136,9 +137,9 @@ func TestInterPodAffinity(t *testing.T) {
 			},
 			node: nodes[0],
 			fits: false,
-			test: "validates that Inter-pod-Affinity is respected if not matching",
 		},
 		{
+			name: "validates that InterPodAffinity is respected if matching. requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using In operator that matches the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -179,9 +180,9 @@ func TestInterPodAffinity(t *testing.T) {
 			},
 			node: nodes[0],
 			fits: true,
-			test: "validates that InterPodAffinity is respected if matching. requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using In operator that matches the existing pod",
 		},
 		{
+			name: "validates that InterPodAffinity is respected if matching. requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using not in operator in labelSelector that matches the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -217,9 +218,9 @@ func TestInterPodAffinity(t *testing.T) {
 					Labels: podLabel}}},
 			node: nodes[0],
 			fits: true,
-			test: "validates that InterPodAffinity is respected if matching. requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using not in operator in labelSelector that matches the existing pod",
 		},
 		{
+			name: "validates that inter-pod-affinity is respected when pods have different Namespaces",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -256,9 +257,9 @@ func TestInterPodAffinity(t *testing.T) {
 					Labels: podLabel, Namespace: "ns"}}},
 			node: nodes[0],
 			fits: false,
-			test: "validates that inter-pod-affinity is respected when pods have different Namespaces",
 		},
 		{
+			name: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -293,9 +294,9 @@ func TestInterPodAffinity(t *testing.T) {
 					Labels: podLabel}}},
 			node: nodes[0],
 			fits: false,
-			test: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod",
 		},
 		{
+			name: "validates that InterPodAffinity is respected if matching with multiple affinities in multiple RequiredDuringSchedulingIgnoredDuringExecution ",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -347,9 +348,9 @@ func TestInterPodAffinity(t *testing.T) {
 					Labels: podLabel}}},
 			node: nodes[0],
 			fits: true,
-			test: "validates that InterPodAffinity is respected if matching with multiple affinities in multiple RequiredDuringSchedulingIgnoredDuringExecution ",
 		},
 		{
+			name: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node because one of the matchExpression items doesn't match.",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: podLabel2,
@@ -401,9 +402,9 @@ func TestInterPodAffinity(t *testing.T) {
 					Labels: podLabel}}},
 			node: nodes[0],
 			fits: false,
-			test: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node because one of the matchExpression items doesn't match.",
 		},
 		{
+			name: "validates that InterPod Affinity and AntiAffinity is respected if matching",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -454,9 +455,9 @@ func TestInterPodAffinity(t *testing.T) {
 					Labels: podLabel}}},
 			node: nodes[0],
 			fits: true,
-			test: "validates that InterPod Affinity and AntiAffinity is respected if matching",
 		},
 		{
+			name: "satisfies the PodAffinity and PodAntiAffinity and PodAntiAffinity symmetry with the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -531,9 +532,9 @@ func TestInterPodAffinity(t *testing.T) {
 			},
 			node: nodes[0],
 			fits: true,
-			test: "satisfies the PodAffinity and PodAntiAffinity and PodAntiAffinity symmetry with the existing pod",
 		},
 		{
+			name: "satisfies the PodAffinity but doesn't satisfies the PodAntiAffinity with the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -584,9 +585,9 @@ func TestInterPodAffinity(t *testing.T) {
 					Labels: podLabel}}},
 			node: nodes[0],
 			fits: false,
-			test: "satisfies the PodAffinity but doesn't satisfies the PodAntiAffinity with the existing pod",
 		},
 		{
+			name: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfies PodAntiAffinity symmetry with the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -661,9 +662,9 @@ func TestInterPodAffinity(t *testing.T) {
 			},
 			node: nodes[0],
 			fits: false,
-			test: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfies PodAntiAffinity symmetry with the existing pod",
 		},
 		{
+			name: "pod matches its own Label in PodAffinity and that matches the existing pod Labels",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -698,9 +699,9 @@ func TestInterPodAffinity(t *testing.T) {
 					Labels: podLabel}}},
 			node: nodes[0],
 			fits: false,
-			test: "pod matches its own Label in PodAffinity and that matches the existing pod Labels",
 		},
 		{
+			name: "Verify that PodAntiAffinity of an existing pod is respected when PodAntiAffinity symmetry is not satisfied with the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -738,9 +739,9 @@ func TestInterPodAffinity(t *testing.T) {
 			},
 			node: nodes[0],
 			fits: false,
-			test: "Verify that PodAntiAffinity of an existing pod is respected when PodAntiAffinity symmetry is not satisfied with the existing pod",
 		},
 		{
+			name: "Verify that PodAntiAffinity from existing pod is respected when pod statisfies PodAntiAffinity symmetry with the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fake-name",
@@ -778,9 +779,9 @@ func TestInterPodAffinity(t *testing.T) {
 			},
 			node: nodes[0],
 			fits: true,
-			test: "Verify that PodAntiAffinity from existing pod is respected when pod statisfies PodAntiAffinity symmetry with the existing pod",
 		},
 		{
+			name: "nodes[0] and nodes[1] have same topologyKey and label value. nodes[0] has an existing pod that matches the inter pod affinity rule. The new pod can not be scheduled onto either of the two nodes.",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{Name: "fake-name2"},
 				Spec: v1.PodSpec{
@@ -812,11 +813,11 @@ func TestInterPodAffinity(t *testing.T) {
 					NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{Name: "fakename", Labels: map[string]string{"foo": "abc"}}},
 			},
 			fits: false,
-			test: "nodes[0] and nodes[1] have same topologyKey and label value. nodes[0] has an existing pod that matches the inter pod affinity rule. The new pod can not be scheduled onto either of the two nodes.",
 		},
 	}

 	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
 			for _, pod := range test.pods {
 				var nsName string
 				if pod.Namespace != "" {
@@ -826,17 +827,17 @@ func TestInterPodAffinity(t *testing.T) {
 				}
 				createdPod, err := cs.CoreV1().Pods(nsName).Create(context.TODO(), pod, metav1.CreateOptions{})
 				if err != nil {
-					t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test)
+					t.Fatalf("Error while creating pod: %v", err)
 				}
 				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
 				if err != nil {
-					t.Errorf("Test Failed: error, %v, while waiting for pod during test, %v", err, test)
+					t.Errorf("Error while creating pod: %v", err)
 				}
 			}
 			testPod, err := cs.CoreV1().Pods(testCtx.NS.Name).Create(context.TODO(), test.pod, metav1.CreateOptions{})
 			if err != nil {
 				if !(test.errorType == "invalidPod" && apierrors.IsInvalid(err)) {
-					t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test)
+					t.Fatalf("Error while creating pod: %v", err)
 				}
 			}

@@ -846,16 +847,16 @@ func TestInterPodAffinity(t *testing.T) {
 				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
 			}
 			if err != nil {
-				t.Errorf("Test Failed: %v, err %v, test.fits %v", test.test, err, test.fits)
+				t.Errorf("Error while trying to fit a pod: %v", err)
 			}

 			err = cs.CoreV1().Pods(testCtx.NS.Name).Delete(context.TODO(), test.pod.Name, *metav1.NewDeleteOptions(0))
 			if err != nil {
-				t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
+				t.Errorf("Error while deleting pod: %v", err)
 			}
 			err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodDeleted(cs, testCtx.NS.Name, test.pod.Name))
 			if err != nil {
-				t.Errorf("Test Failed: error, %v, while waiting for pod to get deleted, %v", err, test.test)
+				t.Errorf("Error while waiting for pod to get deleted: %v", err)
 			}
 			for _, pod := range test.pods {
 				var nsName string
@@ -866,13 +867,14 @@ func TestInterPodAffinity(t *testing.T) {
 				}
 				err = cs.CoreV1().Pods(nsName).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 				if err != nil {
-					t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
+					t.Errorf("Error while deleting pod: %v", err)
 				}
 				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodDeleted(cs, nsName, pod.Name))
 				if err != nil {
-					t.Errorf("Test Failed: error, %v, while waiting for pod to get deleted, %v", err, test.test)
+					t.Errorf("Error while waiting for pod to get deleted: %v", err)
 				}
 			}
+		})
 	}
 }

@@ -1005,16 +1007,16 @@ func TestEvenPodsSpreadPredicate(t *testing.T) {
 			for _, pod := range tt.existingPods {
 				createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
 				if err != nil {
-					t.Fatalf("Test Failed: error while creating pod during test: %v", err)
+					t.Fatalf("Error while creating pod during test: %v", err)
 				}
 				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
 				if err != nil {
-					t.Errorf("Test Failed: error while waiting for pod during test: %v", err)
+					t.Errorf("Error while waiting for pod during test: %v", err)
 				}
 			}
 			testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(context.TODO(), tt.incomingPod, metav1.CreateOptions{})
 			if err != nil && !apierrors.IsInvalid(err) {
-				t.Fatalf("Test Failed: error while creating pod during test: %v", err)
+				t.Fatalf("Error while creating pod during test: %v", err)
 			}

 			if tt.fits {
@@ -164,7 +164,7 @@ func TestPreemption(t *testing.T) {

 	maxTokens := 1000
 	tests := []struct {
-		description         string
+		name                string
 		existingPods        []*v1.Pod
 		pod                 *v1.Pod
 		initTokens          int
@@ -172,7 +172,7 @@ func TestPreemption(t *testing.T) {
 		preemptedPodIndexes map[int]struct{}
 	}{
 		{
-			description: "basic pod preemption",
+			name:       "basic pod preemption",
 			initTokens: maxTokens,
 			existingPods: []*v1.Pod{
 				initPausePod(&pausePodConfig{
@@ -197,7 +197,7 @@ func TestPreemption(t *testing.T) {
 			preemptedPodIndexes: map[int]struct{}{0: {}},
 		},
 		{
-			description: "basic pod preemption with filter",
+			name:       "basic pod preemption with filter",
 			initTokens: 1,
 			existingPods: []*v1.Pod{
 				initPausePod(&pausePodConfig{
@@ -223,7 +223,7 @@ func TestPreemption(t *testing.T) {
 		},
 		{
 			// same as the previous test, but the filter is unresolvable.
-			description:  "basic pod preemption with unresolvable filter",
+			name:         "basic pod preemption with unresolvable filter",
 			initTokens:   1,
 			unresolvable: true,
 			existingPods: []*v1.Pod{
@@ -249,7 +249,7 @@ func TestPreemption(t *testing.T) {
 			preemptedPodIndexes: map[int]struct{}{},
 		},
 		{
-			description: "preemption is performed to satisfy anti-affinity",
+			name:       "preemption is performed to satisfy anti-affinity",
 			initTokens: maxTokens,
 			existingPods: []*v1.Pod{
 				initPausePod(&pausePodConfig{
@@ -313,7 +313,7 @@ func TestPreemption(t *testing.T) {
 		},
 		{
 			// This is similar to the previous case only pod-1 is high priority.
-			description: "preemption is not performed when anti-affinity is not satisfied",
+			name:       "preemption is not performed when anti-affinity is not satisfied",
 			initTokens: maxTokens,
 			existingPods: []*v1.Pod{
 				initPausePod(&pausePodConfig{
@@ -396,7 +396,7 @@ func TestPreemption(t *testing.T) {
 	}

 	for _, test := range tests {
-		t.Logf("================ Running test: %v\n", test.description)
+		t.Run(test.name, func(t *testing.T) {
 			filter.Tokens = test.initTokens
 			filter.Unresolvable = test.unresolvable
 			pods := make([]*v1.Pod, len(test.existingPods))
@@ -404,7 +404,7 @@ func TestPreemption(t *testing.T) {
 			for i, p := range test.existingPods {
 				pods[i], err = runPausePod(cs, p)
 				if err != nil {
-					t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
+					t.Fatalf("Error running pause pod: %v", err)
 				}
 			}
 			// Create the "pod".
@@ -416,24 +416,25 @@ func TestPreemption(t *testing.T) {
 			for i, p := range pods {
 				if _, found := test.preemptedPodIndexes[i]; found {
 					if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
-						t.Errorf("Test [%v]: Pod %v/%v is not getting evicted.", test.description, p.Namespace, p.Name)
+						t.Errorf("Pod %v/%v is not getting evicted.", p.Namespace, p.Name)
 					}
 				} else {
 					if p.DeletionTimestamp != nil {
-						t.Errorf("Test [%v]: Didn't expect pod %v to get preempted.", test.description, p.Name)
+						t.Errorf("Didn't expect pod %v to get preempted.", p.Name)
 					}
 				}
 			}
 			// Also check that the preemptor pod gets the NominatedNodeName field set.
 			if len(test.preemptedPodIndexes) > 0 {
 				if err := waitForNominatedNodeName(cs, preemptor); err != nil {
-					t.Errorf("Test [%v]: NominatedNodeName field was not set for pod %v: %v", test.description, preemptor.Name, err)
+					t.Errorf("NominatedNodeName field was not set for pod %v: %v", preemptor.Name, err)
 				}
 			}

 			// Cleanup
 			pods = append(pods, preemptor)
 			testutils.CleanupPods(cs, t, pods)
+		})
 	}
 }

@@ -526,12 +527,12 @@ func TestDisablePreemption(t *testing.T) {
 	cs := testCtx.ClientSet

 	tests := []struct {
-		description  string
+		name         string
 		existingPods []*v1.Pod
 		pod          *v1.Pod
 	}{
 		{
-			description: "pod preemption will not happen",
+			name: "pod preemption will not happen",
 			existingPods: []*v1.Pod{
 				initPausePod(&pausePodConfig{
 					Name: "victim-pod",
@@ -567,12 +568,13 @@ func TestDisablePreemption(t *testing.T) {
 	}

 	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
 			pods := make([]*v1.Pod, len(test.existingPods))
 			// Create and run existingPods.
 			for i, p := range test.existingPods {
 				pods[i], err = runPausePod(cs, p)
 				if err != nil {
-					t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
+					t.Fatalf("Test [%v]: Error running pause pod: %v", test.name, err)
 				}
 			}
 			// Create the "pod".
@@ -582,19 +584,18 @@ func TestDisablePreemption(t *testing.T) {
 			}
 			// Ensure preemptor should keep unschedulable.
 			if err := waitForPodUnschedulable(cs, preemptor); err != nil {
-				t.Errorf("Test [%v]: Preemptor %v should not become scheduled",
-					test.description, preemptor.Name)
+				t.Errorf("Preemptor %v should not become scheduled", preemptor.Name)
 			}

 			// Ensure preemptor should not be nominated.
 			if err := waitForNominatedNodeNameWithTimeout(cs, preemptor, 5*time.Second); err == nil {
-				t.Errorf("Test [%v]: Preemptor %v should not be nominated",
-					test.description, preemptor.Name)
+				t.Errorf("Preemptor %v should not be nominated", preemptor.Name)
 			}

 			// Cleanup
 			pods = append(pods, preemptor)
 			testutils.CleanupPods(cs, t, pods)
+		})
 	}
 }

@@ -675,7 +676,7 @@ func TestPodPriorityResolution(t *testing.T) {

 	pods := make([]*v1.Pod, 0, len(tests))
 	for _, test := range tests {
-		t.Logf("================ Running test: %v\n", test.Name)
+		t.Run(test.Name, func(t *testing.T) {
 			t.Run(test.Name, func(t *testing.T) {
 				pod, err := runPausePod(cs, test.Pod)
 				if err != nil {
@@ -696,6 +697,7 @@ func TestPodPriorityResolution(t *testing.T) {
 					t.Errorf("Expected pod %v to have priority %v but was nil", pod.Name, test.PriorityClass)
 				}
 			})
+		})
 	}
 	testutils.CleanupPods(cs, t, pods)
 	testutils.CleanupNodes(cs, t)
@@ -727,7 +729,7 @@ func TestPreemptionStarvation(t *testing.T) {
 	cs := testCtx.ClientSet

 	tests := []struct {
-		description        string
+		name               string
 		numExistingPod     int
 		numExpectedPending int
 		preemptor          *v1.Pod
@@ -736,7 +738,7 @@ func TestPreemptionStarvation(t *testing.T) {
 			// This test ensures that while the preempting pod is waiting for the victims
 			// terminate, other lower priority pods are not scheduled in the room created
 			// after preemption and while the higher priority pods is not scheduled yet.
-			description: "starvation test: higher priority pod is scheduled before the lower priority ones",
+			name: "starvation test: higher priority pod is scheduled before the lower priority ones",
 			numExistingPod: 10,
 			numExpectedPending: 5,
 			preemptor: initPausePod(&pausePodConfig{
@@ -763,6 +765,7 @@ func TestPreemptionStarvation(t *testing.T) {
 	}

 	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
 			pendingPods := make([]*v1.Pod, test.numExpectedPending)
 			numRunningPods := test.numExistingPod - test.numExpectedPending
 			runningPods := make([]*v1.Pod, numRunningPods)
@@ -770,7 +773,7 @@ func TestPreemptionStarvation(t *testing.T) {
 			for i := 0; i < numRunningPods; i++ {
 				runningPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("rpod-%v", i), mediumPriority, 0))
 				if err != nil {
-					t.Fatalf("Test [%v]: Error creating pause pod: %v", test.description, err)
+					t.Fatalf("Error creating pause pod: %v", err)
 				}
 			}
 			// make sure that runningPods are all scheduled.
@@ -783,7 +786,7 @@ func TestPreemptionStarvation(t *testing.T) {
 			for i := 0; i < test.numExpectedPending; i++ {
 				pendingPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("ppod-%v", i), mediumPriority, 0))
 				if err != nil {
-					t.Fatalf("Test [%v]: Error creating pending pod: %v", test.description, err)
+					t.Fatalf("Error creating pending pod: %v", err)
 				}
 			}
 			// Make sure that all pending pods are being marked unschedulable.
@@ -800,7 +803,7 @@ func TestPreemptionStarvation(t *testing.T) {
 			}
 			// Check that the preemptor pod gets the annotation for nominated node name.
 			if err := waitForNominatedNodeName(cs, preemptor); err != nil {
-				t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.description, preemptor.Namespace, preemptor.Name, err)
+				t.Errorf("NominatedNodeName annotation was not set for pod %v/%v: %v", preemptor.Namespace, preemptor.Name, err)
 			}
 			// Make sure that preemptor is scheduled after preemptions.
 			if err := testutils.WaitForPodToScheduleWithTimeout(cs, preemptor, 60*time.Second); err != nil {
@@ -812,6 +815,7 @@ func TestPreemptionStarvation(t *testing.T) {
 			allPods = append(allPods, runningPods...)
 			allPods = append(allPods, preemptor)
 			testutils.CleanupPods(cs, t, allPods)
+		})
 	}
 }

@@ -824,7 +828,7 @@ func TestPreemptionRaces(t *testing.T) {
 	cs := testCtx.ClientSet

 	tests := []struct {
-		description       string
+		name              string
 		numInitialPods    int // Pods created and executed before running preemptor
 		numAdditionalPods int // Pods created after creating the preemptor
 		numRepetitions    int // Repeat the tests to check races
@@ -834,7 +838,7 @@ func TestPreemptionRaces(t *testing.T) {
 			// This test ensures that while the preempting pod is waiting for the victims
 			// terminate, other lower priority pods are not scheduled in the room created
 			// after preemption and while the higher priority pods is not scheduled yet.
-			description: "ensures that other pods are not scheduled while preemptor is being marked as nominated (issue #72124)",
+			name: "ensures that other pods are not scheduled while preemptor is being marked as nominated (issue #72124)",
 			numInitialPods: 2,
 			numAdditionalPods: 50,
 			numRepetitions: 10,
@@ -862,6 +866,7 @@ func TestPreemptionRaces(t *testing.T) {
 	}

 	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
 			if test.numRepetitions <= 0 {
 				test.numRepetitions = 1
 			}
@@ -872,7 +877,7 @@ func TestPreemptionRaces(t *testing.T) {
 			for i := 0; i < test.numInitialPods; i++ {
 				initialPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("rpod-%v", i), mediumPriority, 0))
 				if err != nil {
-					t.Fatalf("Test [%v]: Error creating pause pod: %v", test.description, err)
+					t.Fatalf("Error creating pause pod: %v", err)
 				}
 			}
 			// make sure that initial Pods are all scheduled.
@@ -892,12 +897,12 @@ func TestPreemptionRaces(t *testing.T) {
 			for i := 0; i < test.numAdditionalPods; i++ {
 				additionalPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("ppod-%v", i), mediumPriority, 0))
 				if err != nil {
-					t.Fatalf("Test [%v]: Error creating pending pod: %v", test.description, err)
+					t.Fatalf("Error creating pending pod: %v", err)
 				}
 			}
 			// Check that the preemptor pod gets nominated node name.
 			if err := waitForNominatedNodeName(cs, preemptor); err != nil {
-				t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.description, preemptor.Namespace, preemptor.Name, err)
+				t.Errorf("NominatedNodeName annotation was not set for pod %v/%v: %v", preemptor.Namespace, preemptor.Name, err)
 			}
 			// Make sure that preemptor is scheduled after preemptions.
 			if err := testutils.WaitForPodToScheduleWithTimeout(cs, preemptor, 60*time.Second); err != nil {
@@ -925,6 +930,7 @@ func TestPreemptionRaces(t *testing.T) {
 				allPods = append(allPods, preemptor)
 				testutils.CleanupPods(cs, t, allPods)
 			}
+		})
 	}
 }

@@ -1074,7 +1080,7 @@ func TestPDBInPreemption(t *testing.T) {
 	}

 	tests := []struct {
-		description         string
+		name                string
 		nodes               []*nodeConfig
 		pdbs                []*policy.PodDisruptionBudget
 		pdbPodNum           []int32
@@ -1083,7 +1089,7 @@ func TestPDBInPreemption(t *testing.T) {
 		preemptedPodIndexes map[int]struct{}
 	}{
 		{
-			description: "A non-PDB violating pod is preempted despite its higher priority",
+			name: "A non-PDB violating pod is preempted despite its higher priority",
 			nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
 			pdbs: []*policy.PodDisruptionBudget{
 				mkMinAvailablePDB("pdb-1", testCtx.NS.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
@@ -1123,7 +1129,7 @@ func TestPDBInPreemption(t *testing.T) {
 			preemptedPodIndexes: map[int]struct{}{2: {}},
 		},
 		{
-			description: "A node without any PDB violating pods is preferred for preemption",
+			name: "A node without any PDB violating pods is preferred for preemption",
 			nodes: []*nodeConfig{
 				{name: "node-1", res: defaultNodeRes},
 				{name: "node-2", res: defaultNodeRes},
@@ -1161,7 +1167,7 @@ func TestPDBInPreemption(t *testing.T) {
 			preemptedPodIndexes: map[int]struct{}{1: {}},
 		},
 		{
-			description: "A node with fewer PDB violating pods is preferred for preemption",
+			name: "A node with fewer PDB violating pods is preferred for preemption",
 			nodes: []*nodeConfig{
 				{name: "node-1", res: defaultNodeRes},
 				{name: "node-2", res: defaultNodeRes},
@@ -1244,7 +1250,7 @@ func TestPDBInPreemption(t *testing.T) {
 	}

 	for _, test := range tests {
-		t.Logf("================ Running test: %v\n", test.description)
+		t.Run(test.name, func(t *testing.T) {
 			for _, nodeConf := range test.nodes {
 				_, err := createNode(cs, nodeConf.name, nodeConf.res)
 				if err != nil {
@@ -1257,7 +1263,7 @@ func TestPDBInPreemption(t *testing.T) {
 			// Create and run existingPods.
 			for i, p := range test.existingPods {
 				if pods[i], err = runPausePod(cs, p); err != nil {
-					t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
+					t.Fatalf("Test [%v]: Error running pause pod: %v", test.name, err)
 				}
 				// Add pod condition ready so that PDB is updated.
 				addPodConditionReady(p)
@@ -1291,18 +1297,18 @@ func TestPDBInPreemption(t *testing.T) {
 			for i, p := range pods {
 				if _, found := test.preemptedPodIndexes[i]; found {
 					if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
-						t.Errorf("Test [%v]: Pod %v/%v is not getting evicted.", test.description, p.Namespace, p.Name)
+						t.Errorf("Test [%v]: Pod %v/%v is not getting evicted.", test.name, p.Namespace, p.Name)
 					}
 				} else {
 					if p.DeletionTimestamp != nil {
-						t.Errorf("Test [%v]: Didn't expect pod %v/%v to get preempted.", test.description, p.Namespace, p.Name)
+						t.Errorf("Test [%v]: Didn't expect pod %v/%v to get preempted.", test.name, p.Namespace, p.Name)
 					}
 				}
 			}
 			// Also check that the preemptor pod gets the annotation for nominated node name.
 			if len(test.preemptedPodIndexes) > 0 {
 				if err := waitForNominatedNodeName(cs, preemptor); err != nil {
-					t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.description, preemptor.Namespace, preemptor.Name, err)
+					t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.name, preemptor.Namespace, preemptor.Name, err)
 				}
 			}

@@ -1311,5 +1317,6 @@ func TestPDBInPreemption(t *testing.T) {
 			testutils.CleanupPods(cs, t, pods)
 			cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 			cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
+		})
 	}
 }

@@ -789,14 +789,14 @@ func TestSchedulerInformers(t *testing.T) {
 	}

 	tests := []struct {
-		description         string
+		name                string
 		nodes               []*nodeConfig
 		existingPods        []*v1.Pod
 		pod                 *v1.Pod
 		preemptedPodIndexes map[int]struct{}
 	}{
 		{
-			description: "Pod cannot be scheduled when node is occupied by pods scheduled by other schedulers",
+			name: "Pod cannot be scheduled when node is occupied by pods scheduled by other schedulers",
 			nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
 			existingPods: []*v1.Pod{
 				initPausePod(&pausePodConfig{
@@ -826,6 +826,7 @@ func TestSchedulerInformers(t *testing.T) {
 	}

 	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
 			for _, nodeConf := range test.nodes {
 				_, err := createNode(cs, nodeConf.name, nodeConf.res)
 				if err != nil {
@@ -838,7 +839,7 @@ func TestSchedulerInformers(t *testing.T) {
 			// Create and run existingPods.
 			for i, p := range test.existingPods {
 				if pods[i], err = runPausePod(cs, p); err != nil {
-					t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
+					t.Fatalf("Error running pause pod: %v", err)
 				}
 			}
 			// Create the new "pod".
@@ -855,5 +856,6 @@ func TestSchedulerInformers(t *testing.T) {
 			testutils.CleanupPods(cs, t, pods)
 			cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 			cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
+		})
 	}
 }
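Once the cases are named subtests, a single scenario can be targeted from the command line: Go's test runner matches subtests with a slash-separated -run pattern, and spaces in subtest names become underscores. A hypothetical invocation (the package path is assumed, not part of this diff):

go test ./test/integration/scheduler -run 'TestPreemption/basic_pod_preemption' -v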