Scheduler integration tests standardization
This commit is contained in:
parent b8b4186a14
commit 5e679cf51f
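Note on the pattern: the change applied throughout this commit is Go's standard table-driven subtest layout. Each test case gains a name field, the per-case body moves into t.Run, and "Test [%v]:" prefixes in error messages are dropped because t.Run already scopes failures to the case. A minimal sketch of the shape (the test body below is illustrative, not taken from this diff):

package example

import "testing"

func TestExample(t *testing.T) {
	tests := []struct {
		name string
		in   int
		want int
	}{
		{name: "one squared", in: 1, want: 1},
		{name: "two squared", in: 2, want: 4},
	}
	for _, test := range tests {
		// t.Run gives each case its own subtest: failures are reported as
		// TestExample/<name>, and one case can be run in isolation with
		// `go test -run 'TestExample/two_squared'`.
		t.Run(test.name, func(t *testing.T) {
			if got := test.in * test.in; got != test.want {
				// No per-case prefix needed; the subtest name carries it.
				t.Errorf("got %d, want %d", got, test.want)
			}
		})
	}
}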
@@ -67,14 +67,15 @@ func TestInterPodAffinity(t *testing.T) {
 	podLabel2 := map[string]string{"security": "S1"}

 	tests := []struct {
+		name      string
 		pod       *v1.Pod
 		pods      []*v1.Pod
 		node      *v1.Node
 		fits      bool
 		errorType string
-		test      string
 	}{
 		{
+			name: "validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -104,9 +105,9 @@ func TestInterPodAffinity(t *testing.T) {
 			node:      nodes[0],
 			fits:      false,
 			errorType: "invalidPod",
-			test:      "validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid",
 		},
 		{
+			name: "validates that Inter-pod-Affinity is respected if not matching",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -136,9 +137,9 @@ func TestInterPodAffinity(t *testing.T) {
 			},
 			node: nodes[0],
 			fits: false,
-			test: "validates that Inter-pod-Affinity is respected if not matching",
 		},
 		{
+			name: "validates that InterPodAffinity is respected if matching. requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using In operator that matches the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -179,9 +180,9 @@ func TestInterPodAffinity(t *testing.T) {
 			},
 			node: nodes[0],
 			fits: true,
-			test: "validates that InterPodAffinity is respected if matching. requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using In operator that matches the existing pod",
 		},
 		{
+			name: "validates that InterPodAffinity is respected if matching. requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using not in operator in labelSelector that matches the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -217,9 +218,9 @@ func TestInterPodAffinity(t *testing.T) {
 						Labels: podLabel}}},
 			node: nodes[0],
 			fits: true,
-			test: "validates that InterPodAffinity is respected if matching. requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using not in operator in labelSelector that matches the existing pod",
 		},
 		{
+			name: "validates that inter-pod-affinity is respected when pods have different Namespaces",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -256,9 +257,9 @@ func TestInterPodAffinity(t *testing.T) {
 						Labels: podLabel, Namespace: "ns"}}},
 			node: nodes[0],
 			fits: false,
-			test: "validates that inter-pod-affinity is respected when pods have different Namespaces",
 		},
 		{
+			name: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -293,9 +294,9 @@ func TestInterPodAffinity(t *testing.T) {
 						Labels: podLabel}}},
 			node: nodes[0],
 			fits: false,
-			test: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod",
 		},
 		{
+			name: "validates that InterPodAffinity is respected if matching with multiple affinities in multiple RequiredDuringSchedulingIgnoredDuringExecution ",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -347,9 +348,9 @@ func TestInterPodAffinity(t *testing.T) {
 						Labels: podLabel}}},
 			node: nodes[0],
 			fits: true,
-			test: "validates that InterPodAffinity is respected if matching with multiple affinities in multiple RequiredDuringSchedulingIgnoredDuringExecution ",
 		},
 		{
+			name: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node because one of the matchExpression items doesn't match.",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: podLabel2,
@@ -401,9 +402,9 @@ func TestInterPodAffinity(t *testing.T) {
 						Labels: podLabel}}},
 			node: nodes[0],
 			fits: false,
-			test: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node because one of the matchExpression items doesn't match.",
 		},
 		{
+			name: "validates that InterPod Affinity and AntiAffinity is respected if matching",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -454,9 +455,9 @@ func TestInterPodAffinity(t *testing.T) {
 						Labels: podLabel}}},
 			node: nodes[0],
 			fits: true,
-			test: "validates that InterPod Affinity and AntiAffinity is respected if matching",
 		},
 		{
+			name: "satisfies the PodAffinity and PodAntiAffinity and PodAntiAffinity symmetry with the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -531,9 +532,9 @@ func TestInterPodAffinity(t *testing.T) {
 			},
 			node: nodes[0],
 			fits: true,
-			test: "satisfies the PodAffinity and PodAntiAffinity and PodAntiAffinity symmetry with the existing pod",
 		},
 		{
+			name: "satisfies the PodAffinity but doesn't satisfies the PodAntiAffinity with the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -584,9 +585,9 @@ func TestInterPodAffinity(t *testing.T) {
 						Labels: podLabel}}},
 			node: nodes[0],
 			fits: false,
-			test: "satisfies the PodAffinity but doesn't satisfies the PodAntiAffinity with the existing pod",
 		},
 		{
+			name: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfies PodAntiAffinity symmetry with the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -661,9 +662,9 @@ func TestInterPodAffinity(t *testing.T) {
 			},
 			node: nodes[0],
 			fits: false,
-			test: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfies PodAntiAffinity symmetry with the existing pod",
 		},
 		{
+			name: "pod matches its own Label in PodAffinity and that matches the existing pod Labels",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -698,9 +699,9 @@ func TestInterPodAffinity(t *testing.T) {
 						Labels: podLabel}}},
 			node: nodes[0],
 			fits: false,
-			test: "pod matches its own Label in PodAffinity and that matches the existing pod Labels",
 		},
 		{
+			name: "Verify that PodAntiAffinity of an existing pod is respected when PodAntiAffinity symmetry is not satisfied with the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fakename",
@@ -738,9 +739,9 @@ func TestInterPodAffinity(t *testing.T) {
 			},
 			node: nodes[0],
 			fits: false,
-			test: "Verify that PodAntiAffinity of an existing pod is respected when PodAntiAffinity symmetry is not satisfied with the existing pod",
 		},
 		{
+			name: "Verify that PodAntiAffinity from existing pod is respected when pod statisfies PodAntiAffinity symmetry with the existing pod",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "fake-name",
@@ -778,9 +779,9 @@ func TestInterPodAffinity(t *testing.T) {
 			},
 			node: nodes[0],
 			fits: true,
-			test: "Verify that PodAntiAffinity from existing pod is respected when pod statisfies PodAntiAffinity symmetry with the existing pod",
 		},
 		{
+			name: "nodes[0] and nodes[1] have same topologyKey and label value. nodes[0] has an existing pod that matches the inter pod affinity rule. The new pod can not be scheduled onto either of the two nodes.",
 			pod: &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{Name: "fake-name2"},
 				Spec: v1.PodSpec{
@@ -812,67 +813,68 @@ func TestInterPodAffinity(t *testing.T) {
 					NodeName: nodes[0].Name}, ObjectMeta: metav1.ObjectMeta{Name: "fakename", Labels: map[string]string{"foo": "abc"}}},
 			},
 			fits: false,
-			test: "nodes[0] and nodes[1] have same topologyKey and label value. nodes[0] has an existing pod that matches the inter pod affinity rule. The new pod can not be scheduled onto either of the two nodes.",
 		},
 	}

 	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
 			for _, pod := range test.pods {
 				var nsName string
 				if pod.Namespace != "" {
 					nsName = pod.Namespace
 				} else {
 					nsName = testCtx.NS.Name
 				}
 				createdPod, err := cs.CoreV1().Pods(nsName).Create(context.TODO(), pod, metav1.CreateOptions{})
 				if err != nil {
-					t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test)
+					t.Fatalf("Error while creating pod: %v", err)
 				}
 				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
 				if err != nil {
-					t.Errorf("Test Failed: error, %v, while waiting for pod during test, %v", err, test)
+					t.Errorf("Error while creating pod: %v", err)
 				}
 			}
 			testPod, err := cs.CoreV1().Pods(testCtx.NS.Name).Create(context.TODO(), test.pod, metav1.CreateOptions{})
 			if err != nil {
 				if !(test.errorType == "invalidPod" && apierrors.IsInvalid(err)) {
-					t.Fatalf("Test Failed: error, %v, while creating pod during test: %v", err, test.test)
+					t.Fatalf("Error while creating pod: %v", err)
 				}
 			}

 			if test.fits {
 				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, testPod.Namespace, testPod.Name))
 			} else {
 				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
 			}
 			if err != nil {
-				t.Errorf("Test Failed: %v, err %v, test.fits %v", test.test, err, test.fits)
+				t.Errorf("Error while trying to fit a pod: %v", err)
 			}
 			err = cs.CoreV1().Pods(testCtx.NS.Name).Delete(context.TODO(), test.pod.Name, *metav1.NewDeleteOptions(0))
 			if err != nil {
-				t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
+				t.Errorf("Error while deleting pod: %v", err)
 			}
 			err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodDeleted(cs, testCtx.NS.Name, test.pod.Name))
 			if err != nil {
-				t.Errorf("Test Failed: error, %v, while waiting for pod to get deleted, %v", err, test.test)
+				t.Errorf("Error while waiting for pod to get deleted: %v", err)
 			}
 			for _, pod := range test.pods {
 				var nsName string
 				if pod.Namespace != "" {
 					nsName = pod.Namespace
 				} else {
 					nsName = testCtx.NS.Name
 				}
 				err = cs.CoreV1().Pods(nsName).Delete(context.TODO(), pod.Name, *metav1.NewDeleteOptions(0))
 				if err != nil {
-					t.Errorf("Test Failed: error, %v, while deleting pod during test: %v", err, test.test)
+					t.Errorf("Error while deleting pod: %v", err)
 				}
 				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodDeleted(cs, nsName, pod.Name))
 				if err != nil {
-					t.Errorf("Test Failed: error, %v, while waiting for pod to get deleted, %v", err, test.test)
+					t.Errorf("Error while waiting for pod to get deleted: %v", err)
 				}
 			}
+		})
 	}
 }
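The loop above waits on wait.Poll with condition helpers such as testutils.PodScheduled, podUnschedulable and testutils.PodDeleted. Those helpers are defined in the integration-test utilities, not in this diff; as a rough sketch under that assumption, a condition of this shape could look like the following (the NodeName check is an illustrative guess at what the real helper verifies):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// podScheduled returns a wait.ConditionFunc that reports true once the named
// pod has been bound to a node. Sketch only; the real testutils.PodScheduled
// may differ in detail.
func podScheduled(cs kubernetes.Interface, namespace, name string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := cs.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			// Treat lookup errors as "not yet"; wait.Poll retries until timeout.
			return false, nil
		}
		// A non-empty NodeName means the scheduler has bound the pod.
		return pod.Spec.NodeName != "", nil
	}
}

A caller then polls exactly as the test does: err := wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduled(cs, ns, name)).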
@@ -1005,16 +1007,16 @@ func TestEvenPodsSpreadPredicate(t *testing.T) {
 			for _, pod := range tt.existingPods {
 				createdPod, err := cs.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, metav1.CreateOptions{})
 				if err != nil {
-					t.Fatalf("Test Failed: error while creating pod during test: %v", err)
+					t.Fatalf("Error while creating pod during test: %v", err)
 				}
 				err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
 				if err != nil {
-					t.Errorf("Test Failed: error while waiting for pod during test: %v", err)
+					t.Errorf("Error while waiting for pod during test: %v", err)
 				}
 			}
 			testPod, err := cs.CoreV1().Pods(tt.incomingPod.Namespace).Create(context.TODO(), tt.incomingPod, metav1.CreateOptions{})
 			if err != nil && !apierrors.IsInvalid(err) {
-				t.Fatalf("Test Failed: error while creating pod during test: %v", err)
+				t.Fatalf("Error while creating pod during test: %v", err)
 			}

 			if tt.fits {
@@ -164,7 +164,7 @@ func TestPreemption(t *testing.T) {

 	maxTokens := 1000
 	tests := []struct {
-		description  string
+		name         string
 		existingPods []*v1.Pod
 		pod          *v1.Pod
 		initTokens   int
@@ -172,8 +172,8 @@ func TestPreemption(t *testing.T) {
 		preemptedPodIndexes map[int]struct{}
 	}{
 		{
-			description: "basic pod preemption",
+			name: "basic pod preemption",
 			initTokens: maxTokens,
 			existingPods: []*v1.Pod{
 				initPausePod(&pausePodConfig{
 					Name: "victim-pod",
@@ -197,8 +197,8 @@ func TestPreemption(t *testing.T) {
 			preemptedPodIndexes: map[int]struct{}{0: {}},
 		},
 		{
-			description: "basic pod preemption with filter",
+			name: "basic pod preemption with filter",
 			initTokens: 1,
 			existingPods: []*v1.Pod{
 				initPausePod(&pausePodConfig{
 					Name: "victim-pod",
@@ -223,7 +223,7 @@ func TestPreemption(t *testing.T) {
 		},
 		{
 			// same as the previous test, but the filter is unresolvable.
-			description: "basic pod preemption with unresolvable filter",
+			name: "basic pod preemption with unresolvable filter",
 			initTokens: 1,
 			unresolvable: true,
 			existingPods: []*v1.Pod{
@@ -249,8 +249,8 @@ func TestPreemption(t *testing.T) {
 			preemptedPodIndexes: map[int]struct{}{},
 		},
 		{
-			description: "preemption is performed to satisfy anti-affinity",
+			name: "preemption is performed to satisfy anti-affinity",
 			initTokens: maxTokens,
 			existingPods: []*v1.Pod{
 				initPausePod(&pausePodConfig{
 					Name: "pod-0", Namespace: testCtx.NS.Name,
@@ -313,8 +313,8 @@ func TestPreemption(t *testing.T) {
 		},
 		{
 			// This is similar to the previous case only pod-1 is high priority.
-			description: "preemption is not performed when anti-affinity is not satisfied",
+			name: "preemption is not performed when anti-affinity is not satisfied",
 			initTokens: maxTokens,
 			existingPods: []*v1.Pod{
 				initPausePod(&pausePodConfig{
 					Name: "pod-0", Namespace: testCtx.NS.Name,
@@ -396,44 +396,45 @@ func TestPreemption(t *testing.T) {
 	}

 	for _, test := range tests {
-		t.Logf("================ Running test: %v\n", test.description)
+		t.Run(test.name, func(t *testing.T) {
 			filter.Tokens = test.initTokens
 			filter.Unresolvable = test.unresolvable
 			pods := make([]*v1.Pod, len(test.existingPods))
 			// Create and run existingPods.
 			for i, p := range test.existingPods {
 				pods[i], err = runPausePod(cs, p)
 				if err != nil {
-					t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
+					t.Fatalf("Error running pause pod: %v", err)
 				}
 			}
 			// Create the "pod".
 			preemptor, err := createPausePod(cs, test.pod)
 			if err != nil {
 				t.Errorf("Error while creating high priority pod: %v", err)
 			}
 			// Wait for preemption of pods and make sure the other ones are not preempted.
 			for i, p := range pods {
 				if _, found := test.preemptedPodIndexes[i]; found {
 					if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
-						t.Errorf("Test [%v]: Pod %v/%v is not getting evicted.", test.description, p.Namespace, p.Name)
+						t.Errorf("Pod %v/%v is not getting evicted.", p.Namespace, p.Name)
 					}
 				} else {
 					if p.DeletionTimestamp != nil {
-						t.Errorf("Test [%v]: Didn't expect pod %v to get preempted.", test.description, p.Name)
+						t.Errorf("Didn't expect pod %v to get preempted.", p.Name)
 					}
 				}
 			}
 			// Also check that the preemptor pod gets the NominatedNodeName field set.
 			if len(test.preemptedPodIndexes) > 0 {
 				if err := waitForNominatedNodeName(cs, preemptor); err != nil {
-					t.Errorf("Test [%v]: NominatedNodeName field was not set for pod %v: %v", test.description, preemptor.Name, err)
+					t.Errorf("NominatedNodeName field was not set for pod %v: %v", preemptor.Name, err)
 				}
 			}

 			// Cleanup
 			pods = append(pods, preemptor)
 			testutils.CleanupPods(cs, t, pods)
+		})
 	}
 }
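TestPreemption's loop polls podIsGettingEvicted for each expected victim and inspects DeletionTimestamp on the pods that must survive. The helper is defined outside this diff; a plausible sketch (an assumption, in the same style as the condition above) checks the pod's deletion timestamp:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// podIsGettingEvicted sketches a condition that reports true once the named
// pod is being terminated, which is how a preempted victim first becomes
// observable. Illustrative only; the real helper may differ.
func podIsGettingEvicted(cs kubernetes.Interface, namespace, name string) wait.ConditionFunc {
	return func() (bool, error) {
		pod, err := cs.CoreV1().Pods(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		// A set DeletionTimestamp marks the pod as being deleted by preemption.
		return pod.DeletionTimestamp != nil, nil
	}
}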
@@ -526,12 +527,12 @@ func TestDisablePreemption(t *testing.T) {
 	cs := testCtx.ClientSet

 	tests := []struct {
-		description  string
+		name         string
 		existingPods []*v1.Pod
 		pod          *v1.Pod
 	}{
 		{
-			description: "pod preemption will not happen",
+			name: "pod preemption will not happen",
 			existingPods: []*v1.Pod{
 				initPausePod(&pausePodConfig{
 					Name: "victim-pod",
@@ -567,34 +568,34 @@ func TestDisablePreemption(t *testing.T) {
 	}

 	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
 			pods := make([]*v1.Pod, len(test.existingPods))
 			// Create and run existingPods.
 			for i, p := range test.existingPods {
 				pods[i], err = runPausePod(cs, p)
 				if err != nil {
-					t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
+					t.Fatalf("Test [%v]: Error running pause pod: %v", test.name, err)
 				}
 			}
 			// Create the "pod".
 			preemptor, err := createPausePod(cs, test.pod)
 			if err != nil {
 				t.Errorf("Error while creating high priority pod: %v", err)
 			}
 			// Ensure preemptor should keep unschedulable.
 			if err := waitForPodUnschedulable(cs, preemptor); err != nil {
-				t.Errorf("Test [%v]: Preemptor %v should not become scheduled",
-					test.description, preemptor.Name)
+				t.Errorf("Preemptor %v should not become scheduled", preemptor.Name)
 			}

 			// Ensure preemptor should not be nominated.
 			if err := waitForNominatedNodeNameWithTimeout(cs, preemptor, 5*time.Second); err == nil {
-				t.Errorf("Test [%v]: Preemptor %v should not be nominated",
-					test.description, preemptor.Name)
+				t.Errorf("Preemptor %v should not be nominated", preemptor.Name)
 			}

 			// Cleanup
 			pods = append(pods, preemptor)
 			testutils.CleanupPods(cs, t, pods)
+		})
 	}
 }
@@ -675,26 +676,27 @@ func TestPodPriorityResolution(t *testing.T) {

 	pods := make([]*v1.Pod, 0, len(tests))
 	for _, test := range tests {
-		t.Logf("================ Running test: %v\n", test.Name)
 		t.Run(test.Name, func(t *testing.T) {
-			pod, err := runPausePod(cs, test.Pod)
-			if err != nil {
-				if test.ExpectedError == nil {
-					t.Fatalf("Test [PodPriority/%v]: Error running pause pod: %v", test.PriorityClass, err)
+			t.Run(test.Name, func(t *testing.T) {
+				pod, err := runPausePod(cs, test.Pod)
+				if err != nil {
+					if test.ExpectedError == nil {
+						t.Fatalf("Test [PodPriority/%v]: Error running pause pod: %v", test.PriorityClass, err)
+					}
+					if err.Error() != test.ExpectedError.Error() {
+						t.Fatalf("Test [PodPriority/%v]: Expected error %v but got error %v", test.PriorityClass, test.ExpectedError, err)
+					}
+					return
 				}
-				if err.Error() != test.ExpectedError.Error() {
-					t.Fatalf("Test [PodPriority/%v]: Expected error %v but got error %v", test.PriorityClass, test.ExpectedError, err)
+				pods = append(pods, pod)
+				if pod.Spec.Priority != nil {
+					if *pod.Spec.Priority != test.ExpectedPriority {
+						t.Errorf("Expected pod %v to have priority %v but was %v", pod.Name, test.ExpectedPriority, pod.Spec.Priority)
+					}
+				} else {
+					t.Errorf("Expected pod %v to have priority %v but was nil", pod.Name, test.PriorityClass)
 				}
-				return
-			}
-			pods = append(pods, pod)
-			if pod.Spec.Priority != nil {
-				if *pod.Spec.Priority != test.ExpectedPriority {
-					t.Errorf("Expected pod %v to have priority %v but was %v", pod.Name, test.ExpectedPriority, pod.Spec.Priority)
-				}
-			} else {
-				t.Errorf("Expected pod %v to have priority %v but was nil", pod.Name, test.PriorityClass)
-			}
+			})
 		})
 	}
 	testutils.CleanupPods(cs, t, pods)
@@ -727,7 +729,7 @@ func TestPreemptionStarvation(t *testing.T) {
 	cs := testCtx.ClientSet

 	tests := []struct {
-		description        string
+		name               string
 		numExistingPod     int
 		numExpectedPending int
 		preemptor          *v1.Pod
@@ -736,7 +738,7 @@ func TestPreemptionStarvation(t *testing.T) {
 			// This test ensures that while the preempting pod is waiting for the victims
 			// terminate, other lower priority pods are not scheduled in the room created
 			// after preemption and while the higher priority pods is not scheduled yet.
-			description: "starvation test: higher priority pod is scheduled before the lower priority ones",
+			name: "starvation test: higher priority pod is scheduled before the lower priority ones",
 			numExistingPod: 10,
 			numExpectedPending: 5,
 			preemptor: initPausePod(&pausePodConfig{
@@ -763,55 +765,57 @@ func TestPreemptionStarvation(t *testing.T) {
 	}

 	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
 			pendingPods := make([]*v1.Pod, test.numExpectedPending)
 			numRunningPods := test.numExistingPod - test.numExpectedPending
 			runningPods := make([]*v1.Pod, numRunningPods)
 			// Create and run existingPods.
 			for i := 0; i < numRunningPods; i++ {
 				runningPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("rpod-%v", i), mediumPriority, 0))
 				if err != nil {
-					t.Fatalf("Test [%v]: Error creating pause pod: %v", test.description, err)
+					t.Fatalf("Error creating pause pod: %v", err)
 				}
 			}
 			// make sure that runningPods are all scheduled.
 			for _, p := range runningPods {
 				if err := testutils.WaitForPodToSchedule(cs, p); err != nil {
 					t.Fatalf("Pod %v/%v didn't get scheduled: %v", p.Namespace, p.Name, err)
 				}
 			}
 			// Create pending pods.
 			for i := 0; i < test.numExpectedPending; i++ {
 				pendingPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("ppod-%v", i), mediumPriority, 0))
 				if err != nil {
-					t.Fatalf("Test [%v]: Error creating pending pod: %v", test.description, err)
+					t.Fatalf("Error creating pending pod: %v", err)
 				}
 			}
 			// Make sure that all pending pods are being marked unschedulable.
 			for _, p := range pendingPods {
 				if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout,
 					podUnschedulable(cs, p.Namespace, p.Name)); err != nil {
 					t.Errorf("Pod %v/%v didn't get marked unschedulable: %v", p.Namespace, p.Name, err)
 				}
 			}
 			// Create the preemptor.
 			preemptor, err := createPausePod(cs, test.preemptor)
 			if err != nil {
 				t.Errorf("Error while creating the preempting pod: %v", err)
 			}
 			// Check that the preemptor pod gets the annotation for nominated node name.
 			if err := waitForNominatedNodeName(cs, preemptor); err != nil {
-				t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.description, preemptor.Namespace, preemptor.Name, err)
+				t.Errorf("NominatedNodeName annotation was not set for pod %v/%v: %v", preemptor.Namespace, preemptor.Name, err)
 			}
 			// Make sure that preemptor is scheduled after preemptions.
 			if err := testutils.WaitForPodToScheduleWithTimeout(cs, preemptor, 60*time.Second); err != nil {
 				t.Errorf("Preemptor pod %v didn't get scheduled: %v", preemptor.Name, err)
 			}
 			// Cleanup
 			klog.Info("Cleaning up all pods...")
 			allPods := pendingPods
 			allPods = append(allPods, runningPods...)
 			allPods = append(allPods, preemptor)
 			testutils.CleanupPods(cs, t, allPods)
+		})
 	}
 }
@@ -824,7 +828,7 @@ func TestPreemptionRaces(t *testing.T) {
 	cs := testCtx.ClientSet

 	tests := []struct {
-		description       string
+		name              string
 		numInitialPods    int // Pods created and executed before running preemptor
 		numAdditionalPods int // Pods created after creating the preemptor
 		numRepetitions    int // Repeat the tests to check races
@@ -834,7 +838,7 @@ func TestPreemptionRaces(t *testing.T) {
 			// This test ensures that while the preempting pod is waiting for the victims
 			// terminate, other lower priority pods are not scheduled in the room created
 			// after preemption and while the higher priority pods is not scheduled yet.
-			description: "ensures that other pods are not scheduled while preemptor is being marked as nominated (issue #72124)",
+			name: "ensures that other pods are not scheduled while preemptor is being marked as nominated (issue #72124)",
 			numInitialPods: 2,
 			numAdditionalPods: 50,
 			numRepetitions: 10,
@@ -862,69 +866,71 @@ func TestPreemptionRaces(t *testing.T) {
 	}

 	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
 			if test.numRepetitions <= 0 {
 				test.numRepetitions = 1
 			}
 			for n := 0; n < test.numRepetitions; n++ {
 				initialPods := make([]*v1.Pod, test.numInitialPods)
 				additionalPods := make([]*v1.Pod, test.numAdditionalPods)
 				// Create and run existingPods.
 				for i := 0; i < test.numInitialPods; i++ {
 					initialPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("rpod-%v", i), mediumPriority, 0))
 					if err != nil {
-						t.Fatalf("Test [%v]: Error creating pause pod: %v", test.description, err)
+						t.Fatalf("Error creating pause pod: %v", err)
 					}
 				}
 				// make sure that initial Pods are all scheduled.
 				for _, p := range initialPods {
 					if err := testutils.WaitForPodToSchedule(cs, p); err != nil {
 						t.Fatalf("Pod %v/%v didn't get scheduled: %v", p.Namespace, p.Name, err)
 					}
 				}
 				// Create the preemptor.
 				klog.Info("Creating the preemptor pod...")
 				preemptor, err := createPausePod(cs, test.preemptor)
 				if err != nil {
 					t.Errorf("Error while creating the preempting pod: %v", err)
 				}

 				klog.Info("Creating additional pods...")
 				for i := 0; i < test.numAdditionalPods; i++ {
 					additionalPods[i], err = createPausePod(cs, mkPriorityPodWithGrace(testCtx, fmt.Sprintf("ppod-%v", i), mediumPriority, 0))
 					if err != nil {
-						t.Fatalf("Test [%v]: Error creating pending pod: %v", test.description, err)
+						t.Fatalf("Error creating pending pod: %v", err)
 					}
 				}
 				// Check that the preemptor pod gets nominated node name.
 				if err := waitForNominatedNodeName(cs, preemptor); err != nil {
-					t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.description, preemptor.Namespace, preemptor.Name, err)
+					t.Errorf("NominatedNodeName annotation was not set for pod %v/%v: %v", preemptor.Namespace, preemptor.Name, err)
 				}
 				// Make sure that preemptor is scheduled after preemptions.
 				if err := testutils.WaitForPodToScheduleWithTimeout(cs, preemptor, 60*time.Second); err != nil {
 					t.Errorf("Preemptor pod %v didn't get scheduled: %v", preemptor.Name, err)
 				}

 				klog.Info("Check unschedulable pods still exists and were never scheduled...")
 				for _, p := range additionalPods {
 					pod, err := cs.CoreV1().Pods(p.Namespace).Get(context.TODO(), p.Name, metav1.GetOptions{})
 					if err != nil {
 						t.Errorf("Error in getting Pod %v/%v info: %v", p.Namespace, p.Name, err)
 					}
 					if len(pod.Spec.NodeName) > 0 {
 						t.Errorf("Pod %v/%v is already scheduled", p.Namespace, p.Name)
 					}
 					_, cond := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
 					if cond != nil && cond.Status != v1.ConditionFalse {
 						t.Errorf("Pod %v/%v is no longer unschedulable: %v", p.Namespace, p.Name, err)
 					}
 				}
 				// Cleanup
 				klog.Info("Cleaning up all pods...")
 				allPods := additionalPods
 				allPods = append(allPods, initialPods...)
 				allPods = append(allPods, preemptor)
 				testutils.CleanupPods(cs, t, allPods)
 			}
+		})
 	}
 }
@@ -1074,7 +1080,7 @@ func TestPDBInPreemption(t *testing.T) {
 	}

 	tests := []struct {
-		description         string
+		name                string
 		nodes               []*nodeConfig
 		pdbs                []*policy.PodDisruptionBudget
 		pdbPodNum           []int32
@@ -1083,8 +1089,8 @@ func TestPDBInPreemption(t *testing.T) {
 		preemptedPodIndexes map[int]struct{}
 	}{
 		{
-			description: "A non-PDB violating pod is preempted despite its higher priority",
+			name: "A non-PDB violating pod is preempted despite its higher priority",
 			nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
 			pdbs: []*policy.PodDisruptionBudget{
 				mkMinAvailablePDB("pdb-1", testCtx.NS.Name, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}),
 			},
@@ -1123,7 +1129,7 @@ func TestPDBInPreemption(t *testing.T) {
 			preemptedPodIndexes: map[int]struct{}{2: {}},
 		},
 		{
-			description: "A node without any PDB violating pods is preferred for preemption",
+			name: "A node without any PDB violating pods is preferred for preemption",
 			nodes: []*nodeConfig{
 				{name: "node-1", res: defaultNodeRes},
 				{name: "node-2", res: defaultNodeRes},
@@ -1161,7 +1167,7 @@ func TestPDBInPreemption(t *testing.T) {
 			preemptedPodIndexes: map[int]struct{}{1: {}},
 		},
 		{
-			description: "A node with fewer PDB violating pods is preferred for preemption",
+			name: "A node with fewer PDB violating pods is preferred for preemption",
 			nodes: []*nodeConfig{
 				{name: "node-1", res: defaultNodeRes},
 				{name: "node-2", res: defaultNodeRes},
@@ -1244,72 +1250,73 @@ func TestPDBInPreemption(t *testing.T) {
 	}

 	for _, test := range tests {
-		t.Logf("================ Running test: %v\n", test.description)
+		t.Run(test.name, func(t *testing.T) {
 			for _, nodeConf := range test.nodes {
 				_, err := createNode(cs, nodeConf.name, nodeConf.res)
 				if err != nil {
 					t.Fatalf("Error creating node %v: %v", nodeConf.name, err)
 				}
 			}

 			pods := make([]*v1.Pod, len(test.existingPods))
 			var err error
 			// Create and run existingPods.
 			for i, p := range test.existingPods {
 				if pods[i], err = runPausePod(cs, p); err != nil {
-					t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
+					t.Fatalf("Test [%v]: Error running pause pod: %v", test.name, err)
 				}
 				// Add pod condition ready so that PDB is updated.
 				addPodConditionReady(p)
 				if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).UpdateStatus(context.TODO(), p, metav1.UpdateOptions{}); err != nil {
 					t.Fatal(err)
 				}
 			}
 			// Wait for Pods to be stable in scheduler cache.
 			if err := waitCachedPodsStable(testCtx, test.existingPods); err != nil {
 				t.Fatalf("Not all pods are stable in the cache: %v", err)
 			}

 			// Create PDBs.
 			for _, pdb := range test.pdbs {
 				_, err := testCtx.ClientSet.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).Create(context.TODO(), pdb, metav1.CreateOptions{})
 				if err != nil {
 					t.Fatalf("Failed to create PDB: %v", err)
 				}
 			}
 			// Wait for PDBs to become stable.
 			if err := waitForPDBsStable(testCtx, test.pdbs, test.pdbPodNum); err != nil {
 				t.Fatalf("Not all pdbs are stable in the cache: %v", err)
 			}

 			// Create the "pod".
 			preemptor, err := createPausePod(cs, test.pod)
 			if err != nil {
 				t.Errorf("Error while creating high priority pod: %v", err)
 			}
 			// Wait for preemption of pods and make sure the other ones are not preempted.
 			for i, p := range pods {
 				if _, found := test.preemptedPodIndexes[i]; found {
 					if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
-						t.Errorf("Test [%v]: Pod %v/%v is not getting evicted.", test.description, p.Namespace, p.Name)
+						t.Errorf("Test [%v]: Pod %v/%v is not getting evicted.", test.name, p.Namespace, p.Name)
 					}
 				} else {
 					if p.DeletionTimestamp != nil {
-						t.Errorf("Test [%v]: Didn't expect pod %v/%v to get preempted.", test.description, p.Namespace, p.Name)
+						t.Errorf("Test [%v]: Didn't expect pod %v/%v to get preempted.", test.name, p.Namespace, p.Name)
 					}
 				}
 			}
 			// Also check that the preemptor pod gets the annotation for nominated node name.
 			if len(test.preemptedPodIndexes) > 0 {
 				if err := waitForNominatedNodeName(cs, preemptor); err != nil {
-					t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.description, preemptor.Namespace, preemptor.Name, err)
+					t.Errorf("Test [%v]: NominatedNodeName annotation was not set for pod %v/%v: %v", test.name, preemptor.Namespace, preemptor.Name, err)
 				}
 			}

 			// Cleanup
 			pods = append(pods, preemptor)
 			testutils.CleanupPods(cs, t, pods)
 			cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 			cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
+		})
 	}
 }
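The PDB cases above build their budgets through mkMinAvailablePDB, whose definition is outside this diff. Judging from the call sites (name, namespace, UID, a minAvailable count, match labels) and the policy/v1beta1 client used for cleanup, the helper plausibly has the following shape; this is an illustrative guess, not the actual implementation:

package example

import (
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// mkMinAvailablePDB sketches a constructor for a PodDisruptionBudget with a
// minAvailable threshold and a label selector, matching call sites such as
// mkMinAvailablePDB("pdb-1", ns, types.UID("pdb-1-uid"), 2, map[string]string{"foo": "bar"}).
func mkMinAvailablePDB(name, namespace string, uid types.UID, minAvailable int, matchLabels map[string]string) *policyv1beta1.PodDisruptionBudget {
	intMinAvailable := intstr.FromInt(minAvailable)
	return &policyv1beta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
			UID:       uid,
		},
		Spec: policyv1beta1.PodDisruptionBudgetSpec{
			MinAvailable: &intMinAvailable,
			Selector:     &metav1.LabelSelector{MatchLabels: matchLabels},
		},
	}
}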
@@ -789,15 +789,15 @@ func TestSchedulerInformers(t *testing.T) {
 	}

 	tests := []struct {
-		description         string
+		name                string
 		nodes               []*nodeConfig
 		existingPods        []*v1.Pod
 		pod                 *v1.Pod
 		preemptedPodIndexes map[int]struct{}
 	}{
 		{
-			description: "Pod cannot be scheduled when node is occupied by pods scheduled by other schedulers",
+			name: "Pod cannot be scheduled when node is occupied by pods scheduled by other schedulers",
 			nodes: []*nodeConfig{{name: "node-1", res: defaultNodeRes}},
 			existingPods: []*v1.Pod{
 				initPausePod(&pausePodConfig{
 					Name: "pod1",
@@ -826,34 +826,36 @@ func TestSchedulerInformers(t *testing.T) {
 	}

 	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
 			for _, nodeConf := range test.nodes {
 				_, err := createNode(cs, nodeConf.name, nodeConf.res)
 				if err != nil {
 					t.Fatalf("Error creating node %v: %v", nodeConf.name, err)
 				}
 			}

 			pods := make([]*v1.Pod, len(test.existingPods))
 			var err error
 			// Create and run existingPods.
 			for i, p := range test.existingPods {
 				if pods[i], err = runPausePod(cs, p); err != nil {
-					t.Fatalf("Test [%v]: Error running pause pod: %v", test.description, err)
+					t.Fatalf("Error running pause pod: %v", err)
 				}
 			}
 			// Create the new "pod".
 			unschedulable, err := createPausePod(cs, test.pod)
 			if err != nil {
 				t.Errorf("Error while creating new pod: %v", err)
 			}
 			if err := waitForPodUnschedulable(cs, unschedulable); err != nil {
 				t.Errorf("Pod %v got scheduled: %v", unschedulable.Name, err)
 			}

 			// Cleanup
 			pods = append(pods, unschedulable)
 			testutils.CleanupPods(cs, t, pods)
 			cs.PolicyV1beta1().PodDisruptionBudgets(testCtx.NS.Name).DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
 			cs.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
+		})
 	}
 }