implement Node affinity and NodeSelector
@@ -18,9 +18,11 @@ package e2e

import (
	"fmt"
	"path/filepath"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/errors"
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/unversioned"
	client "k8s.io/kubernetes/pkg/client/unversioned"
@@ -29,6 +31,7 @@ import (

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	_ "github.com/stretchr/testify/assert"
)

// Returns the number of currently scheduled and not scheduled Pods.
@@ -392,6 +395,49 @@ var _ = Describe("SchedulerPredicates [Serial]", func() {
		cleanupPods(c, ns)
	})

	It("validates that a pod with an invalid Affinity is rejected [Conformance]", func() {

		By("Trying to launch a pod with invalid Affinity data.")
		podName := "without-label"
		_, err := c.Pods(ns).Create(&api.Pod{
			TypeMeta: unversioned.TypeMeta{
				Kind: "Pod",
			},
			ObjectMeta: api.ObjectMeta{
				Name: podName,
				Annotations: map[string]string{
					api.AffinityAnnotationKey: `
					{"nodeAffinity": {
						"requiredDuringSchedulingRequiredDuringExecution": {
							"nodeSelectorTerms": [{
								"matchExpressions": []
							}]
						},
					}}`,
				},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  podName,
						Image: "gcr.io/google_containers/pause:2.0",
					},
				},
			},
		})

		if err == nil || !errors.IsInvalid(err) {
			Failf("Expected an Invalid error, got: %v", err)
		}

		// Wait a bit to allow scheduler to do its thing if the pod is not rejected.
		// TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds.
		Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
		time.Sleep(10 * time.Second)

		cleanupPods(c, ns)
	})

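The affinity annotation in the test above is intentionally malformed: the trailing comma after the closing brace of `requiredDuringSchedulingRequiredDuringExecution` is illegal JSON, so the apiserver's validation fails and the Create call returns an Invalid error. Here is a minimal standalone sketch, not part of this diff, of how a client could catch such a payload before submitting; `validAffinityJSON` is a hypothetical helper and only checks JSON well-formedness, not the affinity schema:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// validAffinityJSON (hypothetical) checks only that the annotation value
// parses as JSON at all, which is enough to catch the trailing comma in
// the test payload above.
func validAffinityJSON(s string) error {
	var v map[string]interface{}
	return json.Unmarshal([]byte(s), &v)
}

func main() {
	payload := `
	{"nodeAffinity": {
		"requiredDuringSchedulingRequiredDuringExecution": {
			"nodeSelectorTerms": [{
				"matchExpressions": []
			}]
		},
	}}`
	if err := validAffinityJSON(payload); err != nil {
		fmt.Println("rejected:", err) // the trailing comma is invalid JSON
	}
}
```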
It("validates that NodeSelector is respected if matching [Conformance]", func() {
|
||||
// launch a pod to find a node which can launch a pod. We intentionally do
|
||||
// not just take the node list and choose the first of them. Depending on the
|
||||
@@ -470,4 +516,217 @@ var _ = Describe("SchedulerPredicates [Serial]", func() {
		expectNoError(err)
		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
	})

	// The test nodes do not carry any of the labels used below, so it should be
	// impossible to schedule a Pod with a non-nil
	// NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.
	It("validates that NodeAffinity is respected if not matching [Conformance]", func() {
		By("Trying to schedule a Pod with a nonempty NodeAffinity.")
podName := "restricted-pod"
|
||||
|
||||
waitForStableCluster(c)
|
||||
|
||||
_, err := c.Pods(ns).Create(&api.Pod{
|
||||
TypeMeta: unversioned.TypeMeta{
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: map[string]string{"name": "restricted"},
|
||||
Annotations: map[string]string{
|
||||
"scheduler.alpha.kubernetes.io/affinity": `
|
||||
{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
|
||||
"nodeSelectorTerms": [
|
||||
{
|
||||
"matchExpressions": [{
|
||||
"key": "foo",
|
||||
"operator": "In",
|
||||
"values": ["bar", "value2"]
|
||||
}]
|
||||
},
|
||||
{
|
||||
"matchExpressions": [{
|
||||
"key": "diffkey",
|
||||
"operator": "In",
|
||||
"values": ["wrong", "value2"]
|
||||
}]
|
||||
}
|
||||
]
|
||||
}}}`,
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
{
|
||||
Name: podName,
|
||||
Image: "gcr.io/google_containers/pause:2.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
expectNoError(err)
|
||||
// Wait a bit to allow scheduler to do its thing
|
||||
// TODO: this is brittle; there's no guarantee the scheduler will have run in 10 seconds.
|
||||
Logf("Sleeping 10 seconds and crossing our fingers that scheduler will run in that time.")
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
verifyResult(c, podName, ns)
|
||||
cleanupPods(c, ns)
|
||||
})
|
||||
|
||||
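The requirement above is encoded as a raw JSON annotation. Rebuilt with the typed structs from k8s.io/kubernetes/pkg/api, the same requirement reads more clearly; this is a sketch, and the exact type names are an assumption based on this era of the tree. The key semantics: entries in nodeSelectorTerms are ORed, while matchExpressions within one term are ANDed, so this pod is schedulable only on a node labeled with foo in {bar, value2} or diffkey in {wrong, value2}, labels no test node carries.

```go
// Sketch; assumes `import "k8s.io/kubernetes/pkg/api"` and that the typed
// node-affinity structs match this era of the tree.
var affinity = api.Affinity{
	NodeAffinity: &api.NodeAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: &api.NodeSelector{
			// Terms are ORed with each other...
			NodeSelectorTerms: []api.NodeSelectorTerm{
				{
					// ...while expressions within one term are ANDed.
					MatchExpressions: []api.NodeSelectorRequirement{
						{Key: "foo", Operator: api.NodeSelectorOpIn, Values: []string{"bar", "value2"}},
					},
				},
				{
					MatchExpressions: []api.NodeSelectorRequirement{
						{Key: "diffkey", Operator: api.NodeSelectorOpIn, Values: []string{"wrong", "value2"}},
					},
				},
			},
		},
	},
}
```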
	// Keep the same steps as the test on NodeSelector,
	// but specify Affinity in Pod.Annotations instead of NodeSelector.
	It("validates that required NodeAffinity setting is respected if matching [Conformance]", func() {
		// launch a pod to find a node which can launch a pod. We intentionally do
		// not just take the node list and choose the first of them. Depending on the
		// cluster and the scheduler it might be that a "normal" pod cannot be
		// scheduled onto it.
		By("Trying to launch a pod without a label to get a node which can launch it.")
		podName := "without-label"
		_, err := c.Pods(ns).Create(&api.Pod{
			TypeMeta: unversioned.TypeMeta{
				Kind: "Pod",
			},
			ObjectMeta: api.ObjectMeta{
				Name: podName,
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  podName,
						Image: "gcr.io/google_containers/pause:2.0",
					},
				},
			},
		})
		expectNoError(err)
		expectNoError(waitForPodRunningInNamespace(c, podName, ns))
		pod, err := c.Pods(ns).Get(podName)
		expectNoError(err)

		nodeName := pod.Spec.NodeName
		err = c.Pods(ns).Delete(podName, api.NewDeleteOptions(0))
		expectNoError(err)

		By("Trying to apply a random label on the found node.")
		k := fmt.Sprintf("kubernetes.io/e2e-%s", string(util.NewUUID()))
		v := "42"
		patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v)
		err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
		expectNoError(err)

		node, err := c.Nodes().Get(nodeName)
		expectNoError(err)
		Expect(node.Labels[k]).To(Equal(v))

		By("Trying to relaunch the pod, now with labels.")
		labelPodName := "with-labels"
		_, err = c.Pods(ns).Create(&api.Pod{
			TypeMeta: unversioned.TypeMeta{
				Kind: "Pod",
			},
			ObjectMeta: api.ObjectMeta{
				Name: labelPodName,
				Annotations: map[string]string{
					"scheduler.alpha.kubernetes.io/affinity": `
					{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
						"nodeSelectorTerms": [
							{
								"matchExpressions": [{
									"key": "kubernetes.io/hostname",
									"operator": "In",
									"values": ["` + nodeName + `"]
								},{
									"key": "` + k + `",
									"operator": "In",
									"values": ["` + v + `"]
								}]
							}
						]
					}}}`,
				},
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  labelPodName,
						Image: "gcr.io/google_containers/pause:2.0",
					},
				},
			},
		})
		expectNoError(err)
		defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0))

		// check that pod got scheduled. We intentionally DO NOT check that the
		// pod is running because this will create a race condition with the
		// kubelet and the scheduler: the scheduler might have scheduled a pod
		// already when the kubelet does not know about its new label yet. The
		// kubelet will then refuse to launch the pod.
		expectNoError(waitForPodNotPending(c, ns, labelPodName))
		labelPod, err := c.Pods(ns).Get(labelPodName)
		expectNoError(err)
		Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
	})

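The node label above is applied with a JSON merge patch built via fmt.Sprintf, which is safe here because the generated key and value never contain characters that need JSON escaping. A slightly more defensive standalone sketch (not part of this diff; the label key is a hypothetical stand-in for the UUID-based key in the test) builds the same patch with encoding/json, so escaping is handled for arbitrary keys and values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	k := "kubernetes.io/e2e-example" // hypothetical label key
	v := "42"
	// Marshal the patch instead of formatting it by hand, so keys and
	// values are JSON-escaped correctly.
	patch, err := json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{
			"labels": map[string]string{k: v},
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"metadata":{"labels":{"kubernetes.io/e2e-example":"42"}}}
}
```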
	// Verify that an escaped JSON string of NodeAffinity in a YAML PodSpec works.
	It("validates that embedding the JSON NodeAffinity setting as a string in the annotation value works [Conformance]", func() {
		// launch a pod to find a node which can launch a pod. We intentionally do
		// not just take the node list and choose the first of them. Depending on the
		// cluster and the scheduler it might be that a "normal" pod cannot be
		// scheduled onto it.
		By("Trying to launch a pod without a label to get a node which can launch it.")
		podName := "without-label"
		_, err := c.Pods(ns).Create(&api.Pod{
			TypeMeta: unversioned.TypeMeta{
				Kind: "Pod",
			},
			ObjectMeta: api.ObjectMeta{
				Name: podName,
			},
			Spec: api.PodSpec{
				Containers: []api.Container{
					{
						Name:  podName,
						Image: "gcr.io/google_containers/pause:2.0",
					},
				},
			},
		})
		expectNoError(err)
		expectNoError(waitForPodRunningInNamespace(c, podName, ns))
		pod, err := c.Pods(ns).Get(podName)
		expectNoError(err)

		nodeName := pod.Spec.NodeName
		err = c.Pods(ns).Delete(podName, api.NewDeleteOptions(0))
		expectNoError(err)

		By("Trying to apply a label with fake az info on the found node.")
		k := "kubernetes.io/e2e-az-name"
		v := "e2e-az1"
		patch := fmt.Sprintf(`{"metadata":{"labels":{"%s":"%s"}}}`, k, v)
		err = c.Patch(api.MergePatchType).Resource("nodes").Name(nodeName).Body([]byte(patch)).Do().Error()
		expectNoError(err)

		node, err := c.Nodes().Get(nodeName)
		expectNoError(err)
		Expect(node.Labels[k]).To(Equal(v))

By("Trying to launch a pod that with NodeAffinity setting as embedded JSON string in the annotation value.")
|
||||
labelPodName := "with-labels"
|
||||
nodeSelectionRoot := filepath.Join(testContext.RepoRoot, "docs/user-guide/node-selection")
|
||||
testPodPath := filepath.Join(nodeSelectionRoot, "pod-with-node-affinity.yaml")
|
||||
runKubectlOrDie("create", "-f", testPodPath, fmt.Sprintf("--namespace=%v", ns))
|
||||
defer c.Pods(ns).Delete(labelPodName, api.NewDeleteOptions(0))
|
||||
|
||||
// check that pod got scheduled. We intentionally DO NOT check that the
|
||||
// pod is running because this will create a race condition with the
|
||||
// kubelet and the scheduler: the scheduler might have scheduled a pod
|
||||
// already when the kubelet does not know about its new label yet. The
|
||||
// kubelet will then refuse to launch the pod.
|
||||
expectNoError(waitForPodNotPending(c, ns, labelPodName))
|
||||
labelPod, err := c.Pods(ns).Get(labelPodName)
|
||||
expectNoError(err)
|
||||
Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
|
||||
})
|
||||
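The manifest docs/user-guide/node-selection/pod-with-node-affinity.yaml is referenced but not shown in this diff. From the test we can infer its shape: it must create a pod named with-labels whose required node affinity matches the kubernetes.io/e2e-az-name=e2e-az1 label applied above. A plausible sketch follows; the container name and image are assumptions (the image mirrors the pause image used elsewhere in these tests):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: with-labels
  annotations:
    # The whole NodeAffinity spec is embedded as a JSON string in the
    # annotation value; the folded scalar (>) joins the lines with spaces,
    # which JSON tolerates.
    scheduler.alpha.kubernetes.io/affinity: >
      {"nodeAffinity": {
        "requiredDuringSchedulingIgnoredDuringExecution": {
          "nodeSelectorTerms": [{
            "matchExpressions": [{
              "key": "kubernetes.io/e2e-az-name",
              "operator": "In",
              "values": ["e2e-az1"]
            }]
          }]
        }
      }}
spec:
  containers:
  - name: with-node-affinity
    image: gcr.io/google_containers/pause:2.0
```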
})
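Since every spec here is tagged [Serial], these tests are meant to run one at a time rather than in parallel with the rest of the e2e suite. A typical invocation of just this suite from the repo root looked roughly like the line below; flags varied across releases of the e2e harness, so treat it as illustrative:

```
go run hack/e2e.go -v --test --test_args="--ginkgo.focus=SchedulerPredicates"
```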