Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-22 19:31:44 +00:00)
Remove affinity annotations leftover

commit d9d3396566
parent 08d9893998
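
This commit drops leftover references to the old alpha affinity annotation (a JSON blob stored under an annotation key) now that affinity is carried by the typed Pod.Spec.Affinity field. For orientation, here is a minimal sketch contrasting the two styles; it is illustrative only, not code from this commit, and it assumes the historical annotation key "scheduler.alpha.kubernetes.io/affinity" and present-day k8s.io/api import paths.

    package affinityexample // hypothetical package, for illustration only

    import (
        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // Old style: pod anti-affinity smuggled in as an escaped JSON string in an annotation.
    func podWithAffinityAnnotation() *v1.Pod {
        return &v1.Pod{
            ObjectMeta: metav1.ObjectMeta{
                Annotations: map[string]string{
                    // assumed historical annotation key
                    "scheduler.alpha.kubernetes.io/affinity": `{"podAntiAffinity": {
                        "requiredDuringSchedulingIgnoredDuringExecution": [{
                            "labelSelector": {"matchExpressions": [{"key": "security", "operator": "In", "values": ["S2"]}]},
                            "topologyKey": "kubernetes.io/hostname"}]}}`,
                },
            },
        }
    }

    // New style: the same intent expressed through the typed Pod.Spec.Affinity field.
    func podWithAffinityField() *v1.Pod {
        return &v1.Pod{
            Spec: v1.PodSpec{
                Affinity: &v1.Affinity{
                    PodAntiAffinity: &v1.PodAntiAffinity{
                        RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{
                            LabelSelector: &metav1.LabelSelector{
                                MatchExpressions: []metav1.LabelSelectorRequirement{{
                                    Key:      "security",
                                    Operator: metav1.LabelSelectorOpIn,
                                    Values:   []string{"S2"},
                                }},
                            },
                            TopologyKey: "kubernetes.io/hostname",
                        }},
                    },
                },
            },
        }
    }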
@@ -27,6 +27,8 @@ import (
 )
 
 // ensures the hard PodAntiAffinity is denied if it defines TopologyKey other than kubernetes.io/hostname.
+// TODO: Add test case "invalid topologyKey in requiredDuringSchedulingRequiredDuringExecution then admission fails"
+// after RequiredDuringSchedulingRequiredDuringExecution is implemented.
 func TestInterPodAffinityAdmission(t *testing.T) {
     handler := NewInterPodAntiAffinity()
     pod := api.Pod{
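
As the comment above says, the handler built by NewInterPodAntiAffinity() rejects hard pod anti-affinity whose topology key is anything other than kubernetes.io/hostname. A condensed sketch of that rule, written against the v1 types from the earlier example rather than the internal api package (an approximation, not the plugin's actual code):

    // hardAntiAffinityUsesHostnameOnly returns false when any required
    // anti-affinity term names a topology key other than the node hostname,
    // which is the case the admission test expects to be rejected.
    func hardAntiAffinityUsesHostnameOnly(affinity *v1.Affinity) bool {
        if affinity == nil || affinity.PodAntiAffinity == nil {
            return true
        }
        for _, term := range affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
            if term.TopologyKey != "kubernetes.io/hostname" {
                return false
            }
        }
        return true
    }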
@@ -150,27 +152,6 @@ func TestInterPodAffinityAdmission(t *testing.T) {
             },
             errorExpected: true,
         },
-        // invalid topologyKey in requiredDuringSchedulingRequiredDuringExecution then admission fails.
-        // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
-        // {
-        //     affinity: map[string]string{
-        //         api.AffinityAnnotationKey: `
-        //         {"podAntiAffinity": {
-        //             "requiredDuringSchedulingRequiredDuringExecution": [{
-        //                 "labelSelector": {
-        //                     "matchExpressions": [{
-        //                         "key": "security",
-        //                         "operator": "In",
-        //                         "values":["S2"]
-        //                     }]
-        //                 },
-        //                 "namespaces":[],
-        //                 "topologyKey": " zone "
-        //             }]
-        //         }}`,
-        //     },
-        //     errorExpected: true,
-        // }
         // list of requiredDuringSchedulingIgnoredDuringExecution middle element topologyKey is not valid.
         {
             affinity: &api.Affinity{
@@ -915,6 +915,7 @@ func TestISCSIDiskConflicts(t *testing.T) {
     }
 }
 
+// TODO: Add test case for RequiredDuringSchedulingRequiredDuringExecution after it's implemented.
 func TestPodFitsSelector(t *testing.T) {
     tests := []struct {
         pod *v1.Pod
@@ -1303,42 +1304,6 @@ func TestPodFitsSelector(t *testing.T) {
             fits: true,
             test: "Pod with multiple NodeSelectorTerms ORed in affinity, matches the node's labels and will schedule onto the node",
         },
-        // TODO: Uncomment this test when implement RequiredDuringSchedulingRequiredDuringExecution
-        // {
-        //     pod: &v1.Pod{
-        //         ObjectMeta: metav1.ObjectMeta{
-        //             Annotations: map[string]string{
-        //                 v1.AffinityAnnotationKey: `
-        //                 {"nodeAffinity": {
-        //                     "requiredDuringSchedulingRequiredDuringExecution": {
-        //                         "nodeSelectorTerms": [{
-        //                             "matchExpressions": [{
-        //                                 "key": "foo",
-        //                                 "operator": "In",
-        //                                 "values": ["bar", "value2"]
-        //                             }]
-        //                         }]
-        //                     },
-        //                     "requiredDuringSchedulingIgnoredDuringExecution": {
-        //                         "nodeSelectorTerms": [{
-        //                             "matchExpressions": [{
-        //                                 "key": "foo",
-        //                                 "operator": "NotIn",
-        //                                 "values": ["bar", "value2"]
-        //                             }]
-        //                         }]
-        //                     }
-        //                 }}`,
-        //             },
-        //         },
-        //     },
-        //     labels: map[string]string{
-        //         "foo": "bar",
-        //     },
-        //     fits: false,
-        //     test: "Pod with an Affinity both requiredDuringSchedulingRequiredDuringExecution and " +
-        //         "requiredDuringSchedulingIgnoredDuringExecution indicated that don't match node's labels and won't schedule onto the node",
-        //     },
         {
             pod: &v1.Pod{
                 Spec: v1.PodSpec{
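
The block removed above expressed node affinity as a v1.AffinityAnnotationKey JSON string. With the typed API, the same requiredDuringSchedulingIgnoredDuringExecution term looks roughly like this (illustrative sketch reusing the v1 import from the first example; values mirror the removed test data):

    // Typed equivalent of the removed annotation-based node affinity.
    nodeAffinity := &v1.Affinity{
        NodeAffinity: &v1.NodeAffinity{
            RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
                NodeSelectorTerms: []v1.NodeSelectorTerm{{
                    MatchExpressions: []v1.NodeSelectorRequirement{{
                        Key:      "foo",
                        Operator: v1.NodeSelectorOpIn,
                        Values:   []string{"bar", "value2"},
                    }},
                }},
            },
        },
    }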
@@ -2058,6 +2023,7 @@ func TestRunGeneralPredicates(t *testing.T) {
     }
 }
 
+// TODO: Add test case for RequiredDuringSchedulingRequiredDuringExecution after it's implemented.
 func TestInterPodAffinity(t *testing.T) {
     podLabel := map[string]string{"service": "securityscan"}
     labels1 := map[string]string{
@@ -2345,50 +2311,6 @@ func TestInterPodAffinity(t *testing.T) {
             fits: true,
             test: "satisfies the PodAffinity and PodAntiAffinity with the existing pod",
         },
-        // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
-        //{
-        //    pod: &v1.Pod{
-        //        ObjectMeta: metav1.ObjectMeta{
-        //            Labels: podLabel2,
-        //            Annotations: map[string]string{
-        //                v1.AffinityAnnotationKey: `
-        //                {"podAffinity": {
-        //                    "requiredDuringSchedulingRequiredDuringExecution": [
-        //                        {
-        //                            "labelSelector": {
-        //                                "matchExpressions": [{
-        //                                    "key": "service",
-        //                                    "operator": "Exists"
-        //                                }, {
-        //                                    "key": "wrongkey",
-        //                                    "operator": "DoesNotExist"
-        //                                }]
-        //                            },
-        //                            "topologyKey": "region"
-        //                        }, {
-        //                            "labelSelector": {
-        //                                "matchExpressions": [{
-        //                                    "key": "service",
-        //                                    "operator": "In",
-        //                                    "values": ["securityscan"]
-        //                                }, {
-        //                                    "key": "service",
-        //                                    "operator": "NotIn",
-        //                                    "values": ["WrongValue"]
-        //                                }]
-        //                            },
-        //                            "topologyKey": "region"
-        //                        }
-        //                    ]
-        //                }}`,
-        //            },
-        //        },
-        //    },
-        //    pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podlabel}}},
-        //    node: &node1,
-        //    fits: true,
-        //    test: "satisfies the PodAffinity with different Label Operators in multiple RequiredDuringSchedulingRequiredDuringExecution ",
-        //},
         {
             pod: &v1.Pod{
                 ObjectMeta: metav1.ObjectMeta{
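
The removed pod-affinity block mixed several label-selector operators in one annotation blob. In the typed form, one of its terms would read approximately as below (sketch only, reusing the v1 and metav1 imports from the first example):

    // One typed PodAffinityTerm combining Exists and DoesNotExist requirements,
    // matching the shape of the first term in the removed JSON block.
    term := v1.PodAffinityTerm{
        LabelSelector: &metav1.LabelSelector{
            MatchExpressions: []metav1.LabelSelectorRequirement{
                {Key: "service", Operator: metav1.LabelSelectorOpExists},
                {Key: "wrongkey", Operator: metav1.LabelSelectorOpDoesNotExist},
            },
        },
        TopologyKey: "region",
    }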
@@ -337,7 +337,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
     })
 
     // Keep the same steps with the test on NodeSelector,
-    // but specify Affinity in Pod.Annotations, instead of NodeSelector.
+    // but specify Affinity in Pod.Spec.Affinity, instead of NodeSelector.
     It("validates that required NodeAffinity setting is respected if matching", func() {
         nodeName := GetNodeThatCanRunPod(f)
 
@@ -387,7 +387,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
     })
 
     // labelSelector Operator is DoesNotExist but values are there in requiredDuringSchedulingIgnoredDuringExecution
-    // part of podAffinity,so validation fails.
+    // part of podAffinity, so validation fails.
     It("validates that a pod with an invalid podAffinity is rejected because of the LabelSelectorRequirement is invalid", func() {
         By("Trying to launch a pod with an invalid pod Affinity data.")
         podName := "without-label-" + string(uuid.NewUUID())
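
The invalid-podAffinity case above relies on API validation rejecting a DoesNotExist requirement that also carries values. A hedged illustration of such a requirement (key and value are arbitrary, not taken from this diff):

    // Invalid by construction: the DoesNotExist operator must not carry values,
    // so submitting a pod whose affinity contains this requirement is rejected.
    badRequirement := metav1.LabelSelectorRequirement{
        Key:      "service",
        Operator: metav1.LabelSelectorOpDoesNotExist,
        Values:   []string{"securityscan"},
    }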
@@ -639,30 +639,6 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
         Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
     })
 
-    // Verify that an escaped JSON string of pod affinity and pod anti affinity in a YAML PodSpec works.
-    It("validates that embedding the JSON PodAffinity and PodAntiAffinity setting as a string in the annotation value work", func() {
-        nodeName, _ := runAndKeepPodWithLabelAndGetNodeName(f)
-
-        By("Trying to apply a label with fake az info on the found node.")
-        k := "e2e.inter-pod-affinity.kubernetes.io/zone"
-        v := "e2e-az1"
-        framework.AddOrUpdateLabelOnNode(cs, nodeName, k, v)
-        framework.ExpectNodeHasLabel(cs, nodeName, k, v)
-        defer framework.RemoveLabelOffNode(cs, nodeName, k)
-
-        By("Trying to launch a pod that with PodAffinity & PodAntiAffinity setting as embedded JSON string in the annotation value.")
-        pod := createPodWithPodAffinity(f, "kubernetes.io/hostname")
-        // check that pod got scheduled. We intentionally DO NOT check that the
-        // pod is running because this will create a race condition with the
-        // kubelet and the scheduler: the scheduler might have scheduled a pod
-        // already when the kubelet does not know about its new label yet. The
-        // kubelet will then refuse to launch the pod.
-        framework.ExpectNoError(framework.WaitForPodNotPending(cs, ns, pod.Name))
-        labelPod, err := cs.Core().Pods(ns).Get(pod.Name, metav1.GetOptions{})
-        framework.ExpectNoError(err)
-        Expect(labelPod.Spec.NodeName).To(Equal(nodeName))
-    })
-
     // 1. Run a pod to get an available node, then delete the pod
     // 2. Taint the node with a random taint
     // 3. Try to relaunch the pod with tolerations tolerate the taints on node,