diff --git a/pkg/api/v1/helpers.go b/pkg/api/v1/helpers.go index 3b1190e8a13..4608399043a 100644 --- a/pkg/api/v1/helpers.go +++ b/pkg/api/v1/helpers.go @@ -277,6 +277,11 @@ const ( // an object (e.g. secret, config map) before fetching it again from apiserver. // This annotation can be attached to node. ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + + // AffinityAnnotationKey represents the key of affinity data (json serialized) + // in the Annotations of a Pod. + // TODO: remove when alpha support for affinity is removed + AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity" ) // GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations @@ -646,3 +651,18 @@ func RemoveTaint(node *Node, taint *Taint) (*Node, bool, error) { } return newNode, true, nil } + +// GetAffinityFromPodAnnotations gets the json serialized affinity data from Pod.Annotations +// and converts it to the Affinity type in api. +// TODO: remove when alpha support for affinity is removed +func GetAffinityFromPodAnnotations(annotations map[string]string) (*Affinity, error) { + if len(annotations) > 0 && annotations[AffinityAnnotationKey] != "" { + var affinity Affinity + err := json.Unmarshal([]byte(annotations[AffinityAnnotationKey]), &affinity) + if err != nil { + return nil, err + } + return &affinity, nil + } + return nil, nil +} diff --git a/pkg/api/v1/helpers_test.go b/pkg/api/v1/helpers_test.go index fd51eb18184..c1496dbe36f 100644 --- a/pkg/api/v1/helpers_test.go +++ b/pkg/api/v1/helpers_test.go @@ -644,3 +644,60 @@ func TestSysctlsFromPodAnnotation(t *testing.T) { } } } + +// TODO: remove when alpha support for affinity is removed +func TestGetAffinityFromPodAnnotations(t *testing.T) { + testCases := []struct { + pod *Pod + expectErr bool + }{ + { + pod: &Pod{}, + expectErr: false, + }, + { + pod: &Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "foo", + "operator": "In", + "values": ["value1", "value2"] + }] + }] + }}}`, + }, + }, + }, + expectErr: false, + }, + { + pod: &Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "foo", + `, + }, + }, + }, + expectErr: true, + }, + } + + for i, tc := range testCases { + _, err := GetAffinityFromPodAnnotations(tc.pod.Annotations) + if err == nil && tc.expectErr { + t.Errorf("[%v]expected error but got none.", i) + } + if err != nil && !tc.expectErr { + t.Errorf("[%v]did not expect error but got: %v", i, err) + } + } +} diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 7d106f0c98e..4bf9fedea5e 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -67,7 +67,11 @@ const ( // Note: This feature is not supported for `BestEffort` pods. 
ExperimentalCriticalPodAnnotation utilfeature.Feature = "ExperimentalCriticalPodAnnotation" - // Determines if affinity defined in annotations should bep rocessed + // owner: @davidopp + // alpha: v1.6 + // + // Determines if affinity defined in annotations should be processed + // TODO: remove when alpha support for affinity is removed AffinityInAnnotations utilfeature.Feature = "AffinityInAnnotations" ) @@ -85,6 +89,7 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS DynamicVolumeProvisioning: {Default: true, PreRelease: utilfeature.Alpha}, ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: utilfeature.Beta}, ExperimentalCriticalPodAnnotation: {Default: false, PreRelease: utilfeature.Alpha}, + AffinityInAnnotations: {Default: false, PreRelease: utilfeature.Alpha}, // inherited features from generic apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: diff --git a/plugin/pkg/scheduler/algorithm/predicates/BUILD b/plugin/pkg/scheduler/algorithm/predicates/BUILD index 44cc969fd00..b77a17843af 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/BUILD +++ b/plugin/pkg/scheduler/algorithm/predicates/BUILD @@ -47,6 +47,7 @@ go_test( "//vendor:k8s.io/apimachinery/pkg/api/resource", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/labels", + "//vendor:k8s.io/apiserver/pkg/util/feature", ], ) diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go index ec96ab4be95..cc6b2dd5f32 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -585,7 +585,7 @@ func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool { // 5. zero-length non-nil []NodeSelectorRequirement matches no nodes also, just for simplicity // 6. non-nil empty NodeSelectorRequirement is not allowed nodeAffinityMatches := true - affinity := pod.Spec.Affinity + affinity := schedulercache.ReconcileAffinity(pod) if affinity != nil && affinity.NodeAffinity != nil { nodeAffinity := affinity.NodeAffinity // if no required NodeAffinity requirements, will do no-op, means select all nodes. @@ -897,7 +897,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta interface } // Now check if requirements will be satisfied on this node. 
- affinity := pod.Spec.Affinity + affinity := schedulercache.ReconcileAffinity(pod) if affinity == nil || (affinity.PodAffinity == nil && affinity.PodAntiAffinity == nil) { return true, nil, nil } @@ -1001,7 +1001,7 @@ func getMatchingAntiAffinityTerms(pod *v1.Pod, nodeInfoMap map[string]*scheduler } var nodeResult []matchingPodAntiAffinityTerm for _, existingPod := range nodeInfo.PodsWithAffinity() { - affinity := existingPod.Spec.Affinity + affinity := schedulercache.ReconcileAffinity(existingPod) if affinity == nil { continue } @@ -1029,7 +1029,7 @@ func getMatchingAntiAffinityTerms(pod *v1.Pod, nodeInfoMap map[string]*scheduler func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *v1.Pod, allPods []*v1.Pod) ([]matchingPodAntiAffinityTerm, error) { var result []matchingPodAntiAffinityTerm for _, existingPod := range allPods { - affinity := existingPod.Spec.Affinity + affinity := schedulercache.ReconcileAffinity(existingPod) if affinity != nil && affinity.PodAntiAffinity != nil { existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName) if err != nil { diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go index f5dec1e4545..f6c2b5250f6 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" @@ -3579,3 +3580,1242 @@ func TestVolumeZonePredicate(t *testing.T) { } } + +// TODO: remove when alpha support for affinity is removed +func TestPodAnnotationFitsSelector(t *testing.T) { + utilfeature.DefaultFeatureGate.Set("AffinityInAnnotations=true") + tests := []struct { + pod *v1.Pod + labels map[string]string + fits bool + test string + }{ + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "foo", + "operator": "In", + "values": ["bar", "value2"] + }] + }] + }}}`, + }, + }, + }, + labels: map[string]string{ + "foo": "bar", + }, + fits: true, + test: "Pod with matchExpressions using In operator that matches the existing node", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "kernel-version", + "operator": "Gt", + "values": ["0204"] + }] + }] + }}}`, + }, + }, + }, + labels: map[string]string{ + // We use two digit to denote major version and two digit for minor version. 
+ "kernel-version": "0206", + }, + fits: true, + test: "Pod with matchExpressions using Gt operator that matches the existing node", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "mem-type", + "operator": "NotIn", + "values": ["DDR", "DDR2"] + }] + }] + }}}`, + }, + }, + }, + labels: map[string]string{ + "mem-type": "DDR3", + }, + fits: true, + test: "Pod with matchExpressions using NotIn operator that matches the existing node", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "GPU", + "operator": "Exists" + }] + }] + }}}`, + }, + }, + }, + labels: map[string]string{ + "GPU": "NVIDIA-GRID-K1", + }, + fits: true, + test: "Pod with matchExpressions using Exists operator that matches the existing node", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "foo", + "operator": "In", + "values": ["value1", "value2"] + }] + }] + }}}`, + }, + }, + }, + labels: map[string]string{ + "foo": "bar", + }, + fits: false, + test: "Pod with affinity that don't match node's labels won't schedule onto the node", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": null + }}}`, + }, + }, + }, + labels: map[string]string{ + "foo": "bar", + }, + fits: false, + test: "Pod with a nil []NodeSelectorTerm in affinity, can't match the node's labels and won't schedule onto the node", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [] + }}}`, + }, + }, + }, + labels: map[string]string{ + "foo": "bar", + }, + fits: false, + test: "Pod with an empty []NodeSelectorTerm in affinity, can't match the node's labels and won't schedule onto the node", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{}, {}] + }}}`, + }, + }, + }, + labels: map[string]string{ + "foo": "bar", + }, + fits: false, + test: "Pod with invalid NodeSelectTerms in affinity will match no objects and won't schedule onto the node", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{"matchExpressions": [{}]}] + }}}`, + }, + }, + }, + labels: map[string]string{ + "foo": "bar", + }, + fits: false, + test: "Pod with empty MatchExpressions is not a valid value will match no objects and won't schedule onto the node", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "some-key": "some-value", + }, + }, + }, + labels: map[string]string{ + "foo": "bar", + 
}, + fits: true, + test: "Pod with no Affinity will schedule onto a node", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": null + }}`, + }, + }, + }, + labels: map[string]string{ + "foo": "bar", + }, + fits: true, + test: "Pod with Affinity but nil NodeSelector will schedule onto a node", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "GPU", + "operator": "Exists" + }, { + "key": "GPU", + "operator": "NotIn", + "values": ["AMD", "INTER"] + }] + }] + }}}`, + }, + }, + }, + labels: map[string]string{ + "GPU": "NVIDIA-GRID-K1", + }, + fits: true, + test: "Pod with multiple matchExpressions ANDed that matches the existing node", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "GPU", + "operator": "Exists" + }, { + "key": "GPU", + "operator": "In", + "values": ["AMD", "INTER"] + }] + }] + }}}`, + }, + }, + }, + labels: map[string]string{ + "GPU": "NVIDIA-GRID-K1", + }, + fits: false, + test: "Pod with multiple matchExpressions ANDed that doesn't match the existing node", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [ + { + "matchExpressions": [{ + "key": "foo", + "operator": "In", + "values": ["bar", "value2"] + }] + }, + { + "matchExpressions": [{ + "key": "diffkey", + "operator": "In", + "values": ["wrong", "value2"] + }] + } + ] + }}}`, + }, + }, + }, + labels: map[string]string{ + "foo": "bar", + }, + fits: true, + test: "Pod with multiple NodeSelectorTerms ORed in affinity, matches the node's labels and will schedule onto the node", + }, + // TODO: Uncomment this test when implement RequiredDuringSchedulingRequiredDuringExecution + // { + // pod: &v1.Pod{ + // ObjectMeta: metav1.ObjectMeta{ + // Annotations: map[string]string{ + // v1.AffinityAnnotationKey: ` + // {"nodeAffinity": { + // "requiredDuringSchedulingRequiredDuringExecution": { + // "nodeSelectorTerms": [{ + // "matchExpressions": [{ + // "key": "foo", + // "operator": "In", + // "values": ["bar", "value2"] + // }] + // }] + // }, + // "requiredDuringSchedulingIgnoredDuringExecution": { + // "nodeSelectorTerms": [{ + // "matchExpressions": [{ + // "key": "foo", + // "operator": "NotIn", + // "values": ["bar", "value2"] + // }] + // }] + // } + // }}`, + // }, + // }, + // }, + // labels: map[string]string{ + // "foo": "bar", + // }, + // fits: false, + // test: "Pod with an Affinity both requiredDuringSchedulingRequiredDuringExecution and " + + // "requiredDuringSchedulingIgnoredDuringExecution indicated that don't match node's labels and won't schedule onto the node", + // }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "foo", + "operator": "Exists" + }] + }] + }}}`, + }, + }, + Spec: v1.PodSpec{ + 
NodeSelector: map[string]string{ + "foo": "bar", + }, + }, + }, + labels: map[string]string{ + "foo": "bar", + }, + fits: true, + test: "Pod with an Affinity and a PodSpec.NodeSelector(the old thing that we are deprecating) " + + "both are satisfied, will schedule onto the node", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "foo", + "operator": "Exists" + }] + }] + }}}`, + }, + }, + Spec: v1.PodSpec{ + NodeSelector: map[string]string{ + "foo": "bar", + }, + }, + }, + labels: map[string]string{ + "foo": "barrrrrr", + }, + fits: false, + test: "Pod with an Affinity matches node's labels but the PodSpec.NodeSelector(the old thing that we are deprecating) " + + "is not satisfied, won't schedule onto the node", + }, + } + expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch} + + for _, test := range tests { + node := v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: test.labels}} + nodeInfo := schedulercache.NewNodeInfo() + nodeInfo.SetNode(&node) + + fits, reasons, err := PodSelectorMatches(test.pod, PredicateMetadata(test.pod, nil), nodeInfo) + if err != nil { + t.Errorf("%s: unexpected error: %v", test.test, err) + } + if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) { + t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, expectedFailureReasons) + } + if fits != test.fits { + t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits) + } + } +} + +// TODO: remove when alpha support for affinity is removed +func TestInterPodAffinityAnnotations(t *testing.T) { + utilfeature.DefaultFeatureGate.Set("AffinityInAnnotations=true") + podLabel := map[string]string{"service": "securityscan"} + labels1 := map[string]string{ + "region": "r1", + "zone": "z11", + } + podLabel2 := map[string]string{"security": "S1"} + node1 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labels1}} + tests := []struct { + pod *v1.Pod + pods []*v1.Pod + node *v1.Node + fits bool + test string + }{ + { + pod: new(v1.Pod), + node: &node1, + fits: true, + test: "A pod that has no required pod affinity scheduling rules can schedule onto a node with no existing pods", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "region" + }] + }}`, + }, + }, + }, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}}, + node: &node1, + fits: true, + test: "satisfies with requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using In operator that matches the existing pod", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: `{"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "NotIn", + "values": ["securityscan3", "value3"] + }] + }, + "topologyKey": "region" + }] + }}`, + }, + }, + }, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: 
podLabel}}}, + node: &node1, + fits: true, + test: "satisfies the pod with requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using not in operator in labelSelector that matches the existing pod", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "namespaces":["DiffNameSpace"] + }] + }}`, + }, + }, + }, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel, Namespace: "ns"}}}, + node: &node1, + fits: false, + test: "Does not satisfy the PodAffinity with labelSelector because of diff Namespace", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabel, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["antivirusscan", "value2"] + }] + } + }] + }}`, + }, + }, + }, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}}, + node: &node1, + fits: false, + test: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [ + { + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "Exists" + }, { + "key": "wrongkey", + "operator": "DoesNotExist" + }] + }, + "topologyKey": "region" + }, { + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan"] + }, { + "key": "service", + "operator": "NotIn", + "values": ["WrongValue"] + }] + }, + "topologyKey": "region" + } + ] + }}`, + }, + }, + }, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}}, + node: &node1, + fits: true, + test: "satisfies the PodAffinity with different label Operators in multiple RequiredDuringSchedulingIgnoredDuringExecution ", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [ + { + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "Exists" + }, { + "key": "wrongkey", + "operator": "DoesNotExist" + }] + }, + "topologyKey": "region" + }, { + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan2"] + }, { + "key": "service", + "operator": "NotIn", + "values": ["WrongValue"] + }] + }, + "topologyKey": "region" + } + ] + }}`, + }, + }, + }, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}}, + node: &node1, + fits: false, + test: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node because one of the matchExpression item don't match.", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + 
{"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "region" + }] + }, + "podAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["antivirusscan", "value2"] + }] + }, + "topologyKey": "node" + }] + }}`, + }, + }, + }, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}}, + node: &node1, + fits: true, + test: "satisfies the PodAffinity and PodAntiAffinity with the existing pod", + }, + // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution. + //{ + // pod: &v1.Pod{ + // ObjectMeta: metav1.ObjectMeta{ + // Labels: podLabel2, + // Annotations: map[string]string{ + // v1.AffinityAnnotationKey: ` + // {"podAffinity": { + // "requiredDuringSchedulingRequiredDuringExecution": [ + // { + // "labelSelector": { + // "matchExpressions": [{ + // "key": "service", + // "operator": "Exists" + // }, { + // "key": "wrongkey", + // "operator": "DoesNotExist" + // }] + // }, + // "topologyKey": "region" + // }, { + // "labelSelector": { + // "matchExpressions": [{ + // "key": "service", + // "operator": "In", + // "values": ["securityscan"] + // }, { + // "key": "service", + // "operator": "NotIn", + // "values": ["WrongValue"] + // }] + // }, + // "topologyKey": "region" + // } + // ] + // }}`, + // }, + // }, + // }, + // pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podlabel}}}, + // node: &node1, + // fits: true, + // test: "satisfies the PodAffinity with different Label Operators in multiple RequiredDuringSchedulingRequiredDuringExecution ", + //}, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "region" + }] + }, + "podAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["antivirusscan", "value2"] + }] + }, + "topologyKey": "node" + }] + }}`, + }, + }, + }, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, + ObjectMeta: metav1.ObjectMeta{Labels: podLabel, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"PodAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["antivirusscan", "value2"] + }] + }, + "topologyKey": "node" + }] + }}`, + }}, + }}, + node: &node1, + fits: true, + test: "satisfies the PodAffinity and PodAntiAffinity and PodAntiAffinity symmetry with the existing pod", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabel2, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "region" + }] + }, + "podAntiAffinity": { + 
"requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "zone" + }] + }}`, + }, + }, + }, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}}, + node: &node1, + fits: false, + test: "satisfies the PodAffinity but doesn't satisfies the PodAntiAffinity with the existing pod", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabel, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "region" + }] + }, + "podAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["antivirusscan", "value2"] + }] + }, + "topologyKey": "node" + }] + }}`, + }, + }, + }, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, + ObjectMeta: metav1.ObjectMeta{Labels: podLabel, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"PodAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "zone" + }] + }}`, + }}, + }}, + node: &node1, + fits: false, + test: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfies PodAntiAffinity symmetry with the existing pod", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabel, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "NotIn", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "region" + }] + }}`, + }, + }, + }, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}}, + node: &node1, + fits: false, + test: "pod matches its own Label in PodAffinity and that matches the existing pod Labels", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabel, + }, + }, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, + ObjectMeta: metav1.ObjectMeta{Labels: podLabel, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"PodAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "zone" + }] + }}`, + }}, + }}, + node: &node1, + fits: false, + test: "verify that PodAntiAffinity from existing pod is respected when pod has no AntiAffinity constraints. 
doesn't satisfy PodAntiAffinity symmetry with the existing pod", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: podLabel, + }, + }, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, + ObjectMeta: metav1.ObjectMeta{Labels: podLabel, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"PodAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "service", + "operator": "NotIn", + "values": ["securityscan", "value2"] + }] + }, + "topologyKey": "zone" + }] + }}`, + }}, + }}, + node: &node1, + fits: true, + test: "verify that PodAntiAffinity from existing pod is respected when pod has no AntiAffinity constraints. satisfy PodAntiAffinity symmetry with the existing pod", + }, + } + expectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch} + + for _, test := range tests { + node := test.node + var podsOnNode []*v1.Pod + for _, pod := range test.pods { + if pod.Spec.NodeName == node.Name { + podsOnNode = append(podsOnNode, pod) + } + } + + fit := PodAffinityChecker{ + info: FakeNodeInfo(*node), + podLister: algorithm.FakePodLister(test.pods), + } + nodeInfo := schedulercache.NewNodeInfo(podsOnNode...) + nodeInfo.SetNode(test.node) + nodeInfoMap := map[string]*schedulercache.NodeInfo{test.node.Name: nodeInfo} + fits, reasons, err := fit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo) + if err != nil { + t.Errorf("%s: unexpected error %v", test.test, err) + } + if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) { + t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, expectedFailureReasons) + } + if fits != test.fits { + t.Errorf("%s: expected %v got %v", test.test, test.fits, fits) + } + } +} + +// TODO: remove when alpha support for affinity is removed +func TestInterPodAffinityAnnotationsWithMultipleNodes(t *testing.T) { + utilfeature.DefaultFeatureGate.Set("AffinityInAnnotations=true") + podLabelA := map[string]string{ + "foo": "bar", + } + labelRgChina := map[string]string{ + "region": "China", + } + labelRgChinaAzAz1 := map[string]string{ + "region": "China", + "az": "az1", + } + labelRgIndia := map[string]string{ + "region": "India", + } + tests := []struct { + pod *v1.Pod + pods []*v1.Pod + nodes []v1.Node + fits map[string]bool + test string + }{ + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "foo", + "operator": "In", + "values": ["bar"] + }] + }, + "topologyKey": "region" + }] + }}`, + }, + }, + }, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelA}}, + }, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, + }, + fits: map[string]bool{ + "machine1": true, + "machine2": true, + "machine3": false, + }, + test: "A pod can be scheduled onto all the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + { + "nodeAffinity": { + 
"requiredDuringSchedulingIgnoredDuringExecution": { + "nodeSelectorTerms": [{ + "matchExpressions": [{ + "key": "hostname", + "operator": "NotIn", + "values": ["h1"] + }] + }] + } + }, + "podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "foo", + "operator": "In", + "values": ["abc"] + }] + }, + "topologyKey": "region" + }] + } + }`, + }, + }, + }, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "abc"}}}, + {Spec: v1.PodSpec{NodeName: "nodeB"}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "def"}}}, + }, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "h1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "h2"}}}, + }, + fits: map[string]bool{ + "nodeA": false, + "nodeB": true, + }, + test: "NodeA and nodeB have same topologyKey and label value. NodeA does not satisfy node affinity rule, but has an existing pod that match the inter pod affinity rule. The pod can be scheduled onto nodeB.", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "foo": "bar", + }, + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "foo", + "operator": "In", + "values": ["bar"] + }] + }, + "topologyKey": "zone" + }] + }}`, + }, + }, + }, + pods: []*v1.Pod{}, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"zone": "az1", "hostname": "h1"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"zone": "az2", "hostname": "h2"}}}, + }, + fits: map[string]bool{ + "nodeA": true, + "nodeB": true, + }, + test: "The affinity rule is to schedule all of the pods of this collection to the same zone. The first pod of the collection " + + "should not be blocked from being scheduled onto any node, even there's no existing pod that match the rule anywhere.", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + { + "podAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "foo", + "operator": "In", + "values": ["abc"] + }] + }, + "topologyKey": "region" + }] + } + }`, + }, + }, + }, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "abc"}}}, + }, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "nodeA"}}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "nodeB"}}}, + }, + fits: map[string]bool{ + "nodeA": false, + "nodeB": false, + }, + test: "NodeA and nodeB have same topologyKey and label value. NodeA has an existing pod that match the inter pod affinity rule. 
The pod can not be scheduled onto nodeA and nodeB.", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + v1.AffinityAnnotationKey: ` + { + "podAntiAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [{ + "labelSelector": { + "matchExpressions": [{ + "key": "foo", + "operator": "In", + "values": ["abc"] + }] + }, + "topologyKey": "region" + }] + } + }`, + }, + }, + }, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "abc"}}}, + }, + nodes: []v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: labelRgChina}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}}, + {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}}, + }, + fits: map[string]bool{ + "nodeA": false, + "nodeB": false, + "nodeC": true, + }, + test: "NodeA and nodeB have same topologyKey and label value. NodeA has an existing pod that match the inter pod affinity rule. The pod can not be scheduled onto nodeA and nodeB but can be schedulerd onto nodeC", + }, + } + affinityExpectedFailureReasons := []algorithm.PredicateFailureReason{ErrPodAffinityNotMatch} + selectorExpectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch} + + for _, test := range tests { + nodeListInfo := FakeNodeListInfo(test.nodes) + for _, node := range test.nodes { + var podsOnNode []*v1.Pod + for _, pod := range test.pods { + if pod.Spec.NodeName == node.Name { + podsOnNode = append(podsOnNode, pod) + } + } + + testFit := PodAffinityChecker{ + info: nodeListInfo, + podLister: algorithm.FakePodLister(test.pods), + } + nodeInfo := schedulercache.NewNodeInfo(podsOnNode...) + nodeInfo.SetNode(&node) + nodeInfoMap := map[string]*schedulercache.NodeInfo{node.Name: nodeInfo} + fits, reasons, err := testFit.InterPodAffinityMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo) + if err != nil { + t.Errorf("%s: unexpected error %v", test.test, err) + } + if !fits && !reflect.DeepEqual(reasons, affinityExpectedFailureReasons) { + t.Errorf("%s: unexpected failure reasons: %v", test.test, reasons) + } + affinity, err := v1.GetAffinityFromPodAnnotations(test.pod.ObjectMeta.Annotations) + if err != nil { + t.Errorf("%s: unexpected error: %v", test.test, err) + } + if affinity != nil && affinity.NodeAffinity != nil { + nodeInfo := schedulercache.NewNodeInfo() + nodeInfo.SetNode(&node) + nodeInfoMap := map[string]*schedulercache.NodeInfo{node.Name: nodeInfo} + fits2, reasons, err := PodSelectorMatches(test.pod, PredicateMetadata(test.pod, nodeInfoMap), nodeInfo) + if err != nil { + t.Errorf("%s: unexpected error: %v", test.test, err) + } + if !fits2 && !reflect.DeepEqual(reasons, selectorExpectedFailureReasons) { + t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.test, reasons, selectorExpectedFailureReasons) + } + fits = fits && fits2 + } + + if fits != test.fits[node.Name] { + t.Errorf("%s: expected %v for %s got %v", test.test, test.fits[node.Name], node.Name, fits) + } + } + } +} diff --git a/plugin/pkg/scheduler/algorithm/priorities/BUILD b/plugin/pkg/scheduler/algorithm/priorities/BUILD index f3a18dd86b8..13c91320c4a 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/BUILD +++ b/plugin/pkg/scheduler/algorithm/priorities/BUILD @@ -67,6 +67,7 @@ go_test( "//plugin/pkg/scheduler/schedulercache:go_default_library", "//vendor:k8s.io/apimachinery/pkg/api/resource", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", + 
"//vendor:k8s.io/apiserver/pkg/util/feature", ], ) diff --git a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go index c2c7bf5b38d..75cdcd0cc24 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go +++ b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go @@ -116,7 +116,7 @@ func (p *podAffinityPriorityMap) processTerms(terms []v1.WeightedPodAffinityTerm // Symmetry need to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity, // symmetry need to be considered for hard requirements from podAffinity func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { - affinity := pod.Spec.Affinity + affinity := schedulercache.ReconcileAffinity(pod) hasAffinityConstraints := affinity != nil && affinity.PodAffinity != nil hasAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil @@ -137,7 +137,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node if err != nil { return err } - existingPodAffinity := existingPod.Spec.Affinity + existingPodAffinity := schedulercache.ReconcileAffinity(existingPod) existingHasAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAffinity != nil existingHasAntiAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAntiAffinity != nil diff --git a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go index 0bfb7d8743d..d2247b459fe 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go @@ -22,6 +22,7 @@ import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" @@ -613,3 +614,567 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) { } } } + +// TODO: remove when alpha support for affinity is removed +func TestInterPodAffinityAnnotationsPriority(t *testing.T) { + utilfeature.DefaultFeatureGate.Set("AffinityInAnnotations=true") + labelRgChina := map[string]string{ + "region": "China", + } + labelRgIndia := map[string]string{ + "region": "India", + } + labelAzAz1 := map[string]string{ + "az": "az1", + } + labelAzAz2 := map[string]string{ + "az": "az2", + } + labelRgChinaAzAz1 := map[string]string{ + "region": "China", + "az": "az1", + } + podLabelSecurityS1 := map[string]string{ + "security": "S1", + } + podLabelSecurityS2 := map[string]string{ + "security": "S2", + } + // considered only preferredDuringSchedulingIgnoredDuringExecution in pod affinity + stayWithS1InRegion := map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 5, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S1"] + }] + }, + "namespaces": [], + "topologyKey": "region" + } + }] + }}`, + } + stayWithS2InRegion := map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 6, + "podAffinityTerm": { + "labelSelector": { + 
"matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S2"] + }] + }, + "namespaces": [], + "topologyKey": "region" + } + }] + }}`, + } + affinity3 := map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [ + { + "weight": 8, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "NotIn", + "values":["S1"] + }, { + "key": "security", + "operator": "In", + "values":["S2"] + }] + }, + "namespaces": [], + "topologyKey": "region" + } + }, { + "weight": 2, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "Exists" + }, { + "key": "wrongkey", + "operator": "DoesNotExist" + }] + }, + "namespaces": [], + "topologyKey": "region" + } + } + ] + }}`, + } + hardAffinity := map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [ + { + "labelSelector":{ + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values": ["S1", "value2"] + }] + }, + "namespaces": [], + "topologyKey": "region" + }, { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "Exists" + }, { + "key": "wrongkey", + "operator": "DoesNotExist" + }] + }, + "namespaces": [], + "topologyKey": "region" + } + ] + }}`, + } + awayFromS1InAz := map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAntiAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 5, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S1"] + }] + }, + "namespaces": [], + "topologyKey": "az" + } + }] + }}`, + } + // to stay away from security S2 in any az. + awayFromS2InAz := map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAntiAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 5, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S2"] + }] + }, + "namespaces": [], + "topologyKey": "az" + } + }] + }}`, + } + // to stay with security S1 in same region, stay away from security S2 in any az. 
+ stayWithS1InRegionAwayFromS2InAz := map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 8, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S1"] + }] + }, + "namespaces": [], + "topologyKey": "region" + } + }] + }, + "podAntiAffinity": { + "preferredDuringSchedulingIgnoredDuringExecution": [{ + "weight": 5, + "podAffinityTerm": { + "labelSelector": { + "matchExpressions": [{ + "key": "security", + "operator": "In", + "values":["S2"] + }] + }, + "namespaces": [], + "topologyKey": "az" + } + }] + }}`, + } + + tests := []struct { + pod *v1.Pod + pods []*v1.Pod + nodes []*v1.Node + expectedList schedulerapi.HostPriorityList + test string + }{ + { + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: map[string]string{}}}, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + }, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, + test: "all machines are same priority as Affinity is nil", + }, + // the node(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score + // the node(machine3) that don't have the label {"region": "whatever the value is"} (mismatch the topology key) but that have existing pods that match the labelSelector get low score + // the node(machine2) that have the label {"region": "China"} (match the topology key) but that have existing pods that mismatch the labelSelector get low score + { + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, + }, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + }, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, + test: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" + + "which doesn't match either pods in nodes or in topology key", + }, + // the node1(machine1) that have the label {"region": "China"} (match the topology key) and that have existing pods that match the labelSelector get high score + // the node2(machine2) that have the label {"region": "China"}, match the topology key and have the same label value with node1, get the same high score with node1 + // the node3(machine3) that have the label {"region": "India"}, match the topology key but have a different label value, don't have existing pods that match the labelSelector, + // get a low score. 
+    {
+      pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Annotations: stayWithS1InRegion}},
+      pods: []*v1.Pod{
+        {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+      },
+      nodes: []*v1.Node{
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
+      },
+      expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
+      test: "All the nodes that have the same topology key & label value, where one of them has an existing pod that matches the affinity rules, have the same score",
+    },
+    // there are 2 regions, say regionChina(machine1,machine3,machine4) and regionIndia(machine2,machine5), both regions have nodes that match the preference.
+    // But there are more nodes(actually more existing pods) in regionChina that match the preference than regionIndia.
+    // Then, nodes in regionChina get a higher score than nodes in regionIndia, and all the nodes in regionChina should get the same (high) score,
+    // while all the nodes in regionIndia should get the same (low) score.
+    {
+      pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS2InRegion}},
+      pods: []*v1.Pod{
+        {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+        {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+        {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+        {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+        {Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+        {Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+      },
+      nodes: []*v1.Node{
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
+      },
+      expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 5}},
+      test: "Affinity: nodes in one region have more matching pods compared to the other region, so the region which has more matches will get a high score",
+    },
+    // Test with the different operators and values for pod affinity scheduling preference, including some match failures.
+    {
+      pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: affinity3}},
+      pods: []*v1.Pod{
+        {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+        {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+        {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+      },
+      nodes: []*v1.Node{
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
+      },
+      expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
+      test: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures",
+    },
+    // Test the symmetry cases for affinity, the difference between affinity and symmetry is not that the pod wants to run together with some existing pods,
+    // but that the existing pods have the inter pod affinity preference while the pod to schedule satisfies the preference.
+    {
+      pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+      pods: []*v1.Pod{
+        {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}},
+        {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: stayWithS2InRegion}},
+      },
+      nodes: []*v1.Node{
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
+      },
+      expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
+      test: "Affinity symmetry: considered only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
+    },
+    {
+      pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+      pods: []*v1.Pod{
+        {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: hardAffinity}},
+        {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: hardAffinity}},
+      },
+      nodes: []*v1.Node{
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
+      },
+      expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
+      test: "Affinity symmetry: considered RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
+    },
+
+    // The pod to schedule prefers to stay away from some existing pods at node level using the pod anti affinity.
+    // the nodes that have the label {"node": "bar"} (match the topology key) and that have existing pods that match the labelSelector get low score
+    // the nodes that don't have the label {"node": "whatever the value is"} (mismatch the topology key) but that have existing pods that match the labelSelector get high score
+    // the nodes that have the label {"node": "bar"} (match the topology key) but that have existing pods that mismatch the labelSelector get high score
+    // there are 2 nodes, say node1 and node2, both nodes have pods that match the labelSelector and have topology-key in node.Labels.
+    // But there are more pods on node1 that match the preference than node2. Then, node1 gets a lower score than node2.
+    {
+      pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
+      pods: []*v1.Pod{
+        {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+        {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+      },
+      nodes: []*v1.Node{
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
+      },
+      expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
+      test: "Anti Affinity: pod that does not match existing pods in node will get high score",
+    },
+    {
+      pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
+      pods: []*v1.Pod{
+        {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+        {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+      },
+      nodes: []*v1.Node{
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
+      },
+      expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
+      test: "Anti Affinity: pod that does not match topology key & matches the pods in nodes will get higher score compared to others",
+    },
+    {
+      pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
+      pods: []*v1.Pod{
+        {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+        {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}},
+        {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+      },
+      nodes: []*v1.Node{
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
+        {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
+      },
+      expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
+      test: "Anti Affinity: one node has more matching pods compared to the other node, so the node with fewer matches will get the high score",
+    },
+    // Test the symmetry cases for anti affinity
+    {
+      pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}},
+      pods: []*v1.Pod{
+        {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS2InAz}},
+        {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels:
podLabelSecurityS2, Annotations: awayFromS1InAz}}, + }, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz2}}, + }, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}}, + test: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score", + }, + // Test both affinity and anti-affinity + { + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, + }, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}}, + }, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}}, + test: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity", + }, + // Combined cases considering both affinity and anti-affinity: the pod to schedule and the existing pods have the same labels (they are in the same RC/service), + // the pod prefers to run together with its sibling pods in the same region, but wants to stay away from them at node level, + // so that all the pods of an RC/service can stay in the same region while trying to separate from each other. + // machine-1, machine-3 and machine-4 are in the China region; machine-2 and machine-5 are in the India region. + { + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, + }, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChinaAzAz1}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgChina}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelRgChina}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, + }, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 4}}, + test: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels", + }, + // Consider Affinity, Anti Affinity and symmetry together.
+ // for Affinity, the weights are: 8, 0, 0, 0 + // for Anti Affinity, the weights are: 0, -5, 0, 0 + // for Affinity symmetry, the weights are: 0, 0, 8, 0 + // for Anti Affinity symmetry, the weights are: 0, 0, 0, -5 + { + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Annotations: stayWithS1InRegionAwayFromS2InAz}}, + {Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: metav1.ObjectMeta{Annotations: awayFromS1InAz}}, + }, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labelAzAz2}}, + }, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 0}}, + test: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry", + }, + } + for _, test := range tests { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) + interPodAffinity := InterPodAffinity{ + info: FakeNodeListInfo(test.nodes), + nodeLister: algorithm.FakeNodeLister(test.nodes), + podLister: algorithm.FakePodLister(test.pods), + hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight, + } + list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list) + } + } +} + +// TODO: remove when alpha support for affinity is removed +func TestHardPodAffinityAnnotationsSymmetricWeight(t *testing.T) { + utilfeature.DefaultFeatureGate.Set("AffinityInAnnotations=true") + podLabelServiceS1 := map[string]string{ + "service": "S1", + } + labelRgChina := map[string]string{ + "region": "China", + } + labelRgIndia := map[string]string{ + "region": "India", + } + labelAzAz1 := map[string]string{ + "az": "az1", + } + hardPodAffinity := map[string]string{ + v1.AffinityAnnotationKey: ` + {"podAffinity": { + "requiredDuringSchedulingIgnoredDuringExecution": [ + { + "labelSelector":{ + "matchExpressions": [{ + "key": "service", + "operator": "In", + "values": ["S1"] + }] + }, + "namespaces": [], + "topologyKey": "region" + } + ] + }}`, + } + tests := []struct { + pod *v1.Pod + pods []*v1.Pod + nodes []*v1.Node + hardPodAffinityWeight int + expectedList schedulerapi.HostPriorityList + test string + }{ + { + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelServiceS1}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Annotations: hardPodAffinity}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Annotations: hardPodAffinity}}, + }, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + 
{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + }, + hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}}, + test: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score", + }, + { + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelServiceS1}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Annotations: hardPodAffinity}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Annotations: hardPodAffinity}}, + }, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + }, + hardPodAffinityWeight: 0, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, + test: "Hard Pod Affinity symmetry: hard pod affinity symmetry is closed(weights 0), then nodes that match the hard pod affinity symmetry rules, get same score with those not match", + }, + } + for _, test := range tests { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods, test.nodes) + ipa := InterPodAffinity{ + info: FakeNodeListInfo(test.nodes), + nodeLister: algorithm.FakeNodeLister(test.nodes), + podLister: algorithm.FakePodLister(test.pods), + hardPodAffinityWeight: test.hardPodAffinityWeight, + } + list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("%s: \nexpected \n\t%#v, \ngot \n\t%#v\n", test.test, test.expectedList, list) + } + } +} diff --git a/plugin/pkg/scheduler/algorithm/priorities/metadata.go b/plugin/pkg/scheduler/algorithm/priorities/metadata.go index ef8143e68e3..112501bb313 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/metadata.go +++ b/plugin/pkg/scheduler/algorithm/priorities/metadata.go @@ -41,6 +41,6 @@ func PriorityMetadata(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.Nod return &priorityMetadata{ nonZeroRequest: getNonZeroRequests(pod), podTolerations: tolerations, - affinity: pod.Spec.Affinity, + affinity: schedulercache.ReconcileAffinity(pod), } } diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go b/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go index bd361af66cf..4ee8a80a84c 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go @@ -42,7 +42,7 @@ func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *s affinity = priorityMeta.affinity } else { // We couldn't parse metadata - fallback to the podspec. 
- affinity = pod.Spec.Affinity + affinity = schedulercache.ReconcileAffinity(pod) } var count int32 diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go b/plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go index 5f81ebebe6c..66776eeacd5 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go @@ -21,6 +21,7 @@ import ( "testing" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/kubernetes/pkg/api/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" @@ -177,3 +178,147 @@ func TestNodeAffinityPriority(t *testing.T) { } } } + +// TODO: remove when alpha support for affinity is removed +func TestNodeAffinityAnnotationsPriority(t *testing.T) { + utilfeature.DefaultFeatureGate.Set("AffinityInAnnotations=true") + label1 := map[string]string{"foo": "bar"} + label2 := map[string]string{"key": "value"} + label3 := map[string]string{"az": "az1"} + label4 := map[string]string{"abc": "az11", "def": "az22"} + label5 := map[string]string{"foo": "bar", "key": "value", "az": "az1"} + + affinity1 := map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [ + { + "weight": 2, + "preference": { + "matchExpressions": [ + { + "key": "foo", + "operator": "In", "values": ["bar"] + } + ] + } + } + ]}}`, + } + + affinity2 := map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [ + { + "weight": 2, + "preference": {"matchExpressions": [ + { + "key": "foo", + "operator": "In", "values": ["bar"] + } + ]} + }, + { + "weight": 4, + "preference": {"matchExpressions": [ + { + "key": "key", + "operator": "In", "values": ["value"] + } + ]} + }, + { + "weight": 5, + "preference": {"matchExpressions": [ + { + "key": "foo", + "operator": "In", "values": ["bar"] + }, + { + "key": "key", + "operator": "In", "values": ["value"] + }, + { + "key": "az", + "operator": "In", "values": ["az1"] + } + ]} + } + ]}}`, + } + + tests := []struct { + pod *v1.Pod + nodes []*v1.Node + expectedList schedulerapi.HostPriorityList + test string + }{ + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{}, + }, + }, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label1}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}}, + }, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, + test: "all machines are same priority as NodeAffinity is nil", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: affinity1, + }, + }, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label4}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}}, + }, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, + test: "no machine matches the preferred scheduling requirements in NodeAffinity of pod so all machines' priority is zero", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: affinity1, + }, + }, + nodes: []*v1.Node{ + {ObjectMeta: 
metav1.ObjectMeta{Name: "machine1", Labels: label1}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: label3}}, + }, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, + test: "only machine1 matches the preferred scheduling requirements of pod", + }, + { + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: affinity2, + }, + }, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: label1}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: label5}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: label2}}, + }, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: 10}, {Host: "machine2", Score: 3}}, + test: "all machines matches the preferred scheduling requirements of pod but with different priorities ", + }, + } + + for _, test := range tests { + nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(nil, test.nodes) + nap := priorityFunction(CalculateNodeAffinityPriorityMap, CalculateNodeAffinityPriorityReduce) + list, err := nap(test.pod, nodeNameToInfo, test.nodes) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if !reflect.DeepEqual(test.expectedList, list) { + t.Errorf("%s: \nexpected %#v, \ngot %#v", test.test, test.expectedList, list) + } + } +} diff --git a/plugin/pkg/scheduler/schedulercache/BUILD b/plugin/pkg/scheduler/schedulercache/BUILD index 214ad6dea17..85633d416d0 100644 --- a/plugin/pkg/scheduler/schedulercache/BUILD +++ b/plugin/pkg/scheduler/schedulercache/BUILD @@ -14,23 +14,29 @@ go_library( "cache.go", "interface.go", "node_info.go", + "reconcile_affinity.go", "util.go", ], tags = ["automanaged"], deps = [ "//pkg/api/v1:go_default_library", + "//pkg/features:go_default_library", "//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library", "//vendor:github.com/golang/glog", "//vendor:k8s.io/apimachinery/pkg/api/resource", "//vendor:k8s.io/apimachinery/pkg/labels", "//vendor:k8s.io/apimachinery/pkg/util/wait", + "//vendor:k8s.io/apiserver/pkg/util/feature", "//vendor:k8s.io/client-go/tools/cache", ], ) go_test( name = "go_default_test", - srcs = ["cache_test.go"], + srcs = [ + "cache_test.go", + "reconcile_affinity_test.go", + ], library = ":go_default_library", tags = ["automanaged"], deps = [ @@ -39,6 +45,7 @@ go_test( "//vendor:k8s.io/apimachinery/pkg/api/resource", "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1", "//vendor:k8s.io/apimachinery/pkg/labels", + "//vendor:k8s.io/apiserver/pkg/util/feature", ], ) diff --git a/plugin/pkg/scheduler/schedulercache/node_info.go b/plugin/pkg/scheduler/schedulercache/node_info.go index 99691bfb59c..dd4ccf02b2c 100644 --- a/plugin/pkg/scheduler/schedulercache/node_info.go +++ b/plugin/pkg/scheduler/schedulercache/node_info.go @@ -217,7 +217,7 @@ func (n *NodeInfo) String() string { } func hasPodAffinityConstraints(pod *v1.Pod) bool { - affinity := pod.Spec.Affinity + affinity := ReconcileAffinity(pod) return affinity != nil && (affinity.PodAffinity != nil || affinity.PodAntiAffinity != nil) } diff --git a/plugin/pkg/scheduler/schedulercache/reconcile_affinity.go b/plugin/pkg/scheduler/schedulercache/reconcile_affinity.go new file mode 100644 index 00000000000..8009075571f --- /dev/null +++ b/plugin/pkg/scheduler/schedulercache/reconcile_affinity.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schedulercache + +import ( + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/features" +) + +// ReconcileAffinity reconciles API and annotation affinity definitions. +// When the alpha affinity feature is not enabled, always take affinity +// from PodSpec. When the alpha affinity feature is enabled and affinity +// is not set in PodSpec, take affinity from the annotation. +// When the alpha affinity feature is enabled and affinity is set in PodSpec, +// take node affinity, pod affinity, and pod anti-affinity individually +// using the following rule: take affinity from PodSpec if it is defined, +// otherwise take it from the annotation if it is defined. +// TODO: remove when alpha support for affinity is removed +func ReconcileAffinity(pod *v1.Pod) *v1.Affinity { + affinity := pod.Spec.Affinity + if utilfeature.DefaultFeatureGate.Enabled(features.AffinityInAnnotations) { + annotationsAffinity, _ := v1.GetAffinityFromPodAnnotations(pod.Annotations) + if affinity == nil && annotationsAffinity != nil { + affinity = annotationsAffinity + } else if annotationsAffinity != nil { + if affinity != nil && affinity.NodeAffinity == nil && annotationsAffinity.NodeAffinity != nil { + affinity.NodeAffinity = annotationsAffinity.NodeAffinity + } + if affinity != nil && affinity.PodAffinity == nil && annotationsAffinity.PodAffinity != nil { + affinity.PodAffinity = annotationsAffinity.PodAffinity + } + if affinity != nil && affinity.PodAntiAffinity == nil && annotationsAffinity.PodAntiAffinity != nil { + affinity.PodAntiAffinity = annotationsAffinity.PodAntiAffinity + } + } + } + return affinity +} diff --git a/plugin/pkg/scheduler/schedulercache/reconcile_affinity_test.go b/plugin/pkg/scheduler/schedulercache/reconcile_affinity_test.go new file mode 100644 index 00000000000..c8c7627d807 --- /dev/null +++ b/plugin/pkg/scheduler/schedulercache/reconcile_affinity_test.go @@ -0,0 +1,151 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+*/ + +package schedulercache + +import ( + "fmt" + "reflect" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/api/v1" +) + +// TODO: remove when alpha support for affinity is removed +func TestReconcileAffinity(t *testing.T) { + baseAffinity := &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "foo", + Operator: v1.NodeSelectorOpIn, + Values: []string{"bar", "value2"}, + }, + }, + }, + }, + }, + }, + PodAffinity: &v1.PodAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "security", + Operator: metav1.LabelSelectorOpDoesNotExist, + Values: []string{"securityscan"}, + }, + }, + }, + TopologyKey: "topologyKey1", + }, + }, + }, + PodAntiAffinity: &v1.PodAntiAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{ + { + LabelSelector: &metav1.LabelSelector{ + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: "service", + Operator: metav1.LabelSelectorOpIn, + Values: []string{"S1", "value2"}, + }, + }, + }, + TopologyKey: "topologyKey2", + Namespaces: []string{"ns1"}, + }, + }, + }, + } + + nodeAffinityAnnotation := map[string]string{ + v1.AffinityAnnotationKey: ` + {"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [ + { + "weight": 2, + "preference": {"matchExpressions": [ + { + "key": "foo", + "operator": "In", "values": ["bar"] + } + ]} + } + ]}}`, + } + + testCases := []struct { + pod *v1.Pod + expected *v1.Affinity + annotationsEnabled bool + }{ + { + // affinity is set in both PodSpec and annotations; take from PodSpec. + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: nodeAffinityAnnotation, + }, + Spec: v1.PodSpec{ + Affinity: baseAffinity, + }, + }, + expected: baseAffinity, + annotationsEnabled: true, + }, + { + // affinity is only set in annotation; take from annotation. + pod: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: nodeAffinityAnnotation, + }, + }, + expected: &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{ + { + Weight: 2, + Preference: v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "foo", + Operator: v1.NodeSelectorOpIn, + Values: []string{"bar"}, + }, + }, + }, + }, + }, + }, + }, + annotationsEnabled: true, + }, + } + + for i, tc := range testCases { + utilfeature.DefaultFeatureGate.Set(fmt.Sprintf("AffinityInAnnotations=%t", tc.annotationsEnabled)) + affinity := ReconcileAffinity(tc.pod) + if !reflect.DeepEqual(affinity, tc.expected) { + t.Errorf("[%v] Did not get expected affinity:\n\n%v\n\n. 
got:\n\n %v", i, tc.expected, affinity) + } + } +} diff --git a/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go b/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go index c060766c0a4..ebb81425d6d 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go +++ b/staging/src/k8s.io/apiserver/pkg/util/feature/feature_gate.go @@ -107,6 +107,11 @@ type FeatureGate interface { // owner: @pweil- // alpha: v1.5 ExperimentalHostUserNamespaceDefaulting() bool + + // owner: @davidopp + // alpha: v1.6 + // TODO: remove when alpha support for affinity is removed + AffinityInAnnotations() bool } // featureGate implements FeatureGate as well as pflag.Value for flag parsing.