From 53ab8fd86d7a97aaba78c9e524e5f73136b10b79 Mon Sep 17 00:00:00 2001
From: Aleksandra Malinowska
Date: Tue, 20 Jun 2017 14:23:38 +0200
Subject: [PATCH] Create respecting pod anti affinity test

---
 .../autoscaling/cluster_size_autoscaling.go   | 64 ++++++++++++++++++-
 test/utils/runners.go                         |  2 +
 2 files changed, 63 insertions(+), 3 deletions(-)

diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go
index c240a76c2f6..ec1f53bc988 100644
--- a/test/e2e/autoscaling/cluster_size_autoscaling.go
+++ b/test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -145,7 +145,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 			}
 		}
 		Expect(eventFound).Should(Equal(true))
-		// Verify, that cluster size is not changed.
+		// Verify that cluster size is not changed
 		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 			func(size int) bool { return size <= nodeCount }, time.Second))
 	})
@@ -154,7 +154,7 @@
 		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false, 1*time.Second)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
 
-		// Verify, that cluster size is increased
+		// Verify that cluster size is increased
 		framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
 			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout, unready))
 		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
@@ -233,6 +233,25 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
 	})
 
+	It("should increase cluster size if pods are pending due to pod anti-affinity [Feature:ClusterSizeAutoscalingAntiAffinityScaleUp]", func() {
+		pods := nodeCount
+		newPods := 2
+		labels := map[string]string{
+			"anti-affinity": "yes",
+		}
+		By("starting a pod with anti-affinity on each node")
+		framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
+		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "some-pod")
+		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
+
+		By("scheduling extra pods with anti-affinity to existing ones")
+		framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels))
+		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "extra-pod")
+
+		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
+		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+newPods, scaleUpTimeout))
+	})
+
 	It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 		labelKey := "cluster-autoscaling-test.special-node"
 		labelValue := "true"
@@ -873,7 +892,46 @@ func makeNodeSchedulable(c clientset.Interface, node *v1.Node) error {
 	return fmt.Errorf("Failed to remove taint from node in allowed number of retries")
 }
 
-// Creat an RC running a given number of pods on each node without adding any constraint forcing
+// Create an RC running a given number of pods with anti-affinity
+func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id string, podLabels, antiAffinityLabels map[string]string) error {
+	config := &testutils.RCConfig{
+		Affinity:       buildAntiAffinity(antiAffinityLabels),
+		Client:         f.ClientSet,
+		InternalClient: f.InternalClientset,
+		Name:           id,
+		Namespace:      namespace,
+		Timeout:        scaleUpTimeout,
+		Image:          framework.GetPauseImageName(f.ClientSet),
+		Replicas:       pods,
+		Labels:         podLabels,
+	}
+	err := framework.RunRC(*config)
+	if err != nil {
+		return err
+	}
+	_, err = f.ClientSet.Core().ReplicationControllers(namespace).Get(id, metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func buildAntiAffinity(labels map[string]string) *v1.Affinity {
+	return &v1.Affinity{
+		PodAntiAffinity: &v1.PodAntiAffinity{
+			RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
+				{
+					LabelSelector: &metav1.LabelSelector{
+						MatchLabels: labels,
+					},
+					TopologyKey: "kubernetes.io/hostname",
+				},
+			},
+		},
+	}
+}
+
+// Create an RC running a given number of pods on each node without adding any constraint forcing
 // such pod distribution. This is meant to create a bunch of underutilized (but not unused) nodes
 // with pods that can be rescheduled on different nodes.
 // This is achieved using the following method:
diff --git a/test/utils/runners.go b/test/utils/runners.go
index ae3e5990aea..1baa72eac32 100644
--- a/test/utils/runners.go
+++ b/test/utils/runners.go
@@ -111,6 +111,7 @@ type RunObjectConfig interface {
 }
 
 type RCConfig struct {
+	Affinity       *v1.Affinity
 	Client         clientset.Interface
 	InternalClient internalclientset.Interface
 	Image          string
@@ -519,6 +520,7 @@ func (config *RCConfig) create() error {
 				Labels: map[string]string{"name": config.Name},
 			},
 			Spec: v1.PodSpec{
+				Affinity: config.Affinity,
 				Containers: []v1.Container{
 					{
 						Name: config.Name,
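
Note on how the new test forces a scale-up: one "some-pod" replica carrying the anti-affinity: yes label is started per node, and the "extra-pod" replicas require anti-affinity against that same label on the kubernetes.io/hostname topology, so they cannot schedule on any existing node and the autoscaler has to add newPods nodes. The sketch below is a minimal, standalone illustration of the affinity object that buildAntiAffinity constructs and that RCConfig.Affinity now plumbs into the pod template. It is not part of this patch: the package layout, pod name, pause image tag, and the k8s.io/api / k8s.io/apimachinery import paths are assumptions for illustration (the e2e test itself goes through framework.RunRC and framework.GetPauseImageName).

// Sketch only: shows the pod-level anti-affinity spec equivalent to buildAntiAffinity's output.
package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	labels := map[string]string{"anti-affinity": "yes"}

	pod := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:   "anti-affinity-example", // hypothetical name, not from the patch
			Labels: labels,
		},
		Spec: v1.PodSpec{
			// Same shape as buildAntiAffinity's return value: a required (hard)
			// anti-affinity term over the node hostname topology, so two pods
			// matching the selector can never share a node.
			Affinity: &v1.Affinity{
				PodAntiAffinity: &v1.PodAntiAffinity{
					RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{
						{
							LabelSelector: &metav1.LabelSelector{MatchLabels: labels},
							TopologyKey:   "kubernetes.io/hostname",
						},
					},
				},
			},
			Containers: []v1.Container{
				// The e2e test uses the pause image via framework.GetPauseImageName;
				// the image reference here is only a placeholder.
				{Name: "pause", Image: "registry.k8s.io/pause:3.9"},
			},
		},
	}

	// Print the affinity block so the generated scheduling constraint is visible.
	out, err := json.MarshalIndent(pod.Spec.Affinity, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}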