From 77d9ce230c3e6b5e48887f99beed8c7a5159590e Mon Sep 17 00:00:00 2001
From: Piotr Szczesniak
Date: Wed, 14 Oct 2015 20:45:32 +0200
Subject: [PATCH] Fixed flakiness in cluster size autoscaling e2e

---
 test/e2e/autoscaling_utils.go                 | 18 ++++++------
 ...scaling.go => cluster_size_autoscaling.go} | 28 ++++++++++++++-----
 2 files changed, 30 insertions(+), 16 deletions(-)
 rename test/e2e/{autoscaling.go => cluster_size_autoscaling.go} (86%)

diff --git a/test/e2e/autoscaling_utils.go b/test/e2e/autoscaling_utils.go
index 14a1ac78e84..cbd94c190c2 100644
--- a/test/e2e/autoscaling_utils.go
+++ b/test/e2e/autoscaling_utils.go
@@ -63,22 +63,22 @@ type ResourceConsumer struct {
 	requestSizeInMegabytes int
 }
 
-func NewDynamicResourceConsumer(name string, replicas, initCPU, initMemory int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
-	return newResourceConsumer(name, replicas, initCPU, initMemory, dynamicConsumptionTimeInSeconds, dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, cpuLimit, memLimit, framework)
+func NewDynamicResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
+	return newResourceConsumer(name, replicas, initCPUTotal, initMemoryTotal, dynamicConsumptionTimeInSeconds, dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, cpuLimit, memLimit, framework)
 }
 
-func NewStaticResourceConsumer(name string, replicas, initCPU, initMemory int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
-	return newResourceConsumer(name, replicas, initCPU, initMemory, staticConsumptionTimeInSeconds, initCPU/replicas, initMemory/replicas, cpuLimit, memLimit, framework)
+func NewStaticResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
+	return newResourceConsumer(name, replicas, initCPUTotal, initMemoryTotal, staticConsumptionTimeInSeconds, initCPUTotal/replicas, initMemoryTotal/replicas, cpuLimit, memLimit, framework)
 }
 
 /*
 NewResourceConsumer creates new ResourceConsumer
-initCPU argument is in millicores
-initMemory argument is in megabytes
+initCPUTotal argument is in millicores
+initMemoryTotal argument is in megabytes
 memLimit argument is in megabytes, memLimit is a maximum amount of memory that can be consumed by a single pod
 cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
 */
-func newResourceConsumer(name string, replicas, initCPU, initMemory, consumptionTimeInSeconds, requestSizeInMillicores, requestSizeInMegabytes int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
+func newResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal, consumptionTimeInSeconds, requestSizeInMillicores, requestSizeInMegabytes int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
 	runServiceAndRCForResourceConsumer(framework.Client, framework.Namespace.Name, name, replicas, cpuLimit, memLimit)
 	rc := &ResourceConsumer{
 		name: name,
@@ -93,9 +93,9 @@ func newResourceConsumer(name string, replicas, initCPU, initMemory, consumption
 		requestSizeInMegabytes: requestSizeInMegabytes,
 	}
 	go rc.makeConsumeCPURequests()
-	rc.ConsumeCPU(initCPU)
+	rc.ConsumeCPU(initCPUTotal)
 	go rc.makeConsumeMemRequests()
-	rc.ConsumeMem(initMemory)
+	rc.ConsumeMem(initMemoryTotal)
 	return rc
 }
 
diff --git a/test/e2e/autoscaling.go b/test/e2e/cluster_size_autoscaling.go
similarity index 86%
rename from test/e2e/autoscaling.go
rename to test/e2e/cluster_size_autoscaling.go
index ba8fc94ba0a..4bad9d081d3 100644
--- a/test/e2e/autoscaling.go
+++ b/test/e2e/cluster_size_autoscaling.go
@@ -61,11 +61,13 @@ var _ = Describe("Autoscaling", func() {
 		setUpAutoscaler("cpu/node_utilization", 0.4, nodeCount, nodeCount+1)
 
 		// Consume 50% CPU
-		millicoresPerReplica := 500
-		rc := NewStaticResourceConsumer("cpu-utilization", nodeCount*coresPerNode, millicoresPerReplica*nodeCount*coresPerNode, 0, int64(millicoresPerReplica), 100, f)
-		expectNoError(waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout))
+		rcs := createConsumingRCs(f, "cpu-utilization", nodeCount*coresPerNode, 500, 0)
+		err := waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)
+		for _, rc := range rcs {
+			rc.CleanUp()
+		}
+		expectNoError(err)
 
-		rc.CleanUp()
 		expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
 	})
 
@@ -84,10 +86,13 @@ var _ = Describe("Autoscaling", func() {
 
 		// Consume 60% of total memory capacity
 		megabytesPerReplica := int(memCapacityMb * 6 / 10 / coresPerNode)
-		rc := NewStaticResourceConsumer("mem-utilization", nodeCount*coresPerNode, 0, megabytesPerReplica*nodeCount*coresPerNode, 100, int64(megabytesPerReplica+100), f)
-		expectNoError(waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout))
+		rcs := createConsumingRCs(f, "mem-utilization", nodeCount*coresPerNode, 0, megabytesPerReplica)
+		err := waitForClusterSize(f.Client, nodeCount+1, scaleUpTimeout)
+		for _, rc := range rcs {
+			rc.CleanUp()
+		}
+		expectNoError(err)
 
-		rc.CleanUp()
 		expectNoError(waitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
 	})
 
@@ -116,6 +121,15 @@ func setUpAutoscaler(metric string, target float64, min, max int) {
 	expectNoError(err, "Output: "+string(out))
 }
 
+func createConsumingRCs(f *Framework, name string, count, cpuPerReplica, memPerReplica int) []*ResourceConsumer {
+	var res []*ResourceConsumer
+	for i := 1; i <= count; i++ {
+		name := fmt.Sprintf("%s-%d", name, i)
+		res = append(res, NewStaticResourceConsumer(name, 1, cpuPerReplica, memPerReplica, int64(cpuPerReplica), int64(memPerReplica+100), f))
+	}
+	return res
+}
+
 func cleanUpAutoscaler() {
 	By("Removing autoscaler")
 	out, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "stop-autoscaling",