Enabled Autoscaling test that uses mem utilization metric

Piotr Szczesniak 2015-08-19 15:05:13 +02:00
parent c69dff8b69
commit 265d3da701


@@ -34,6 +34,7 @@ var _ = Describe("Autoscaling", func() {
 	f := NewFramework("autoscaling")
 	var nodeCount int
 	var coresPerNode int
+	var memCapacityMb int

 	BeforeEach(func() {
 		SkipUnlessProviderIs("gce")
@@ -42,8 +43,10 @@ var _ = Describe("Autoscaling", func() {
 		expectNoError(err)
 		nodeCount = len(nodes.Items)
 		Expect(nodeCount).NotTo(BeZero())
-		res := nodes.Items[0].Status.Capacity[api.ResourceCPU]
-		coresPerNode = int((&res).MilliValue() / 1000)
+		cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
+		mem := nodes.Items[0].Status.Capacity[api.ResourceMemory]
+		coresPerNode = int((&cpu).MilliValue() / 1000)
+		memCapacityMb = int((&mem).Value() / 1024 / 1024)
 	})

 	AfterEach(func() {
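
Note (not part of this commit): the capacity fields above are resource.Quantity values, so MilliValue() returns CPU in millicores and Value() returns memory in bytes. A minimal standalone sketch of the same conversion, using hypothetical node values:

	package main

	import "fmt"

	func main() {
		// Hypothetical node reporting 1 core and 1740Mi of memory.
		milliCPU := int64(1000)       // as MilliValue() would report: 1000 millicores == 1 core
		memBytes := int64(1740 << 20) // as Value() would report: memory in bytes

		coresPerNode := int(milliCPU / 1000)
		memCapacityMb := int(memBytes / 1024 / 1024)
		fmt.Println(coresPerNode, memCapacityMb) // prints: 1 1740
	}
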
@@ -70,14 +73,16 @@ var _ = Describe("Autoscaling", func() {
 		expectNoError(waitForClusterSize(f.Client, 1, 20*time.Minute))
 	})

-	It("[Skipped] should scale cluster size based on memory utilization", func() {
-		setUpAutoscaler("memory/node_utilization", 0.5, 1, 10)
-		ConsumeMemory(f, "memory-utilization", 2)
-		expectNoError(waitForClusterSize(f.Client, 2, 20*time.Minute))
+	It("[Autoscaling] should scale cluster size based on memory utilization", func() {
+		setUpAutoscaler("memory/node_utilization", 0.5, nodeCount, nodeCount+1)
+
+		// Consume 60% of total memory capacity in 256MB chunks.
+		chunks := memCapacityMb * nodeCount * 6 / 10 / 256
+		ConsumeMemory(f, "memory-utilization", chunks)
+		expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))

 		StopConsuming(f, "memory-utilization")
-		expectNoError(waitForClusterSize(f.Client, 1, 20*time.Minute))
+		expectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))
 	})

 	It("[Skipped] should scale cluster size based on memory reservation", func() {
@@ -152,8 +157,10 @@ func ConsumeCpu(f *Framework, id string, cores int) {
 	expectNoError(RunRC(*config))
 }

-func ConsumeMemory(f *Framework, id string, gigabytes int) {
-	By(fmt.Sprintf("Running RC which consumes %v GB of memory", gigabytes))
+// Consume <chunks> chunks of size 256MB.
+func ConsumeMemory(f *Framework, id string, chunks int) {
+	CreateService(f, id)
+	By(fmt.Sprintf("Running RC which consumes %v MB of memory in 256MB chunks", chunks*256))
 	config := &RCConfig{
 		Client: f.Client,
 		Name:   id,
@@ -161,7 +168,7 @@ func ConsumeMemory(f *Framework, id string, gigabytes int) {
 		Timeout:  10 * time.Minute,
 		Image:    "jess/stress",
 		Command:  []string{"stress", "-m", "1"},
-		Replicas: 4 * gigabytes,
+		Replicas: chunks,
 	}
 	expectNoError(RunRC(*config))
 }
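
Note (not part of this commit): the reason one replica maps to one 256MB chunk is that "stress -m 1" starts a single memory worker, and the stress tool's default per-worker allocation is 256MB, so Replicas: chunks spreads roughly chunks * 256MB of memory pressure across the cluster (e.g. the 26-chunk example above becomes 26 pods holding about 6.6GB in total).
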