Enabled cluster autoscaling e2e tests based on cpu/mem reservation

Piotr Szczesniak 2015-09-25 11:41:26 +02:00
parent 6a04145362
commit 67e5aca7f3


@@ -65,13 +65,13 @@ var _ = Describe("Autoscaling", func() {
 	})
 
 	It("[Skipped] should scale cluster size based on cpu reservation", func() {
-		setUpAutoscaler("cpu/node_reservation", 0.7, 1, 10)
-		ReserveCpu(f, "cpu-reservation", 800)
-		expectNoError(waitForClusterSize(f.Client, 2, 20*time.Minute))
+		setUpAutoscaler("cpu/node_reservation", 0.5, nodeCount, nodeCount+1)
+		ReserveCpu(f, "cpu-reservation", 600*nodeCount*coresPerNode)
+		expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))
 
-		StopConsuming(f, "cpu-reservation")
-		expectNoError(waitForClusterSize(f.Client, 1, 20*time.Minute))
+		expectNoError(DeleteRC(f.Client, f.Namespace.Name, "cpu-reservation"))
+		expectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))
 	})
 
 	It("[Skipped][Autoscaling Suite] should scale cluster size based on memory utilization", func() {
@@ -87,13 +87,13 @@ var _ = Describe("Autoscaling", func() {
 	})
 
 	It("[Skipped] should scale cluster size based on memory reservation", func() {
-		setUpAutoscaler("memory/node_reservation", 0.5, 1, 10)
-		ReserveMemory(f, "memory-reservation", 2)
-		expectNoError(waitForClusterSize(f.Client, 2, 20*time.Minute))
+		setUpAutoscaler("memory/node_reservation", 0.5, nodeCount, nodeCount+1)
+		ReserveMemory(f, "memory-reservation", nodeCount*memCapacityMb*6/10)
+		expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))
 
-		StopConsuming(f, "memory-reservation")
-		expectNoError(waitForClusterSize(f.Client, 1, 20*time.Minute))
+		expectNoError(DeleteRC(f.Client, f.Namespace.Name, "memory-reservation"))
+		expectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))
 	})
 })
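
The memory test follows the same pattern: ReserveMemory now takes megabytes and runs one 500 MB pause pod per 500 MB requested, so nodeCount*memCapacityMb*6/10 reserves about 60% of cluster memory against the 0.5 target. A sketch under an assumed per-node capacity (memCapacityMb is the suite's variable; 7500 is purely illustrative):

package main

import "fmt"

func main() {
	nodeCount, memCapacityMb := 3, 7500 // illustrative values

	megabytes := nodeCount * memCapacityMb * 6 / 10 // argument passed to ReserveMemory
	replicas := megabytes / 500                     // ReserveMemory: one 500 MB pause pod per 500 MB
	reservation := float64(replicas*500) / float64(nodeCount*memCapacityMb)
	fmt.Printf("replicas=%d, memory reservation=%.2f (scale-up target 0.5)\n", replicas, reservation)
	// Prints 0.60, pushing the cluster from nodeCount to nodeCount+1 nodes.
}
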
@@ -124,35 +124,27 @@ func cleanUpAutoscaler() {
 func ReserveCpu(f *Framework, id string, millicores int) {
 	By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
 	config := &RCConfig{
-		Client:    f.Client,
-		Name:      id,
-		Namespace: f.Namespace.Name,
-		Timeout:   10 * time.Minute,
-		Image:     "gcr.io/google_containers/pause",
-		Replicas:  millicores / 100,
-		CpuLimit:  100,
+		Client:     f.Client,
+		Name:       id,
+		Namespace:  f.Namespace.Name,
+		Timeout:    10 * time.Minute,
+		Image:      "gcr.io/google_containers/pause",
+		Replicas:   millicores / 100,
+		CpuRequest: 100,
 	}
 	expectNoError(RunRC(*config))
 }
 
-func ReserveMemory(f *Framework, id string, gigabytes int) {
-	By(fmt.Sprintf("Running RC which reserves %v GB of memory", gigabytes))
+func ReserveMemory(f *Framework, id string, megabytes int) {
+	By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
 	config := &RCConfig{
-		Client:    f.Client,
-		Name:      id,
-		Namespace: f.Namespace.Name,
-		Timeout:   10 * time.Minute,
-		Image:     "gcr.io/google_containers/pause",
-		Replicas:  5 * gigabytes,
-		MemLimit:  200 * 1024 * 1024,
+		Client:     f.Client,
+		Name:       id,
+		Namespace:  f.Namespace.Name,
+		Timeout:    10 * time.Minute,
+		Image:      "gcr.io/google_containers/pause",
+		Replicas:   megabytes / 500,
+		MemRequest: 500 * 1024 * 1024,
 	}
 	expectNoError(RunRC(*config))
 }
-
-func StopConsuming(f *Framework, id string) {
-	By("Stopping service " + id)
-	err := f.Client.Services(f.Namespace.Name).Delete(id)
-	Expect(err).NotTo(HaveOccurred())
-
-	By("Stopping RC " + id)
-	expectNoError(DeleteRC(f.Client, f.Namespace.Name, id))
-}
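
Two details worth noting in the helper changes. First, CpuLimit/MemLimit became CpuRequest/MemRequest, presumably because the reservation metrics and the scheduler both key off what pods request; a pod carrying only a limit would not hold capacity the same way. Second, both helpers size the RC with integer division, so the realized reservation rounds down to whole pods. A quick illustration of that rounding (values are examples, not from the commit):

package main

import "fmt"

func main() {
	// Replicas: millicores / 100 and megabytes / 500 truncate toward zero,
	// so the reserved amount can land a little under what was asked for.
	fmt.Println(1250 / 100) // 12 pods -> 1200 millicores actually reserved
	fmt.Println(1999 / 500) // 3 pods  -> 1500 MB actually reserved
}

StopConsuming could be dropped entirely, apparently because the reservation RCs never created a Service to delete; removing the RC via DeleteRC is the whole teardown.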