Use Resource Consumer for tests in autoscaling.go
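
Replaces the hand-rolled CreateService/ConsumeCpu/ConsumeMemory helpers (stress-image RCs) with the shared ResourceConsumer e2e utility. A minimal sketch of the new CPU-test pattern, mirroring the call sites in this diff; the positional parameters of NewStaticResourceConsumer are read off those call sites, so the meanings noted below are assumptions, not the helper's documented signature:

	// Assumed parameter order (inferred from this diff): name, replicas,
	// total CPU in millicores, total memory in MB, per-pod CPU limit,
	// per-pod memory limit, test framework.
	millicoresPerReplica := 600
	rc := NewStaticResourceConsumer("rc", nodeCount*coresPerNode,
		millicoresPerReplica*nodeCount*coresPerNode, 0,
		int64(millicoresPerReplica), 100, f)
	// The load should trigger a scale-up; CleanUp removes the consumer pods
	// so the cluster can scale back down.
	expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))
	rc.CleanUp()
	expectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))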

Ewa Socala 2015-09-21 16:41:07 +02:00
parent a010e91913
commit a9a52a9750

autoscaling.go

@@ -24,7 +24,6 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
-	"k8s.io/kubernetes/pkg/util"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -54,12 +53,14 @@ var _ = Describe("Autoscaling", func() {
 	})
 	It("[Skipped][Autoscaling Suite] should scale cluster size based on cpu utilization", func() {
-		setUpAutoscaler("cpu/node_utilization", 0.7, nodeCount, nodeCount+1)
+		setUpAutoscaler("cpu/node_utilization", 0.5, nodeCount, nodeCount+1)
-		ConsumeCpu(f, "cpu-utilization", nodeCount*coresPerNode)
+		// Consume 60% CPU
+		millicoresPerReplica := 600
+		rc := NewStaticResourceConsumer("rc", nodeCount*coresPerNode, millicoresPerReplica*nodeCount*coresPerNode, 0, int64(millicoresPerReplica), 100, f)
 		expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))
-		StopConsuming(f, "cpu-utilization")
+		rc.CleanUp()
 		expectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))
 	})
@@ -76,12 +77,12 @@ var _ = Describe("Autoscaling", func() {
 	It("[Skipped][Autoscaling Suite] should scale cluster size based on memory utilization", func() {
 		setUpAutoscaler("memory/node_utilization", 0.5, nodeCount, nodeCount+1)
-		// Consume 60% of total memory capacity in 256MB chunks.
-		chunks := memCapacityMb * nodeCount * 6 / 10 / 256
-		ConsumeMemory(f, "memory-utilization", chunks)
+		// Consume 60% of total memory capacity
+		megabytesPerReplica := int(memCapacityMb * 6 / 10 / coresPerNode)
+		rc := NewStaticResourceConsumer("rc", nodeCount*coresPerNode, 0, megabytesPerReplica*nodeCount*coresPerNode, 100, int64(megabytesPerReplica+100), f)
 		expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))
-		StopConsuming(f, "memory-utilization")
+		rc.CleanUp()
 		expectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))
 	})
@@ -120,59 +121,6 @@ func cleanUpAutoscaler() {
 	expectNoError(err, "Output: "+string(out))
 }
 
-func CreateService(f *Framework, name string) {
-	By("Running sevice" + name)
-	service := &api.Service{
-		ObjectMeta: api.ObjectMeta{
-			Name: name,
-		},
-		Spec: api.ServiceSpec{
-			Selector: map[string]string{
-				"name": name,
-			},
-			Ports: []api.ServicePort{{
-				Port:       8080,
-				TargetPort: util.NewIntOrStringFromInt(8080),
-			}},
-		},
-	}
-	_, err := f.Client.Services(f.Namespace.Name).Create(service)
-	Expect(err).NotTo(HaveOccurred())
-}
-
-func ConsumeCpu(f *Framework, id string, cores int) {
-	CreateService(f, id)
-	By(fmt.Sprintf("Running RC which consumes %v cores", cores))
-	config := &RCConfig{
-		Client:     f.Client,
-		Name:       id,
-		Namespace:  f.Namespace.Name,
-		Timeout:    10 * time.Minute,
-		Image:      "jess/stress",
-		Command:    []string{"stress", "-c", "1"},
-		Replicas:   cores,
-		CpuRequest: 500,
-		CpuLimit:   1000,
-	}
-	expectNoError(RunRC(*config))
-}
-
-// Consume <chunks> chunks of size 256MB.
-func ConsumeMemory(f *Framework, id string, chunks int) {
-	CreateService(f, id)
-	By(fmt.Sprintf("Running RC which consumes %v MB of memory in 256MB chunks", chunks*256))
-	config := &RCConfig{
-		Client:    f.Client,
-		Name:      id,
-		Namespace: f.Namespace.Name,
-		Timeout:   10 * time.Minute,
-		Image:     "jess/stress",
-		Command:   []string{"stress", "-m", "1", "--vm-hang", "0"},
-		Replicas:  chunks,
-	}
-	expectNoError(RunRC(*config))
-}
-
 func ReserveCpu(f *Framework, id string, millicores int) {
 	By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
 	config := &RCConfig{