Merge pull request #12783 from piosz/autoscaling_e2e

Enabled Autoscaling e2e test for Jenkins flaky job
Marek Grabowski 2015-08-17 11:23:28 +02:00
commit d310eeaca9
3 changed files with 26 additions and 10 deletions


@@ -96,6 +96,7 @@ GCE_DEFAULT_SKIP_TESTS=(
# The following tests are known to be flaky, and are thus run only in their own
# -flaky- build variants.
GCE_FLAKY_TESTS=(
"Autoscaling"
"ResourceUsage"
)
@@ -186,6 +187,8 @@ case ${JOB_NAME} in
)"}
: ${KUBE_GCE_INSTANCE_PREFIX:="e2e-flaky"}
: ${PROJECT:="k8s-jkns-e2e-gce-flaky"}
+ # Override GCE default for cluster size autoscaling purposes.
+ ENABLE_CLUSTER_MONITORING="googleinfluxdb"
;;
# Runs all non-flaky tests on GCE in parallel.


@@ -60,7 +60,7 @@ var _ = Describe("Autoscaling", func() {
expectNoError(waitForClusterSize(f.Client, nodeCount))
})
It("[Skipped] [Autoscaling] should scale cluster size based on cpu reservation", func() {
It("[Skipped] should scale cluster size based on cpu reservation", func() {
setUpAutoscaler("cpu/node_reservation", 0.7, 1, 10)
ReserveCpu(f, "cpu-reservation", 800)
@@ -70,7 +70,7 @@
expectNoError(waitForClusterSize(f.Client, 1))
})
It("[Skipped] [Autoscaling] should scale cluster size based on memory utilization", func() {
It("[Skipped] should scale cluster size based on memory utilization", func() {
setUpAutoscaler("memory/node_utilization", 0.5, 1, 10)
ConsumeMemory(f, "memory-utilization", 2)
@@ -80,7 +80,7 @@
expectNoError(waitForClusterSize(f.Client, 1))
})
It("[Skipped] [Autoscaling] should scale cluster size based on memory reservation", func() {
It("[Skipped] should scale cluster size based on memory reservation", func() {
setUpAutoscaler("memory/node_reservation", 0.5, 1, 10)
ReserveMemory(f, "memory-reservation", 2)
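
Taken together, these hunks show the shape each autoscaling case follows: configure the autoscaler, apply load, and wait for the cluster size to settle. The sketch below stitches the visible lines of the cpu-reservation case back into one piece; the middle of the test falls between hunks, so it is elided rather than guessed at.

It("[Skipped] should scale cluster size based on cpu reservation", func() {
	// Autoscale on the cpu/node_reservation metric with a 0.7 target,
	// allowing the cluster to range between 1 and 10 nodes.
	setUpAutoscaler("cpu/node_reservation", 0.7, 1, 10)

	// Reserve cpu via an RC (presumably idle pods carrying cpu requests;
	// see the CpuRequest plumbing added to RCConfig and RunRC below).
	ReserveCpu(f, "cpu-reservation", 800)

	// ... scale-up assertion and cleanup elided (they fall between hunks) ...

	// Once the reservation is gone, the cluster should shrink back to one node.
	expectNoError(waitForClusterSize(f.Client, 1))
})
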
@@ -138,13 +138,15 @@ func ConsumeCpu(f *Framework, id string, cores int) {
CreateService(f, id)
By(fmt.Sprintf("Running RC which consumes %v cores", cores))
config := &RCConfig{
- Client:    f.Client,
- Name:      id,
- Namespace: f.Namespace.Name,
- Timeout:   10 * time.Minute,
- Image:     "jess/stress",
- Command:   []string{"stress", "-c", "1"},
- Replicas:  cores,
+ Client:     f.Client,
+ Name:       id,
+ Namespace:  f.Namespace.Name,
+ Timeout:    10 * time.Minute,
+ Image:      "jess/stress",
+ Command:    []string{"stress", "-c", "1"},
+ Replicas:   cores,
+ CpuRequest: 500,
+ CpuLimit:   1000,
}
expectNoError(RunRC(*config))
}
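
ConsumeCpu above is the only load helper visible in this diff; ReserveCpu and ReserveMemory, used by the reservation tests, sit outside it. A plausible sketch of what a cpu-reserving helper could look like on top of the new CpuRequest field follows; the function name, image, replica math, and values are assumptions, not the project's actual helper.

// Hypothetical sketch: reserve cpu without consuming it by running idle pods
// that each carry a cpu request. Only the RCConfig fields themselves come
// from this diff; everything else is assumed.
func reserveCpuSketch(f *Framework, id string, millicores int) {
	CreateService(f, id)
	By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
	config := &RCConfig{
		Client:     f.Client,
		Name:       id,
		Namespace:  f.Namespace.Name,
		Timeout:    10 * time.Minute,
		Image:      "gcr.io/google_containers/pause", // assumed: any idle image would do
		Replicas:   millicores / 100,
		CpuRequest: 100, // each replica requests 100m of cpu but uses none of it
	}
	expectNoError(RunRC(*config))
}
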


@@ -178,7 +178,9 @@ type RCConfig struct {
Timeout time.Duration
PodStatusFile *os.File
Replicas int
+ CpuRequest int64 // millicores
CpuLimit int64 // millicores
+ MemRequest int64 // bytes
MemLimit int64 // bytes
// Env vars, set the same for every pod.
@@ -1201,6 +1203,15 @@ func RunRC(config RCConfig) error {
if config.MemLimit > 0 {
rc.Spec.Template.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI)
}
+ if config.CpuRequest > 0 || config.MemRequest > 0 {
+ rc.Spec.Template.Spec.Containers[0].Resources.Requests = api.ResourceList{}
+ }
+ if config.CpuRequest > 0 {
+ rc.Spec.Template.Spec.Containers[0].Resources.Requests[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI)
+ }
+ if config.MemRequest > 0 {
+ rc.Spec.Template.Spec.Containers[0].Resources.Requests[api.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI)
+ }
_, err := config.Client.ReplicationControllers(config.Namespace).Create(rc)
if err != nil {
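
The net effect of the new branches in RunRC: the Requests map is only allocated and populated when a positive CpuRequest or MemRequest is supplied, so existing callers that leave those fields at zero are unaffected. For ConsumeCpu's CpuRequest: 500 and CpuLimit: 1000 above, the container ends up with requirements roughly equivalent to the fragment below, a sketch assuming the same pkg/api and pkg/api/resource types used elsewhere in this diff.

// Sketch: requirements produced by the branches above for CpuRequest: 500, CpuLimit: 1000.
reqs := api.ResourceRequirements{
	Requests: api.ResourceList{
		api.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI), // 500m requested
	},
	Limits: api.ResourceList{
		api.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI), // capped at one full core
	},
}
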