diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh
index a38db38fb0f..5e703805b95 100755
--- a/hack/jenkins/e2e.sh
+++ b/hack/jenkins/e2e.sh
@@ -96,6 +96,7 @@ GCE_DEFAULT_SKIP_TESTS=(
 # The following tests are known to be flaky, and are thus run only in their own
 # -flaky- build variants.
 GCE_FLAKY_TESTS=(
+    "Autoscaling"
     "ResourceUsage"
     )
 
@@ -186,6 +187,8 @@ case ${JOB_NAME} in
           )"}
     : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-flaky"}
     : ${PROJECT:="k8s-jkns-e2e-gce-flaky"}
+    # Override GCE default for cluster size autoscaling purposes.
+    ENABLE_CLUSTER_MONITORING="googleinfluxdb"
     ;;
 
   # Runs all non-flaky tests on GCE in parallel.
diff --git a/test/e2e/autoscaling.go b/test/e2e/autoscaling.go
index 67e2242223f..c33db5c0fb6 100644
--- a/test/e2e/autoscaling.go
+++ b/test/e2e/autoscaling.go
@@ -60,7 +60,7 @@ var _ = Describe("Autoscaling", func() {
 		expectNoError(waitForClusterSize(f.Client, nodeCount))
 	})
 
-	It("[Skipped] [Autoscaling] should scale cluster size based on cpu reservation", func() {
+	It("[Skipped] should scale cluster size based on cpu reservation", func() {
 		setUpAutoscaler("cpu/node_reservation", 0.7, 1, 10)
 
 		ReserveCpu(f, "cpu-reservation", 800)
@@ -70,7 +70,7 @@ var _ = Describe("Autoscaling", func() {
 		expectNoError(waitForClusterSize(f.Client, 1))
 	})
 
-	It("[Skipped] [Autoscaling] should scale cluster size based on memory utilization", func() {
+	It("[Skipped] should scale cluster size based on memory utilization", func() {
 		setUpAutoscaler("memory/node_utilization", 0.5, 1, 10)
 
 		ConsumeMemory(f, "memory-utilization", 2)
@@ -80,7 +80,7 @@ var _ = Describe("Autoscaling", func() {
 		expectNoError(waitForClusterSize(f.Client, 1))
 	})
 
-	It("[Skipped] [Autoscaling] should scale cluster size based on memory reservation", func() {
+	It("[Skipped] should scale cluster size based on memory reservation", func() {
 		setUpAutoscaler("memory/node_reservation", 0.5, 1, 10)
 
 		ReserveMemory(f, "memory-reservation", 2)
@@ -138,13 +138,15 @@ func ConsumeCpu(f *Framework, id string, cores int) {
 	CreateService(f, id)
 	By(fmt.Sprintf("Running RC which consumes %v cores", cores))
 	config := &RCConfig{
-		Client:    f.Client,
-		Name:      id,
-		Namespace: f.Namespace.Name,
-		Timeout:   10 * time.Minute,
-		Image:     "jess/stress",
-		Command:   []string{"stress", "-c", "1"},
-		Replicas:  cores,
+		Client:     f.Client,
+		Name:       id,
+		Namespace:  f.Namespace.Name,
+		Timeout:    10 * time.Minute,
+		Image:      "jess/stress",
+		Command:    []string{"stress", "-c", "1"},
+		Replicas:   cores,
+		CpuRequest: 500,
+		CpuLimit:   1000,
 	}
 	expectNoError(RunRC(*config))
 }
diff --git a/test/e2e/util.go b/test/e2e/util.go
index 48867f1ebe4..af69273ce72 100644
--- a/test/e2e/util.go
+++ b/test/e2e/util.go
@@ -178,7 +178,9 @@ type RCConfig struct {
 	Timeout       time.Duration
 	PodStatusFile *os.File
 	Replicas      int
+	CpuRequest    int64 // millicores
 	CpuLimit      int64 // millicores
+	MemRequest    int64 // bytes
 	MemLimit      int64 // bytes
 
 	// Env vars, set the same for every pod.
@@ -1201,6 +1203,15 @@ func RunRC(config RCConfig) error {
 	if config.MemLimit > 0 {
 		rc.Spec.Template.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI)
 	}
+	if config.CpuRequest > 0 || config.MemRequest > 0 {
+		rc.Spec.Template.Spec.Containers[0].Resources.Requests = api.ResourceList{}
+	}
+	if config.CpuRequest > 0 {
+		rc.Spec.Template.Spec.Containers[0].Resources.Requests[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI)
+	}
+	if config.MemRequest > 0 {
+		rc.Spec.Template.Spec.Containers[0].Resources.Requests[api.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI)
+	}
 	_, err := config.Client.ReplicationControllers(config.Namespace).Create(rc)
 	if err != nil {
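For context, here is a minimal, self-contained sketch (not part of the patch) of the resource spec that RunRC would now attach to the pod template for ConsumeCpu's config, i.e. a 500-millicore CPU request and a 1000-millicore CPU limit. The import paths assume the pkg/api layout used by Kubernetes at the time of this change.

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	// Mirrors what RunRC builds for RCConfig{CpuRequest: 500, CpuLimit: 1000}:
	// a 500m CPU request and a 1000m (one core) CPU limit on the container.
	res := api.ResourceRequirements{
		Requests: api.ResourceList{
			api.ResourceCPU: *resource.NewMilliQuantity(500, resource.DecimalSI),
		},
		Limits: api.ResourceList{
			api.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI),
		},
	}

	// Copy out of the map before calling String(), which has a pointer receiver.
	cpuReq := res.Requests[api.ResourceCPU]
	cpuLim := res.Limits[api.ResourceCPU]
	fmt.Printf("cpu request=%s, cpu limit=%s\n", cpuReq.String(), cpuLim.String())
}
```

Note that RunRC only initializes Resources.Requests when CpuRequest or MemRequest is positive, so existing callers that leave the new fields zeroed keep producing the same pod spec as before.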