diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh
index dfaede9aeca..b9b4895ca3c 100755
--- a/cluster/gce/config-test.sh
+++ b/cluster/gce/config-test.sh
@@ -53,9 +53,11 @@ POLL_SLEEP_INTERVAL=3
 SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16"  # formerly PORTAL_NET
 
 # Optional: Cluster monitoring to setup as part of the cluster bring up:
-#   none     - No cluster monitoring setup
-#   influxdb - Heapster, InfluxDB, and Grafana
-#   google   - Heapster, Google Cloud Monitoring, and Google Cloud Logging
+#   none           - No cluster monitoring setup
+#   influxdb       - Heapster, InfluxDB, and Grafana
+#   google         - Heapster, Google Cloud Monitoring, and Google Cloud Logging
+#   googleinfluxdb - Enable influxdb and google (except GCM)
+#   standalone     - Heapster only. Metrics available via Heapster REST API.
 ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
 
 TEST_CLUSTER_LOG_LEVEL="${TEST_CLUSTER_LOG_LEVEL:---v=4}"
diff --git a/cmd/kube-controller-manager/app/controllermanager.go b/cmd/kube-controller-manager/app/controllermanager.go
index 77c6fadfc65..228647b8ac2 100644
--- a/cmd/kube-controller-manager/app/controllermanager.go
+++ b/cmd/kube-controller-manager/app/controllermanager.go
@@ -112,7 +112,7 @@ func NewCMServer() *CMServer {
 		ResourceQuotaSyncPeriod:           10 * time.Second,
 		NamespaceSyncPeriod:               5 * time.Minute,
 		PVClaimBinderSyncPeriod:           10 * time.Second,
-		HorizontalPodAutoscalerSyncPeriod: 1 * time.Minute,
+		HorizontalPodAutoscalerSyncPeriod: 30 * time.Second,
 		DeploymentControllerSyncPeriod:    1 * time.Minute,
 		RegisterRetryCount:                10,
 		PodEvictionTimeout:                5 * time.Minute,
diff --git a/hack/jenkins/e2e.sh b/hack/jenkins/e2e.sh
index 85681256e59..2c5015a8a07 100755
--- a/hack/jenkins/e2e.sh
+++ b/hack/jenkins/e2e.sh
@@ -218,6 +218,7 @@ case ${JOB_NAME} in
     : ${PROJECT:="k8s-jnks-e2e-gce-autoscaling"}
     # Override GCE default for cluster size autoscaling purposes.
     ENABLE_CLUSTER_MONITORING="googleinfluxdb"
+    ENABLE_HORIZONTAL_POD_AUTOSCALER="true"
     ;;
 
   # Runs the flaky tests on GCE, sequentially.
@@ -522,6 +523,7 @@ fi
 # Shared cluster variables
 export E2E_MIN_STARTUP_PODS=${E2E_MIN_STARTUP_PODS:-}
 export KUBE_ENABLE_CLUSTER_MONITORING=${ENABLE_CLUSTER_MONITORING:-}
+export KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER=${ENABLE_HORIZONTAL_POD_AUTOSCALER:-}
 export MASTER_SIZE=${MASTER_SIZE:-}
 export MINION_SIZE=${MINION_SIZE:-}
 export NUM_MINIONS=${NUM_MINIONS:-}
diff --git a/pkg/controller/podautoscaler/horizontal.go b/pkg/controller/podautoscaler/horizontal.go
index 8ab373eafff..ff5e5268cb6 100644
--- a/pkg/controller/podautoscaler/horizontal.go
+++ b/pkg/controller/podautoscaler/horizontal.go
@@ -34,9 +34,6 @@ import (
 )
 
 const (
-	heapsterNamespace = "kube-system"
-	heapsterService   = "monitoring-heapster"
-
 	// Usage shoud exceed the tolerance before we start downscale or upscale the pods.
 	// TODO: make it a flag or HPA spec element.
 	tolerance = 0.1
@@ -48,8 +45,8 @@ type HorizontalController struct {
 	eventRecorder record.EventRecorder
 }
 
-var downscaleForbiddenWindow, _ = time.ParseDuration("20m")
-var upscaleForbiddenWindow, _ = time.ParseDuration("3m")
+var downscaleForbiddenWindow = 5 * time.Minute
+var upscaleForbiddenWindow = 3 * time.Minute
 
 func NewHorizontalController(client client.Interface, metricsClient metrics.MetricsClient) *HorizontalController {
 	broadcaster := record.NewBroadcaster()
diff --git a/pkg/controller/podautoscaler/metrics/metrics_client.go b/pkg/controller/podautoscaler/metrics/metrics_client.go
index 5d2f4309f15..1460a049429 100644
--- a/pkg/controller/podautoscaler/metrics/metrics_client.go
+++ b/pkg/controller/podautoscaler/metrics/metrics_client.go
@@ -35,10 +35,10 @@ import (
 
 const (
 	heapsterNamespace = "kube-system"
-	heapsterService   = "monitoring-heapster"
+	heapsterService   = "heapster"
 )
 
-var heapsterQueryStart, _ = time.ParseDuration("-5m")
+var heapsterQueryStart = -5 * time.Minute
 
 // An interface for getting metrics for pods.
 type MetricsClient interface {
diff --git a/test/e2e/autoscaling_utils.go b/test/e2e/autoscaling_utils.go
index 0c7fb01a40b..9a05d104097 100644
--- a/test/e2e/autoscaling_utils.go
+++ b/test/e2e/autoscaling_utils.go
@@ -227,6 +227,8 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int) {
 func (rc *ResourceConsumer) CleanUp() {
 	rc.stopCPU <- 0
 	rc.stopMem <- 0
+	// Wait some time to ensure all child goroutines are finished.
+	time.Sleep(10 * time.Second)
 	expectNoError(DeleteRC(rc.framework.Client, rc.framework.Namespace.Name, rc.name))
 	expectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name))
 }
diff --git a/test/e2e/horizontal_pod_autoscaling.go b/test/e2e/horizontal_pod_autoscaling.go
index 6eae095e302..4bbdb08967a 100644
--- a/test/e2e/horizontal_pod_autoscaling.go
+++ b/test/e2e/horizontal_pod_autoscaling.go
@@ -33,15 +33,8 @@ var _ = Describe("Horizontal pod autoscaling", func() {
 	var rc *ResourceConsumer
 	f := NewFramework("horizontal-pod-autoscaling")
 
-	BeforeEach(func() {
-		Skip("Skipped Horizontal pod autoscaling test")
-	})
-
-	AfterEach(func() {
-	})
-
 	// CPU tests
-	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods (scale resource: CPU)", func() {
+	It("[Skipped][Autoscaling suite] should scale from 1 pod to 3 pods (scale resource: CPU)", func() {
 		rc = NewDynamicResourceConsumer("rc", 1, 700, 0, 800, 100, f)
 		createCPUHorizontalPodAutoscaler(rc, "0.3")
 		rc.WaitForReplicas(3)
@@ -72,7 +65,7 @@ var _ = Describe("Horizontal pod autoscaling", func() {
 	})
 
 	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 5 (scale resource: CPU)", func() {
-		rc = NewDynamicResourceConsumer("rc", 1, 300, 0, 400, 100, f)
+		rc = NewDynamicResourceConsumer("rc", 1, 250, 0, 400, 100, f)
 		createCPUHorizontalPodAutoscaler(rc, "0.1")
 		rc.WaitForReplicas(3)
 		rc.ConsumeCPU(700)
@@ -146,7 +139,7 @@ var _ = Describe("Horizontal pod autoscaling", func() {
 		rc.WaitForReplicas(3)
 		rc.CleanUp()
 	})
-	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: Memory)", func() {
+	It("[Skipped][Autoscaling suite] should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: Memory)", func() {
 		rc = NewDynamicResourceConsumer("rc", 5, 0, 700, 100, 800, f)
 		createMemoryHorizontalPodAutoscaler(rc, "300")
 		rc.WaitForReplicas(3)
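
Note on the forbidden-window change in horizontal.go: the controller refuses to scale again until a cooldown has elapsed since the last scale event, with separate windows for scaling up (3m) and scaling down (5m, previously 20m). A minimal, self-contained sketch of that gating logic follows; the `scaleAllowed` helper and its signature are illustrative assumptions, not the actual code in horizontal.go, which performs an equivalent check inline against the HPA's last-scale timestamp.

package main

import (
	"fmt"
	"time"
)

// Cooldown windows matching the values introduced in the diff above.
var (
	downscaleForbiddenWindow = 5 * time.Minute
	upscaleForbiddenWindow   = 3 * time.Minute
)

// scaleAllowed (hypothetical helper) reports whether enough time has passed
// since the last scale event to permit another scale operation in the given
// direction. Downscales use the longer window to damp flapping when load
// drops briefly.
func scaleAllowed(lastScale, now time.Time, scalingUp bool) bool {
	window := downscaleForbiddenWindow
	if scalingUp {
		window = upscaleForbiddenWindow
	}
	return now.Sub(lastScale) >= window
}

func main() {
	last := time.Now().Add(-4 * time.Minute)
	fmt.Println(scaleAllowed(last, time.Now(), true))  // true: 4m elapsed > 3m upscale window
	fmt.Println(scaleAllowed(last, time.Now(), false)) // false: 4m elapsed < 5m downscale window
}

The asymmetry explains why this PR can shorten the downscale window without destabilizing the e2e tests: upscales still react within minutes, while the 30s HorizontalPodAutoscalerSyncPeriod set in controllermanager.go only controls how often the check runs, not how often scaling is permitted.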