Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 19:01:49 +00:00.
E2E tests for horizontal pod autoscaler.

Fixes & tuning in the horizontal pod autoscaler and its e2e tests; two of the tests added to the "Autoscaling suite".
parent ec0e1faab4
commit c8238c079a
@@ -53,9 +53,11 @@ POLL_SLEEP_INTERVAL=3
 SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
 
 # Optional: Cluster monitoring to setup as part of the cluster bring up:
-# none - No cluster monitoring setup
-# influxdb - Heapster, InfluxDB, and Grafana
-# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
+# none - No cluster monitoring setup
+# influxdb - Heapster, InfluxDB, and Grafana
+# google - Heapster, Google Cloud Monitoring, and Google Cloud Logging
+# googleinfluxdb - Enable influxdb and google (except GCM)
+# standalone - Heapster only. Metrics available via Heapster REST API.
 ENABLE_CLUSTER_MONITORING="${KUBE_ENABLE_CLUSTER_MONITORING:-influxdb}"
 
 TEST_CLUSTER_LOG_LEVEL="${TEST_CLUSTER_LOG_LEVEL:---v=4}"
@@ -112,7 +112,7 @@ func NewCMServer() *CMServer {
 		ResourceQuotaSyncPeriod:           10 * time.Second,
 		NamespaceSyncPeriod:               5 * time.Minute,
 		PVClaimBinderSyncPeriod:           10 * time.Second,
-		HorizontalPodAutoscalerSyncPeriod: 1 * time.Minute,
+		HorizontalPodAutoscalerSyncPeriod: 30 * time.Second,
 		DeploymentControllerSyncPeriod:    1 * time.Minute,
 		RegisterRetryCount:                10,
 		PodEvictionTimeout:                5 * time.Minute,
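Note: halving HorizontalPodAutoscalerSyncPeriod means the controller re-evaluates every HPA twice as often. A minimal, self-contained sketch of how a sync period of this kind typically drives a reconcile loop; the loop and function names are illustrative, not the controller-manager's actual wiring:

package main

import (
	"fmt"
	"time"
)

// reconcileOnce stands in for one pass of the autoscaler over all
// HorizontalPodAutoscaler objects.
func reconcileOnce(pass int) {
	fmt.Printf("reconcile pass %d\n", pass)
}

func main() {
	syncPeriod := 30 * time.Second // was 1 * time.Minute before this commit

	ticker := time.NewTicker(syncPeriod)
	defer ticker.Stop()
	// Each tick triggers one reconcile pass; a shorter period reacts to
	// load changes faster at the cost of more frequent metrics queries.
	// Bounded to three passes so the sketch terminates.
	for pass := 1; pass <= 3; pass++ {
		<-ticker.C
		reconcileOnce(pass)
	}
}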
@@ -218,6 +218,7 @@ case ${JOB_NAME} in
     : ${PROJECT:="k8s-jnks-e2e-gce-autoscaling"}
     # Override GCE default for cluster size autoscaling purposes.
     ENABLE_CLUSTER_MONITORING="googleinfluxdb"
+    ENABLE_HORIZONTAL_POD_AUTOSCALER="true"
     ;;
 
   # Runs the flaky tests on GCE, sequentially.
@@ -522,6 +523,7 @@ fi
 # Shared cluster variables
 export E2E_MIN_STARTUP_PODS=${E2E_MIN_STARTUP_PODS:-}
 export KUBE_ENABLE_CLUSTER_MONITORING=${ENABLE_CLUSTER_MONITORING:-}
+export KUBE_ENABLE_HORIZONTAL_POD_AUTOSCALER=${ENABLE_HORIZONTAL_POD_AUTOSCALER:-}
 export MASTER_SIZE=${MASTER_SIZE:-}
 export MINION_SIZE=${MINION_SIZE:-}
 export NUM_MINIONS=${NUM_MINIONS:-}
@@ -34,9 +34,6 @@ import (
 )
 
 const (
-	heapsterNamespace = "kube-system"
-	heapsterService   = "monitoring-heapster"
-
 	// Usage shoud exceed the tolerance before we start downscale or upscale the pods.
 	// TODO: make it a flag or HPA spec element.
 	tolerance = 0.1
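The tolerance constant in the surrounding context is the dead zone that keeps the controller from thrashing on small metric fluctuations: observed usage must deviate from the target by more than 10% before any rescale happens. A minimal sketch of such a check, assuming the usual proportional-scaling rule; the helper name and signature are illustrative:

package main

import (
	"fmt"
	"math"
)

// Usage should exceed the tolerance before we downscale or upscale.
const tolerance = 0.1

// desiredReplicas scales proportionally to the usage/target ratio, but
// only when the ratio leaves the +/-10% dead zone around 1.0.
func desiredReplicas(current int, usageRatio float64) int {
	if math.Abs(usageRatio-1.0) <= tolerance {
		return current // within tolerance: keep the replica count
	}
	return int(math.Ceil(usageRatio * float64(current)))
}

func main() {
	fmt.Println(desiredReplicas(3, 1.05)) // 3: within 10% of target
	fmt.Println(desiredReplicas(3, 1.50)) // 5: 50% over target, scale up
}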
@@ -48,8 +45,8 @@ type HorizontalController struct {
 	eventRecorder record.EventRecorder
 }
 
-var downscaleForbiddenWindow, _ = time.ParseDuration("20m")
-var upscaleForbiddenWindow, _ = time.ParseDuration("3m")
+var downscaleForbiddenWindow = 5 * time.Minute
+var upscaleForbiddenWindow = 3 * time.Minute
 
 func NewHorizontalController(client client.Interface, metricsClient metrics.MetricsClient) *HorizontalController {
 	broadcaster := record.NewBroadcaster()
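Two things change here: the duration literals drop the error value that time.ParseDuration was silently discarding, and the downscale window shrinks from 20 to 5 minutes, so the e2e suite can observe a scale-down without waiting 20 minutes. A sketch of how forbidden windows of this kind typically gate rescaling; the function and its arguments are assumptions for illustration:

package main

import (
	"fmt"
	"time"
)

var (
	downscaleForbiddenWindow = 5 * time.Minute
	upscaleForbiddenWindow   = 3 * time.Minute
)

// scaleAllowed rejects a rescale that follows the previous one too
// closely, damping oscillation between scale-up and scale-down.
func scaleAllowed(lastScale time.Time, scalingUp bool, now time.Time) bool {
	window := downscaleForbiddenWindow
	if scalingUp {
		window = upscaleForbiddenWindow
	}
	return now.Sub(lastScale) >= window
}

func main() {
	last := time.Now().Add(-4 * time.Minute)
	fmt.Println(scaleAllowed(last, true, time.Now()))  // true: 4m >= 3m up-window
	fmt.Println(scaleAllowed(last, false, time.Now())) // false: 4m < 5m down-window
}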
@@ -35,10 +35,10 @@ import (
 
 const (
 	heapsterNamespace = "kube-system"
-	heapsterService   = "monitoring-heapster"
+	heapsterService   = "heapster"
 )
 
-var heapsterQueryStart, _ = time.ParseDuration("-5m")
+var heapsterQueryStart = -5 * time.Minute
 
 // An interface for getting metrics for pods.
 type MetricsClient interface {
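heapsterQueryStart is a negative offset: adding it to the current time yields the start of the metrics window, i.e. the last five minutes. As above, the literal form also stops discarding time.ParseDuration's error. A small sketch of applying such an offset; only the variable comes from the diff, the surrounding usage is assumed:

package main

import (
	"fmt"
	"time"
)

var heapsterQueryStart = -5 * time.Minute

func main() {
	now := time.Now()
	// Adding a negative duration moves backwards: the client asks
	// Heapster for metrics from the most recent five minutes.
	start := now.Add(heapsterQueryStart)
	fmt.Printf("query window: %s .. %s\n",
		start.Format(time.RFC3339), now.Format(time.RFC3339))
}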
@@ -227,6 +227,8 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int) {
 func (rc *ResourceConsumer) CleanUp() {
 	rc.stopCPU <- 0
 	rc.stopMem <- 0
+	// Wait some time to ensure all child goroutines are finished.
+	time.Sleep(10 * time.Second)
 	expectNoError(DeleteRC(rc.framework.Client, rc.framework.Namespace.Name, rc.name))
 	expectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name))
 }
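The fixed sleep gives the CPU- and memory-consuming goroutines time to drain their stop channels before the RC and service are deleted. A sketch of the stop-channel pattern presumably in play, using a sync.WaitGroup so teardown can wait deterministically instead of sleeping; all names are illustrative:

package main

import (
	"fmt"
	"sync"
	"time"
)

// consume loops until told to stop, signalling wg on exit so the caller
// can wait for a clean shutdown rather than sleeping a fixed interval.
func consume(kind string, stop <-chan int, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		select {
		case <-stop:
			fmt.Println(kind, "consumer stopped")
			return
		case <-time.After(time.Second):
			fmt.Println("consuming", kind)
		}
	}
}

func main() {
	var wg sync.WaitGroup
	stopCPU, stopMem := make(chan int), make(chan int)
	wg.Add(2)
	go consume("cpu", stopCPU, &wg)
	go consume("mem", stopMem, &wg)

	time.Sleep(2500 * time.Millisecond) // simulate test load
	stopCPU <- 0
	stopMem <- 0
	wg.Wait() // no fixed sleep needed before tearing resources down
}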
@@ -33,15 +33,8 @@ var _ = Describe("Horizontal pod autoscaling", func() {
 	var rc *ResourceConsumer
 	f := NewFramework("horizontal-pod-autoscaling")
 
-	BeforeEach(func() {
-		Skip("Skipped Horizontal pod autoscaling test")
-	})
-
-	AfterEach(func() {
-	})
-
 	// CPU tests
-	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods (scale resource: CPU)", func() {
+	It("[Skipped][Autoscaling suite] should scale from 1 pod to 3 pods (scale resource: CPU)", func() {
 		rc = NewDynamicResourceConsumer("rc", 1, 700, 0, 800, 100, f)
 		createCPUHorizontalPodAutoscaler(rc, "0.3")
 		rc.WaitForReplicas(3)
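For readers following the numbers in these tests: judging from how the values are used across the suite, the NewDynamicResourceConsumer arguments read as name, initial replicas, total CPU to consume, total memory to consume, per-pod CPU limit, per-pod memory limit, and the framework handle. The annotation below is an inferred reading, not the documented signature:

// Inferred reading of the call in the CPU test above:
rc = NewDynamicResourceConsumer(
	"rc", // name of the RC and service backing the consumer
	1,    // initial number of replicas
	700,  // total CPU to consume, in millicores
	0,    // total memory to consume, in MB (none in the CPU test)
	800,  // per-pod CPU limit, in millicores
	100,  // per-pod memory limit, in MB
	f,    // e2e framework handle
)

On that reading the expected replica count checks out: 700 millicores against the 0.3-core (300-millicore) target gives ceil(700/300) = 3 pods.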
@@ -72,7 +65,7 @@ var _ = Describe("Horizontal pod autoscaling", func() {
 	})
 
 	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 5 (scale resource: CPU)", func() {
-		rc = NewDynamicResourceConsumer("rc", 1, 300, 0, 400, 100, f)
+		rc = NewDynamicResourceConsumer("rc", 1, 250, 0, 400, 100, f)
 		createCPUHorizontalPodAutoscaler(rc, "0.1")
 		rc.WaitForReplicas(3)
 		rc.ConsumeCPU(700)
@@ -146,7 +139,7 @@ var _ = Describe("Horizontal pod autoscaling", func() {
 		rc.WaitForReplicas(3)
 		rc.CleanUp()
 	})
-	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: Memory)", func() {
+	It("[Skipped][Autoscaling suite] should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: Memory)", func() {
 		rc = NewDynamicResourceConsumer("rc", 5, 0, 700, 100, 800, f)
 		createMemoryHorizontalPodAutoscaler(rc, "300")
 		rc.WaitForReplicas(3)