Mirror of https://github.com/k3s-io/kubernetes.git
Made WaitForReplicas take a "how long to wait" parameter instead of it being hardcoded.
parent 1b0f981f82
commit 38f175f115
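
The change moves the total wait time out of WaitForReplicas and up to its callers; the 20-second poll interval stays inside the function. Below is a minimal, standalone sketch of the new shape, using the same wait.PollImmediate helper the real code calls; the lower-case resourceConsumer type and getReplicas stub are hypothetical stand-ins so the sketch compiles on its own, not the actual e2e ResourceConsumer.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// resourceConsumer is a hypothetical stand-in for the e2e ResourceConsumer,
// kept minimal so the sketch compiles on its own.
type resourceConsumer struct{ replicas int }

func (rc *resourceConsumer) getReplicas() int { return rc.replicas }

// waitForReplicas mirrors the new shape of the changed method: the caller now
// supplies the total timeout, while the 20-second poll interval stays internal.
func (rc *resourceConsumer) waitForReplicas(desiredReplicas int, duration time.Duration) error {
	interval := 20 * time.Second
	return wait.PollImmediate(interval, duration, func() (bool, error) {
		return rc.getReplicas() == desiredReplicas, nil
	})
}

func main() {
	rc := &resourceConsumer{replicas: 3}
	// Callers pass the timeout explicitly, e.g. 15 minutes as in the updated tests.
	fmt.Println(rc.waitForReplicas(3, 15*time.Minute))
}

Pushing the timeout up to the call site lets individual tests choose a bound that matches their scenario instead of inheriting a single hardcoded default.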
@@ -114,17 +114,18 @@ type HPAScaleTest struct {
 // The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
 // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
 func (scaleTest *HPAScaleTest) run(name, kind string, rc *common.ResourceConsumer, f *framework.Framework) {
+	const timeToWait = 15 * time.Minute
 	rc = common.NewDynamicResourceConsumer(name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f)
 	defer rc.CleanUp()
 	hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
 	defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
-	rc.WaitForReplicas(int(scaleTest.firstScale))
+	rc.WaitForReplicas(int(scaleTest.firstScale), timeToWait)
 	if scaleTest.firstScaleStasis > 0 {
 		rc.EnsureDesiredReplicas(int(scaleTest.firstScale), scaleTest.firstScaleStasis)
 	}
 	if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
 		rc.ConsumeCPU(scaleTest.cpuBurst)
-		rc.WaitForReplicas(int(scaleTest.secondScale))
+		rc.WaitForReplicas(int(scaleTest.secondScale), timeToWait)
 	}
 }
 
@@ -342,8 +342,7 @@ func (rc *ResourceConsumer) GetReplicas() int {
 	return 0
 }
 
-func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int) {
-	duration := 15 * time.Minute
+func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.Duration) {
 	interval := 20 * time.Second
 	err := wait.PollImmediate(interval, duration, func() (bool, error) {
 		replicas := rc.GetReplicas()
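
With the timeout exposed as a parameter, call sites can pick a bound that fits the scenario rather than inheriting the old 15-minute default. A hypothetical caller (names assumed, not part of this diff), written against the same signature:

package hpatests // hypothetical package, only to frame the example

import (
	"time"

	"k8s.io/kubernetes/test/e2e/common"
)

// waitQuickScale is a hypothetical helper, not part of this diff: with the
// timeout now a parameter, a test can expect a scale event well before the
// previous hardcoded 15 minutes.
func waitQuickScale(rc *common.ResourceConsumer, target int) {
	rc.WaitForReplicas(target, 5*time.Minute)
}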
@@ -89,7 +89,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
 	rc := common.NewDynamicResourceConsumer(rcName, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f)
 	defer rc.CleanUp()
 
-	rc.WaitForReplicas(pods)
+	rc.WaitForReplicas(pods, 15*time.Minute)
 
 	metricsMap := map[string]bool{}
 	pollingFunction := checkForMetrics(projectId, gcmService, time.Now(), metricsMap, allPodsCPU, perPodCPU)
@@ -18,6 +18,7 @@ package upgrades
 
 import (
 	"fmt"
+	"time"
 
 	autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
 	"k8s.io/kubernetes/test/e2e/common"
@@ -72,22 +73,23 @@ func (t *HPAUpgradeTest) Teardown(f *framework.Framework) {
 }
 
 func (t *HPAUpgradeTest) test() {
+	const timeToWait = 15 * time.Minute
 	t.rc.Resume()
 
 	By(fmt.Sprintf("HPA scales to 1 replica: consume 10 millicores, target per pod 100 millicores, min pods 1."))
 	t.rc.ConsumeCPU(10) /* millicores */
 	By(fmt.Sprintf("HPA waits for 1 replica"))
-	t.rc.WaitForReplicas(1)
+	t.rc.WaitForReplicas(1, timeToWait)
 
 	By(fmt.Sprintf("HPA scales to 3 replicas: consume 250 millicores, target per pod 100 millicores."))
 	t.rc.ConsumeCPU(250) /* millicores */
 	By(fmt.Sprintf("HPA waits for 3 replicas"))
-	t.rc.WaitForReplicas(3)
+	t.rc.WaitForReplicas(3, timeToWait)
 
 	By(fmt.Sprintf("HPA scales to 5 replicas: consume 700 millicores, target per pod 100 millicores, max pods 5."))
 	t.rc.ConsumeCPU(700) /* millicores */
 	By(fmt.Sprintf("HPA waits for 5 replicas"))
-	t.rc.WaitForReplicas(5)
+	t.rc.WaitForReplicas(5, timeToWait)
 
 	// We need to pause background goroutines as during upgrade master is unavailable and requests issued by them fail.
 	t.rc.Pause()