mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-08-05 10:19:50 +00:00
Deflake e2e tests of HPA
Resource consumer might use slightly more CPU than requested. That resulted in HPA sometimes increasing size of deployments during e2e tests. Deflake tests by: - Scaling up CPU requests in those tests. Resource consumer might go a fixed number of milli CPU seconds above target. Having higher requests makes the test less sensitive. - On scale down consume CPU in the middle between what would generate recommendation of expected size and 1 pod fewer (instead of right on the edge between expected and expected +1). Some variables were int32 but always cast to int before use. Make them int.
This commit is contained in:
parent
b7c2d923ef
commit
e2394bd3b8
@ -96,8 +96,8 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
|
|||||||
|
|
||||||
// HPAScaleTest struct is used by the scale(...) function.
|
// HPAScaleTest struct is used by the scale(...) function.
|
||||||
type HPAScaleTest struct {
|
type HPAScaleTest struct {
|
||||||
initPods int32
|
initPods int
|
||||||
totalInitialCPUUsage int32
|
totalInitialCPUUsage int
|
||||||
perPodCPURequest int64
|
perPodCPURequest int64
|
||||||
targetCPUUtilizationPercent int32
|
targetCPUUtilizationPercent int32
|
||||||
minPods int32
|
minPods int32
|
||||||
@ -116,7 +116,7 @@ type HPAScaleTest struct {
|
|||||||
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
|
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
|
||||||
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) {
|
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) {
|
||||||
const timeToWait = 15 * time.Minute
|
const timeToWait = 15 * time.Minute
|
||||||
rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset, f.ScalesGetter)
|
rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset, f.ScalesGetter)
|
||||||
defer rc.CleanUp()
|
defer rc.CleanUp()
|
||||||
hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
|
hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
|
||||||
defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
|
defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
|
||||||
@ -137,14 +137,14 @@ func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc
|
|||||||
}
|
}
|
||||||
scaleTest := &HPAScaleTest{
|
scaleTest := &HPAScaleTest{
|
||||||
initPods: 1,
|
initPods: 1,
|
||||||
totalInitialCPUUsage: 250,
|
totalInitialCPUUsage: 500,
|
||||||
perPodCPURequest: 500,
|
perPodCPURequest: 1000,
|
||||||
targetCPUUtilizationPercent: 20,
|
targetCPUUtilizationPercent: 20,
|
||||||
minPods: 1,
|
minPods: 1,
|
||||||
maxPods: 5,
|
maxPods: 5,
|
||||||
firstScale: 3,
|
firstScale: 3,
|
||||||
firstScaleStasis: stasis,
|
firstScaleStasis: stasis,
|
||||||
cpuBurst: 700,
|
cpuBurst: 1400,
|
||||||
secondScale: 5,
|
secondScale: 5,
|
||||||
}
|
}
|
||||||
scaleTest.run(name, kind, rc, f)
|
scaleTest.run(name, kind, rc, f)
|
||||||
@ -157,8 +157,8 @@ func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, r
|
|||||||
}
|
}
|
||||||
scaleTest := &HPAScaleTest{
|
scaleTest := &HPAScaleTest{
|
||||||
initPods: 5,
|
initPods: 5,
|
||||||
totalInitialCPUUsage: 375,
|
totalInitialCPUUsage: 650,
|
||||||
perPodCPURequest: 500,
|
perPodCPURequest: 1000,
|
||||||
targetCPUUtilizationPercent: 30,
|
targetCPUUtilizationPercent: 30,
|
||||||
minPods: 1,
|
minPods: 1,
|
||||||
maxPods: 5,
|
maxPods: 5,
|
||||||
|
Loading…
Reference in New Issue
Block a user