Merge pull request #84510 from danielqsj/f-autoscaling

refactor autoscaling utils in e2e
Kubernetes Prow Robot 2019-11-07 16:20:19 -08:00 committed by GitHub
commit 66219e1638
11 changed files with 91 additions and 41 deletions

View File

@@ -38,8 +38,8 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/autoscaling:go_default_library",
"//test/e2e/framework/network:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",

View File

@@ -22,8 +22,8 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"github.com/onsi/ginkgo"
@@ -94,15 +94,15 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes no more than 10% of the node's allocatable memory.
replicas := 1
resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter)
resourceConsumer := e2eautoscaling.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter)
defer resourceConsumer.CleanUp()
resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.
// Enable the Horizontal Pod Autoscaler with a 50% target CPU utilization and
// raise the CPU usage so that scaling to 8 pods is required to satisfy the target.
targetCPUUtilizationPercent := int32(50)
hpa := common.CreateCPUHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10)
defer common.DeleteHorizontalPodAutoscaler(resourceConsumer, hpa.Name)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscaler(resourceConsumer, targetCPUUtilizationPercent, 1, 10)
defer e2eautoscaling.DeleteHorizontalPodAutoscaler(resourceConsumer, hpa.Name)
cpuLoad := 8 * cpuRequestMillis * int64(targetCPUUtilizationPercent) / 100 // 8 pods utilized to the target level
resourceConsumer.ConsumeCPU(int(cpuLoad))

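The load on the last line follows directly from the HPA steady-state rule desiredReplicas = ceil(totalUsage / (target% × perPodRequest)): consuming 8 × request × 50% millicores makes 8 the smallest replica count that brings average utilization back down to the target. A minimal sketch of that arithmetic (the helper name is illustrative, not part of this PR):

```go
package main

import "fmt"

// cpuLoadForReplicas returns the total CPU load, in millicores, at which a
// CPU-based HPA settles on exactly `replicas` pods, per
// desiredReplicas = ceil(totalUsage / (targetPercent/100 * perPodRequestMillis)).
func cpuLoadForReplicas(replicas, perPodRequestMillis, targetPercent int64) int64 {
	return replicas * perPodRequestMillis * targetPercent / 100
}

func main() {
	// Example: 500m per-pod request, 50% target. Consuming 2000m means
	// average utilization only drops to the target once 8 pods are running.
	fmt.Println(cpuLoadForReplicas(8, 500, 50)) // 2000
}
```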
View File

@@ -20,8 +20,8 @@ import (
"time"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
"github.com/onsi/ginkgo"
)
@@ -29,7 +29,7 @@ import (
// These tests don't seem to run properly in parallel; see issue #20338.
//
var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: CPU)", func() {
var rc *common.ResourceConsumer
var rc *e2eautoscaling.ResourceConsumer
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
@@ -38,20 +38,20 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
SIGDescribe("[Serial] [Slow] Deployment", func() {
// CPU tests via deployments
ginkgo.It(titleUp, func() {
scaleUp("test-deployment", common.KindDeployment, false, rc, f)
scaleUp("test-deployment", e2eautoscaling.KindDeployment, false, rc, f)
})
ginkgo.It(titleDown, func() {
scaleDown("test-deployment", common.KindDeployment, false, rc, f)
scaleDown("test-deployment", e2eautoscaling.KindDeployment, false, rc, f)
})
})
SIGDescribe("[Serial] [Slow] ReplicaSet", func() {
// CPU tests via ReplicaSets
ginkgo.It(titleUp, func() {
scaleUp("rs", common.KindReplicaSet, false, rc, f)
scaleUp("rs", e2eautoscaling.KindReplicaSet, false, rc, f)
})
ginkgo.It(titleDown, func() {
scaleDown("rs", common.KindReplicaSet, false, rc, f)
scaleDown("rs", e2eautoscaling.KindReplicaSet, false, rc, f)
})
})
@@ -59,10 +59,10 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
SIGDescribe("[Serial] [Slow] ReplicationController", func() {
// CPU tests via replication controllers
ginkgo.It(titleUp+" and verify decision stability", func() {
scaleUp("rc", common.KindRC, true, rc, f)
scaleUp("rc", e2eautoscaling.KindRC, true, rc, f)
})
ginkgo.It(titleDown+" and verify decision stability", func() {
scaleDown("rc", common.KindRC, true, rc, f)
scaleDown("rc", e2eautoscaling.KindRC, true, rc, f)
})
})
@@ -77,7 +77,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
maxPods: 2,
firstScale: 2,
}
scaleTest.run("rc-light", common.KindRC, rc, f)
scaleTest.run("rc-light", e2eautoscaling.KindRC, rc, f)
})
ginkgo.It("Should scale from 2 pods to 1 pod [Slow]", func() {
scaleTest := &HPAScaleTest{
@@ -89,7 +89,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
maxPods: 2,
firstScale: 1,
}
scaleTest.run("rc-light", common.KindRC, rc, f)
scaleTest.run("rc-light", e2eautoscaling.KindRC, rc, f)
})
})
})
@@ -114,12 +114,12 @@ type HPAScaleTest struct {
// The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
// The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
// TODO: The use of 3 states is arbitrary; we could eventually make this test handle "n" states once it stabilizes.
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) {
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *e2eautoscaling.ResourceConsumer, f *framework.Framework) {
const timeToWait = 15 * time.Minute
rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter)
rc = e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter)
defer rc.CleanUp()
hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
defer e2eautoscaling.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
rc.WaitForReplicas(scaleTest.firstScale, timeToWait)
if scaleTest.firstScaleStasis > 0 {
@@ -131,7 +131,7 @@ func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc
}
}
func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc *common.ResourceConsumer, f *framework.Framework) {
func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc *e2eautoscaling.ResourceConsumer, f *framework.Framework) {
stasis := 0 * time.Minute
if checkStability {
stasis = 10 * time.Minute
@@ -151,7 +151,7 @@ func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc
scaleTest.run(name, kind, rc, f)
}
func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, rc *common.ResourceConsumer, f *framework.Framework) {
func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, rc *e2eautoscaling.ResourceConsumer, f *framework.Framework) {
stasis := 0 * time.Minute
if checkStability {
stasis = 10 * time.Minute

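Both scaleUp and scaleDown then fill in an HPAScaleTest and hand it to run. A condensed sketch of the scale-up variant; the struct literals and the cpuBurst/secondScale fields are illustrative assumptions inferred from the doc comment and test title (the hunk elides the actual values):

```go
// Sketch of the scale-up path after the refactor, written as if in the same
// package as the test. Every numeric literal below is an assumption chosen to
// be self-consistent: 250m total at a 20% target of a 500m request needs
// ceil(250/100) = 3 pods; a 700m burst would need 7 but is capped at maxPods.
func scaleUpSketch(name string, kind schema.GroupVersionKind, checkStability bool,
	rc *e2eautoscaling.ResourceConsumer, f *framework.Framework) {
	stasis := 0 * time.Minute
	if checkStability {
		stasis = 10 * time.Minute // hold and verify decision stability
	}
	scaleTest := &HPAScaleTest{
		initPods:                    1,   // start from a single pod
		totalInitialCPUUsage:        250, // millicores across all pods
		perPodCPURequest:            500,
		targetCPUUtilizationPercent: 20,
		minPods:                     1,
		maxPods:                     5,
		firstScale:                  3, // state 1: 1 -> 3 from initial load
		firstScaleStasis:            stasis,
		cpuBurst:                    700, // state 2 (assumed field): 3 -> 5
		secondScale:                 5,
	}
	scaleTest.run(name, kind, rc, f)
}
```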
View File

@@ -9,7 +9,6 @@ go_library(
name = "go_default_library",
srcs = [
"apparmor.go",
"autoscaling_utils.go",
"configmap.go",
"configmap_volume.go",
"container.go",
@@ -46,7 +45,6 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/common",
deps = [
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/kubelet:go_default_library",
"//pkg/kubelet/events:go_default_library",
@@ -54,7 +52,6 @@ go_library(
"//pkg/kubelet/runtimeclass/testing:go_default_library",
"//pkg/kubelet/sysctl:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/coordination/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/equality:go_default_library",
@@ -64,7 +61,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
@@ -74,7 +70,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/framework:go_default_library",
@@ -82,8 +77,6 @@ go_library(
"//test/e2e/framework/network:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",
"//test/e2e/framework/service:go_default_library",
"//test/e2e/framework/volume:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",

View File

@@ -107,6 +107,7 @@ filegroup(
srcs = [
":package-srcs",
"//test/e2e/framework/auth:all-srcs",
"//test/e2e/framework/autoscaling:all-srcs",
"//test/e2e/framework/config:all-srcs",
"//test/e2e/framework/deployment:all-srcs",
"//test/e2e/framework/deviceplugin:all-srcs",

View File

@@ -0,0 +1,39 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["autoscaling_utils.go"],
importpath = "k8s.io/kubernetes/test/e2e/framework/autoscaling",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",
"//test/e2e/framework/service:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

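Consumers of the new target then change in two places: the deps entry in their BUILD file (as in the hunks above and below) and the Go import, which the PR consistently aliases as e2eautoscaling:

```go
import (
	// New home of the autoscaling test utilities; replaces the helpers
	// previously imported from "k8s.io/kubernetes/test/e2e/common".
	e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
)
```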
View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package common
package autoscaling
import (
"context"
@@ -67,8 +67,11 @@
)
var (
KindRC = schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"}
// KindRC is the GVK for ReplicationController
KindRC = schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"}
// KindDeployment is the GVK for Deployment
KindDeployment = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"}
// KindReplicaSet is the GVK for ReplicaSet
KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"}
)
@@ -101,21 +104,25 @@ type ResourceConsumer struct {
requestSizeCustomMetric int
}
// GetResourceConsumerImage is a wrapper to get the fully qualified URI of the ResourceConsumer image
func GetResourceConsumerImage() string {
return resourceConsumerImage
}
// NewDynamicResourceConsumer is a wrapper to create a new dynamic ResourceConsumer
func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, scaleClient, nil, nil)
}
// NewStaticResourceConsumer is a wrapper to create a new static ResourceConsumer
// TODO: this still defaults to ReplicationController
func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
return newResourceConsumer(name, nsName, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, clientset, scaleClient, nil, nil)
}
// NewMetricExporter is a wrapper to create a new ResourceConsumer for a metrics exporter
func NewMetricExporter(name, nsName string, podAnnotations, serviceAnnotations map[string]string, metricValue int, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
return newResourceConsumer(name, nsName, KindDeployment, 1, 0, 0, metricValue, dynamicConsumptionTimeInSeconds,
dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, 100, 100, clientset, scaleClient, podAnnotations, serviceAnnotations)
@@ -179,7 +186,7 @@ func (rc *ResourceConsumer) ConsumeMem(megabytes int) {
rc.mem <- megabytes
}
// ConsumeMem consumes given number of custom metric
// ConsumeCustomMetric consumes the given amount of custom metric
func (rc *ResourceConsumer) ConsumeCustomMetric(amount int) {
framework.Logf("RC %s: consume custom metric %v in total", rc.name, amount)
rc.customMetric <- amount
@@ -328,6 +335,7 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
framework.ExpectNoError(err)
}
// GetReplicas returns the current number of replicas
func (rc *ResourceConsumer) GetReplicas() int {
switch rc.kind {
case KindRC:
@@ -357,10 +365,12 @@ func (rc *ResourceConsumer) GetReplicas() int {
return 0
}
// GetHpa returns the HorizontalPodAutoscaler with the given name
func (rc *ResourceConsumer) GetHpa(name string) (*autoscalingv1.HorizontalPodAutoscaler, error) {
return rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Get(name, metav1.GetOptions{})
}
// WaitForReplicas waits until the desired number of replicas is reached
func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.Duration) {
interval := 20 * time.Second
err := wait.PollImmediate(interval, duration, func() (bool, error) {
@@ -371,10 +381,12 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.D
framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)
}
// EnsureDesiredReplicas ensures the replica count stays at the desired number for the given duration
func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, duration time.Duration, hpaName string) {
rc.EnsureDesiredReplicasInRange(desiredReplicas, desiredReplicas, duration, hpaName)
}
// EnsureDesiredReplicasInRange ensures the replica count stays within the desired range for the given duration
func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, maxDesiredReplicas int, duration time.Duration, hpaName string) {
interval := 10 * time.Second
err := wait.PollImmediate(interval, duration, func() (bool, error) {
@@ -411,7 +423,7 @@ func (rc *ResourceConsumer) Pause() {
rc.stopWaitGroup.Wait()
}
// Pause starts background goroutines responsible for consuming resources.
// Resume starts background goroutines responsible for consuming resources.
func (rc *ResourceConsumer) Resume() {
ginkgo.By(fmt.Sprintf("HPA resuming RC %s", rc.name))
go rc.makeConsumeCPURequests()
@@ -419,6 +431,7 @@
go rc.makeConsumeCustomMetric()
}
// CleanUp cleans up the background goroutines responsible for consuming resources.
func (rc *ResourceConsumer) CleanUp() {
ginkgo.By(fmt.Sprintf("Removing consuming RC %s", rc.name))
close(rc.stopCPU)
@@ -526,6 +539,8 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
}
// CreateCPUHorizontalPodAutoscaler creates a HorizontalPodAutoscaler with a CPU
// target for the given ResourceConsumer.
func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) *autoscalingv1.HorizontalPodAutoscaler {
hpa := &autoscalingv1.HorizontalPodAutoscaler{
ObjectMeta: metav1.ObjectMeta{
@@ -548,6 +563,7 @@ func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma
return hpa
}
// DeleteHorizontalPodAutoscaler deletes the HorizontalPodAutoscaler with the given name.
func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string) {
rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(autoscalerName, nil)
}

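Pulled together, the surface exported above is consumed like this from a test. A minimal sketch assuming a framework.Framework is already set up; the package name, consumer name, and sizes are illustrative:

```go
package autoscalingexample // illustrative package name

import (
	"time"

	"k8s.io/kubernetes/test/e2e/framework"
	e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
)

func consumeAndScale(f *framework.Framework) {
	// Deployment-backed consumer: 1 replica, no initial CPU/memory/custom-metric
	// consumption, 500m CPU and 200Mi memory per pod.
	rc := e2eautoscaling.NewDynamicResourceConsumer("example-consumer",
		f.Namespace.Name, e2eautoscaling.KindDeployment,
		1, 0, 0, 0, 500, 200, f.ClientSet, f.ScalesGetter)
	defer rc.CleanUp()

	// Autoscale between 1 and 10 replicas toward 50% CPU utilization.
	hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscaler(rc, 50, 1, 10)
	defer e2eautoscaling.DeleteHorizontalPodAutoscaler(rc, hpa.Name)

	// 1000m of total load at 50% of a 500m per-pod request is satisfied by 4 pods.
	rc.ConsumeCPU(1000)
	rc.WaitForReplicas(4, 15*time.Minute)
}
```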
View File

@@ -33,8 +33,8 @@ go_library(
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/external_metrics:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/autoscaling:go_default_library",
"//test/e2e/framework/config:go_default_library",
"//test/e2e/framework/gpu:go_default_library",
"//test/e2e/framework/metrics:go_default_library",

View File

@@ -27,8 +27,8 @@ import (
"github.com/onsi/ginkgo"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
gcm "google.golang.org/api/monitoring/v3"
@@ -101,7 +101,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
framework.ExpectNoError(err)
rc := common.NewDynamicResourceConsumer(rcName, f.Namespace.Name, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.ScalesGetter)
rc := e2eautoscaling.NewDynamicResourceConsumer(rcName, f.Namespace.Name, e2eautoscaling.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.ScalesGetter)
defer rc.CleanUp()
rc.WaitForReplicas(pods, 15*time.Minute)

View File

@@ -36,6 +36,7 @@ go_library(
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/autoscaling:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/service:go_default_library",

View File

@@ -21,15 +21,15 @@ import (
"time"
autoscalingv1 "k8s.io/api/autoscaling/v1"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
"github.com/onsi/ginkgo"
)
// HPAUpgradeTest tests that the HPA rescales its target resource correctly before and after a cluster upgrade.
type HPAUpgradeTest struct {
rc *common.ResourceConsumer
rc *e2eautoscaling.ResourceConsumer
hpa *autoscalingv1.HorizontalPodAutoscaler
}
@@ -38,10 +38,10 @@ func (HPAUpgradeTest) Name() string { return "hpa-upgrade" }
// Setup creates a resource consumer and an HPA object that autoscales the consumer.
func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
t.rc = common.NewDynamicResourceConsumer(
t.rc = e2eautoscaling.NewDynamicResourceConsumer(
"res-cons-upgrade",
f.Namespace.Name,
common.KindRC,
e2eautoscaling.KindRC,
1, /* replicas */
250, /* initCPUTotal */
0,
@@ -50,7 +50,7 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
200, /* memLimit */
f.ClientSet,
f.ScalesGetter)
t.hpa = common.CreateCPUHorizontalPodAutoscaler(
t.hpa = e2eautoscaling.CreateCPUHorizontalPodAutoscaler(
t.rc,
20, /* targetCPUUtilizationPercent */
1, /* minPods */
@@ -71,7 +71,7 @@ func (t *HPAUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upgr
// Teardown cleans up any remaining resources.
func (t *HPAUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
common.DeleteHorizontalPodAutoscaler(t.rc, t.hpa.Name)
e2eautoscaling.DeleteHorizontalPodAutoscaler(t.rc, t.hpa.Name)
t.rc.CleanUp()
}
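For context, HPAUpgradeTest's method set matches the contract the upgrade suite drives tests through. A sketch reconstructed from the signatures visible above; the interface lives in the same upgrades package, so treat the exact spelling as indicative rather than authoritative:

```go
// Test is the upgrade-test contract, inferred from HPAUpgradeTest's methods:
// Setup runs before the upgrade, Test runs while it is in flight (done closes
// when the upgrade completes), and Teardown runs afterwards.
type Test interface {
	Name() string
	Setup(f *framework.Framework)
	Test(f *framework.Framework, done <-chan struct{}, upgrade UpgradeType)
	Teardown(f *framework.Framework)
}
```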