Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #51277 from irfanurrehman/hpa-e2e-mod-for-fed

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

[Federation] Update hpa e2e utils to enable reuse in fed hpa tests

This PR enables reuse of some of the HPA e2e utils in federation, facilitating https://github.com/kubernetes/kubernetes/pull/50168.

cc @mwielgus @quinton-hoole
cc @kubernetes/sig-federation-pr-reviews

**Release note**:
```
NONE
```

Commit: f7dd62f149
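The practical effect of the signature change is that callers now pass a namespace name and client interfaces directly instead of a whole `*framework.Framework`. Below is a minimal sketch of how a federation test might reuse the consumer against a member cluster; `memberNamespace`, `memberClientSet`, and `memberInternalClientset` are hypothetical stand-ins, not names from this PR:

```go
// Hypothetical federation-side usage (illustrative names only). Before this
// change the constructor required a *framework.Framework, which a test that
// drives a federated member cluster does not have.
rc := common.NewDynamicResourceConsumer(
	"fed-hpa-consumer",      // name
	memberNamespace,         // namespace in the member cluster (assumed variable)
	common.KindDeployment,   // workload kind
	1,                       // replicas
	250,                     // initCPUTotal, millicores
	0,                       // initMemoryTotal, megabytes
	0,                       // initCustomMetric
	500,                     // cpuLimit, millicores per pod
	200,                     // memLimit, megabytes per pod
	memberClientSet,         // clientset.Interface for the member cluster (assumed)
	memberInternalClientset, // *internalclientset.Clientset (assumed)
)
defer rc.CleanUp()
```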
@@ -89,7 +89,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 	nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
 	memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
 	replicas := 1
-	resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f)
+	resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.InternalClientset)
 	defer resourceConsumer.CleanUp()
 	resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.
 
@@ -115,7 +115,7 @@ type HPAScaleTest struct {
 // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
 func (scaleTest *HPAScaleTest) run(name, kind string, rc *common.ResourceConsumer, f *framework.Framework) {
 	const timeToWait = 15 * time.Minute
-	rc = common.NewDynamicResourceConsumer(name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f)
+	rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset)
 	defer rc.CleanUp()
 	hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
 	defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
@@ -84,7 +84,9 @@ type ResourceConsumer struct {
 	name              string
 	controllerName    string
 	kind              string
-	framework         *framework.Framework
+	nsName            string
+	clientSet         clientset.Interface
+	internalClientset *internalclientset.Clientset
 	cpu               chan int
 	mem               chan int
 	customMetric      chan int
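The hunk above is the heart of the refactor: the consumer stops holding a `*framework.Framework` and instead stores only the namespace and clients it actually uses. A sketch of the same dependency-injection idea in isolation (types and names here are illustrative, not the PR's code):

```go
// Illustrative sketch: depend on the narrow pieces you use, not on the
// framework object that happens to carry them.
type consumer struct {
	nsName    string              // namespace to create and query resources in
	clientSet clientset.Interface // injected; any caller with a client can construct one
}

func newConsumer(nsName string, cs clientset.Interface) *consumer {
	return &consumer{nsName: nsName, clientSet: cs}
}
```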
@@ -103,15 +105,15 @@ func GetResourceConsumerImage() string {
 	return resourceConsumerImage
 }
 
-func NewDynamicResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
-	return newResourceConsumer(name, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
-		dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, f)
+func NewDynamicResourceConsumer(name, nsName, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
+	return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
+		dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, internalClientset)
 }
 
 // TODO this still defaults to replication controller
-func NewStaticResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
-	return newResourceConsumer(name, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
-		initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, f)
+func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
+	return newResourceConsumer(name, nsName, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
+		initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, clientset, internalClientset)
 }
 
 /*
@@ -121,15 +123,17 @@ initMemoryTotal argument is in megabytes
 memLimit argument is in megabytes, memLimit is a maximum amount of memory that can be consumed by a single pod
 cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
 */
-func newResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
-	requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
+func newResourceConsumer(name, nsName, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
+	requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
 
-	runServiceAndWorkloadForResourceConsumer(f.ClientSet, f.InternalClientset, f.Namespace.Name, name, kind, replicas, cpuLimit, memLimit)
+	runServiceAndWorkloadForResourceConsumer(clientset, internalClientset, nsName, name, kind, replicas, cpuLimit, memLimit)
 	rc := &ResourceConsumer{
 		name:           name,
 		controllerName: name + "-ctrl",
 		kind:           kind,
-		framework:      f,
+		nsName:            nsName,
+		clientSet:         clientset,
+		internalClientset: internalClientset,
 		cpu:            make(chan int),
 		mem:            make(chan int),
 		customMetric:   make(chan int),
@@ -235,14 +239,13 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() {
 }
 
 func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
-	proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
-	framework.ExpectNoError(err)
-
 	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 	defer cancel()
 
-	err = wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
-		req := proxyRequest.Namespace(rc.framework.Namespace.Name).
+	err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
+		proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.Core().RESTClient().Post())
+		framework.ExpectNoError(err)
+		req := proxyRequest.Namespace(rc.nsName).
 			Context(ctx).
 			Name(rc.controllerName).
 			Suffix("ConsumeCPU").
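Note that this hunk is not just client plumbing: building the proxy request moves inside the `wait.PollImmediate` closure, so each retry constructs a fresh request instead of reusing one built before the loop (which is also why `err :=` now declares the variable outside and `req.DoRaw()` assigns with `=` inside). A minimal sketch of that retry shape, with a hypothetical `buildRequest` helper standing in for `framework.GetServicesProxyRequest`:

```go
// Illustrative only: the point is per-attempt request construction.
err := wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
	req, err := buildRequest() // hypothetical helper, called fresh on each attempt
	if err != nil {
		return false, nil // treat as transient; keep polling
	}
	if _, err = req.DoRaw(); err != nil {
		return false, nil // service not ready yet; retry
	}
	return true, nil // success ends the poll
})
framework.ExpectNoError(err)
```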
@@ -250,7 +253,7 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
 			Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
 			Param("requestSizeMillicores", strconv.Itoa(rc.requestSizeInMillicores))
 		framework.Logf("ConsumeCPU URL: %v", *req.URL())
-		_, err := req.DoRaw()
+		_, err = req.DoRaw()
 		if err != nil {
 			framework.Logf("ConsumeCPU failure: %v", err)
 			return false, nil
@@ -263,14 +266,13 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
 
 // sendConsumeMemRequest sends POST request for memory consumption
 func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
-	proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
-	framework.ExpectNoError(err)
-
 	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 	defer cancel()
 
-	err = wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
-		req := proxyRequest.Namespace(rc.framework.Namespace.Name).
+	err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
+		proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.Core().RESTClient().Post())
+		framework.ExpectNoError(err)
+		req := proxyRequest.Namespace(rc.nsName).
 			Context(ctx).
 			Name(rc.controllerName).
 			Suffix("ConsumeMem").
@@ -278,7 +280,7 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
 			Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
 			Param("requestSizeMegabytes", strconv.Itoa(rc.requestSizeInMegabytes))
 		framework.Logf("ConsumeMem URL: %v", *req.URL())
-		_, err := req.DoRaw()
+		_, err = req.DoRaw()
 		if err != nil {
 			framework.Logf("ConsumeMem failure: %v", err)
 			return false, nil
@@ -291,14 +293,13 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
 
 // sendConsumeCustomMetric sends POST request for custom metric consumption
 func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
-	proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
-	framework.ExpectNoError(err)
-
 	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 	defer cancel()
 
-	err = wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
-		req := proxyRequest.Namespace(rc.framework.Namespace.Name).
+	err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
+		proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.Core().RESTClient().Post())
+		framework.ExpectNoError(err)
+		req := proxyRequest.Namespace(rc.nsName).
 			Context(ctx).
 			Name(rc.controllerName).
 			Suffix("BumpMetric").
@@ -307,7 +308,7 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
 			Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
 			Param("requestSizeMetrics", strconv.Itoa(rc.requestSizeCustomMetric))
 		framework.Logf("ConsumeCustomMetric URL: %v", *req.URL())
-		_, err := req.DoRaw()
+		_, err = req.DoRaw()
 		if err != nil {
 			framework.Logf("ConsumeCustomMetric failure: %v", err)
 			return false, nil
@@ -320,21 +321,21 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
 func (rc *ResourceConsumer) GetReplicas() int {
 	switch rc.kind {
 	case KindRC:
-		replicationController, err := rc.framework.ClientSet.Core().ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
+		replicationController, err := rc.clientSet.Core().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		if replicationController == nil {
 			framework.Failf(rcIsNil)
 		}
 		return int(replicationController.Status.ReadyReplicas)
 	case KindDeployment:
-		deployment, err := rc.framework.ClientSet.Extensions().Deployments(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
+		deployment, err := rc.clientSet.Extensions().Deployments(rc.nsName).Get(rc.name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		if deployment == nil {
 			framework.Failf(deploymentIsNil)
 		}
 		return int(deployment.Status.ReadyReplicas)
 	case KindReplicaSet:
-		rs, err := rc.framework.ClientSet.Extensions().ReplicaSets(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
+		rs, err := rc.clientSet.Extensions().ReplicaSets(rc.nsName).Get(rc.name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		if rs == nil {
 			framework.Failf(rsIsNil)
@@ -402,10 +403,10 @@ func (rc *ResourceConsumer) CleanUp() {
 	time.Sleep(10 * time.Second)
 	kind, err := kindOf(rc.kind)
 	framework.ExpectNoError(err)
-	framework.ExpectNoError(framework.DeleteResourceAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, kind, rc.framework.Namespace.Name, rc.name))
-	framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.name, nil))
-	framework.ExpectNoError(framework.DeleteResourceAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, api.Kind("ReplicationController"), rc.framework.Namespace.Name, rc.controllerName))
-	framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.controllerName, nil))
+	framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, kind, rc.nsName, rc.name))
+	framework.ExpectNoError(rc.clientSet.Core().Services(rc.nsName).Delete(rc.name, nil))
+	framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, api.Kind("ReplicationController"), rc.nsName, rc.controllerName))
+	framework.ExpectNoError(rc.clientSet.Core().Services(rc.nsName).Delete(rc.controllerName, nil))
 }
 
 func kindOf(kind string) (schema.GroupKind, error) {
@@ -516,7 +517,7 @@ func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma
 	hpa := &autoscalingv1.HorizontalPodAutoscaler{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      rc.name,
-			Namespace: rc.framework.Namespace.Name,
+			Namespace: rc.nsName,
 		},
 		Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
 			ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
@@ -528,11 +529,11 @@ func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma
 			TargetCPUUtilizationPercentage: &cpu,
 		},
 	}
-	hpa, errHPA := rc.framework.ClientSet.Autoscaling().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
+	hpa, errHPA := rc.clientSet.Autoscaling().HorizontalPodAutoscalers(rc.nsName).Create(hpa)
 	framework.ExpectNoError(errHPA)
 	return hpa
 }
 
 func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string) {
-	rc.framework.ClientSet.Autoscaling().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Delete(autoscalerName, nil)
+	rc.clientSet.Autoscaling().HorizontalPodAutoscalers(rc.nsName).Delete(autoscalerName, nil)
 }
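Together with the `HPAScaleTest` hunk earlier, the usage pattern for these two helpers is unchanged, only routed through the consumer's own client and namespace; the numeric values below are illustrative:

```go
// Create an HPA targeting the consumer's workload, and clean it up afterwards.
hpa := common.CreateCPUHorizontalPodAutoscaler(rc, 20 /* target CPU % */, 1 /* minPods */, 5 /* maxPods */)
defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
```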
@@ -87,7 +87,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
 
 	framework.ExpectNoError(err)
 
-	rc := common.NewDynamicResourceConsumer(rcName, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f)
+	rc := common.NewDynamicResourceConsumer(rcName, f.Namespace.Name, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.InternalClientset)
 	defer rc.CleanUp()
 
 	rc.WaitForReplicas(pods, 15*time.Minute)
@@ -39,6 +39,7 @@ func (HPAUpgradeTest) Name() string { return "hpa-upgrade" }
 func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
 	t.rc = common.NewDynamicResourceConsumer(
 		"res-cons-upgrade",
+		f.Namespace.Name,
 		common.KindRC,
 		1, /* replicas */
 		250, /* initCPUTotal */
@@ -46,7 +47,8 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
 		0,
 		500, /* cpuLimit */
 		200, /* memLimit */
-		f)
+		f.ClientSet,
+		f.InternalClientset)
 	t.hpa = common.CreateCPUHorizontalPodAutoscaler(
 		t.rc,
 		20, /* targetCPUUtilizationPercent */