Merge pull request #51277 from irfanurrehman/hpa-e2e-mod-for-fed
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

[Federation] Update hpa e2e utils to enable reuse in fed hpa tests

This PR enables reuse of some of the hpa e2e utils in federation, facilitating https://github.com/kubernetes/kubernetes/pull/50168.

cc @mwielgus @quinton-hoole
cc @kubernetes/sig-federation-pr-reviews

**Release note**:
```
NONE
```
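For reviewers skimming the diff below: the constructors lose their `*framework.Framework` parameter in favor of an explicit namespace name plus client pair, which is what lets federation tests (which drive member clusters without an e2e `Framework`) reuse these utils. A minimal sketch of a call site under the new shape — the package clause, function name, and parameters here are hypothetical stand-ins, and the import paths are the ones this tree uses at this point:

```go
package fedhpa // hypothetical package for illustration

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/test/e2e/common"
)

// runConsumer is a hypothetical call site: a test that has no
// *framework.Framework can now pass a namespace and clients directly.
func runConsumer(nsName string, cs clientset.Interface, ics *internalclientset.Clientset) {
	rc := common.NewDynamicResourceConsumer(
		"fed-hpa-rc",          // name
		nsName,                // explicit namespace, previously f.Namespace.Name
		common.KindDeployment, // kind
		1,                     // replicas
		250, 0, 0,             // initCPUTotal (millicores), initMemoryTotal, initCustomMetric
		500, 200,              // cpuLimit (millicores), memLimit (MB)
		cs, ics)               // explicit clients, previously f.ClientSet / f.InternalClientset
	defer rc.CleanUp()
	rc.WaitForReplicas(1, 1*time.Minute)
}
```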
Commit f7dd62f149
@@ -89,7 +89,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 	nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
 	memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
 	replicas := 1
-	resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f)
+	resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.InternalClientset)
 	defer resourceConsumer.CleanUp()
 	resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.
@@ -115,7 +115,7 @@ type HPAScaleTest struct {
 // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
 func (scaleTest *HPAScaleTest) run(name, kind string, rc *common.ResourceConsumer, f *framework.Framework) {
 	const timeToWait = 15 * time.Minute
-	rc = common.NewDynamicResourceConsumer(name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f)
+	rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset)
 	defer rc.CleanUp()
 	hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
 	defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
@@ -84,7 +84,9 @@ type ResourceConsumer struct {
 	name              string
 	controllerName    string
 	kind              string
-	framework         *framework.Framework
+	nsName            string
+	clientSet         clientset.Interface
+	internalClientset *internalclientset.Clientset
 	cpu               chan int
 	mem               chan int
 	customMetric      chan int
@@ -103,15 +105,15 @@ func GetResourceConsumerImage() string {
 	return resourceConsumerImage
 }
 
-func NewDynamicResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
-	return newResourceConsumer(name, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
-		dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, f)
+func NewDynamicResourceConsumer(name, nsName, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
+	return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
+		dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, internalClientset)
 }
 
 // TODO this still defaults to replication controller
-func NewStaticResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
-	return newResourceConsumer(name, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
-		initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, f)
+func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
+	return newResourceConsumer(name, nsName, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
+		initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, clientset, internalClientset)
 }
 
 /*
@@ -121,15 +123,17 @@ initMemoryTotal argument is in megabytes
 memLimit argument is in megabytes, memLimit is a maximum amount of memory that can be consumed by a single pod
 cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
 */
-func newResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
-	requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
+func newResourceConsumer(name, nsName, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
+	requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
 
-	runServiceAndWorkloadForResourceConsumer(f.ClientSet, f.InternalClientset, f.Namespace.Name, name, kind, replicas, cpuLimit, memLimit)
+	runServiceAndWorkloadForResourceConsumer(clientset, internalClientset, nsName, name, kind, replicas, cpuLimit, memLimit)
 	rc := &ResourceConsumer{
 		name:              name,
 		controllerName:    name + "-ctrl",
 		kind:              kind,
-		framework:         f,
+		nsName:            nsName,
+		clientSet:         clientset,
+		internalClientset: internalClientset,
 		cpu:               make(chan int),
 		mem:               make(chan int),
 		customMetric:      make(chan int),
@@ -235,14 +239,13 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() {
 }
 
 func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
-	proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
-	framework.ExpectNoError(err)
-
 	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 	defer cancel()
 
-	err = wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
-		req := proxyRequest.Namespace(rc.framework.Namespace.Name).
+	err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
+		proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.Core().RESTClient().Post())
+		framework.ExpectNoError(err)
+		req := proxyRequest.Namespace(rc.nsName).
 			Context(ctx).
 			Name(rc.controllerName).
 			Suffix("ConsumeCPU").
@@ -250,7 +253,7 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
 			Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
 			Param("requestSizeMillicores", strconv.Itoa(rc.requestSizeInMillicores))
 		framework.Logf("ConsumeCPU URL: %v", *req.URL())
-		_, err := req.DoRaw()
+		_, err = req.DoRaw()
 		if err != nil {
 			framework.Logf("ConsumeCPU failure: %v", err)
 			return false, nil
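A side effect worth noting in the two hunks above: the services proxy request is now built inside the `wait.PollImmediate` closure, so every retry constructs a fresh request, and since the closure now declares `err` on the proxy-request line, `req.DoRaw()` assigns with `=` instead of redeclaring with `:=`. A sketch of that poll-until-success shape — `buildRequest` and the interval/timeout values are illustrative stand-ins, not names from this PR:

```go
// Sketch of the retry pattern used above (apimachinery's wait package);
// buildRequest is a hypothetical helper standing in for GetServicesProxyRequest.
err := wait.PollImmediate(1*time.Second, 2*time.Minute, func() (bool, error) {
	req, err := buildRequest() // rebuilt on every attempt
	if err != nil {
		return false, err // hard failure: abort polling
	}
	if _, err = req.DoRaw(); err != nil {
		framework.Logf("request failed, retrying: %v", err)
		return false, nil // soft failure: try again after the interval
	}
	return true, nil // success: stop polling
})
framework.ExpectNoError(err)
```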
@@ -263,14 +266,13 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
 
 // sendConsumeMemRequest sends POST request for memory consumption
 func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
-	proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
-	framework.ExpectNoError(err)
-
 	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 	defer cancel()
 
-	err = wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
-		req := proxyRequest.Namespace(rc.framework.Namespace.Name).
+	err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
+		proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.Core().RESTClient().Post())
+		framework.ExpectNoError(err)
+		req := proxyRequest.Namespace(rc.nsName).
 			Context(ctx).
 			Name(rc.controllerName).
 			Suffix("ConsumeMem").
@@ -278,7 +280,7 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
 			Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
 			Param("requestSizeMegabytes", strconv.Itoa(rc.requestSizeInMegabytes))
 		framework.Logf("ConsumeMem URL: %v", *req.URL())
-		_, err := req.DoRaw()
+		_, err = req.DoRaw()
 		if err != nil {
 			framework.Logf("ConsumeMem failure: %v", err)
 			return false, nil
@@ -291,14 +293,13 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
 
 // sendConsumeCustomMetric sends POST request for custom metric consumption
 func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
-	proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
-	framework.ExpectNoError(err)
-
 	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 	defer cancel()
 
-	err = wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
-		req := proxyRequest.Namespace(rc.framework.Namespace.Name).
+	err := wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
+		proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.Core().RESTClient().Post())
+		framework.ExpectNoError(err)
+		req := proxyRequest.Namespace(rc.nsName).
 			Context(ctx).
 			Name(rc.controllerName).
 			Suffix("BumpMetric").
@@ -307,7 +308,7 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
 			Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
 			Param("requestSizeMetrics", strconv.Itoa(rc.requestSizeCustomMetric))
 		framework.Logf("ConsumeCustomMetric URL: %v", *req.URL())
-		_, err := req.DoRaw()
+		_, err = req.DoRaw()
 		if err != nil {
 			framework.Logf("ConsumeCustomMetric failure: %v", err)
 			return false, nil
@@ -320,21 +321,21 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
 func (rc *ResourceConsumer) GetReplicas() int {
 	switch rc.kind {
 	case KindRC:
-		replicationController, err := rc.framework.ClientSet.Core().ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
+		replicationController, err := rc.clientSet.Core().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		if replicationController == nil {
 			framework.Failf(rcIsNil)
 		}
 		return int(replicationController.Status.ReadyReplicas)
 	case KindDeployment:
-		deployment, err := rc.framework.ClientSet.Extensions().Deployments(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
+		deployment, err := rc.clientSet.Extensions().Deployments(rc.nsName).Get(rc.name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		if deployment == nil {
 			framework.Failf(deploymentIsNil)
 		}
 		return int(deployment.Status.ReadyReplicas)
 	case KindReplicaSet:
-		rs, err := rc.framework.ClientSet.Extensions().ReplicaSets(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
+		rs, err := rc.clientSet.Extensions().ReplicaSets(rc.nsName).Get(rc.name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		if rs == nil {
 			framework.Failf(rsIsNil)
@@ -402,10 +403,10 @@ func (rc *ResourceConsumer) CleanUp() {
 	time.Sleep(10 * time.Second)
 	kind, err := kindOf(rc.kind)
 	framework.ExpectNoError(err)
-	framework.ExpectNoError(framework.DeleteResourceAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, kind, rc.framework.Namespace.Name, rc.name))
-	framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.name, nil))
-	framework.ExpectNoError(framework.DeleteResourceAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, api.Kind("ReplicationController"), rc.framework.Namespace.Name, rc.controllerName))
-	framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.controllerName, nil))
+	framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, kind, rc.nsName, rc.name))
+	framework.ExpectNoError(rc.clientSet.Core().Services(rc.nsName).Delete(rc.name, nil))
+	framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, api.Kind("ReplicationController"), rc.nsName, rc.controllerName))
+	framework.ExpectNoError(rc.clientSet.Core().Services(rc.nsName).Delete(rc.controllerName, nil))
 }
 
 func kindOf(kind string) (schema.GroupKind, error) {
@@ -516,7 +517,7 @@ func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma
 	hpa := &autoscalingv1.HorizontalPodAutoscaler{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      rc.name,
-			Namespace: rc.framework.Namespace.Name,
+			Namespace: rc.nsName,
 		},
 		Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
 			ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
@@ -528,11 +529,11 @@ func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma
 			TargetCPUUtilizationPercentage: &cpu,
 		},
 	}
-	hpa, errHPA := rc.framework.ClientSet.Autoscaling().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
+	hpa, errHPA := rc.clientSet.Autoscaling().HorizontalPodAutoscalers(rc.nsName).Create(hpa)
 	framework.ExpectNoError(errHPA)
 	return hpa
 }
 
 func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string) {
-	rc.framework.ClientSet.Autoscaling().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Delete(autoscalerName, nil)
+	rc.clientSet.Autoscaling().HorizontalPodAutoscalers(rc.nsName).Delete(autoscalerName, nil)
 }
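Since the HPA helpers above now take the namespace and client off the consumer itself, a caller's flow is unchanged apart from construction. Roughly, mirroring the `HPAScaleTest.run` hunk earlier (the literal values here are illustrative):

```go
// Sketch: the e2e flow around these helpers after this change.
rc := common.NewDynamicResourceConsumer("rc", f.Namespace.Name, common.KindDeployment,
	1, 250, 0, 0, 500, 200, f.ClientSet, f.InternalClientset)
defer rc.CleanUp()

// Target 20% CPU utilization, scaling between 1 and 5 pods; the namespace
// and clientset come from rc rather than a *framework.Framework.
hpa := common.CreateCPUHorizontalPodAutoscaler(rc, 20, 1, 5)
defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
```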
@@ -87,7 +87,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
 
 	framework.ExpectNoError(err)
 
-	rc := common.NewDynamicResourceConsumer(rcName, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f)
+	rc := common.NewDynamicResourceConsumer(rcName, f.Namespace.Name, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.InternalClientset)
 	defer rc.CleanUp()
 
 	rc.WaitForReplicas(pods, 15*time.Minute)
@@ -39,6 +39,7 @@ func (HPAUpgradeTest) Name() string { return "hpa-upgrade" }
 func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
 	t.rc = common.NewDynamicResourceConsumer(
 		"res-cons-upgrade",
+		f.Namespace.Name,
 		common.KindRC,
 		1, /* replicas */
 		250, /* initCPUTotal */
@@ -46,7 +47,8 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
 		0,
 		500, /* cpuLimit */
 		200, /* memLimit */
-		f)
+		f.ClientSet,
+		f.InternalClientset)
 	t.hpa = common.CreateCPUHorizontalPodAutoscaler(
 		t.rc,
 		20, /* targetCPUUtilizationPercent */