Merge pull request #111865 from piotrnosek/hpa-tests-batch

Add e2e HPA tests: Scale to zero, scale a CRD targetRef
commit c870f1ddc7
Kubernetes Prow Robot authored 2022-09-01 05:43:05 -07:00, committed by GitHub
4 changed files with 106 additions and 27 deletions

View File

@@ -18,6 +18,7 @@ package autoscaling
import (
"context"
"fmt"
"math"
"time"
@@ -29,7 +30,10 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
utilfeature "k8s.io/apiserver/pkg/util/feature"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-base/featuregate"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/e2e/framework"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/instrumentation/monitoring"
@@ -217,6 +221,25 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Me
hpa: externalHPA(f.Namespace.ObjectMeta.Name, metricTargets)}
tc.Run()
})
ginkgo.It("should scale down to 0 with Custom Metric of type Object from Stackdriver [Feature:CustomMetricsAutoscaling]", func() {
defer withFeatureGate(features.HPAScaleToZero, true)()
initialReplicas := 2
// metric should cause scale down
metricValue := int64(100)
metricTarget := 2 * metricValue
tc := CustomMetricTestCase{
framework: f,
kubeClient: f.ClientSet,
initialReplicas: initialReplicas,
scaledReplicas: 0,
// Metric exported by deployment is ignored
deployment: monitoring.SimpleStackdriverExporterDeployment(dummyDeploymentName, f.Namespace.ObjectMeta.Name, int32(initialReplicas), 0 /* ignored */),
pod: monitoring.StackdriverExporterPod(stackdriverExporterPod, f.Namespace.Name, stackdriverExporterPod, monitoring.CustomMetricName, metricValue),
hpa: objectHPA(f.Namespace.ObjectMeta.Name, metricTarget)}
tc.Run()
})
})
// CustomMetricTestCase is a struct for test cases.
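For context, scaling to zero is feature-gated because HPA validation rejects minReplicas: 0 unless HPAScaleToZero is enabled, which is why the spec above flips the gate before running. A minimal sketch of the kind of object an HPA helper such as objectHPA would have to build for this test, using autoscaling/v2 types for illustration (the helper's body is outside this diff, so the names and values below are illustrative, not the actual implementation):

	// Hypothetical sketch; objectHPA's real implementation is not shown in this diff.
	namespace := f.Namespace.ObjectMeta.Name
	zero := int32(0)
	hpa := &autoscalingv2.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "custom-metrics-object-hpa", Namespace: namespace},
		Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
				APIVersion: "apps/v1", Kind: "Deployment", Name: dummyDeploymentName,
			},
			// Validation rejects MinReplicas == 0 unless HPAScaleToZero is enabled.
			MinReplicas: &zero,
			MaxReplicas: 3,
			// An Object metric source for monitoring.CustomMetricName belongs in
			// Spec.Metrics; it is omitted here for brevity.
		},
	}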
@@ -244,14 +267,13 @@ func (tc *CustomMetricTestCase) Run() {
// If this is your use case, create application default credentials:
// $ gcloud auth application-default login
// and uncomment the following lines:
/*
	ts, err := google.DefaultTokenSource(oauth2.NoContext)
	framework.Logf("Couldn't get application default credentials, %v", err)
	if err != nil {
		framework.Failf("Error accessing application default credentials, %v", err)
	}
	client := oauth2.NewClient(oauth2.NoContext, ts)
*/
// ts, err := google.DefaultTokenSource(oauth2.NoContext)
// if err != nil {
// 	framework.Failf("Error accessing application default credentials, %v", err)
// }
// client = oauth2.NewClient(oauth2.NoContext, ts)
gcmService, err := gcm.NewService(ctx, option.WithHTTPClient(client))
if err != nil {
@@ -472,3 +494,13 @@ func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, t
framework.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas)
}
}
// Equivalent of featuregatetesting.SetFeatureGateDuringTest
// which can't be used here because we're not in a Testing context.
func withFeatureGate(feature featuregate.Feature, desired bool) func() {
	current := utilfeature.DefaultFeatureGate.Enabled(feature)
	framework.ExpectNoError(utilfeature.DefaultMutableFeatureGate.Set(fmt.Sprintf("%s=%v", string(feature), desired)))
	return func() {
		framework.ExpectNoError(utilfeature.DefaultMutableFeatureGate.Set(fmt.Sprintf("%s=%v", string(feature), current)))
	}
}
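Because the helper flips the gate immediately and returns the undo closure, enabling a gate for exactly one spec collapses to a single deferred line, as in the It block above:

	// withFeatureGate(...) runs now and flips the gate; the closure it returns
	// is what defer invokes on exit, restoring the captured previous value.
	defer withFeatureGate(features.HPAScaleToZero, true)()

DefaultMutableFeatureGate is process-wide state, so specs using this helper should not run in parallel with other specs that read the same gate.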

View File

@@ -17,9 +17,11 @@ limitations under the License.
package autoscaling
import (
"k8s.io/pod-security-admission/api"
"time"
"k8s.io/pod-security-admission/api"
autoscalingv2 "k8s.io/api/autoscaling/v2"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/test/e2e/framework"
e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
@@ -104,6 +106,22 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
doNotScaleOnBusySidecar("rs", e2eautoscaling.KindReplicaSet, true, f)
})
})
ginkgo.Describe("CustomResourceDefinition", func() {
ginkgo.It("Should scale with a CRD targetRef", func() {
scaleTest := &HPAScaleTest{
initPods: 1,
totalInitialCPUUsage: 150,
perPodCPURequest: 200,
targetCPUUtilizationPercent: 50,
minPods: 1,
maxPods: 2,
firstScale: 2,
targetRef: e2eautoscaling.CustomCRDTargetRef(),
}
scaleTest.run("crd-light", e2eautoscaling.KindCRD, f)
})
})
})
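The expected firstScale of 2 follows from the HPA's documented formula, desiredReplicas = ceil(currentReplicas × currentMetricValue / desiredMetricValue): one pod consuming 150 of its 200 mCPU request sits at 75% utilization against the 50% target, so ceil(1 × 75 / 50) = 2, which also stays within the minPods/maxPods bounds of 1 and 2.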
// HPAScaleTest struct is used by the scale(...) function.
@@ -118,6 +136,7 @@ type HPAScaleTest struct {
firstScaleStasis time.Duration
cpuBurst int
secondScale int32
targetRef autoscalingv2.CrossVersionObjectReference
}
// run is a method which runs an HPA lifecycle, from a starting state, to an expected
@@ -129,7 +148,8 @@ func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, f
const timeToWait = 15 * time.Minute
rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
defer rc.CleanUp()
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
var hpa *autoscalingv2.HorizontalPodAutoscaler
hpa = e2eautoscaling.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
defer e2eautoscaling.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
rc.WaitForReplicas(scaleTest.firstScale, timeToWait)

View File

@@ -76,6 +76,8 @@ var (
KindDeployment = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"}
// KindReplicaSet is the GVK for ReplicaSet
KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"}
// KindCRD is the GVK of the custom resource used in these tests
KindCRD = schema.GroupVersionKind{Group: "test", Version: "v1", Kind: "TestCustomCRD"}
)
// ScalingDirection identifies the scale direction for HPA Behavior.
@@ -615,28 +617,45 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
}
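// CreateCpuHorizontalPodAutoscalerWithCustomTargetRef creates and submits an
// autoscaling/v2 HPA whose scaleTargetRef is supplied by the caller (for
// example, a custom resource) instead of being derived from the
// ResourceConsumer's own workload, failing the test if creation errors.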
func CreateCpuHorizontalPodAutoscalerWithCustomTargetRef(rc *ResourceConsumer, targetRef autoscalingv2.CrossVersionObjectReference, namespace string, cpu, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler {
	hpa := &autoscalingv2.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      targetRef.Name,
			Namespace: namespace,
		},
		Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: targetRef,
			MinReplicas:    &minReplicas,
			MaxReplicas:    maxReplicas,
			Metrics: []autoscalingv2.MetricSpec{
				{
					Type: autoscalingv2.ResourceMetricSourceType,
					Resource: &autoscalingv2.ResourceMetricSource{
						Name: v1.ResourceCPU,
						Target: autoscalingv2.MetricTarget{
							Type:               autoscalingv2.UtilizationMetricType,
							AverageUtilization: &cpu,
						},
					},
				},
			},
		},
	}
	// Create in the caller-supplied namespace so it matches the object's metadata.
	hpa, errHPA := rc.clientSet.AutoscalingV2().HorizontalPodAutoscalers(namespace).Create(context.TODO(), hpa, metav1.CreateOptions{})
	framework.ExpectNoError(errHPA)
	return hpa
}
// CreateCPUHorizontalPodAutoscaler creates a HorizontalPodAutoscaler with a CPU target
// for consuming resources.
func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) *autoscalingv1.HorizontalPodAutoscaler {
	hpa := &autoscalingv1.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{
			Name:      rc.name,
			Namespace: rc.nsName,
		},
		Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
				APIVersion: rc.kind.GroupVersion().String(),
				Kind:       rc.kind.Kind,
				Name:       rc.name,
			},
			MinReplicas:                    &minReplicas,
			MaxReplicas:                    maxRepl,
			TargetCPUUtilizationPercentage: &cpu,
		},
	}
	hpa, errHPA := rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Create(context.TODO(), hpa, metav1.CreateOptions{})
	framework.ExpectNoError(errHPA)
	return hpa
}
func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxReplicas int32) *autoscalingv2.HorizontalPodAutoscaler {
	targetRef := autoscalingv2.CrossVersionObjectReference{
		APIVersion: rc.kind.GroupVersion().String(),
		Kind:       rc.kind.Kind,
		Name:       rc.name,
	}
	return CreateCpuHorizontalPodAutoscalerWithCustomTargetRef(rc, targetRef, rc.nsName, cpu, minReplicas, maxReplicas)
}
// DeleteHorizontalPodAutoscaler delete the horizontalPodAutoscaler for consuming resources.
@@ -644,6 +663,14 @@ func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string)
rc.clientSet.AutoscalingV1().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{})
}
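// CustomCRDTargetRef returns a CrossVersionObjectReference pointing at the
// custom resource that the CRD scale test targets.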
func CustomCRDTargetRef() autoscalingv2.CrossVersionObjectReference {
	return autoscalingv2.CrossVersionObjectReference{
		Kind:       "TestCustomCRD",
		Name:       "test-custom-crd",
		APIVersion: "test/v1",
	}
}
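Taken together, a sketch of how these helpers combine from inside this package (rc stands for a *ResourceConsumer created with KindCRD; the 50% CPU target and 1-2 replica bounds are borrowed from the CRD spec above; the real call path through HPAScaleTest.run is not fully shown in this diff):

	// Illustrative wiring only, not the literal test code.
	targetRef := CustomCRDTargetRef()
	hpa := CreateCpuHorizontalPodAutoscalerWithCustomTargetRef(rc, targetRef, rc.nsName, 50, 1, 2)
	defer DeleteHorizontalPodAutoscaler(rc, hpa.Name)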
// runReplicaSet launches (and verifies the correctness of) a ReplicaSet.
func runReplicaSet(config testutils.ReplicaSetConfig) error {
ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))

View File

@@ -20,7 +20,7 @@ import (
"fmt"
"time"
autoscalingv1 "k8s.io/api/autoscaling/v1"
autoscalingv2 "k8s.io/api/autoscaling/v2"
"k8s.io/kubernetes/test/e2e/framework"
e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
"k8s.io/kubernetes/test/e2e/upgrades"
@@ -31,7 +31,7 @@ import (
// HPAUpgradeTest tests that the HPA rescales the target resource correctly before and after a cluster upgrade.
type HPAUpgradeTest struct {
rc *e2eautoscaling.ResourceConsumer
hpa *autoscalingv1.HorizontalPodAutoscaler
hpa *autoscalingv2.HorizontalPodAutoscaler
}
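(The field's type tracks the framework change above: CreateCPUHorizontalPodAutoscaler now returns an *autoscalingv2.HorizontalPodAutoscaler, which is presumably the value stored here.)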
// Name returns the tracking name of the test.