[e2e] make sure to specify APIVersion in HPA tests
Previously, the HPA controller ignored APIVersion when resolving the scale subresource for a kind, so an incorrect value in the HPA's scaleTargetRef did not matter; several of the HPA e2e tests relied on that. Now that the polymorphic scale client has merged, APIVersion matters. This updates the HPA e2e tests to care about APIVersion by passing kind as a full GroupVersionKind rather than a bare string.
This commit is contained in:
parent dc35709eee
commit 32b47a36f6
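
For background on what the commit message describes: with the polymorphic scale client, the scale subresource is resolved from the scaleTargetRef's APIVersion and Kind together, so both must be populated. A minimal standalone sketch (not part of this commit; the helper name is hypothetical) of how a full GroupVersionKind fills in the reference:

package main

import (
	"fmt"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// scaleTargetRefFor is a hypothetical helper: given a full GroupVersionKind,
// it sets both APIVersion and Kind, which the polymorphic scale client needs
// in order to resolve the scale subresource for the target.
func scaleTargetRefFor(gvk schema.GroupVersionKind, name string) autoscalingv1.CrossVersionObjectReference {
	return autoscalingv1.CrossVersionObjectReference{
		APIVersion: gvk.GroupVersion().String(), // e.g. "apps/v1beta2", or just "v1" for the core group
		Kind:       gvk.Kind,
		Name:       name,
	}
}

func main() {
	deployment := schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"}
	fmt.Printf("%+v\n", scaleTargetRefFor(deployment, "test-deployment"))
}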
@@ -37,6 +37,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
@@ -19,6 +19,7 @@ package autoscaling
 import (
 	"time"

+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"

@@ -113,7 +114,7 @@ type HPAScaleTest struct {
 // The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
 // The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
 // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
-func (scaleTest *HPAScaleTest) run(name, kind string, rc *common.ResourceConsumer, f *framework.Framework) {
+func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *common.ResourceConsumer, f *framework.Framework) {
 	const timeToWait = 15 * time.Minute
 	rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset)
 	defer rc.CleanUp()
@@ -129,7 +130,7 @@ func (scaleTest *HPAScaleTest) run(name, kind string, rc *common.ResourceConsumer, f *framework.Framework) {
 	}
 }

-func scaleUp(name, kind string, checkStability bool, rc *common.ResourceConsumer, f *framework.Framework) {
+func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc *common.ResourceConsumer, f *framework.Framework) {
 	stasis := 0 * time.Minute
 	if checkStability {
 		stasis = 10 * time.Minute
@@ -149,7 +150,7 @@ func scaleUp(name, kind string, checkStability bool, rc *common.ResourceConsumer, f *framework.Framework) {
 	scaleTest.run(name, kind, rc, f)
 }

-func scaleDown(name, kind string, checkStability bool, rc *common.ResourceConsumer, f *framework.Framework) {
+func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, rc *common.ResourceConsumer, f *framework.Framework) {
 	stasis := 0 * time.Minute
 	if checkStability {
 		stasis = 10 * time.Minute
@@ -38,7 +38,6 @@ go_library(
         "//pkg/api/testapi:go_default_library",
         "//pkg/api/v1/helper:go_default_library",
         "//pkg/api/v1/pod:go_default_library",
-        "//pkg/apis/extensions:go_default_library",
         "//pkg/client/clientset_generated/internalclientset:go_default_library",
         "//pkg/client/conditions:go_default_library",
         "//pkg/kubelet:go_default_library",
@@ -31,7 +31,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/pkg/api"
-	extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -65,10 +64,10 @@ var (
 	resourceConsumerControllerImage = imageutils.GetE2EImage(imageutils.ResourceController)
 )

-const (
-	KindRC         = "ReplicationController"
-	KindDeployment = "Deployment"
-	KindReplicaSet = "ReplicaSet"
+var (
+	KindRC         = schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"}
+	KindDeployment = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"}
+	KindReplicaSet = schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "ReplicaSet"}
 	subresource    = "scale"
 )

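As a sanity check on the APIVersion each of these constants will produce (a standalone sketch, not part of the commit): the core group is empty, so KindRC renders as a bare "v1", while the apps kinds render as "apps/v1beta2".

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	kindRC := schema.GroupVersionKind{Version: "v1", Kind: "ReplicationController"}
	kindDeployment := schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"}

	// The core group is the empty string, so it renders as a bare version.
	fmt.Println(kindRC.GroupVersion().String())         // v1
	fmt.Println(kindDeployment.GroupVersion().String()) // apps/v1beta2
}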
@@ -83,7 +82,7 @@ rc.ConsumeCPU(300)
 type ResourceConsumer struct {
 	name              string
 	controllerName    string
-	kind              string
+	kind              schema.GroupVersionKind
 	nsName            string
 	clientSet         clientset.Interface
 	internalClientset *internalclientset.Clientset
@@ -105,7 +104,7 @@ func GetResourceConsumerImage() string {
 	return resourceConsumerImage
 }

-func NewDynamicResourceConsumer(name, nsName, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
+func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
 	return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
 		dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, internalClientset)
 }
@@ -123,7 +122,7 @@ initMemoryTotal argument is in megabytes
 memLimit argument is in megabytes, memLimit is a maximum amount of memory that can be consumed by a single pod
 cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
 */
-func newResourceConsumer(name, nsName, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
+func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
 	requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {

 	runServiceAndWorkloadForResourceConsumer(clientset, internalClientset, nsName, name, kind, replicas, cpuLimit, memLimit)
@@ -401,28 +400,14 @@ func (rc *ResourceConsumer) CleanUp() {
 	rc.stopWaitGroup.Wait()
 	// Wait some time to ensure all child goroutines are finished.
 	time.Sleep(10 * time.Second)
-	kind, err := kindOf(rc.kind)
-	framework.ExpectNoError(err)
+	kind := rc.kind.GroupKind()
 	framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, kind, rc.nsName, rc.name))
 	framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.name, nil))
 	framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, api.Kind("ReplicationController"), rc.nsName, rc.controllerName))
 	framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(rc.controllerName, nil))
 }

-func kindOf(kind string) (schema.GroupKind, error) {
-	switch kind {
-	case KindRC:
-		return api.Kind(kind), nil
-	case KindReplicaSet:
-		return extensionsinternal.Kind(kind), nil
-	case KindDeployment:
-		return extensionsinternal.Kind(kind), nil
-	default:
-		return schema.GroupKind{}, fmt.Errorf("Unsupported kind: %v", kind)
-	}
-}
-
-func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
+func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64) {
 	By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
 	_, err := c.CoreV1().Services(ns).Create(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
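The kindOf switch above is replaced by GroupVersionKind's own GroupKind accessor, which simply drops the version. A small standalone sketch (not part of the commit) of the two accessors this hunk relies on:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	kindDeployment := schema.GroupVersionKind{Group: "apps", Version: "v1beta2", Kind: "Deployment"}

	// GroupKind drops the version, which is all DeleteResourceAndPods needs.
	gk := kindDeployment.GroupKind()
	fmt.Println(gk.Group, gk.Kind) // apps Deployment

	// GroupVersionKind also has a readable String form, which is what the
	// By(fmt.Sprintf(...)) log line above prints via %s.
	fmt.Println(kindDeployment.String()) // apps/v1beta2, Kind=Deployment
}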
@@ -521,8 +506,9 @@ func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma
 		},
 		Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
 			ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
-				Kind: rc.kind,
-				Name: rc.name,
+				APIVersion: rc.kind.GroupVersion().String(),
+				Kind:       rc.kind.Kind,
+				Name:       rc.name,
 			},
 			MinReplicas: &minReplicas,
 			MaxReplicas: maxRepl,