test: e2e: HPA ContainerResource

This adds an e2e test for HPA ContainerResource metrics, covering two scenarios:
1. Scale up on a busy application with an idle sidecar container.
2. Do not scale up on a busy sidecar with an idle application.

Signed-off-by: Vivek Singh <svivekkumar@vmware.com>
Vivek Singh 2021-06-08 00:39:21 +05:30
parent 33aba7ee02
commit 80e4007bc3
6 changed files with 293 additions and 46 deletions
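
For orientation before the diffs: the ContainerResource metric type (autoscaling/v2beta2, gated behind HPAContainerMetrics) lets an HPA scale on a single named container's usage instead of the pod-wide aggregate, which is exactly what the two scenarios above exercise. A minimal sketch of such a metric spec — the "application" container name is illustrative:

package main

import (
	"fmt"

	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
)

func main() {
	target := int32(20) // scale out once the named container averages >20% of its CPU request

	// Only this container's usage feeds the HPA; a busy or idle sidecar
	// in the same pod is ignored by the metric.
	metric := autoscalingv2beta2.MetricSpec{
		Type: autoscalingv2beta2.ContainerResourceMetricSourceType,
		ContainerResource: &autoscalingv2beta2.ContainerResourceMetricSource{
			Name:      "cpu",
			Container: "application",
			Target: autoscalingv2beta2.MetricTarget{
				Type:               autoscalingv2beta2.UtilizationMetricType,
				AverageUtilization: &target,
			},
		},
	}
	fmt.Printf("%+v\n", metric)
}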

test/e2e/autoscaling/autoscaling_timer.go

@@ -21,7 +21,7 @@ import (
	"strings"
	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
@@ -96,7 +96,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
	nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
	memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
	replicas := 1
-	resourceConsumer := e2eautoscaling.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter)
+	resourceConsumer := e2eautoscaling.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, e2eautoscaling.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
	defer resourceConsumer.CleanUp()
	resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.

test/e2e/autoscaling/horizontal_pod_autoscaling.go

@@ -91,6 +91,18 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
			scaleTest.run("rc-light", e2eautoscaling.KindRC, f)
		})
	})
+
+	ginkgo.Describe("[Serial] [Slow] ReplicaSet with idle sidecar (ContainerResource use case)", func() {
+		// ContainerResource CPU autoscaling on idle sidecar
+		ginkgo.It(titleUp+" on a busy application with an idle sidecar container", func() {
+			scaleOnIdleSideCar("rs", e2eautoscaling.KindReplicaSet, false, f)
+		})
+
+		// ContainerResource CPU autoscaling on busy sidecar
+		ginkgo.It("Should not scale up on a busy sidecar with an idle application", func() {
+			doNotScaleOnBusySidecar("rs", e2eautoscaling.KindReplicaSet, true, f)
+		})
+	})
})

// HPAScaleTest struct is used by the scale(...) function.
@@ -114,7 +126,7 @@ type HPAScaleTest struct {
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) {
	const timeToWait = 15 * time.Minute
-	rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter)
+	rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
	defer rc.CleanUp()
	hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
	defer e2eautoscaling.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
@@ -168,3 +180,88 @@ func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, f
	}
	scaleTest.run(name, kind, f)
}
+
+type HPAContainerResourceScaleTest struct {
+	initPods                    int
+	totalInitialCPUUsage        int
+	perContainerCPURequest      int64
+	targetCPUUtilizationPercent int32
+	minPods                     int32
+	maxPods                     int32
+	noScale                     bool
+	noScaleStasis               time.Duration
+	firstScale                  int
+	firstScaleStasis            time.Duration
+	cpuBurst                    int
+	secondScale                 int32
+	sidecarStatus               e2eautoscaling.SidecarStatusType
+	sidecarType                 e2eautoscaling.SidecarWorkloadType
+}
+
+func (scaleTest *HPAContainerResourceScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) {
+	const timeToWait = 15 * time.Minute
+	rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perContainerCPURequest, 200, f.ClientSet, f.ScalesGetter, scaleTest.sidecarStatus, scaleTest.sidecarType)
+	defer rc.CleanUp()
+	hpa := e2eautoscaling.CreateContainerResourceCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
+	defer e2eautoscaling.DeleteContainerResourceHPA(rc, hpa.Name)
+
+	if scaleTest.noScale {
+		if scaleTest.noScaleStasis > 0 {
+			rc.EnsureDesiredReplicasInRange(scaleTest.initPods, scaleTest.initPods, scaleTest.noScaleStasis, hpa.Name)
+		}
+	} else {
+		rc.WaitForReplicas(scaleTest.firstScale, timeToWait)
+		if scaleTest.firstScaleStasis > 0 {
+			rc.EnsureDesiredReplicasInRange(scaleTest.firstScale, scaleTest.firstScale+1, scaleTest.firstScaleStasis, hpa.Name)
+		}
+		if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
+			rc.ConsumeCPU(scaleTest.cpuBurst)
+			rc.WaitForReplicas(int(scaleTest.secondScale), timeToWait)
+		}
+	}
+}
+
+func scaleOnIdleSideCar(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
+	// Scale up on a busy application with an idle sidecar container
+	stasis := 0 * time.Minute
+	if checkStability {
+		stasis = 10 * time.Minute
+	}
+	scaleTest := &HPAContainerResourceScaleTest{
+		initPods:                    1,
+		totalInitialCPUUsage:        250,
+		perContainerCPURequest:      500,
+		targetCPUUtilizationPercent: 20,
+		minPods:                     1,
+		maxPods:                     5,
+		firstScale:                  3,
+		firstScaleStasis:            stasis,
+		cpuBurst:                    700,
+		secondScale:                 5,
+		sidecarStatus:               e2eautoscaling.Enable,
+		sidecarType:                 e2eautoscaling.Idle,
+	}
+	scaleTest.run(name, kind, f)
+}
+
+func doNotScaleOnBusySidecar(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
+	// Do not scale up on a busy sidecar with an idle application
+	stasis := 0 * time.Minute
+	if checkStability {
+		stasis = 1 * time.Minute
+	}
+	scaleTest := &HPAContainerResourceScaleTest{
+		initPods:                    1,
+		totalInitialCPUUsage:        250,
+		perContainerCPURequest:      500,
+		targetCPUUtilizationPercent: 20,
+		minPods:                     1,
+		maxPods:                     5,
+		cpuBurst:                    700,
+		sidecarStatus:               e2eautoscaling.Enable,
+		sidecarType:                 e2eautoscaling.Busy,
+		noScale:                     true,
+		noScaleStasis:               stasis,
+	}
+	scaleTest.run(name, kind, f)
+}
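
The numbers in both helpers follow the usual HPA arithmetic: desired replicas = ceil(totalUsage / (perContainerCPURequest × targetUtilization)), clamped to [minPods, maxPods]. A small standalone sketch checking the expectations above (the real controller additionally applies a tolerance band and stabilization, so this is the idealized calculation only):

package main

import (
	"fmt"
	"math"
)

// expectedReplicas mirrors the idealized HPA formula:
// ceil(total / (request * target%)), clamped to [min, max].
func expectedReplicas(totalMilli, requestMilli int64, targetPct, min, max int32) int32 {
	perPodTolerated := float64(requestMilli) * float64(targetPct) / 100
	n := int32(math.Ceil(float64(totalMilli) / perPodTolerated))
	if n < min {
		return min
	}
	if n > max {
		return max
	}
	return n
}

func main() {
	// Values from scaleOnIdleSideCar: 500m request, 20% target, 1..5 pods.
	fmt.Println(expectedReplicas(250, 500, 20, 1, 5)) // 3 -> firstScale
	fmt.Println(expectedReplicas(700, 500, 20, 1, 5)) // 5 -> secondScale, capped by maxPods
}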

test/e2e/framework/autoscaling/autoscaling_utils.go

@@ -24,7 +24,9 @@ import (
	"time"

	autoscalingv1 "k8s.io/api/autoscaling/v1"
+	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/intstr"
@@ -50,6 +52,7 @@ const (
	dynamicRequestSizeCustomMetric = 10
	port                           = 80
	targetPort                     = 8080
+	sidecarTargetPort              = 8081
	timeoutRC                      = 120 * time.Second
	startServiceTimeout            = time.Minute
	startServiceInterval           = 5 * time.Second
@@ -102,12 +105,41 @@ type ResourceConsumer struct {
	requestSizeInMillicores int
	requestSizeInMegabytes  int
	requestSizeCustomMetric int
+	sidecarStatus           SidecarStatusType
+	sidecarType             SidecarWorkloadType
}

// NewDynamicResourceConsumer is a wrapper to create a new dynamic ResourceConsumer
-func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter) *ResourceConsumer {
+func NewDynamicResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, enableSidecar SidecarStatusType, sidecarType SidecarWorkloadType) *ResourceConsumer {
	return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
-		dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, scaleClient, nil, nil)
+		dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientset, scaleClient, nil, nil, enableSidecar, sidecarType)
}
+
+// getSidecarContainer returns a sidecar container running the resource consumer on port 8081
+func getSidecarContainer(name string, cpuLimit, memLimit int64) v1.Container {
+	container := v1.Container{
+		Name:    name + "-sidecar",
+		Image:   resourceConsumerImage,
+		Command: []string{"/consumer", "-port=8081"},
+		Ports:   []v1.ContainerPort{{ContainerPort: 80}},
+	}
+
+	if cpuLimit > 0 || memLimit > 0 {
+		container.Resources.Limits = v1.ResourceList{}
+		container.Resources.Requests = v1.ResourceList{}
+	}
+
+	if cpuLimit > 0 {
+		container.Resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(cpuLimit, resource.DecimalSI)
+		container.Resources.Requests[v1.ResourceCPU] = *resource.NewMilliQuantity(cpuLimit, resource.DecimalSI)
+	}
+
+	if memLimit > 0 {
+		container.Resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(memLimit*1024*1024, resource.DecimalSI)
+		container.Resources.Requests[v1.ResourceMemory] = *resource.NewQuantity(memLimit*1024*1024, resource.DecimalSI)
+	}
+
+	return container
+}
/*
@@ -118,17 +150,32 @@ memLimit argument is in megabytes, memLimit is a maximum amount of memory that c
cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
*/
func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
-	requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string) *ResourceConsumer {
+	requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientset clientset.Interface, scaleClient scaleclient.ScalesGetter, podAnnotations, serviceAnnotations map[string]string, sidecarStatus SidecarStatusType, sidecarType SidecarWorkloadType) *ResourceConsumer {
	if podAnnotations == nil {
		podAnnotations = make(map[string]string)
	}
	if serviceAnnotations == nil {
		serviceAnnotations = make(map[string]string)
	}
-	runServiceAndWorkloadForResourceConsumer(clientset, nsName, name, kind, replicas, cpuLimit, memLimit, podAnnotations, serviceAnnotations)
+
+	var additionalContainers []v1.Container
+	if sidecarStatus == Enable {
+		sidecarContainer := getSidecarContainer(name, cpuLimit, memLimit)
+		additionalContainers = append(additionalContainers, sidecarContainer)
+	}
+
+	runServiceAndWorkloadForResourceConsumer(clientset, nsName, name, kind, replicas, cpuLimit, memLimit, podAnnotations, serviceAnnotations, additionalContainers)
+
+	controllerName := name + "-ctrl"
+	// If sidecar is enabled and busy, run service and consumer for sidecar
+	if sidecarStatus == Enable && sidecarType == Busy {
+		runServiceAndSidecarForResourceConsumer(clientset, nsName, name, kind, replicas, serviceAnnotations)
+		controllerName = name + "-sidecar-ctrl"
+	}
+
	rc := &ResourceConsumer{
		name:           name,
-		controllerName: name + "-ctrl",
+		controllerName: controllerName,
		kind:           kind,
		nsName:         nsName,
		clientSet:      clientset,
@@ -144,6 +191,8 @@ func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, repl
		requestSizeInMillicores: requestSizeInMillicores,
		requestSizeInMegabytes:  requestSizeInMegabytes,
		requestSizeCustomMetric: requestSizeCustomMetric,
+		sidecarType:             sidecarType,
+		sidecarStatus:           sidecarStatus,
	}

	go rc.makeConsumeCPURequests()
@@ -418,41 +467,82 @@ func (rc *ResourceConsumer) CleanUp() {
	framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(rc.clientSet, kind, rc.nsName, rc.name))
	framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.name, metav1.DeleteOptions{}))
	framework.ExpectNoError(e2eresource.DeleteResourceAndWaitForGC(rc.clientSet, schema.GroupKind{Kind: "ReplicationController"}, rc.nsName, rc.controllerName))
-	framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.controllerName, metav1.DeleteOptions{}))
+	framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.name+"-ctrl", metav1.DeleteOptions{}))
+
+	// Clean up sidecar-related resources
+	if rc.sidecarStatus == Enable && rc.sidecarType == Busy {
+		framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.name+"-sidecar", metav1.DeleteOptions{}))
+		framework.ExpectNoError(rc.clientSet.CoreV1().Services(rc.nsName).Delete(context.TODO(), rc.name+"-sidecar-ctrl", metav1.DeleteOptions{}))
+	}
}

-func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string) {
-	ginkgo.By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
-	_, err := c.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{
+func createService(c clientset.Interface, name, ns string, annotations, selectors map[string]string, port int32, targetPort int) (*v1.Service, error) {
+	return c.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
-			Annotations: serviceAnnotations,
+			Annotations: annotations,
		},
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{{
				Port:       port,
				TargetPort: intstr.FromInt(targetPort),
			}},
-			Selector: map[string]string{
-				"name": name,
-			},
+			Selector: selectors,
		},
	}, metav1.CreateOptions{})
+}
+
+// runServiceAndSidecarForResourceConsumer creates a service and runs the resource consumer controller for the sidecar container
+func runServiceAndSidecarForResourceConsumer(c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, serviceAnnotations map[string]string) {
+	ginkgo.By(fmt.Sprintf("Running consuming RC sidecar %s via %s with %v replicas", name, kind, replicas))
+
+	sidecarName := name + "-sidecar"
+	serviceSelectors := map[string]string{
+		"name": name,
+	}
+	_, err := createService(c, sidecarName, ns, serviceAnnotations, serviceSelectors, port, sidecarTargetPort)
+	framework.ExpectNoError(err)
+
+	ginkgo.By("Running controller for sidecar")
+	controllerName := sidecarName + "-ctrl"
+	_, err = createService(c, controllerName, ns, map[string]string{}, map[string]string{"name": controllerName}, port, targetPort)
+	framework.ExpectNoError(err)
+
+	dnsClusterFirst := v1.DNSClusterFirst
+	controllerRcConfig := testutils.RCConfig{
+		Client:    c,
+		Image:     imageutils.GetE2EImage(imageutils.Agnhost),
+		Name:      controllerName,
+		Namespace: ns,
+		Timeout:   timeoutRC,
+		Replicas:  1,
+		Command:   []string{"/agnhost", "resource-consumer-controller", "--consumer-service-name=" + sidecarName, "--consumer-service-namespace=" + ns, "--consumer-port=80"},
+		DNSPolicy: &dnsClusterFirst,
+	}
+	framework.ExpectNoError(e2erc.RunRC(controllerRcConfig))
+
+	// Wait for endpoints to propagate for the controller service.
+	framework.ExpectNoError(framework.WaitForServiceEndpointsNum(
+		c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
+}
+
+func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name string, kind schema.GroupVersionKind, replicas int, cpuLimitMillis, memLimitMb int64, podAnnotations, serviceAnnotations map[string]string, additionalContainers []v1.Container) {
+	ginkgo.By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
+	_, err := createService(c, name, ns, serviceAnnotations, map[string]string{"name": name}, port, targetPort)
	framework.ExpectNoError(err)

	rcConfig := testutils.RCConfig{
		Client:      c,
		Image:       resourceConsumerImage,
		Name:        name,
		Namespace:   ns,
		Timeout:     timeoutRC,
		Replicas:    replicas,
		CpuRequest:  cpuLimitMillis,
		CpuLimit:    cpuLimitMillis,
		MemRequest:  memLimitMb * 1024 * 1024, // MemLimit is in bytes
		MemLimit:    memLimitMb * 1024 * 1024,
		Annotations: podAnnotations,
+		AdditionalContainers: additionalContainers,
	}

	switch kind {
@@ -478,21 +568,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
	ginkgo.By(fmt.Sprintf("Running controller"))
	controllerName := name + "-ctrl"
-	_, err = c.CoreV1().Services(ns).Create(context.TODO(), &v1.Service{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: controllerName,
-		},
-		Spec: v1.ServiceSpec{
-			Ports: []v1.ServicePort{{
-				Port:       port,
-				TargetPort: intstr.FromInt(targetPort),
-			}},
-			Selector: map[string]string{
-				"name": controllerName,
-			},
-		},
-	}, metav1.CreateOptions{})
+	_, err = createService(c, controllerName, ns, map[string]string{}, map[string]string{"name": controllerName}, port, targetPort)
	framework.ExpectNoError(err)

	dnsClusterFirst := v1.DNSClusterFirst
@@ -506,8 +582,8 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
		Command:   []string{"/agnhost", "resource-consumer-controller", "--consumer-service-name=" + name, "--consumer-service-namespace=" + ns, "--consumer-port=80"},
		DNSPolicy: &dnsClusterFirst,
	}

	framework.ExpectNoError(e2erc.RunRC(controllerRcConfig))

	// Wait for endpoints to propagate for the controller service.
	framework.ExpectNoError(framework.WaitForServiceEndpointsNum(
		c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
@@ -549,3 +625,60 @@ func runReplicaSet(config testutils.ReplicaSetConfig) error {
	config.ContainerDumpFunc = e2ekubectl.LogFailedContainers
	return testutils.RunReplicaSet(config)
}
+
+// CreateContainerResourceCPUHorizontalPodAutoscaler creates a horizontal pod autoscaler with a container resource target
+// for consuming resources.
+func CreateContainerResourceCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) *autoscalingv2beta2.HorizontalPodAutoscaler {
+	hpa := &autoscalingv2beta2.HorizontalPodAutoscaler{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      rc.name,
+			Namespace: rc.nsName,
+		},
+		Spec: autoscalingv2beta2.HorizontalPodAutoscalerSpec{
+			ScaleTargetRef: autoscalingv2beta2.CrossVersionObjectReference{
+				APIVersion: rc.kind.GroupVersion().String(),
+				Kind:       rc.kind.Kind,
+				Name:       rc.name,
+			},
+			MinReplicas: &minReplicas,
+			MaxReplicas: maxRepl,
+			Metrics: []autoscalingv2beta2.MetricSpec{
+				{
+					Type: "ContainerResource",
+					ContainerResource: &autoscalingv2beta2.ContainerResourceMetricSource{
+						Name:      "cpu",
+						Container: rc.name,
+						Target: autoscalingv2beta2.MetricTarget{
+							Type:               "Utilization",
+							AverageUtilization: &cpu,
+						},
+					},
+				},
+			},
+		},
+	}
+	hpa, errHPA := rc.clientSet.AutoscalingV2beta2().HorizontalPodAutoscalers(rc.nsName).Create(context.TODO(), hpa, metav1.CreateOptions{})
+	framework.ExpectNoError(errHPA)
+	return hpa
+}
+
+// DeleteContainerResourceHPA deletes the horizontal pod autoscaler used for consuming resources.
+func DeleteContainerResourceHPA(rc *ResourceConsumer, autoscalerName string) {
+	rc.clientSet.AutoscalingV2beta2().HorizontalPodAutoscalers(rc.nsName).Delete(context.TODO(), autoscalerName, metav1.DeleteOptions{})
+}
+
+// SidecarStatusType is the type for sidecar status
+type SidecarStatusType bool
+
+const (
+	// Enable runs the workload with a sidecar container
+	Enable SidecarStatusType = true
+	// Disable runs the workload without a sidecar container
+	Disable SidecarStatusType = false
+)
+
+// SidecarWorkloadType is the type of the sidecar workload
+type SidecarWorkloadType string
+
+const (
+	// Busy means the sidecar actively consumes resources
+	Busy SidecarWorkloadType = "Busy"
+	// Idle means the sidecar consumes nothing
+	Idle SidecarWorkloadType = "Idle"
+)
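
Putting the new knobs together: a hypothetical caller outside these tests would start a consumer whose sidecar burns the CPU roughly as below (the helper name and argument values are illustrative; the clients come from the e2e framework exactly as in the tests above):

package example

import (
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	clientset "k8s.io/client-go/kubernetes"
	scaleclient "k8s.io/client-go/scale"

	e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
)

// startBusySidecarConsumer shows the extended NewDynamicResourceConsumer
// signature: the two trailing arguments enable a sidecar and direct the
// consumption controller at it instead of the main container.
func startBusySidecarConsumer(ns string, kind schema.GroupVersionKind, c clientset.Interface, sg scaleclient.ScalesGetter) *e2eautoscaling.ResourceConsumer {
	rc := e2eautoscaling.NewDynamicResourceConsumer(
		"rs", ns, kind,
		1,    // replicas
		250,  // initial total CPU in millicores, consumed by the sidecar here
		0, 0, // no memory or custom-metric consumption
		500, 200, // per-container CPU request (m) and memory limit (MB)
		c, sg,
		e2eautoscaling.Enable, // run a sidecar container...
		e2eautoscaling.Busy,   // ...and make it the one doing the work
	)
	rc.WaitForReplicas(1, time.Minute)
	return rc
}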

test/e2e/instrumentation/monitoring/stackdriver.go

@@ -103,7 +103,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
	framework.ExpectNoError(err)

-	rc := e2eautoscaling.NewDynamicResourceConsumer(rcName, f.Namespace.Name, e2eautoscaling.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.ScalesGetter)
+	rc := e2eautoscaling.NewDynamicResourceConsumer(rcName, f.Namespace.Name, e2eautoscaling.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.ScalesGetter, e2eautoscaling.Disable, e2eautoscaling.Idle)
	defer rc.CleanUp()

	rc.WaitForReplicas(pods, 15*time.Minute)

test/e2e/upgrades/horizontal_pod_autoscalers.go

@@ -50,7 +50,9 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
		500, /* cpuLimit */
		200, /* memLimit */
		f.ClientSet,
-		f.ScalesGetter)
+		f.ScalesGetter,
+		e2eautoscaling.Disable,
+		e2eautoscaling.Idle)
	t.hpa = e2eautoscaling.CreateCPUHorizontalPodAutoscaler(
		t.rc,
		20, /* targetCPUUtilizationPercent */

test/utils/runners.go

@@ -181,6 +181,9 @@ type RCConfig struct {
	ConfigMapNames                 []string
	ServiceAccountTokenProjections int
+
+	// Additional containers to run in the pod
+	AdditionalContainers []v1.Container
}

func (rc *RCConfig) RCConfigLog(fmt string, args ...interface{}) {
@@ -343,6 +346,10 @@ func (config *DeploymentConfig) create() error {
		},
	}

+	if len(config.AdditionalContainers) > 0 {
+		deployment.Spec.Template.Spec.Containers = append(deployment.Spec.Template.Spec.Containers, config.AdditionalContainers...)
+	}
+
	if len(config.SecretNames) > 0 {
		attachSecrets(&deployment.Spec.Template, config.SecretNames)
	}
@@ -425,6 +432,10 @@ func (config *ReplicaSetConfig) create() error {
		},
	}

+	if len(config.AdditionalContainers) > 0 {
+		rs.Spec.Template.Spec.Containers = append(rs.Spec.Template.Spec.Containers, config.AdditionalContainers...)
+	}
+
	if len(config.SecretNames) > 0 {
		attachSecrets(&rs.Spec.Template, config.SecretNames)
	}
@@ -618,6 +629,10 @@ func (config *RCConfig) create() error {
		},
	}

+	if len(config.AdditionalContainers) > 0 {
+		rc.Spec.Template.Spec.Containers = append(rc.Spec.Template.Spec.Containers, config.AdditionalContainers...)
+	}
+
	if len(config.SecretNames) > 0 {
		attachSecrets(rc.Spec.Template, config.SecretNames)
	}
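
Finally, a short sketch of the new RCConfig field from a caller's perspective — the hypothetical helper below appends one extra container, which the patched create() paths above copy into the generated pod template:

package example

import (
	v1 "k8s.io/api/core/v1"
	testutils "k8s.io/kubernetes/test/utils"
)

// withSidecar returns a copy of cfg that will run one additional container
// alongside the main one; the name and command are illustrative, mirroring
// getSidecarContainer above.
func withSidecar(cfg testutils.RCConfig) testutils.RCConfig {
	cfg.AdditionalContainers = []v1.Container{{
		Name:    cfg.Name + "-sidecar",
		Image:   cfg.Image,
		Command: []string{"/consumer", "-port=8081"},
	}}
	return cfg
}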