diff --git a/test/e2e/autoscaling/autoscaling_timer.go b/test/e2e/autoscaling/autoscaling_timer.go
index 29b284b8a9b..c586590f458 100644
--- a/test/e2e/autoscaling/autoscaling_timer.go
+++ b/test/e2e/autoscaling/autoscaling_timer.go
@@ -89,7 +89,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 		nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
 		memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's total memory.
 		replicas := 1
-		resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f)
+		resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", f.Namespace.Name, common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f.ClientSet, f.InternalClientset)
 		defer resourceConsumer.CleanUp()
 		resourceConsumer.WaitForReplicas(replicas, 1*time.Minute) // Should finish ~immediately, so 1 minute is more than enough.
diff --git a/test/e2e/autoscaling/horizontal_pod_autoscaling.go b/test/e2e/autoscaling/horizontal_pod_autoscaling.go
index cc8fd8254f8..6e6aca1a8da 100644
--- a/test/e2e/autoscaling/horizontal_pod_autoscaling.go
+++ b/test/e2e/autoscaling/horizontal_pod_autoscaling.go
@@ -115,7 +115,7 @@ type HPAScaleTest struct {
 // TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
 func (scaleTest *HPAScaleTest) run(name, kind string, rc *common.ResourceConsumer, f *framework.Framework) {
 	const timeToWait = 15 * time.Minute
-	rc = common.NewDynamicResourceConsumer(name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f)
+	rc = common.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.InternalClientset)
 	defer rc.CleanUp()
 	hpa := common.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
 	defer common.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
diff --git a/test/e2e/common/autoscaling_utils.go b/test/e2e/common/autoscaling_utils.go
index 16cb7793b86..4bd8a00a6cd 100644
--- a/test/e2e/common/autoscaling_utils.go
+++ b/test/e2e/common/autoscaling_utils.go
@@ -80,7 +80,9 @@ type ResourceConsumer struct {
 	name              string
 	controllerName    string
 	kind              string
-	framework         *framework.Framework
+	nsName            string
+	clientSet         clientset.Interface
+	internalClientset *internalclientset.Clientset
 	cpu               chan int
 	mem               chan int
 	customMetric      chan int
@@ -99,15 +101,15 @@ func GetResourceConsumerImage() string {
 	return resourceConsumerImage
 }
 
-func NewDynamicResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
-	return newResourceConsumer(name, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
-		dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, f)
+func NewDynamicResourceConsumer(name, nsName, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientSet clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
+	return newResourceConsumer(name, nsName, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
+		dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, clientSet, internalClientset)
 }
 
 // TODO this still defaults to replication controller
-func NewStaticResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
-	return newResourceConsumer(name, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
-		initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, f)
+func NewStaticResourceConsumer(name, nsName string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, clientSet clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
+	return newResourceConsumer(name, nsName, KindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
+		initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, clientSet, internalClientset)
 }
 
 /*
@@ -117,15 +119,17 @@ initMemoryTotal argument is in megabytes
 memLimit argument is in megabytes, memLimit is a maximum amount of memory that can be consumed by a single pod
 cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
 */
-func newResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
-	requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
+func newResourceConsumer(name, nsName, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
+	requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, clientSet clientset.Interface, internalClientset *internalclientset.Clientset) *ResourceConsumer {
 
-	runServiceAndWorkloadForResourceConsumer(f.ClientSet, f.InternalClientset, f.Namespace.Name, name, kind, replicas, cpuLimit, memLimit)
+	runServiceAndWorkloadForResourceConsumer(clientSet, internalClientset, nsName, name, kind, replicas, cpuLimit, memLimit)
 	rc := &ResourceConsumer{
 		name:              name,
 		controllerName:    name + "-ctrl",
 		kind:              kind,
-		framework:         f,
+		nsName:            nsName,
+		clientSet:         clientSet,
+		internalClientset: internalClientset,
 		cpu:               make(chan int),
 		mem:               make(chan int),
 		customMetric:      make(chan int),
@@ -231,14 +235,14 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() {
 }
 
 func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
-	proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
+	proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.Core().RESTClient().Post())
 	framework.ExpectNoError(err)
 
 	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 	defer cancel()
 
 	err = wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
-		req := proxyRequest.Namespace(rc.framework.Namespace.Name).
+		req := proxyRequest.Namespace(rc.nsName).
 			Context(ctx).
 			Name(rc.controllerName).
 			Suffix("ConsumeCPU").
@@ -259,14 +263,14 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
 
 // sendConsumeMemRequest sends POST request for memory consumption
 func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
-	proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
+	proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.Core().RESTClient().Post())
 	framework.ExpectNoError(err)
 
 	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 	defer cancel()
 
 	err = wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
-		req := proxyRequest.Namespace(rc.framework.Namespace.Name).
+		req := proxyRequest.Namespace(rc.nsName).
 			Context(ctx).
 			Name(rc.controllerName).
 			Suffix("ConsumeMem").
@@ -287,14 +291,14 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
 
 // sendConsumeCustomMetric sends POST request for custom metric consumption
 func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
-	proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
+	proxyRequest, err := framework.GetServicesProxyRequest(rc.clientSet, rc.clientSet.Core().RESTClient().Post())
 	framework.ExpectNoError(err)
 
 	ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
 	defer cancel()
 
 	err = wait.PollImmediate(serviceInitializationInterval, serviceInitializationTimeout, func() (bool, error) {
-		req := proxyRequest.Namespace(rc.framework.Namespace.Name).
+		req := proxyRequest.Namespace(rc.nsName).
 			Context(ctx).
 			Name(rc.controllerName).
 			Suffix("BumpMetric").
@@ -316,21 +320,21 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
 func (rc *ResourceConsumer) GetReplicas() int {
 	switch rc.kind {
 	case KindRC:
-		replicationController, err := rc.framework.ClientSet.Core().ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
+		replicationController, err := rc.clientSet.Core().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		if replicationController == nil {
 			framework.Failf(rcIsNil)
 		}
 		return int(replicationController.Status.ReadyReplicas)
 	case KindDeployment:
-		deployment, err := rc.framework.ClientSet.Extensions().Deployments(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
+		deployment, err := rc.clientSet.Extensions().Deployments(rc.nsName).Get(rc.name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		if deployment == nil {
 			framework.Failf(deploymentIsNil)
 		}
 		return int(deployment.Status.ReadyReplicas)
 	case KindReplicaSet:
-		rs, err := rc.framework.ClientSet.Extensions().ReplicaSets(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
+		rs, err := rc.clientSet.Extensions().ReplicaSets(rc.nsName).Get(rc.name, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		if rs == nil {
 			framework.Failf(rsIsNil)
@@ -398,10 +402,10 @@ func (rc *ResourceConsumer) CleanUp() {
 	time.Sleep(10 * time.Second)
 	kind, err := kindOf(rc.kind)
 	framework.ExpectNoError(err)
-	framework.ExpectNoError(framework.DeleteResourceAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, kind, rc.framework.Namespace.Name, rc.name))
-	framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.name, nil))
-	framework.ExpectNoError(framework.DeleteResourceAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, api.Kind("ReplicationController"), rc.framework.Namespace.Name, rc.controllerName))
-	framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.controllerName, nil))
+	framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, kind, rc.nsName, rc.name))
+	framework.ExpectNoError(rc.clientSet.Core().Services(rc.nsName).Delete(rc.name, nil))
+	framework.ExpectNoError(framework.DeleteResourceAndPods(rc.clientSet, rc.internalClientset, api.Kind("ReplicationController"), rc.nsName, rc.controllerName))
+	framework.ExpectNoError(rc.clientSet.Core().Services(rc.nsName).Delete(rc.controllerName, nil))
 }
 
 func kindOf(kind string) (schema.GroupKind, error) {
@@ -512,7 +516,7 @@ func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma
 	hpa := &autoscalingv1.HorizontalPodAutoscaler{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      rc.name,
-			Namespace: rc.framework.Namespace.Name,
+			Namespace: rc.nsName,
 		},
 		Spec: autoscalingv1.HorizontalPodAutoscalerSpec{
 			ScaleTargetRef: autoscalingv1.CrossVersionObjectReference{
@@ -524,11 +528,11 @@ func CreateCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, ma
 			TargetCPUUtilizationPercentage: &cpu,
 		},
 	}
-	hpa, errHPA := rc.framework.ClientSet.Autoscaling().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
+	hpa, errHPA := rc.clientSet.Autoscaling().HorizontalPodAutoscalers(rc.nsName).Create(hpa)
 	framework.ExpectNoError(errHPA)
 	return hpa
 }
 
 func DeleteHorizontalPodAutoscaler(rc *ResourceConsumer, autoscalerName string) {
-	rc.framework.ClientSet.Autoscaling().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Delete(autoscalerName, nil)
+	rc.clientSet.Autoscaling().HorizontalPodAutoscalers(rc.nsName).Delete(autoscalerName, nil)
 }
diff --git a/test/e2e/instrumentation/monitoring/stackdriver.go b/test/e2e/instrumentation/monitoring/stackdriver.go
index 38c7605e920..3028fdbe5ec 100644
--- a/test/e2e/instrumentation/monitoring/stackdriver.go
+++ b/test/e2e/instrumentation/monitoring/stackdriver.go
@@ -87,7 +87,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
 
 	framework.ExpectNoError(err)
 
-	rc := common.NewDynamicResourceConsumer(rcName, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f)
+	rc := common.NewDynamicResourceConsumer(rcName, f.Namespace.Name, common.KindDeployment, pods, allPodsCPU, memoryUsed, 0, perPodCPU, memoryLimit, f.ClientSet, f.InternalClientset)
 	defer rc.CleanUp()
 
 	rc.WaitForReplicas(pods, 15*time.Minute)
diff --git a/test/e2e/upgrades/horizontal_pod_autoscalers.go b/test/e2e/upgrades/horizontal_pod_autoscalers.go
index 119221d7a84..f76ae06355a 100644
--- a/test/e2e/upgrades/horizontal_pod_autoscalers.go
+++ b/test/e2e/upgrades/horizontal_pod_autoscalers.go
@@ -39,6 +39,7 @@ func (HPAUpgradeTest) Name() string { return "hpa-upgrade" }
 func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
 	t.rc = common.NewDynamicResourceConsumer(
 		"res-cons-upgrade",
+		f.Namespace.Name,
 		common.KindRC,
 		1, /* replicas */
 		250, /* initCPUTotal */
@@ -46,7 +47,8 @@ func (t *HPAUpgradeTest) Setup(f *framework.Framework) {
 		0,
 		500, /* cpuLimit */
 		200, /* memLimit */
-		f)
+		f.ClientSet,
+		f.InternalClientset)
 	t.hpa = common.CreateCPUHorizontalPodAutoscaler(
 		t.rc,
 		20, /* targetCPUUtilizationPercent */