Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-29 14:37:00 +00:00
Horizontal Pod Autoscaling e2e tests
This commit is contained in:
parent 6605b170b6
commit 3ddbb7d96b
@@ -35,43 +35,45 @@ const (
 	targetPort = 8080
 	timeoutRC  = 120 * time.Second
 	image      = "gcr.io/google_containers/resource_consumer:alpha"
 	rcIsNil    = "ERROR: replicationController = nil"
 )
 
 /*
-ConsumingRC is a tool for testing. It helps create specified usage of CPU or memory (Warnig: memory not supported)
+ResourceConsumer is a tool for testing. It helps create specified usage of CPU or memory (Warning: memory not supported)
 typical use case:
 rc.ConsumeCPU(600)
 // ... check your assumption here
 rc.ConsumeCPU(300)
 // ... check your assumption here
 */
-type ConsumingRC struct {
+type ResourceConsumer struct {
 	name      string
 	framework *Framework
 	channel   chan int
 	stop      chan int
 }
 
-// NewConsumingRC creates new ConsumingRC
-func NewConsumingRC(name string, replicas int, framework *Framework) *ConsumingRC {
-	startService(framework.Client, framework.Namespace.Name, name, replicas)
-	rc := &ConsumingRC{
+// NewResourceConsumer creates a new ResourceConsumer
+// cpu argument is in milicores
+func NewResourceConsumer(name string, replicas int, cpu int, framework *Framework) *ResourceConsumer {
+	runServiceAndRCForResourceConsumer(framework.Client, framework.Namespace.Name, name, replicas)
+	rc := &ResourceConsumer{
 		name:      name,
 		framework: framework,
 		channel:   make(chan int),
 		stop:      make(chan int),
 	}
 	go rc.makeConsumeCPURequests()
-	rc.ConsumeCPU(0)
+	rc.ConsumeCPU(cpu)
 	return rc
 }
 
 // ConsumeCPU consumes given number of CPU
-func (rc *ConsumingRC) ConsumeCPU(milicores int) {
+func (rc *ResourceConsumer) ConsumeCPU(milicores int) {
 	rc.channel <- milicores
 }
 
-func (rc *ConsumingRC) makeConsumeCPURequests() {
+func (rc *ResourceConsumer) makeConsumeCPURequests() {
 	defer GinkgoRecover()
 	var count int
 	var rest int
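The hunk ends just as makeConsumeCPURequests begins, so its loop body is not shown. Below is a minimal sketch of how such a channel-driven loop typically works; the 100-milicore chunk size, the 30-second refresh cadence, and the exact roles of count and rest are assumptions for illustration, not taken from this commit:

func (rc *ResourceConsumer) makeConsumeCPURequests() {
	defer GinkgoRecover()
	var count int // assumed: how many 100-milicore requests to keep in flight
	var rest int  // assumed: remainder under 100 milicores, sent as one extra request
	const durationSec = 30
	for {
		select {
		case milicores := <-rc.channel:
			count = milicores / 100
			rest = milicores % 100
		case <-rc.stop:
			return
		case <-time.After(durationSec * time.Second):
			// timer fired: fall through and refresh the current load
		}
		// (Re-)send the load so consumption stays steady while the autoscaler observes it.
		rc.sendConsumeCPUrequests(count, 100, durationSec)
		if rest > 0 {
			go rc.sendOneConsumeCPUrequest(rest, durationSec)
		}
	}
}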
@@ -93,14 +95,14 @@ func (rc *ConsumingRC) makeConsumeCPURequests() {
 	}
 }
 
-func (rc *ConsumingRC) sendConsumeCPUrequests(requests, milicores, durationSec int) {
+func (rc *ResourceConsumer) sendConsumeCPUrequests(requests, milicores, durationSec int) {
 	for i := 0; i < requests; i++ {
 		go rc.sendOneConsumeCPUrequest(milicores, durationSec)
 	}
 }
 
 // sendOneConsumeCPUrequest sends POST request for cpu consumption
-func (rc *ConsumingRC) sendOneConsumeCPUrequest(milicores int, durationSec int) {
+func (rc *ResourceConsumer) sendOneConsumeCPUrequest(milicores int, durationSec int) {
 	_, err := rc.framework.Client.Post().
 		Prefix("proxy").
 		Namespace(rc.framework.Namespace.Name).
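The request-builder chain is cut off by the hunk boundary. A plausible completion is sketched below, assuming the resource_consumer service exposes a ConsumeCPU endpoint reached through the API-server proxy; the endpoint name and parameter names are guesses, and strconv would also need to be imported:

_, err := rc.framework.Client.Post().
	Prefix("proxy").
	Namespace(rc.framework.Namespace.Name).
	Resource("services"). // assumption: load is sent via the service proxy
	Name(rc.name).
	Suffix("ConsumeCPU").                            // assumption: endpoint name
	Param("millicores", strconv.Itoa(milicores)).    // assumption: parameter names
	Param("durationSec", strconv.Itoa(durationSec)).
	Do().
	Raw()
expectNoError(err)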
@@ -114,14 +116,37 @@ func (rc *ConsumingRC) sendOneConsumeCPUrequest(milicores int, durationSec int)
 	expectNoError(err)
 }
 
-func (rc *ConsumingRC) CleanUp() {
+func (rc *ResourceConsumer) GetReplicas() int {
+	replicationController, err := rc.framework.Client.ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name)
+	expectNoError(err)
+	if replicationController == nil {
+		Failf(rcIsNil)
+	}
+	return replicationController.Status.Replicas
+}
+
+func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int) {
+	timeout := 10 * time.Minute
+	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
+		if desiredReplicas == rc.GetReplicas() {
+			Logf("Replication Controller current replicas number is equal to desired replicas number: %d", desiredReplicas)
+			return
+		} else {
+			Logf("Replication Controller current replicas number %d waiting to be %d", rc.GetReplicas(), desiredReplicas)
+		}
+	}
+	Failf("timeout waiting %v for pods size to be %d", timeout, desiredReplicas)
+}
+
+func (rc *ResourceConsumer) CleanUp() {
 	rc.stop <- 0
 	expectNoError(DeleteRC(rc.framework.Client, rc.framework.Namespace.Name, rc.name))
+	expectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name))
+	expectNoError(rc.framework.Client.Experimental().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Delete(rc.name, api.NewDeleteOptions(0)))
 }
 
-func startService(c *client.Client, ns, name string, replicas int) {
-	c.Services(ns).Create(&api.Service{
+func runServiceAndRCForResourceConsumer(c *client.Client, ns, name string, replicas int) {
+	_, err := c.Services(ns).Create(&api.Service{
 		ObjectMeta: api.ObjectMeta{
 			Name: name,
 		},
@@ -136,7 +161,7 @@ func startService(c *client.Client, ns, name string, replicas int) {
 			},
 		},
 	})
-
+	expectNoError(err)
 	config := RCConfig{
 		Client: c,
 		Image:  image,
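The first file's diff ends inside runServiceAndRCForResourceConsumer. Presumably the function finishes by filling in the RCConfig and starting the replication controller with the existing e2e RunRC helper, roughly as below; the exact field set and the RunRC call are assumptions based on the visible fragment:

config := RCConfig{
	Client:    c,
	Image:     image,
	Name:      name,
	Namespace: ns,
	Timeout:   timeoutRC, // from the const block above
	Replicas:  replicas,
}
expectNoError(RunRC(config)) // assumption: RunRC is the existing e2e helper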
123	test/e2e/horizontal_pod_autoscaling.go	Normal file
@@ -0,0 +1,123 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"time"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/expapi"
+
+	. "github.com/onsi/ginkgo"
+)
+
+const (
+	sleep = 10 * time.Minute
+)
+
+var _ = Describe("Horizontal pod autoscaling", func() {
+	var rc *ResourceConsumer
+	f := NewFramework("horizontal-pod-autoscaling")
+
+	BeforeEach(func() {
+		Skip("Skipped Horizontal pod autoscaling test")
+	})
+
+	AfterEach(func() {
+	})
+
+	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods (scale resource: CPU)", func() {
+		rc = NewResourceConsumer("rc", 1, 700, f)
+		createHorizontalPodAutoscaler(rc, "0.3")
+		rc.WaitForReplicas(3)
+		rc.CleanUp()
+	})
+
+	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 3 pods to 1 pod (scale resource: CPU)", func() {
+		rc = NewResourceConsumer("rc", 3, 0, f)
+		createHorizontalPodAutoscaler(rc, "0.7")
+		rc.WaitForReplicas(1)
+		rc.CleanUp()
+	})
+
+	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to maximum 5 pods (scale resource: CPU)", func() {
+		rc = NewResourceConsumer("rc", 1, 700, f)
+		createHorizontalPodAutoscaler(rc, "0.1")
+		rc.WaitForReplicas(5)
+		rc.CleanUp()
+	})
+
+	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 1 (scale resource: CPU)", func() {
+		rc = NewResourceConsumer("rc", 1, 700, f)
+		createHorizontalPodAutoscaler(rc, "0.3")
+		rc.WaitForReplicas(3)
+		rc.ConsumeCPU(300)
+		rc.WaitForReplicas(1)
+		rc.CleanUp()
+	})
+
+	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 5 (scale resource: CPU)", func() {
+		rc = NewResourceConsumer("rc", 1, 300, f)
+		createHorizontalPodAutoscaler(rc, "0.1")
+		rc.WaitForReplicas(3)
+		rc.ConsumeCPU(700)
+		rc.WaitForReplicas(5)
+		rc.CleanUp()
+	})
+
+	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 3 pods to 1 pod and from 1 to 3 (scale resource: CPU)", func() {
+		rc = NewResourceConsumer("rc", 3, 0, f)
+		createHorizontalPodAutoscaler(rc, "0.3")
+		rc.WaitForReplicas(1)
+		rc.ConsumeCPU(700)
+		rc.WaitForReplicas(3)
+		rc.CleanUp()
+	})
+
+	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: CPU)", func() {
+		rc = NewResourceConsumer("rc", 5, 700, f)
+		createHorizontalPodAutoscaler(rc, "0.3")
+		rc.WaitForReplicas(3)
+		rc.ConsumeCPU(100)
+		rc.WaitForReplicas(1)
+		rc.CleanUp()
+	})
+})
+
+func createHorizontalPodAutoscaler(rc *ResourceConsumer, cpu string) {
+	hpa := &expapi.HorizontalPodAutoscaler{
+		ObjectMeta: api.ObjectMeta{
+			Name:      rc.name,
+			Namespace: rc.framework.Namespace.Name,
+		},
+		Spec: expapi.HorizontalPodAutoscalerSpec{
+			ScaleRef: &expapi.SubresourceReference{
+				Kind:        "replicationController",
+				Name:        rc.name,
+				Namespace:   rc.framework.Namespace.Name,
+				Subresource: "scale",
+			},
+			MinCount: 1,
+			MaxCount: 5,
+			Target:   expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(cpu)},
+		},
+	}
+	_, errHPA := rc.framework.Client.Experimental().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
+	expectNoError(errHPA)
+}
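For reference, the replica counts these tests wait for are consistent with a simple model of the experimental autoscaler: desired replicas = ceil(total consumed CPU / per-pod target), clamped to [MinCount, MaxCount]. That rule is an assumption about the autoscaler rather than anything stated in this commit, but it reproduces every expectation above; for example, 700 milicores against a 0.3-core (300-milicore) target needs ceil(700/300) = 3 pods, while a 0.1-core target would need 7 and is capped at MaxCount = 5. A quick self-contained check:

package main

import "fmt"

// expectedReplicas applies the assumed scaling rule:
// ceil(consumed/target), clamped to [minPods, maxPods].
func expectedReplicas(consumedMilicores, targetMilicores, minPods, maxPods int) int {
	n := (consumedMilicores + targetMilicores - 1) / targetMilicores // ceiling division
	if n < minPods {
		n = minPods
	}
	if n > maxPods {
		n = maxPods
	}
	return n
}

func main() {
	fmt.Println(expectedReplicas(700, 300, 1, 5)) // 3: scale 1 -> 3, target 0.3
	fmt.Println(expectedReplicas(0, 700, 1, 5))   // 1: scale 3 -> 1, target 0.7
	fmt.Println(expectedReplicas(700, 100, 1, 5)) // 5: capped at MaxCount, target 0.1
	fmt.Println(expectedReplicas(300, 300, 1, 5)) // 1: the scale-down leg after ConsumeCPU(300)
}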