mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-08-16 07:13:53 +00:00

Merge pull request #14036 from socaa/hpa-mem-tests

Auto commit by PR queue bot

commit 400e6856b0
@@ -31,10 +31,11 @@ const (
 	consumptionTimeInSeconds = 30
 	sleepTime                = 30 * time.Second
 	requestSizeInMillicores  = 100
+	requestSizeInMegabytes   = 100
 	port                     = 80
 	targetPort               = 8080
 	timeoutRC                = 120 * time.Second
-	image                    = "gcr.io/google_containers/resource_consumer:alpha"
+	image                    = "gcr.io/google_containers/resource_consumer:beta"
 	rcIsNil                  = "ERROR: replicationController = nil"
 )
 
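The new requestSizeInMegabytes constant plays the same role for memory that requestSizeInMillicores plays for CPU: a requested total is split into fixed-size chunks, with any remainder sent as one smaller request. A minimal standalone sketch of that arithmetic, matching the division the consume loops below perform (splitRequests is an illustrative helper, not part of the patch):

package main

import "fmt"

const requestSizeInMegabytes = 100 // same chunk size the patch adds

// splitRequests shows how a total is divided into full-size chunks
// plus a remainder, as the consume loops do.
func splitRequests(total, chunk int) (count, rest int) {
	count = total / chunk
	rest = total - count*chunk
	return
}

func main() {
	count, rest := splitRequests(250, requestSizeInMegabytes)
	fmt.Println(count, rest) // two 100-MB requests plus one 50-MB request
}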
@@ -49,60 +50,101 @@ rc.ConsumeCPU(300)
 type ResourceConsumer struct {
 	name      string
 	framework *Framework
-	channel   chan int
-	stop      chan int
+	cpu       chan int
+	mem       chan int
+	stopCPU   chan int
+	stopMem   chan int
 }
 
-// NewResourceConsumer creates new ResourceConsumer
-// cpu argument is in millicores
-func NewResourceConsumer(name string, replicas int, cpu int, framework *Framework) *ResourceConsumer {
+/*
+NewResourceConsumer creates new ResourceConsumer
+initCPU argument is in millicores
+initMemory argument is in megabytes
+*/
+func NewResourceConsumer(name string, replicas, initCPU, initMemory int, framework *Framework) *ResourceConsumer {
 	runServiceAndRCForResourceConsumer(framework.Client, framework.Namespace.Name, name, replicas)
 	rc := &ResourceConsumer{
 		name:      name,
 		framework: framework,
-		channel:   make(chan int),
-		stop:      make(chan int),
+		cpu:       make(chan int),
+		mem:       make(chan int),
+		stopCPU:   make(chan int),
+		stopMem:   make(chan int),
 	}
 	go rc.makeConsumeCPURequests()
-	rc.ConsumeCPU(cpu)
+	rc.ConsumeCPU(initCPU)
+	go rc.makeConsumeMemRequests()
+	rc.ConsumeMem(initMemory)
 	return rc
 }
 
 // ConsumeCPU consumes given number of CPU
 func (rc *ResourceConsumer) ConsumeCPU(millicores int) {
-	rc.channel <- millicores
+	rc.cpu <- millicores
+}
+
+// ConsumeMem consumes given number of Mem
+func (rc *ResourceConsumer) ConsumeMem(megabytes int) {
+	rc.mem <- megabytes
 }
 
 func (rc *ResourceConsumer) makeConsumeCPURequests() {
-	defer GinkgoRecover()
 	var count int
 	var rest int
 	for {
 		select {
-		case millicores := <-rc.channel:
+		case millicores := <-rc.cpu:
 			count = millicores / requestSizeInMillicores
 			rest = millicores - count*requestSizeInMillicores
 		case <-time.After(sleepTime):
 			if count > 0 {
-				rc.sendConsumeCPUrequests(count, requestSizeInMillicores, consumptionTimeInSeconds)
+				rc.sendConsumeCPURequests(count, requestSizeInMillicores, consumptionTimeInSeconds)
 			}
 			if rest > 0 {
-				go rc.sendOneConsumeCPUrequest(rest, consumptionTimeInSeconds)
+				go rc.sendOneConsumeCPURequest(rest, consumptionTimeInSeconds)
 			}
-		case <-rc.stop:
+		case <-rc.stopCPU:
 			return
 		}
 	}
 }
 
-func (rc *ResourceConsumer) sendConsumeCPUrequests(requests, millicores, durationSec int) {
-	for i := 0; i < requests; i++ {
-		go rc.sendOneConsumeCPUrequest(millicores, durationSec)
+func (rc *ResourceConsumer) makeConsumeMemRequests() {
+	var count int
+	var rest int
+	for {
+		select {
+		case megabytes := <-rc.mem:
+			count = megabytes / requestSizeInMegabytes
+			rest = megabytes - count*requestSizeInMegabytes
+		case <-time.After(sleepTime):
+			if count > 0 {
+				rc.sendConsumeMemRequests(count, requestSizeInMegabytes, consumptionTimeInSeconds)
+			}
+			if rest > 0 {
+				go rc.sendOneConsumeMemRequest(rest, consumptionTimeInSeconds)
+			}
+		case <-rc.stopMem:
+			return
+		}
 	}
 }
 
-// sendOneConsumeCPUrequest sends POST request for cpu consumption
-func (rc *ResourceConsumer) sendOneConsumeCPUrequest(millicores int, durationSec int) {
+func (rc *ResourceConsumer) sendConsumeCPURequests(requests, millicores, durationSec int) {
+	for i := 0; i < requests; i++ {
+		go rc.sendOneConsumeCPURequest(millicores, durationSec)
+	}
+}
+
+func (rc *ResourceConsumer) sendConsumeMemRequests(requests, megabytes, durationSec int) {
+	for i := 0; i < requests; i++ {
+		go rc.sendOneConsumeMemRequest(megabytes, durationSec)
+	}
+}
+
+// sendOneConsumeCPURequest sends POST request for cpu consumption
+func (rc *ResourceConsumer) sendOneConsumeCPURequest(millicores int, durationSec int) {
+	defer GinkgoRecover()
 	_, err := rc.framework.Client.Post().
 		Prefix("proxy").
 		Namespace(rc.framework.Namespace.Name).
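Both consume loops share one shape: a target level arrives on a channel, is re-applied every sleepTime tick, and a send on the stop channel ends the goroutine. A stripped-down, runnable sketch of that select pattern, with illustrative names (consumeLoop is not from the patch):

package main

import (
	"fmt"
	"time"
)

// consumeLoop re-issues the last requested level on every tick until
// stopped, mirroring the select structure of makeConsumeMemRequests.
func consumeLoop(level, stop chan int, tick time.Duration) {
	var current int
	for {
		select {
		case current = <-level: // a new target level arrives
		case <-time.After(tick): // periodically re-send the current level
			if current > 0 {
				fmt.Println("sending requests for", current)
			}
		case <-stop:
			return
		}
	}
}

func main() {
	level, stop := make(chan int), make(chan int)
	go consumeLoop(level, stop, 50*time.Millisecond)
	level <- 300
	time.Sleep(120 * time.Millisecond)
	stop <- 0 // unbuffered: blocks until the loop actually receives it
}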
@@ -116,6 +158,22 @@ func (rc *ResourceConsumer) sendOneConsumeCPUrequest(millicores int, durationSec
 	expectNoError(err)
 }
 
+// sendOneConsumeMemRequest sends POST request for memory consumption
+func (rc *ResourceConsumer) sendOneConsumeMemRequest(megabytes int, durationSec int) {
+	defer GinkgoRecover()
+	_, err := rc.framework.Client.Post().
+		Prefix("proxy").
+		Namespace(rc.framework.Namespace.Name).
+		Resource("services").
+		Name(rc.name).
+		Suffix("ConsumeMem").
+		Param("megabytes", strconv.Itoa(megabytes)).
+		Param("durationSec", strconv.Itoa(durationSec)).
+		Do().
+		Raw()
+	expectNoError(err)
+}
+
 func (rc *ResourceConsumer) GetReplicas() int {
 	replicationController, err := rc.framework.Client.ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name)
 	expectNoError(err)
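The fluent chain Post().Prefix("proxy").Namespace(...).Resource("services").Name(rc.name).Suffix("ConsumeMem") asks the apiserver to proxy the POST to the resource consumer service, which then burns the requested memory for durationSec. A rough plain net/http equivalent is sketched below; the host and the exact proxy path are assumptions for illustration, not quoted from the patch:

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Assumed apiserver proxy URL shape for POSTing to the service;
	// the test builds the same request via the client's fluent API.
	base := "http://localhost:8080/api/v1/proxy/namespaces/e2e-tests/services/rc/ConsumeMem"
	params := url.Values{}
	params.Set("megabytes", "100")
	params.Set("durationSec", "30")

	resp, err := http.Post(base+"?"+params.Encode(), "application/x-www-form-urlencoded", nil)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}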
@@ -139,7 +197,8 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int) {
 }
 
 func (rc *ResourceConsumer) CleanUp() {
-	rc.stop <- 0
+	rc.stopCPU <- 0
+	rc.stopMem <- 0
 	expectNoError(DeleteRC(rc.framework.Client, rc.framework.Namespace.Name, rc.name))
 	expectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name))
 	expectNoError(rc.framework.Client.Experimental().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Delete(rc.name, api.NewDeleteOptions(0)))
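Both stop channels are unbuffered, so the two sends in CleanUp block until each loop goroutine has actually received; the RC, service, and HPA are deleted only after the request loops have stopped. A tiny demonstration of that blocking-send handshake (illustrative only, not from the patch):

package main

import (
	"fmt"
	"time"
)

func main() {
	stop := make(chan int) // unbuffered, like rc.stopCPU and rc.stopMem
	go func() {
		time.Sleep(100 * time.Millisecond) // the loop is busy elsewhere
		<-stop                             // like `case <-rc.stopCPU: return`
		fmt.Println("loop goroutine stopped")
	}()
	stop <- 0 // blocks here until the goroutine reaches its receive
	time.Sleep(10 * time.Millisecond)
	fmt.Println("safe to delete the RC and service now")
}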
@@ -171,4 +230,5 @@ func runServiceAndRCForResourceConsumer(c *client.Client, ns, name string, repli
 		Replicas: replicas,
 	}
 	expectNoError(RunRC(config))
+
 }
@@ -17,8 +17,6 @@ limitations under the License.
 package e2e
 
 import (
-	"time"
-
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/apis/experimental"
@@ -27,7 +25,8 @@ import (
 )
 
 const (
-	sleep = 10 * time.Minute
+	kind        = "replicationController"
+	subresource = "scale"
 )
 
 var _ = Describe("Horizontal pod autoscaling", func() {
@@ -41,30 +40,31 @@ var _ = Describe("Horizontal pod autoscaling", func() {
 	AfterEach(func() {
 	})
 
+	// CPU tests
 	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods (scale resource: CPU)", func() {
-		rc = NewResourceConsumer("rc", 1, 700, f)
-		createHorizontalPodAutoscaler(rc, "0.3")
+		rc = NewResourceConsumer("rc", 1, 700, 0, f)
+		createCPUHorizontalPodAutoscaler(rc, "0.3")
 		rc.WaitForReplicas(3)
 		rc.CleanUp()
 	})
 
 	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 3 pods to 1 pod (scale resource: CPU)", func() {
-		rc = NewResourceConsumer("rc", 3, 0, f)
-		createHorizontalPodAutoscaler(rc, "0.7")
+		rc = NewResourceConsumer("rc", 3, 0, 0, f)
+		createCPUHorizontalPodAutoscaler(rc, "0.7")
 		rc.WaitForReplicas(1)
 		rc.CleanUp()
 	})
 
 	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to maximum 5 pods (scale resource: CPU)", func() {
-		rc = NewResourceConsumer("rc", 1, 700, f)
-		createHorizontalPodAutoscaler(rc, "0.1")
+		rc = NewResourceConsumer("rc", 1, 700, 0, f)
+		createCPUHorizontalPodAutoscaler(rc, "0.1")
 		rc.WaitForReplicas(5)
 		rc.CleanUp()
 	})
 
 	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 1 (scale resource: CPU)", func() {
-		rc = NewResourceConsumer("rc", 1, 700, f)
-		createHorizontalPodAutoscaler(rc, "0.3")
+		rc = NewResourceConsumer("rc", 1, 700, 0, f)
+		createCPUHorizontalPodAutoscaler(rc, "0.3")
 		rc.WaitForReplicas(3)
 		rc.ConsumeCPU(300)
 		rc.WaitForReplicas(1)
@@ -72,8 +72,8 @@ var _ = Describe("Horizontal pod autoscaling", func() {
 	})
 
 	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 5 (scale resource: CPU)", func() {
-		rc = NewResourceConsumer("rc", 1, 300, f)
-		createHorizontalPodAutoscaler(rc, "0.1")
+		rc = NewResourceConsumer("rc", 1, 300, 0, f)
+		createCPUHorizontalPodAutoscaler(rc, "0.1")
 		rc.WaitForReplicas(3)
 		rc.ConsumeCPU(700)
 		rc.WaitForReplicas(5)
@@ -81,8 +81,8 @@ var _ = Describe("Horizontal pod autoscaling", func() {
 	})
 
 	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 3 pods to 1 pod and from 1 to 3 (scale resource: CPU)", func() {
-		rc = NewResourceConsumer("rc", 3, 0, f)
-		createHorizontalPodAutoscaler(rc, "0.3")
+		rc = NewResourceConsumer("rc", 3, 0, 0, f)
+		createCPUHorizontalPodAutoscaler(rc, "0.3")
 		rc.WaitForReplicas(1)
 		rc.ConsumeCPU(700)
 		rc.WaitForReplicas(3)
@@ -90,17 +90,75 @@ var _ = Describe("Horizontal pod autoscaling", func() {
 	})
 
 	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: CPU)", func() {
-		rc = NewResourceConsumer("rc", 5, 700, f)
-		createHorizontalPodAutoscaler(rc, "0.3")
+		rc = NewResourceConsumer("rc", 5, 700, 0, f)
+		createCPUHorizontalPodAutoscaler(rc, "0.3")
 		rc.WaitForReplicas(3)
 		rc.ConsumeCPU(100)
 		rc.WaitForReplicas(1)
 		rc.CleanUp()
 	})
 
+	// Memory tests
+	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods (scale resource: Memory)", func() {
+		rc = NewResourceConsumer("rc", 1, 0, 800, f)
+		createMemoryHorizontalPodAutoscaler(rc, "300")
+		rc.WaitForReplicas(3)
+		rc.CleanUp()
+	})
+
+	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 3 pods to 1 pod (scale resource: Memory)", func() {
+		rc = NewResourceConsumer("rc", 3, 0, 0, f)
+		createMemoryHorizontalPodAutoscaler(rc, "700")
+		rc.WaitForReplicas(1)
+		rc.CleanUp()
+	})
+
+	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to maximum 5 pods (scale resource: Memory)", func() {
+		rc = NewResourceConsumer("rc", 1, 0, 700, f)
+		createMemoryHorizontalPodAutoscaler(rc, "100")
+		rc.WaitForReplicas(5)
+		rc.CleanUp()
+	})
+
+	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 1 (scale resource: Memory)", func() {
+		rc = NewResourceConsumer("rc", 1, 0, 700, f)
+		createMemoryHorizontalPodAutoscaler(rc, "300")
+		rc.WaitForReplicas(3)
+		rc.ConsumeMem(100)
+		rc.WaitForReplicas(1)
+		rc.CleanUp()
+	})
+
+	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 5 (scale resource: Memory)", func() {
+		rc = NewResourceConsumer("rc", 1, 0, 500, f)
+		createMemoryHorizontalPodAutoscaler(rc, "200")
+		rc.WaitForReplicas(3)
+		rc.ConsumeMem(1000)
+		rc.WaitForReplicas(5)
+		rc.CleanUp()
+	})
+
+	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 3 pods to 1 pod and from 1 to 3 (scale resource: Memory)", func() {
+		rc = NewResourceConsumer("rc", 3, 0, 0, f)
+		createMemoryHorizontalPodAutoscaler(rc, "300")
+		rc.WaitForReplicas(1)
+		rc.ConsumeMem(700)
+		rc.WaitForReplicas(3)
+		rc.CleanUp()
+	})
+
+	It("[Skipped][Horizontal pod autoscaling Suite] should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: Memory)", func() {
+		rc = NewResourceConsumer("rc", 5, 0, 700, f)
+		createMemoryHorizontalPodAutoscaler(rc, "300")
+		rc.WaitForReplicas(3)
+		rc.ConsumeMem(100)
+		rc.WaitForReplicas(1)
+		rc.CleanUp()
+	})
+
 })
 
-func createHorizontalPodAutoscaler(rc *ResourceConsumer, cpu string) {
+func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu string) {
 	hpa := &experimental.HorizontalPodAutoscaler{
 		ObjectMeta: api.ObjectMeta{
 			Name: rc.name,
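The expected replica counts in these scenarios are consistent with the usual autoscaler heuristic of ceil(total consumption / per-pod target), clamped to MinCount and MaxCount; e.g. 800 MB of consumption against a 300 MB target gives 3 pods. A quick check of that reading, assuming the heuristic (desiredReplicas is illustrative, not the autoscaler's code):

package main

import (
	"fmt"
	"math"
)

// desiredReplicas applies the assumed ceil(usage/target) rule with
// min/max clamping, used here only to sanity-check the test scenarios.
func desiredReplicas(usageMB, targetMB float64, min, max int) int {
	d := int(math.Ceil(usageMB / targetMB))
	if d < min {
		return min
	}
	if d > max {
		return max
	}
	return d
}

func main() {
	fmt.Println(desiredReplicas(800, 300, 1, 5)) // 3: first memory scenario
	fmt.Println(desiredReplicas(700, 100, 1, 5)) // 5: capped at MaxCount
	fmt.Println(desiredReplicas(100, 300, 1, 5)) // 1: scale back down
}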
@@ -108,10 +166,10 @@ func createHorizontalPodAutoscaler(rc *ResourceConsumer, cpu string) {
 		},
 		Spec: experimental.HorizontalPodAutoscalerSpec{
 			ScaleRef: &experimental.SubresourceReference{
-				Kind:        "replicationController",
+				Kind:        kind,
 				Name:        rc.name,
 				Namespace:   rc.framework.Namespace.Name,
-				Subresource: "scale",
+				Subresource: subresource,
 			},
 			MinCount: 1,
 			MaxCount: 5,
@@ -121,3 +179,26 @@ func createHorizontalPodAutoscaler(rc *ResourceConsumer, cpu string) {
 	_, errHPA := rc.framework.Client.Experimental().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
 	expectNoError(errHPA)
 }
+
+// argument memory is in megabytes
+func createMemoryHorizontalPodAutoscaler(rc *ResourceConsumer, memory string) {
+	hpa := &experimental.HorizontalPodAutoscaler{
+		ObjectMeta: api.ObjectMeta{
+			Name:      rc.name,
+			Namespace: rc.framework.Namespace.Name,
+		},
+		Spec: experimental.HorizontalPodAutoscalerSpec{
+			ScaleRef: &experimental.SubresourceReference{
+				Kind:        kind,
+				Name:        rc.name,
+				Namespace:   rc.framework.Namespace.Name,
+				Subresource: subresource,
+			},
+			MinCount: 1,
+			MaxCount: 5,
+			Target:   experimental.ResourceConsumption{Resource: api.ResourceMemory, Quantity: resource.MustParse(memory + "M")},
+		},
+	}
+	_, errHPA := rc.framework.Client.Experimental().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
+	expectNoError(errHPA)
+}
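Note that createMemoryHorizontalPodAutoscaler appends "M" before resource.MustParse, so the target is a decimal quantity: "300M" is 300 * 10^6 bytes, not the binary "300Mi". A small sketch of the difference, without pulling in the resource package:

package main

import "fmt"

func main() {
	const megabytes = 300
	decimal := megabytes * 1000 * 1000 // "300M" as parsed by resource.MustParse
	binary := megabytes * 1024 * 1024  // what "300Mi" would mean instead
	fmt.Println(decimal, binary, binary-decimal)
}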