Dynamic memory consumption

This commit is contained in:
Ewa Socala 2015-09-16 13:52:26 +02:00
parent c9570e34d0
commit e837209f15
2 changed files with 182 additions and 41 deletions

View File

@ -31,10 +31,11 @@ const (
consumptionTimeInSeconds = 30
sleepTime = 30 * time.Second
requestSizeInMillicores = 100
requestSizeInMegabytes = 100
port = 80
targetPort = 8080
timeoutRC = 120 * time.Second
image = "gcr.io/google_containers/resource_consumer:alpha"
image = "gcr.io/google_containers/resource_consumer:beta"
rcIsNil = "ERROR: replicationController = nil"
)
@ -49,60 +50,101 @@ rc.ConsumeCPU(300)
// ResourceConsumer drives a replication-controller-backed set of pods that can
// be told to consume CPU and memory on demand (used by the autoscaling e2e tests).
type ResourceConsumer struct {
name string // name of the RC/service this consumer controls
framework *Framework // e2e framework handle (client + namespace)
channel chan int // NOTE(review): looks superseded by cpu/mem below (old diff side) — confirm
stop chan int // NOTE(review): looks superseded by stopCPU/stopMem below — confirm
cpu chan int // desired total CPU consumption, in millicores
mem chan int // desired total memory consumption, in megabytes
stopCPU chan int // signals the CPU request loop to exit
stopMem chan int // signals the memory request loop to exit
}
// NewResourceConsumer creates new ResourceConsumer
// cpu argument is in millicores
func NewResourceConsumer(name string, replicas int, cpu int, framework *Framework) *ResourceConsumer {
/*
NewResourceConsumer creates new ResourceConsumer
initCPU argument is in millicores
initMemory argument is in megabytes
*/
func NewResourceConsumer(name string, replicas, initCPU, initMemory int, framework *Framework) *ResourceConsumer {
runServiceAndRCForResourceConsumer(framework.Client, framework.Namespace.Name, name, replicas)
rc := &ResourceConsumer{
name: name,
framework: framework,
channel: make(chan int),
stop: make(chan int),
cpu: make(chan int),
mem: make(chan int),
stopCPU: make(chan int),
stopMem: make(chan int),
}
go rc.makeConsumeCPURequests()
rc.ConsumeCPU(cpu)
rc.ConsumeCPU(initCPU)
go rc.makeConsumeMemRequests()
rc.ConsumeMem(initMemory)
return rc
}
// ConsumeCPU consumes given number of CPU
func (rc *ResourceConsumer) ConsumeCPU(millicores int) {
rc.channel <- millicores
rc.cpu <- millicores
}
// ConsumeMem sets the total memory consumption target, in megabytes.
// The channel is unbuffered, so this blocks until the background
// makeConsumeMemRequests goroutine receives the new target.
func (rc *ResourceConsumer) ConsumeMem(megabytes int) {
rc.mem <- megabytes
}
func (rc *ResourceConsumer) makeConsumeCPURequests() {
defer GinkgoRecover()
var count int
var rest int
for {
select {
case millicores := <-rc.channel:
case millicores := <-rc.cpu:
count = millicores / requestSizeInMillicores
rest = millicores - count*requestSizeInMillicores
case <-time.After(sleepTime):
if count > 0 {
rc.sendConsumeCPUrequests(count, requestSizeInMillicores, consumptionTimeInSeconds)
rc.sendConsumeCPURequests(count, requestSizeInMillicores, consumptionTimeInSeconds)
}
if rest > 0 {
go rc.sendOneConsumeCPUrequest(rest, consumptionTimeInSeconds)
go rc.sendOneConsumeCPURequest(rest, consumptionTimeInSeconds)
}
case <-rc.stop:
case <-rc.stopCPU:
return
}
}
}
func (rc *ResourceConsumer) sendConsumeCPUrequests(requests, millicores, durationSec int) {
for i := 0; i < requests; i++ {
go rc.sendOneConsumeCPUrequest(millicores, durationSec)
// makeConsumeMemRequests is the long-running worker goroutine behind
// ConsumeMem. It splits the requested total into requestSizeInMegabytes-sized
// chunks plus a remainder, and re-issues the consumption requests every
// sleepTime (each request only lasts consumptionTimeInSeconds) until a value
// arrives on stopMem.
func (rc *ResourceConsumer) makeConsumeMemRequests() {
	// Consistent with makeConsumeCPURequests: this runs in its own goroutine,
	// so recover panics for Ginkgo instead of crashing the whole test binary.
	defer GinkgoRecover()
	var count int
	var rest int
	for {
		select {
		case megabytes := <-rc.mem:
			// New target: split into full-size requests plus one remainder.
			count = megabytes / requestSizeInMegabytes
			rest = megabytes - count*requestSizeInMegabytes
		case <-time.After(sleepTime):
			if count > 0 {
				rc.sendConsumeMemRequests(count, requestSizeInMegabytes, consumptionTimeInSeconds)
			}
			if rest > 0 {
				go rc.sendOneConsumeMemRequest(rest, consumptionTimeInSeconds)
			}
		case <-rc.stopMem:
			return
		}
	}
}
// sendOneConsumeCPUrequest sends POST request for cpu consumption
func (rc *ResourceConsumer) sendOneConsumeCPUrequest(millicores int, durationSec int) {
// sendConsumeCPURequests fires off the given number of parallel CPU
// consumption requests, each asking for millicores for durationSec seconds.
func (rc *ResourceConsumer) sendConsumeCPURequests(requests, millicores, durationSec int) {
	for remaining := requests; remaining > 0; remaining-- {
		go rc.sendOneConsumeCPURequest(millicores, durationSec)
	}
}
// sendConsumeMemRequests fires off the given number of parallel memory
// consumption requests, each asking for megabytes for durationSec seconds.
func (rc *ResourceConsumer) sendConsumeMemRequests(requests, megabytes, durationSec int) {
	for remaining := requests; remaining > 0; remaining-- {
		go rc.sendOneConsumeMemRequest(megabytes, durationSec)
	}
}
// sendOneConsumeCPURequest sends POST request for cpu consumption
func (rc *ResourceConsumer) sendOneConsumeCPURequest(millicores int, durationSec int) {
defer GinkgoRecover()
_, err := rc.framework.Client.Post().
Prefix("proxy").
Namespace(rc.framework.Namespace.Name).
@ -116,6 +158,22 @@ func (rc *ResourceConsumer) sendOneConsumeCPUrequest(millicores int, durationSec
expectNoError(err)
}
// sendOneConsumeMemRequest sends a single POST request, routed through the
// apiserver proxy, asking the consumer service to hold megabytes MB of memory
// for durationSec seconds.
func (rc *ResourceConsumer) sendOneConsumeMemRequest(megabytes int, durationSec int) {
defer GinkgoRecover() // called via `go`; recover panics for Ginkgo instead of crashing the process
_, err := rc.framework.Client.Post().
Prefix("proxy").
Namespace(rc.framework.Namespace.Name).
Resource("services").
Name(rc.name).
Suffix("ConsumeMem").
Param("megabytes", strconv.Itoa(megabytes)).
Param("durationSec", strconv.Itoa(durationSec)).
Do().
Raw()
expectNoError(err)
}
func (rc *ResourceConsumer) GetReplicas() int {
replicationController, err := rc.framework.Client.ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name)
expectNoError(err)
@ -139,7 +197,8 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int) {
}
func (rc *ResourceConsumer) CleanUp() {
rc.stop <- 0
rc.stopCPU <- 0
rc.stopMem <- 0
expectNoError(DeleteRC(rc.framework.Client, rc.framework.Namespace.Name, rc.name))
expectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name))
expectNoError(rc.framework.Client.Experimental().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Delete(rc.name, api.NewDeleteOptions(0)))
@ -171,4 +230,5 @@ func runServiceAndRCForResourceConsumer(c *client.Client, ns, name string, repli
Replicas: replicas,
}
expectNoError(RunRC(config))
}

View File

@ -17,8 +17,6 @@ limitations under the License.
package e2e
import (
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/apis/experimental"
@ -27,7 +25,8 @@ import (
)
const (
sleep = 10 * time.Minute
kind = "replicationController"
subresource = "scale"
)
var _ = Describe("Horizontal pod autoscaling", func() {
@ -41,30 +40,31 @@ var _ = Describe("Horizontal pod autoscaling", func() {
AfterEach(func() {
})
// CPU tests
It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods (scale resource: CPU)", func() {
rc = NewResourceConsumer("rc", 1, 700, f)
createHorizontalPodAutoscaler(rc, "0.3")
rc = NewResourceConsumer("rc", 1, 700, 0, f)
createCPUHorizontalPodAutoscaler(rc, "0.3")
rc.WaitForReplicas(3)
rc.CleanUp()
})
It("[Skipped][Horizontal pod autoscaling Suite] should scale from 3 pods to 1 pod (scale resource: CPU)", func() {
rc = NewResourceConsumer("rc", 3, 0, f)
createHorizontalPodAutoscaler(rc, "0.7")
rc = NewResourceConsumer("rc", 3, 0, 0, f)
createCPUHorizontalPodAutoscaler(rc, "0.7")
rc.WaitForReplicas(1)
rc.CleanUp()
})
It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to maximum 5 pods (scale resource: CPU)", func() {
rc = NewResourceConsumer("rc", 1, 700, f)
createHorizontalPodAutoscaler(rc, "0.1")
rc = NewResourceConsumer("rc", 1, 700, 0, f)
createCPUHorizontalPodAutoscaler(rc, "0.1")
rc.WaitForReplicas(5)
rc.CleanUp()
})
It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 1 (scale resource: CPU)", func() {
rc = NewResourceConsumer("rc", 1, 700, f)
createHorizontalPodAutoscaler(rc, "0.3")
rc = NewResourceConsumer("rc", 1, 700, 0, f)
createCPUHorizontalPodAutoscaler(rc, "0.3")
rc.WaitForReplicas(3)
rc.ConsumeCPU(300)
rc.WaitForReplicas(1)
@ -72,8 +72,8 @@ var _ = Describe("Horizontal pod autoscaling", func() {
})
It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 5 (scale resource: CPU)", func() {
rc = NewResourceConsumer("rc", 1, 300, f)
createHorizontalPodAutoscaler(rc, "0.1")
rc = NewResourceConsumer("rc", 1, 300, 0, f)
createCPUHorizontalPodAutoscaler(rc, "0.1")
rc.WaitForReplicas(3)
rc.ConsumeCPU(700)
rc.WaitForReplicas(5)
@ -81,8 +81,8 @@ var _ = Describe("Horizontal pod autoscaling", func() {
})
It("[Skipped][Horizontal pod autoscaling Suite] should scale from 3 pods to 1 pod and from 1 to 3 (scale resource: CPU)", func() {
rc = NewResourceConsumer("rc", 3, 0, f)
createHorizontalPodAutoscaler(rc, "0.3")
rc = NewResourceConsumer("rc", 3, 0, 0, f)
createCPUHorizontalPodAutoscaler(rc, "0.3")
rc.WaitForReplicas(1)
rc.ConsumeCPU(700)
rc.WaitForReplicas(3)
@ -90,17 +90,75 @@ var _ = Describe("Horizontal pod autoscaling", func() {
})
It("[Skipped][Horizontal pod autoscaling Suite] should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: CPU)", func() {
rc = NewResourceConsumer("rc", 5, 700, f)
createHorizontalPodAutoscaler(rc, "0.3")
rc = NewResourceConsumer("rc", 5, 700, 0, f)
createCPUHorizontalPodAutoscaler(rc, "0.3")
rc.WaitForReplicas(3)
rc.ConsumeCPU(100)
rc.WaitForReplicas(1)
rc.CleanUp()
})
// Memory tests
It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods (scale resource: Memory)", func() {
rc = NewResourceConsumer("rc", 1, 0, 800, f)
createMemoryHorizontalPodAutoscaler(rc, "300")
rc.WaitForReplicas(3)
rc.CleanUp()
})
It("[Skipped][Horizontal pod autoscaling Suite] should scale from 3 pods to 1 pod (scale resource: Memory)", func() {
rc = NewResourceConsumer("rc", 3, 0, 0, f)
createMemoryHorizontalPodAutoscaler(rc, "700")
rc.WaitForReplicas(1)
rc.CleanUp()
})
It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to maximum 5 pods (scale resource: Memory)", func() {
rc = NewResourceConsumer("rc", 1, 0, 700, f)
createMemoryHorizontalPodAutoscaler(rc, "100")
rc.WaitForReplicas(5)
rc.CleanUp()
})
It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 1 (scale resource: Memory)", func() {
rc = NewResourceConsumer("rc", 1, 0, 700, f)
createMemoryHorizontalPodAutoscaler(rc, "300")
rc.WaitForReplicas(3)
rc.ConsumeMem(100)
rc.WaitForReplicas(1)
rc.CleanUp()
})
It("[Skipped][Horizontal pod autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 5 (scale resource: Memory)", func() {
rc = NewResourceConsumer("rc", 1, 0, 500, f)
createMemoryHorizontalPodAutoscaler(rc, "200")
rc.WaitForReplicas(3)
rc.ConsumeMem(1000)
rc.WaitForReplicas(5)
rc.CleanUp()
})
It("[Skipped][Horizontal pod autoscaling Suite] should scale from 3 pods to 1 pod and from 1 to 3 (scale resource: Memory)", func() {
rc = NewResourceConsumer("rc", 3, 0, 0, f)
createMemoryHorizontalPodAutoscaler(rc, "300")
rc.WaitForReplicas(1)
rc.ConsumeMem(700)
rc.WaitForReplicas(3)
rc.CleanUp()
})
It("[Skipped][Horizontal pod autoscaling Suite] should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: Memory)", func() {
rc = NewResourceConsumer("rc", 5, 0, 700, f)
createMemoryHorizontalPodAutoscaler(rc, "300")
rc.WaitForReplicas(3)
rc.ConsumeMem(100)
rc.WaitForReplicas(1)
rc.CleanUp()
})
})
func createHorizontalPodAutoscaler(rc *ResourceConsumer, cpu string) {
func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu string) {
hpa := &experimental.HorizontalPodAutoscaler{
ObjectMeta: api.ObjectMeta{
Name: rc.name,
@ -108,10 +166,10 @@ func createHorizontalPodAutoscaler(rc *ResourceConsumer, cpu string) {
},
Spec: experimental.HorizontalPodAutoscalerSpec{
ScaleRef: &experimental.SubresourceReference{
Kind: "replicationController",
Kind: kind,
Name: rc.name,
Namespace: rc.framework.Namespace.Name,
Subresource: "scale",
Subresource: subresource,
},
MinCount: 1,
MaxCount: 5,
@ -121,3 +179,26 @@ func createHorizontalPodAutoscaler(rc *ResourceConsumer, cpu string) {
_, errHPA := rc.framework.Client.Experimental().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
expectNoError(errHPA)
}
// createMemoryHorizontalPodAutoscaler creates a horizontal pod autoscaler
// targeting the consumer's replication controller, scaling between 1 and 5
// replicas on memory consumption. The memory argument is in megabytes
// (the "M" suffix passed to MustParse means 10^6 bytes, not "Mi"/2^20 —
// NOTE(review): confirm that decimal megabytes are intended here).
func createMemoryHorizontalPodAutoscaler(rc *ResourceConsumer, memory string) {
hpa := &experimental.HorizontalPodAutoscaler{
ObjectMeta: api.ObjectMeta{
Name: rc.name,
Namespace: rc.framework.Namespace.Name,
},
Spec: experimental.HorizontalPodAutoscalerSpec{
ScaleRef: &experimental.SubresourceReference{
Kind: kind,
Name: rc.name,
Namespace: rc.framework.Namespace.Name,
Subresource: subresource,
},
MinCount: 1,
MaxCount: 5,
Target: experimental.ResourceConsumption{Resource: api.ResourceMemory, Quantity: resource.MustParse(memory + "M")},
},
}
_, errHPA := rc.framework.Client.Experimental().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
expectNoError(errHPA)
}