Merge pull request #16587 from xelalexv/issue15474

Auto commit by PR queue bot

commit e5b838e92a
test/e2e/autoscaling_utils.go:

@@ -40,6 +40,8 @@ const (
 	startServiceInterval  = 5 * time.Second
 	resourceConsumerImage = "gcr.io/google_containers/resource_consumer:beta"
 	rcIsNil               = "ERROR: replicationController = nil"
+	deploymentIsNil       = "ERROR: deployment = nil"
+	invalidKind           = "ERROR: invalid workload kind for resource consumer"
 )
 
 /*
@@ -52,6 +54,7 @@ rc.ConsumeCPU(300)
 */
 type ResourceConsumer struct {
 	name      string
+	kind      string
 	framework *Framework
 	cpu       chan int
 	mem       chan int
@@ -63,12 +66,13 @@ type ResourceConsumer struct {
 	requestSizeInMegabytes int
 }
 
-func NewDynamicResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
-	return newResourceConsumer(name, replicas, initCPUTotal, initMemoryTotal, dynamicConsumptionTimeInSeconds, dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, cpuLimit, memLimit, framework)
+func NewDynamicResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
+	return newResourceConsumer(name, kind, replicas, initCPUTotal, initMemoryTotal, dynamicConsumptionTimeInSeconds, dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, cpuLimit, memLimit, framework)
 }
 
+// TODO this still defaults to replication controller
 func NewStaticResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
-	return newResourceConsumer(name, replicas, initCPUTotal, initMemoryTotal, staticConsumptionTimeInSeconds, initCPUTotal/replicas, initMemoryTotal/replicas, cpuLimit, memLimit, framework)
+	return newResourceConsumer(name, kindRC, replicas, initCPUTotal, initMemoryTotal, staticConsumptionTimeInSeconds, initCPUTotal/replicas, initMemoryTotal/replicas, cpuLimit, memLimit, framework)
 }
 
 /*
@@ -78,10 +82,11 @@ initMemoryTotal argument is in megabytes
 memLimit argument is in megabytes, memLimit is a maximum amount of memory that can be consumed by a single pod
 cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
 */
-func newResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal, consumptionTimeInSeconds, requestSizeInMillicores, requestSizeInMegabytes int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
-	runServiceAndRCForResourceConsumer(framework.Client, framework.Namespace.Name, name, replicas, cpuLimit, memLimit)
+func newResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, consumptionTimeInSeconds, requestSizeInMillicores, requestSizeInMegabytes int, cpuLimit, memLimit int64, framework *Framework) *ResourceConsumer {
+	runServiceAndWorkloadForResourceConsumer(framework.Client, framework.Namespace.Name, name, kind, replicas, cpuLimit, memLimit)
 	rc := &ResourceConsumer{
 		name:      name,
+		kind:      kind,
 		framework: framework,
 		cpu:       make(chan int),
 		mem:       make(chan int),
@@ -210,22 +215,35 @@ func (rc *ResourceConsumer) sendOneConsumeMemRequest(megabytes int, durationSec
 }
 
 func (rc *ResourceConsumer) GetReplicas() int {
-	replicationController, err := rc.framework.Client.ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name)
-	expectNoError(err)
-	if replicationController == nil {
-		Failf(rcIsNil)
+	switch rc.kind {
+	case kindRC:
+		replicationController, err := rc.framework.Client.ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name)
+		expectNoError(err)
+		if replicationController == nil {
+			Failf(rcIsNil)
+		}
+		return replicationController.Status.Replicas
+	case kindDeployment:
+		deployment, err := rc.framework.Client.Deployments(rc.framework.Namespace.Name).Get(rc.name)
+		expectNoError(err)
+		if deployment == nil {
+			Failf(deploymentIsNil)
+		}
+		return deployment.Status.Replicas
+	default:
+		Failf(invalidKind)
 	}
-	return replicationController.Status.Replicas
+	return 0
 }
 
 func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int) {
 	timeout := 10 * time.Minute
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
 		if desiredReplicas == rc.GetReplicas() {
-			Logf("Replication Controller current replicas number is equal to desired replicas number: %d", desiredReplicas)
+			Logf("%s: current replicas number is equal to desired replicas number: %d", rc.kind, desiredReplicas)
 			return
 		} else {
-			Logf("Replication Controller current replicas number %d waiting to be %d", rc.GetReplicas(), desiredReplicas)
+			Logf("%s: current replicas number %d waiting to be %d", rc.kind, rc.GetReplicas(), desiredReplicas)
 		}
 	}
 	Failf("timeout waiting %v for pods size to be %d", timeout, desiredReplicas)
@@ -252,8 +270,8 @@ func (rc *ResourceConsumer) CleanUp() {
 	expectNoError(rc.framework.Client.Services(rc.framework.Namespace.Name).Delete(rc.name))
 }
 
-func runServiceAndRCForResourceConsumer(c *client.Client, ns, name string, replicas int, cpuLimitMillis, memLimitMb int64) {
-	By(fmt.Sprintf("Running consuming RC %s with %v replicas", name, replicas))
+func runServiceAndWorkloadForResourceConsumer(c *client.Client, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
+	By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
 	_, err := c.Services(ns).Create(&api.Service{
 		ObjectMeta: api.ObjectMeta{
 			Name: name,
@@ -270,7 +288,8 @@ func runServiceAndRCForResourceConsumer(c *client.Client, ns, name string, repli
 		},
 	})
 	expectNoError(err)
-	config := RCConfig{
+
+	rcConfig := RCConfig{
 		Client: c,
 		Image:  resourceConsumerImage,
 		Name:   name,
@@ -282,7 +301,21 @@ func runServiceAndRCForResourceConsumer(c *client.Client, ns, name string, repli
 		MemRequest: memLimitMb * 1024 * 1024, // MemLimit is in bytes
 		MemLimit:   memLimitMb * 1024 * 1024,
 	}
-	expectNoError(RunRC(config))
+
+	switch kind {
+	case kindRC:
+		expectNoError(RunRC(rcConfig))
+		break
+	case kindDeployment:
+		dpConfig := DeploymentConfig{
+			rcConfig,
+		}
+		expectNoError(RunDeployment(dpConfig))
+		break
+	default:
+		Failf(invalidKind)
+	}
+
 	// Make sure endpoints are propagated.
 	// TODO(piosz): replace sleep with endpoints watch.
 	time.Sleep(10 * time.Second)
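For context, a minimal sketch (not part of the diff) of how a test drives the updated consumer API; the name "sample" and the resource figures are illustrative, while the constructor, constants, and methods are the ones defined above:

    // Hypothetical usage of the new kind-aware constructor:
    // 1 replica, 250 millicores initial CPU, 0 MB initial memory,
    // 500 millicore / 100 MB per-pod limits.
    rc := NewDynamicResourceConsumer("sample", kindDeployment, 1, 250, 0, 500, 100, f)
    defer rc.CleanUp()
    rc.ConsumeCPU(300)    // target CPU consumption in millicores
    rc.WaitForReplicas(3) // polls GetReplicas(), which now switches on rc.kind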
test/e2e/horizontal_pod_autoscaling.go:

@@ -17,6 +17,7 @@ limitations under the License.
 package e2e
 
 import (
+	"fmt"
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
@@ -26,37 +27,57 @@ import (
 )
 
 const (
-	kind             = "replicationController"
+	kindRC           = "replicationController"
+	kindDeployment   = "deployment"
 	subresource      = "scale"
 	stabilityTimeout = 10 * time.Minute
 )
 
 var _ = Describe("Horizontal pod autoscaling", func() {
 
 	var rc *ResourceConsumer
 	f := NewFramework("horizontal-pod-autoscaling")
 
-	// CPU tests
-	It("[Autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 5 (scale resource: CPU)", func() {
-		rc = NewDynamicResourceConsumer("rc", 1, 250, 0, 500, 100, f)
-		defer rc.CleanUp()
-		createCPUHorizontalPodAutoscaler(rc, 20)
-		rc.WaitForReplicas(3)
-		rc.EnsureDesiredReplicas(3, stabilityTimeout)
-		rc.ConsumeCPU(700)
-		rc.WaitForReplicas(5)
+	titleUp := "%s should scale from 1 pod to 3 pods and from 3 to 5 (via %s, with scale resource: CPU)"
+	titleDown := "%s should scale from 5 pods to 3 pods and from 3 to 1 (via %s, with scale resource: CPU)"
+
+	// CPU tests via deployments
+	It(fmt.Sprintf(titleUp, "[Skipped]", kindDeployment), func() {
+		scaleUp("deployment", kindDeployment, rc, f)
+	})
+	It(fmt.Sprintf(titleDown, "[Skipped]", kindDeployment), func() {
+		scaleDown("deployment", kindDeployment, rc, f)
 	})
 
-	It("[Autoscaling Suite] should scale from 5 pods to 3 pods and from 3 to 1 (scale resource: CPU)", func() {
-		rc = NewDynamicResourceConsumer("rc", 5, 400, 0, 500, 100, f)
-		defer rc.CleanUp()
-		createCPUHorizontalPodAutoscaler(rc, 30)
-		rc.WaitForReplicas(3)
-		rc.EnsureDesiredReplicas(3, stabilityTimeout)
-		rc.ConsumeCPU(100)
-		rc.WaitForReplicas(1)
+	// CPU tests via replication controllers
+	It(fmt.Sprintf(titleUp, "[Autoscaling Suite]", kindRC), func() {
+		scaleUp("rc", kindRC, rc, f)
+	})
+	It(fmt.Sprintf(titleDown, "[Autoscaling Suite]", kindRC), func() {
+		scaleDown("rc", kindRC, rc, f)
 	})
 })
 
+func scaleUp(name, kind string, rc *ResourceConsumer, f *Framework) {
+	rc = NewDynamicResourceConsumer(name, kind, 1, 250, 0, 500, 100, f)
+	defer rc.CleanUp()
+	createCPUHorizontalPodAutoscaler(rc, 20)
+	rc.WaitForReplicas(3)
+	rc.EnsureDesiredReplicas(3, stabilityTimeout)
+	rc.ConsumeCPU(700)
+	rc.WaitForReplicas(5)
+}
+
+func scaleDown(name, kind string, rc *ResourceConsumer, f *Framework) {
+	rc = NewDynamicResourceConsumer(name, kind, 5, 400, 0, 500, 100, f)
+	defer rc.CleanUp()
+	createCPUHorizontalPodAutoscaler(rc, 30)
+	rc.WaitForReplicas(3)
+	rc.EnsureDesiredReplicas(3, stabilityTimeout)
+	rc.ConsumeCPU(100)
+	rc.WaitForReplicas(1)
+}
+
 func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu int) {
 	minReplicas := 1
 	hpa := &extensions.HorizontalPodAutoscaler{
@@ -66,7 +87,7 @@ func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu int) {
 		},
 		Spec: extensions.HorizontalPodAutoscalerSpec{
 			ScaleRef: extensions.SubresourceReference{
-				Kind:        kind,
+				Kind:        rc.kind,
 				Name:        rc.name,
 				Subresource: subresource,
 			},
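A quick illustration (not part of the diff) of how the parameterized titles expand when the specs are registered, given kindRC = "replicationController" from the const block above:

    // fmt.Sprintf fills the suite tag and the workload kind into the title template.
    title := fmt.Sprintf(titleUp, "[Autoscaling Suite]", kindRC)
    // title == "[Autoscaling Suite] should scale from 1 pod to 3 pods and from 3 to 5 (via replicationController, with scale resource: CPU)"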
test/e2e/util.go | 162
@@ -34,6 +34,7 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	apierrs "k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/cache"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
@@ -204,6 +205,10 @@ type RCConfig struct {
 	MaxContainerFailures *int
 }
 
+type DeploymentConfig struct {
+	RCConfig
+}
+
 func nowStamp() string {
 	return time.Now().Format(time.StampMilli)
 }
@@ -1248,21 +1253,71 @@ func Diff(oldPods []*api.Pod, curPods []*api.Pod) PodDiff {
 	return podInfoMap
 }
 
+// RunDeployment Launches (and verifies correctness) of a Deployment
+// and will wait for all pods it spawns to become "Running".
+// It's the caller's responsibility to clean up externally (i.e. use the
+// namespace lifecycle for handling cleanup).
+func RunDeployment(config DeploymentConfig) error {
+	err := config.create()
+	if err != nil {
+		return err
+	}
+	return config.start()
+}
+
+func (config *DeploymentConfig) create() error {
+	By(fmt.Sprintf("creating deployment %s in namespace %s", config.Name, config.Namespace))
+	deployment := &extensions.Deployment{
+		ObjectMeta: api.ObjectMeta{
+			Name: config.Name,
+		},
+		Spec: extensions.DeploymentSpec{
+			Replicas: config.Replicas,
+			Selector: map[string]string{
+				"name": config.Name,
+			},
+			UniqueLabelKey: "deployment.kubernetes.io/podTemplateHash",
+			Template: api.PodTemplateSpec{
+				ObjectMeta: api.ObjectMeta{
+					Labels: map[string]string{"name": config.Name},
+				},
+				Spec: api.PodSpec{
+					Containers: []api.Container{
+						{
+							Name:    config.Name,
+							Image:   config.Image,
+							Command: config.Command,
+							Ports:   []api.ContainerPort{{ContainerPort: 80}},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	config.applyTo(&deployment.Spec.Template)
+
+	_, err := config.Client.Deployments(config.Namespace).Create(deployment)
+	if err != nil {
+		return fmt.Errorf("Error creating deployment: %v", err)
+	}
+	Logf("Created deployment with name: %v, namespace: %v, replica count: %v", deployment.Name, config.Namespace, deployment.Spec.Replicas)
+	return nil
+}
+
 // RunRC Launches (and verifies correctness) of a Replication Controller
 // and will wait for all pods it spawns to become "Running".
 // It's the caller's responsibility to clean up externally (i.e. use the
 // namespace lifecycle for handling cleanup).
 func RunRC(config RCConfig) error {
-	// Don't force tests to fail if they don't care about containers restarting.
-	var maxContainerFailures int
-	if config.MaxContainerFailures == nil {
-		maxContainerFailures = int(math.Max(1.0, float64(config.Replicas)*.01))
-	} else {
-		maxContainerFailures = *config.MaxContainerFailures
+	err := config.create()
+	if err != nil {
+		return err
 	}
+	return config.start()
+}
 
-	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
-
+func (config *RCConfig) create() error {
 	By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
 	rc := &api.ReplicationController{
 		ObjectMeta: api.ObjectMeta{
@@ -1290,47 +1345,66 @@ func RunRC(config RCConfig) error {
 			},
 		},
 	}
-	if config.Env != nil {
-		for k, v := range config.Env {
-			c := &rc.Spec.Template.Spec.Containers[0]
-			c.Env = append(c.Env, api.EnvVar{Name: k, Value: v})
-		}
-	}
-	if config.Labels != nil {
-		for k, v := range config.Labels {
-			rc.Spec.Template.ObjectMeta.Labels[k] = v
-		}
-	}
-	if config.Ports != nil {
-		for k, v := range config.Ports {
-			c := &rc.Spec.Template.Spec.Containers[0]
-			c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: v})
-		}
-	}
-	if config.CpuLimit > 0 || config.MemLimit > 0 {
-		rc.Spec.Template.Spec.Containers[0].Resources.Limits = api.ResourceList{}
-	}
-	if config.CpuLimit > 0 {
-		rc.Spec.Template.Spec.Containers[0].Resources.Limits[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuLimit, resource.DecimalSI)
-	}
-	if config.MemLimit > 0 {
-		rc.Spec.Template.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI)
-	}
-	if config.CpuRequest > 0 || config.MemRequest > 0 {
-		rc.Spec.Template.Spec.Containers[0].Resources.Requests = api.ResourceList{}
-	}
-	if config.CpuRequest > 0 {
-		rc.Spec.Template.Spec.Containers[0].Resources.Requests[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI)
-	}
-	if config.MemRequest > 0 {
-		rc.Spec.Template.Spec.Containers[0].Resources.Requests[api.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI)
-	}
+
+	config.applyTo(rc.Spec.Template)
 
 	_, err := config.Client.ReplicationControllers(config.Namespace).Create(rc)
 	if err != nil {
 		return fmt.Errorf("Error creating replication controller: %v", err)
 	}
 	Logf("Created replication controller with name: %v, namespace: %v, replica count: %v", rc.Name, config.Namespace, rc.Spec.Replicas)
+	return nil
+}
+
+func (config *RCConfig) applyTo(template *api.PodTemplateSpec) {
+	if config.Env != nil {
+		for k, v := range config.Env {
+			c := &template.Spec.Containers[0]
+			c.Env = append(c.Env, api.EnvVar{Name: k, Value: v})
+		}
+	}
+	if config.Labels != nil {
+		for k, v := range config.Labels {
+			template.ObjectMeta.Labels[k] = v
+		}
+	}
+	if config.Ports != nil {
+		for k, v := range config.Ports {
+			c := &template.Spec.Containers[0]
+			c.Ports = append(c.Ports, api.ContainerPort{Name: k, ContainerPort: v})
+		}
+	}
+	if config.CpuLimit > 0 || config.MemLimit > 0 {
+		template.Spec.Containers[0].Resources.Limits = api.ResourceList{}
+	}
+	if config.CpuLimit > 0 {
+		template.Spec.Containers[0].Resources.Limits[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuLimit, resource.DecimalSI)
+	}
+	if config.MemLimit > 0 {
+		template.Spec.Containers[0].Resources.Limits[api.ResourceMemory] = *resource.NewQuantity(config.MemLimit, resource.DecimalSI)
+	}
+	if config.CpuRequest > 0 || config.MemRequest > 0 {
+		template.Spec.Containers[0].Resources.Requests = api.ResourceList{}
+	}
+	if config.CpuRequest > 0 {
+		template.Spec.Containers[0].Resources.Requests[api.ResourceCPU] = *resource.NewMilliQuantity(config.CpuRequest, resource.DecimalSI)
+	}
+	if config.MemRequest > 0 {
+		template.Spec.Containers[0].Resources.Requests[api.ResourceMemory] = *resource.NewQuantity(config.MemRequest, resource.DecimalSI)
+	}
+}
+
+func (config *RCConfig) start() error {
+	// Don't force tests to fail if they don't care about containers restarting.
+	var maxContainerFailures int
+	if config.MaxContainerFailures == nil {
+		maxContainerFailures = int(math.Max(1.0, float64(config.Replicas)*.01))
+	} else {
+		maxContainerFailures = *config.MaxContainerFailures
+	}
+
+	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
+
 	podStore := newPodStore(config.Client, config.Namespace, label, fields.Everything())
 	defer podStore.Stop()
 
@@ -1403,7 +1477,7 @@ func RunRC(config RCConfig) error {
 		}
 
 		Logf("%v Pods: %d out of %d created, %d running, %d pending, %d waiting, %d inactive, %d terminating, %d unknown, %d runningButNotReady ",
-			rc.Name, len(pods), config.Replicas, running, pending, waiting, inactive, terminating, unknown, runningButNotReady)
+			config.Name, len(pods), config.Replicas, running, pending, waiting, inactive, terminating, unknown, runningButNotReady)
 
 		promPushRunningPending(running, pending)
 
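For orientation, a sketch (not part of the diff) of the create/start split introduced here: because DeploymentConfig embeds RCConfig, a caller wraps the same config struct, and RunDeployment reuses applyTo() and start(). The field values below are illustrative; the fields themselves appear in the RCConfig usage above.

    // Hypothetical caller; c, ns come from the surrounding e2e test.
    dpConfig := DeploymentConfig{RCConfig{
        Client:    c, // assumed *client.Client from the framework
        Image:     resourceConsumerImage,
        Name:      "sample",
        Namespace: ns,
        Replicas:  2,
    }}
    expectNoError(RunDeployment(dpConfig)) // create() the Deployment, then start() waits for running pods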