Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-04 09:49:50 +00:00

commit 070f0979c2 (parent 8e8599fcd7)

    Make it possible to run Load test using Deployments or ReplicaSets
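Note: the diff below threads a generic testutils.RunObjectConfig through the
density and load tests where the concrete *testutils.RCConfig was used before.
The interface itself lives in test/utils and is not part of this diff; the
following is a hedged sketch whose method set is inferred from the call sites
in this commit (signatures are assumptions, not the actual definition):

    // Sketch only -- method names taken from calls made in this commit.
    type RunObjectConfig interface {
        Run() error // create the RC/ReplicaSet/Deployment and wait for its pods
        GetName() string
        GetNamespace() string
        GetKind() schema.GroupKind
        GetClient() clientset.Interface
        GetInternalClient() internalclientset.Interface
        SetClient(clientset.Interface)
        SetInternalClient(internalclientset.Interface)
        GetReplicas() int
    }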
@@ -28,6 +28,7 @@ import (
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/resource"
     "k8s.io/kubernetes/pkg/api/v1"
+    "k8s.io/kubernetes/pkg/apis/extensions"
     metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
     "k8s.io/kubernetes/pkg/client/cache"
     "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@@ -35,6 +36,7 @@ import (
     "k8s.io/kubernetes/pkg/fields"
     "k8s.io/kubernetes/pkg/labels"
     "k8s.io/kubernetes/pkg/runtime"
+    "k8s.io/kubernetes/pkg/runtime/schema"
     "k8s.io/kubernetes/pkg/util/sets"
     utiluuid "k8s.io/kubernetes/pkg/util/uuid"
     "k8s.io/kubernetes/pkg/util/workqueue"
@@ -56,11 +58,13 @@ const (
 var MaxContainerFailures = 0

 type DensityTestConfig struct {
-    Configs           []testutils.RCConfig
+    Configs           []testutils.RunObjectConfig
     ClientSet         clientset.Interface
     InternalClientset internalclientset.Interface
     PollInterval      time.Duration
     PodCount          int
+    // What kind of resource we want to create
+    kind schema.GroupKind
 }

 func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
@@ -193,13 +197,13 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
     wg := sync.WaitGroup{}
     wg.Add(len(dtc.Configs))
     for i := range dtc.Configs {
-        rcConfig := dtc.Configs[i]
+        config := dtc.Configs[i]
         go func() {
             defer GinkgoRecover()
             // Call wg.Done() in defer to avoid blocking whole test
             // in case of error from RunRC.
             defer wg.Done()
-            framework.ExpectNoError(framework.RunRC(rcConfig))
+            framework.ExpectNoError(config.Run())
         }()
     }
     logStopCh := make(chan struct{})
@@ -236,21 +240,21 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {

 func cleanupDensityTest(dtc DensityTestConfig) {
     defer GinkgoRecover()
-    By("Deleting ReplicationController")
+    By("Deleting created Collections")
     // We explicitly delete all pods to have API calls necessary for deletion accounted in metrics.
     for i := range dtc.Configs {
-        rcName := dtc.Configs[i].Name
-        rc, err := dtc.ClientSet.Core().ReplicationControllers(dtc.Configs[i].Namespace).Get(rcName)
-        if err == nil && *(rc.Spec.Replicas) != 0 {
-            if framework.TestContext.GarbageCollectorEnabled {
-                By("Cleaning up only the replication controller, garbage collector will clean up the pods")
-                err := framework.DeleteRCAndWaitForGC(dtc.ClientSet, dtc.Configs[i].Namespace, rcName)
-                framework.ExpectNoError(err)
-            } else {
-                By("Cleaning up the replication controller and pods")
-                err := framework.DeleteRCAndPods(dtc.ClientSet, dtc.InternalClientset, dtc.Configs[i].Namespace, rcName)
-                framework.ExpectNoError(err)
-            }
-        }
+        name := dtc.Configs[i].GetName()
+        namespace := dtc.Configs[i].GetNamespace()
+        kind := dtc.Configs[i].GetKind()
+        // TODO: Remove Deployment guard once GC is implemented for Deployments.
+        if framework.TestContext.GarbageCollectorEnabled && kind != extensions.Kind("Deployment") {
+            By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
+            err := framework.DeleteResourceAndWaitForGC(dtc.ClientSet, kind, namespace, name)
+            framework.ExpectNoError(err)
+        } else {
+            By(fmt.Sprintf("Cleaning up the %v and pods", kind))
+            err := framework.DeleteResourceAndPods(dtc.ClientSet, dtc.InternalClientset, kind, dtc.Configs[i].GetNamespace(), name)
+            framework.ExpectNoError(err)
+        }
     }
 }
@@ -265,7 +269,7 @@ func cleanupDensityTest(dtc DensityTestConfig) {
 var _ = framework.KubeDescribe("Density", func() {
     var c clientset.Interface
     var nodeCount int
-    var RCName string
+    var name string
     var additionalPodsPrefix string
     var ns string
     var uuid string
@@ -352,27 +356,31 @@ var _ = framework.KubeDescribe("Density", func() {
         podsPerNode int
         // Controls how often the apiserver is polled for pods
         interval time.Duration
+        // What kind of resource we should be creating. Default: ReplicationController
+        kind schema.GroupKind
     }

     densityTests := []Density{
         // TODO: Expose runLatencyTest as ginkgo flag.
-        {podsPerNode: 3, runLatencyTest: false},
-        {podsPerNode: 30, runLatencyTest: true},
-        {podsPerNode: 50, runLatencyTest: false},
-        {podsPerNode: 95, runLatencyTest: true},
-        {podsPerNode: 100, runLatencyTest: false},
+        {podsPerNode: 3, runLatencyTest: false, kind: api.Kind("ReplicationController")},
+        {podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController")},
+        {podsPerNode: 50, runLatencyTest: false, kind: api.Kind("ReplicationController")},
+        {podsPerNode: 95, runLatencyTest: true, kind: api.Kind("ReplicationController")},
+        {podsPerNode: 100, runLatencyTest: false, kind: api.Kind("ReplicationController")},
     }

     for _, testArg := range densityTests {
         feature := "ManualPerformance"
         switch testArg.podsPerNode {
         case 30:
-            feature = "Performance"
+            if testArg.kind == api.Kind("ReplicationController") {
+                feature = "Performance"
+            }
         case 95:
             feature = "HighDensityPerformance"
         }

-        name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node", feature, testArg.podsPerNode)
+        name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node using %v", feature, testArg.podsPerNode, testArg.kind)
         itArg := testArg
         It(name, func() {
             nodePreparer := framework.NewE2ETestNodePreparer(
@@ -392,44 +400,55 @@ var _ = framework.KubeDescribe("Density", func() {
             defer fileHndl.Close()

             // nodeCountPerNamespace and CreateNamespaces are defined in load.go
-            numberOfRCs := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
-            namespaces, err := CreateNamespaces(f, numberOfRCs, fmt.Sprintf("density-%v", testArg.podsPerNode))
+            numberOfCollections := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
+            namespaces, err := CreateNamespaces(f, numberOfCollections, fmt.Sprintf("density-%v", testArg.podsPerNode))
             framework.ExpectNoError(err)

-            RCConfigs := make([]testutils.RCConfig, numberOfRCs)
+            configs := make([]testutils.RunObjectConfig, numberOfCollections)
             // Since all RCs are created at the same time, timeout for each config
             // has to assume that it will be run at the very end.
             podThroughput := 20
             timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute
             // createClients is defined in load.go
-            clients, internalClients, err := createClients(numberOfRCs)
-            for i := 0; i < numberOfRCs; i++ {
-                RCName := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
+            clients, internalClients, err := createClients(numberOfCollections)
+            for i := 0; i < numberOfCollections; i++ {
+                name := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
                 nsName := namespaces[i].Name
-                RCConfigs[i] = testutils.RCConfig{
+                baseConfig := &testutils.RCConfig{
                     Client:               clients[i],
                     InternalClient:       internalClients[i],
                     Image:                framework.GetPauseImageName(f.ClientSet),
-                    Name:                 RCName,
+                    Name:                 name,
                     Namespace:            nsName,
                     Labels:               map[string]string{"type": "densityPod"},
                     PollInterval:         DensityPollInterval,
                     Timeout:              timeout,
                     PodStatusFile:        fileHndl,
-                    Replicas:             (totalPods + numberOfRCs - 1) / numberOfRCs,
+                    Replicas:             (totalPods + numberOfCollections - 1) / numberOfCollections,
                     CpuRequest:           nodeCpuCapacity / 100,
                     MemRequest:           nodeMemCapacity / 100,
                     MaxContainerFailures: &MaxContainerFailures,
                     Silent:               true,
                 }
+                switch itArg.kind {
+                case api.Kind("ReplicationController"):
+                    configs[i] = baseConfig
+                case extensions.Kind("ReplicaSet"):
+                    configs[i] = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
+                case extensions.Kind("Deployment"):
+                    configs[i] = &testutils.DeploymentConfig{RCConfig: *baseConfig}
+                default:
+                    framework.Failf("Unsupported kind: %v", itArg.kind)
+                }
             }

             dConfig := DensityTestConfig{
                 ClientSet:         f.ClientSet,
                 InternalClientset: f.InternalClientset,
-                Configs:           RCConfigs,
+                Configs:           configs,
                 PodCount:          totalPods,
                 PollInterval:      DensityPollInterval,
+                kind:              itArg.kind,
             }
             e2eStartupTime = runDensityTest(dConfig)
             if itArg.runLatencyTest {
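Note: testutils.ReplicaSetConfig and testutils.DeploymentConfig are defined in
test/utils, not in this diff. The composite literals above
(ReplicaSetConfig{RCConfig: *baseConfig}) show that they embed RCConfig, so
every shared field set on baseConfig carries over. A hedged sketch of the
assumed pattern, with only kind-specific behavior overridden:

    // Assumed shape; the real type lives in test/utils.
    type ReplicaSetConfig struct {
        RCConfig // embedded base configuration: name, namespace, replicas, ...
    }

    // GetKind would report the kind this config creates (assumption).
    func (c *ReplicaSetConfig) GetKind() schema.GroupKind {
        return extensions.Kind("ReplicaSet")
    }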
@@ -657,29 +676,29 @@ var _ = framework.KubeDescribe("Density", func() {
             fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
             framework.ExpectNoError(err)
             defer fileHndl.Close()
-            rcCnt := 1
-            RCConfigs := make([]testutils.RCConfig, rcCnt)
-            podsPerRC := int(totalPods / rcCnt)
-            for i := 0; i < rcCnt; i++ {
-                if i == rcCnt-1 {
-                    podsPerRC += int(math.Mod(float64(totalPods), float64(rcCnt)))
+            collectionCount := 1
+            configs := make([]testutils.RunObjectConfig, collectionCount)
+            podsPerCollection := int(totalPods / collectionCount)
+            for i := 0; i < collectionCount; i++ {
+                if i == collectionCount-1 {
+                    podsPerCollection += int(math.Mod(float64(totalPods), float64(collectionCount)))
                 }
-                RCName = "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
-                RCConfigs[i] = testutils.RCConfig{Client: c,
+                name = "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
+                configs[i] = &testutils.RCConfig{Client: c,
                     Image:                framework.GetPauseImageName(f.ClientSet),
-                    Name:                 RCName,
+                    Name:                 name,
                     Namespace:            ns,
                     Labels:               map[string]string{"type": "densityPod"},
                     PollInterval:         DensityPollInterval,
                     PodStatusFile:        fileHndl,
-                    Replicas:             podsPerRC,
+                    Replicas:             podsPerCollection,
                     MaxContainerFailures: &MaxContainerFailures,
                     Silent:               true,
                 }
             }
             dConfig := DensityTestConfig{
                 ClientSet:    f.ClientSet,
-                Configs:      RCConfigs,
+                Configs:      configs,
                 PodCount:     totalPods,
                 PollInterval: DensityPollInterval,
             }
@@ -727,6 +746,6 @@ func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns,
     }
     _, err := c.Core().ReplicationControllers(ns).Create(rc)
     framework.ExpectNoError(err)
-    framework.ExpectNoError(framework.WaitForRCPodsRunning(c, ns, name))
+    framework.ExpectNoError(framework.WaitForControlledPodsRunning(c, ns, name, api.Kind("ReplicationController")))
     framework.Logf("Found pod '%s' running", name)
 }
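Note: every WaitForRCPodsRunning call site now passes an explicit kind to the
new WaitForControlledPodsRunning (added in the framework utilities further
down). A hypothetical call for a ReplicaSet-backed workload; the namespace and
name are invented example values:

    if err := framework.WaitForControlledPodsRunning(
        c, "e2e-ns", "my-rs", extensions.Kind("ReplicaSet")); err != nil {
        framework.Failf("pods never reached Running: %v", err)
    }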
@@ -21,6 +21,7 @@ import (
     "path/filepath"
     "time"

+    "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/v1"
     clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
     "k8s.io/kubernetes/pkg/labels"
@@ -90,7 +91,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {

     // wait for objects
     for _, ns := range namespaces {
-        framework.WaitForRCPodsRunning(c, ns.Name, backendRcName)
+        framework.WaitForControlledPodsRunning(c, ns.Name, backendRcName, api.Kind("ReplicationController"))
         framework.WaitForService(c, ns.Name, backendSvcName, true, framework.Poll, framework.ServiceStartTimeout)
     }
     // it is not enough that pods are running because they may be set to running, but
@@ -2622,9 +2622,29 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint)
     }
 }

-func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
-    By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
-    scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), internalClientset)
+func getScalerForKind(internalClientset internalclientset.Interface, kind schema.GroupKind) (kubectl.Scaler, error) {
+    switch kind {
+    case api.Kind("ReplicationController"):
+        return kubectl.ScalerFor(api.Kind("ReplicationController"), internalClientset)
+    case extensionsinternal.Kind("ReplicaSet"):
+        return kubectl.ScalerFor(extensionsinternal.Kind("ReplicaSet"), internalClientset)
+    case extensionsinternal.Kind("Deployment"):
+        return kubectl.ScalerFor(extensionsinternal.Kind("Deployment"), internalClientset)
+    default:
+        return nil, fmt.Errorf("Unsupported kind for getting Scaler: %v", kind)
+    }
+}
+
+func ScaleResource(
+    clientset clientset.Interface,
+    internalClientset internalclientset.Interface,
+    ns, name string,
+    size uint,
+    wait bool,
+    kind schema.GroupKind,
+) error {
+    By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size))
+    scaler, err := getScalerForKind(internalClientset, kind)
     if err != nil {
         return err
     }
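Note: getScalerForKind centralizes the kind dispatch so ScaleResource becomes
the single entry point for scaling any of the three controller types. A hedged
usage example; the names are invented, and extensions is assumed to alias
k8s.io/kubernetes/pkg/apis/extensions as in the test files of this diff:

    // Scale the Deployment "my-dep" in "e2e-ns" to 10 replicas and wait
    // until its controlled pods are Running.
    err := framework.ScaleResource(
        clientSet, internalClientSet, // the two client interfaces
        "e2e-ns", "my-dep",           // example namespace and name
        10,                           // target size
        true,                         // wait for controlled pods
        extensions.Kind("Deployment"))
    framework.ExpectNoError(err)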
@@ -2636,51 +2656,32 @@ func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.
     if !wait {
         return nil
     }
-    return WaitForRCPodsRunning(clientset, ns, name)
+    return WaitForControlledPodsRunning(clientset, ns, name, kind)
 }

 // Wait up to 10 minutes for pods to become Running.
-func WaitForRCPodsRunning(c clientset.Interface, ns, rcName string) error {
-    rc, err := c.Core().ReplicationControllers(ns).Get(rcName)
+func WaitForControlledPodsRunning(c clientset.Interface, ns, name string, kind schema.GroupKind) error {
+    rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
+    if err != nil {
+        return err
+    }
+    selector, err := getSelectorFromRuntimeObject(rtObject)
     if err != nil {
         return err
     }
-    selector := labels.SelectorFromSet(labels.Set(rc.Spec.Selector))
     err = testutils.WaitForPodsWithLabelRunning(c, ns, selector)
     if err != nil {
-        return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", rcName, err)
+        return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
     }
     return nil
 }

+func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
+    return ScaleResource(clientset, internalClientset, ns, name, size, wait, api.Kind("ReplicationController"))
+}
+
 func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
-    By(fmt.Sprintf("Scaling Deployment %s in namespace %s to %d", name, ns, size))
-    scaler, err := kubectl.ScalerFor(extensionsinternal.Kind("Deployment"), internalClientset)
-    if err != nil {
-        return err
-    }
-    waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
-    waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
-    if err = scaler.Scale(ns, name, size, nil, waitForScale, waitForReplicas); err != nil {
-        return fmt.Errorf("error while scaling Deployment %s to %d replicas: %v", name, size, err)
-    }
-    if !wait {
-        return nil
-    }
-    return WaitForDeploymentPodsRunning(clientset, ns, name)
-}
-
-func WaitForDeploymentPodsRunning(c clientset.Interface, ns, name string) error {
-    deployment, err := c.Extensions().Deployments(ns).Get(name)
-    if err != nil {
-        return err
-    }
-    selector := labels.SelectorFromSet(labels.Set(deployment.Spec.Selector.MatchLabels))
-    err = testutils.WaitForPodsWithLabelRunning(c, ns, selector)
-    if err != nil {
-        return fmt.Errorf("Error while waiting for Deployment %s pods to be running: %v", name, err)
-    }
-    return nil
+    return ScaleResource(clientset, internalClientset, ns, name, size, wait, extensionsinternal.Kind("Deployment"))
 }

 // Returns true if all the specified pods are scheduled, else returns false.
@@ -2760,26 +2761,102 @@ func WaitForPodsWithLabelRunningReady(c clientset.Interface, ns string, label la
     return pods, err
 }

-// DeleteRCAndPods a Replication Controller and all pods it spawned
-func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
-    By(fmt.Sprintf("deleting replication controller %s in namespace %s", name, ns))
-    rc, err := clientset.Core().ReplicationControllers(ns).Get(name)
+func getRuntimeObjectForKind(c clientset.Interface, kind schema.GroupKind, ns, name string) (runtime.Object, error) {
+    switch kind {
+    case api.Kind("ReplicationController"):
+        return c.Core().ReplicationControllers(ns).Get(name)
+    case extensionsinternal.Kind("ReplicaSet"):
+        return c.Extensions().ReplicaSets(ns).Get(name)
+    case extensionsinternal.Kind("Deployment"):
+        return c.Extensions().Deployments(ns).Get(name)
+    default:
+        return nil, fmt.Errorf("Unsupported kind when getting runtime object: %v", kind)
+    }
+}
+
+func deleteResource(c clientset.Interface, kind schema.GroupKind, ns, name string, deleteOption *v1.DeleteOptions) error {
+    switch kind {
+    case api.Kind("ReplicationController"):
+        return c.Core().ReplicationControllers(ns).Delete(name, deleteOption)
+    case extensionsinternal.Kind("ReplicaSet"):
+        return c.Extensions().ReplicaSets(ns).Delete(name, deleteOption)
+    case extensionsinternal.Kind("Deployment"):
+        return c.Extensions().Deployments(ns).Delete(name, deleteOption)
+    default:
+        return fmt.Errorf("Unsupported kind when deleting: %v", kind)
+    }
+}
+
+func getSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
+    switch typed := obj.(type) {
+    case *v1.ReplicationController:
+        return labels.SelectorFromSet(typed.Spec.Selector), nil
+    case *extensions.ReplicaSet:
+        return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
+    case *extensions.Deployment:
+        return metav1.LabelSelectorAsSelector(typed.Spec.Selector)
+    default:
+        return nil, fmt.Errorf("Unsupported kind when getting selector: %v", obj)
+    }
+}
+
+func getReplicasFromRuntimeObject(obj runtime.Object) (int32, error) {
+    switch typed := obj.(type) {
+    case *v1.ReplicationController:
+        if typed.Spec.Replicas != nil {
+            return *typed.Spec.Replicas, nil
+        }
+        return 0, nil
+    case *extensions.ReplicaSet:
+        if typed.Spec.Replicas != nil {
+            return *typed.Spec.Replicas, nil
+        }
+        return 0, nil
+    case *extensions.Deployment:
+        if typed.Spec.Replicas != nil {
+            return *typed.Spec.Replicas, nil
+        }
+        return 0, nil
+    default:
+        return -1, fmt.Errorf("Unsupported kind when getting number of replicas: %v", obj)
+    }
+}
+
+func getReaperForKind(internalClientset internalclientset.Interface, kind schema.GroupKind) (kubectl.Reaper, error) {
+    switch kind {
+    case api.Kind("ReplicationController"):
+        return kubectl.ReaperFor(api.Kind("ReplicationController"), internalClientset)
+    case extensionsinternal.Kind("ReplicaSet"):
+        return kubectl.ReaperFor(extensionsinternal.Kind("ReplicaSet"), internalClientset)
+    case extensionsinternal.Kind("Deployment"):
+        return kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClientset)
+    default:
+        return nil, fmt.Errorf("Unsupported kind: %v", kind)
+    }
+}
+
+// DeleteResourceAndPods deletes a given resource and all pods it spawned
+func DeleteResourceAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, kind schema.GroupKind, ns, name string) error {
+    By(fmt.Sprintf("deleting %v %s in namespace %s", kind, name, ns))
+
+    rtObject, err := getRuntimeObjectForKind(clientset, kind, ns, name)
     if err != nil {
         if apierrs.IsNotFound(err) {
-            Logf("RC %s was already deleted: %v", name, err)
+            Logf("%v %s not found: %v", kind, name, err)
             return nil
         }
         return err
     }
-    reaper, err := kubectl.ReaperForReplicationController(internalClientset.Core(), 10*time.Minute)
+    selector, err := getSelectorFromRuntimeObject(rtObject)
     if err != nil {
-        if apierrs.IsNotFound(err) {
-            Logf("RC %s was already deleted: %v", name, err)
-            return nil
-        }
         return err
     }
-    ps, err := podStoreForRC(clientset, rc)
+    reaper, err := getReaperForKind(internalClientset, kind)
+    if err != nil {
+        return err
+    }
+
+    ps, err := podStoreForSelector(clientset, ns, selector)
     if err != nil {
         return err
     }
@@ -2787,20 +2864,20 @@ func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalcl
     startTime := time.Now()
     err = reaper.Stop(ns, name, 0, nil)
     if apierrs.IsNotFound(err) {
-        Logf("RC %s was already deleted: %v", name, err)
+        Logf("%v %s was already deleted: %v", kind, name, err)
         return nil
     }
     if err != nil {
-        return fmt.Errorf("error while stopping RC: %s: %v", name, err)
+        return fmt.Errorf("error while stopping %v: %s: %v", kind, name, err)
     }
-    deleteRCTime := time.Now().Sub(startTime)
-    Logf("Deleting RC %s took: %v", name, deleteRCTime)
+    deleteTime := time.Now().Sub(startTime)
+    Logf("Deleting %v %s took: %v", kind, name, deleteTime)
     err = waitForPodsInactive(ps, 10*time.Millisecond, 10*time.Minute)
     if err != nil {
         return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
     }
-    terminatePodTime := time.Now().Sub(startTime) - deleteRCTime
-    Logf("Terminating RC %s pods took: %v", name, terminatePodTime)
+    terminatePodTime := time.Now().Sub(startTime) - deleteTime
+    Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)
     // this is to relieve namespace controller's pressure when deleting the
     // namespace after a test.
     err = waitForPodsGone(ps, 10*time.Second, 10*time.Minute)
@@ -2810,57 +2887,75 @@ func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalcl
     return nil
 }

-// DeleteRCAndWaitForGC deletes only the Replication Controller and waits for GC to delete the pods.
-func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
-    By(fmt.Sprintf("deleting replication controller %s in namespace %s, will wait for the garbage collector to delete the pods", name, ns))
-    rc, err := c.Core().ReplicationControllers(ns).Get(name)
+func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
+    return DeleteResourceAndPods(clientset, internalClientset, api.Kind("ReplicationController"), ns, name)
+}
+
+// DeleteResourceAndWaitForGC deletes only given resource and waits for GC to delete the pods.
+func DeleteResourceAndWaitForGC(c clientset.Interface, kind schema.GroupKind, ns, name string) error {
+    By(fmt.Sprintf("deleting %v %s in namespace %s, will wait for the garbage collector to delete the pods", kind, name, ns))
+
+    rtObject, err := getRuntimeObjectForKind(c, kind, ns, name)
     if err != nil {
         if apierrs.IsNotFound(err) {
-            Logf("RC %s was already deleted: %v", name, err)
+            Logf("%v %s not found: %v", kind, name, err)
             return nil
         }
         return err
     }
-    ps, err := podStoreForRC(c, rc)
+    selector, err := getSelectorFromRuntimeObject(rtObject)
     if err != nil {
         return err
     }
+    replicas, err := getReplicasFromRuntimeObject(rtObject)
+    if err != nil {
+        return err
+    }
+
+    ps, err := podStoreForSelector(c, ns, selector)
+    if err != nil {
+        return err
+    }
+
     defer ps.Stop()
     startTime := time.Now()
     falseVar := false
     deleteOption := &v1.DeleteOptions{OrphanDependents: &falseVar}
-    err = c.Core().ReplicationControllers(ns).Delete(name, deleteOption)
+    err = deleteResource(c, kind, ns, name, deleteOption)
     if err != nil && apierrs.IsNotFound(err) {
-        Logf("RC %s was already deleted: %v", name, err)
+        Logf("%v %s was already deleted: %v", kind, name, err)
         return nil
     }
     if err != nil {
         return err
     }
-    deleteRCTime := time.Now().Sub(startTime)
-    Logf("Deleting RC %s took: %v", name, deleteRCTime)
+    deleteTime := time.Now().Sub(startTime)
+    Logf("Deleting %v %s took: %v", kind, name, deleteTime)

     var interval, timeout time.Duration
     switch {
-    case *(rc.Spec.Replicas) < 100:
+    case replicas < 100:
         interval = 100 * time.Millisecond
-    case *(rc.Spec.Replicas) < 1000:
+    case replicas < 1000:
         interval = 1 * time.Second
     default:
         interval = 10 * time.Second
     }
-    if *(rc.Spec.Replicas) < 5000 {
+    if replicas < 5000 {
         timeout = 10 * time.Minute
     } else {
-        timeout = time.Duration(*(rc.Spec.Replicas)/gcThroughput) * time.Second
+        timeout = time.Duration(replicas/gcThroughput) * time.Second
         // gcThroughput is pretty strict now, add a bit more to it
         timeout = timeout + 3*time.Minute
     }

     err = waitForPodsInactive(ps, interval, timeout)
     if err != nil {
         return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
     }
-    terminatePodTime := time.Now().Sub(startTime) - deleteRCTime
-    Logf("Terminating RC %s pods took: %v", name, terminatePodTime)
+    terminatePodTime := time.Now().Sub(startTime) - deleteTime
+    Logf("Terminating %v %s pods took: %v", kind, name, terminatePodTime)

     err = waitForPodsGone(ps, interval, 10*time.Minute)
     if err != nil {
         return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
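Note: the commit leaves callers with two delete paths -- the reaper-based
DeleteResourceAndPods, and DeleteResourceAndWaitForGC, which deletes only the
controller object with OrphanDependents=false and lets the garbage collector
reap the pods. A hedged consolidation of the test-side choice (mirroring
cleanupDensityTest above; cfg stands for any RunObjectConfig):

    // Deployments are excluded from the GC path per the TODO in cleanupDensityTest.
    if framework.TestContext.GarbageCollectorEnabled && cfg.GetKind() != extensions.Kind("Deployment") {
        framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(
            cfg.GetClient(), cfg.GetKind(), cfg.GetNamespace(), cfg.GetName()))
    } else {
        framework.ExpectNoError(framework.DeleteResourceAndPods(
            cfg.GetClient(), cfg.GetInternalClient(), cfg.GetKind(), cfg.GetNamespace(), cfg.GetName()))
    }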
@@ -2868,11 +2963,15 @@ func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
     return nil
 }

-// podStoreForRC creates a PodStore that monitors pods belong to the rc. It
-// waits until the reflector does a List() before returning.
-func podStoreForRC(c clientset.Interface, rc *v1.ReplicationController) (*testutils.PodStore, error) {
-    labels := labels.SelectorFromSet(rc.Spec.Selector)
-    ps := testutils.NewPodStore(c, rc.Namespace, labels, fields.Everything())
+// DeleteRCAndWaitForGC deletes only the Replication Controller and waits for GC to delete the pods.
+func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
+    return DeleteResourceAndWaitForGC(c, api.Kind("ReplicationController"), ns, name)
+}
+
+// podStoreForSelector creates a PodStore that monitors pods from given namespace matching given selector.
+// It waits until the reflector does a List() before returning.
+func podStoreForSelector(c clientset.Interface, ns string, selector labels.Selector) (*testutils.PodStore, error) {
+    ps := testutils.NewPodStore(c, ns, selector, fields.Everything())
     err := wait.Poll(1*time.Second, 2*time.Minute, func() (bool, error) {
         if len(ps.Reflector.LastSyncResourceVersion()) != 0 {
             return true, nil
@@ -4312,7 +4411,7 @@ func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalcl
         return err
     }
     if replicas == 0 {
-        ps, err := podStoreForRC(clientset, rc)
+        ps, err := podStoreForSelector(clientset, rc.Namespace, labels.SelectorFromSet(rc.Spec.Selector))
         if err != nil {
             return err
         }

test/e2e/load.go (142 changed lines)

@@ -27,12 +27,15 @@ import (
     "sync"
     "time"

+    "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/v1"
+    "k8s.io/kubernetes/pkg/apis/extensions"
     "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
     clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
     "k8s.io/kubernetes/pkg/client/restclient"
     "k8s.io/kubernetes/pkg/client/transport"
     "k8s.io/kubernetes/pkg/labels"
+    "k8s.io/kubernetes/pkg/runtime/schema"
     "k8s.io/kubernetes/pkg/util/intstr"
     utilnet "k8s.io/kubernetes/pkg/util/net"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -43,15 +46,15 @@ import (
 )

 const (
-    smallRCSize       = 5
-    mediumRCSize      = 30
-    bigRCSize         = 250
-    smallRCGroupName  = "load-small-rc"
-    mediumRCGroupName = "load-medium-rc"
-    bigRCGroupName    = "load-big-rc"
-    smallRCBatchSize  = 30
-    mediumRCBatchSize = 5
-    bigRCBatchSize    = 1
+    smallGroupSize       = 5
+    mediumGroupSize      = 30
+    bigGroupSize         = 250
+    smallGroupName       = "load-small"
+    mediumGroupName      = "load-medium"
+    bigGroupName         = "load-big"
+    smallGroupBatchSize  = 30
+    mediumGroupBatchSize = 5
+    bigGroupBatchSize    = 1
     // We start RCs/Services/pods/... in different namespace in this test.
     // nodeCountPerNamespace determines how many namespaces we will be using
     // depending on the number of nodes in the underlying cluster.
@@ -66,7 +69,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
     var clientset clientset.Interface
     var nodeCount int
     var ns string
-    var configs []*testutils.RCConfig
+    var configs []testutils.RunObjectConfig

     // Gathers metrics before teardown
     // TODO add flag that allows to skip cleanup on failure
@@ -117,20 +120,22 @@ var _ = framework.KubeDescribe("Load capacity", func() {
         podsPerNode int
         image       string
         command     []string
+        // What kind of resource we want to create
+        kind schema.GroupKind
     }

     loadTests := []Load{
         // The container will consume 1 cpu and 512mb of memory.
-        {podsPerNode: 3, image: "jess/stress", command: []string{"stress", "-c", "1", "-m", "2"}},
-        {podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4"},
+        {podsPerNode: 3, image: "jess/stress", command: []string{"stress", "-c", "1", "-m", "2"}, kind: api.Kind("ReplicationController")},
+        {podsPerNode: 30, image: "gcr.io/google_containers/serve_hostname:v1.4", kind: api.Kind("ReplicationController")},
     }

     for _, testArg := range loadTests {
         feature := "ManualPerformance"
-        if testArg.podsPerNode == 30 {
+        if testArg.podsPerNode == 30 && testArg.kind == api.Kind("ReplicationController") {
             feature = "Performance"
         }
-        name := fmt.Sprintf("[Feature:%s] should be able to handle %v pods per node", feature, testArg.podsPerNode)
+        name := fmt.Sprintf("[Feature:%s] should be able to handle %v pods per node %v", feature, testArg.podsPerNode, testArg.kind)
         itArg := testArg

         It(name, func() {
@@ -140,7 +145,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
             framework.ExpectNoError(err)

             totalPods := itArg.podsPerNode * nodeCount
-            configs = generateRCConfigs(totalPods, itArg.image, itArg.command, namespaces)
+            configs = generateConfigs(totalPods, itArg.image, itArg.command, namespaces, itArg.kind)
             var services []*v1.Service
             // Read the environment variable to see if we want to create services
             createServices := os.Getenv("CREATE_SERVICES")
@@ -173,7 +178,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
             // We may want to revisit it in the future.
             framework.Logf("Starting to create ReplicationControllers...")
             creatingTime := time.Duration(totalPods/throughput) * time.Second
-            createAllRC(configs, creatingTime)
+            createAllResources(configs, creatingTime)
             By("============================================================================")

             // We would like to spread scaling replication controllers over time
@@ -182,11 +187,11 @@ var _ = framework.KubeDescribe("Load capacity", func() {
             // The expected number of created/deleted pods is less than totalPods/3.
             scalingTime := time.Duration(totalPods/(3*throughput)) * time.Second
             framework.Logf("Starting to scale ReplicationControllers first time...")
-            scaleAllRC(configs, scalingTime)
+            scaleAllResources(configs, scalingTime)
             By("============================================================================")

             framework.Logf("Starting to scale ReplicationControllers second time...")
-            scaleAllRC(configs, scalingTime)
+            scaleAllResources(configs, scalingTime)
             By("============================================================================")

             // Cleanup all created replication controllers.
@@ -194,7 +199,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
             // We may want to revisit it in the future.
             deletingTime := time.Duration(totalPods/throughput) * time.Second
             framework.Logf("Starting to delete ReplicationControllers...")
-            deleteAllRC(configs, deletingTime)
+            deleteAllResources(configs, deletingTime)
             if createServices == "true" {
                 framework.Logf("Starting to delete services...")
                 for _, service := range services {
@@ -259,27 +264,27 @@ func createClients(numberOfClients int) ([]*clientset.Clientset, []*internalclie
     return clients, internalClients, nil
 }

-func computeRCCounts(total int) (int, int, int) {
+func computePodCounts(total int) (int, int, int) {
     // Small RCs owns ~0.5 of total number of pods, medium and big RCs ~0.25 each.
     // For example for 3000 pods (100 nodes, 30 pods per node) there are:
     // - 300 small RCs each 5 pods
     // - 25 medium RCs each 30 pods
     // - 3 big RCs each 250 pods
-    bigRCCount := total / 4 / bigRCSize
-    total -= bigRCCount * bigRCSize
-    mediumRCCount := total / 3 / mediumRCSize
-    total -= mediumRCCount * mediumRCSize
-    smallRCCount := total / smallRCSize
+    bigRCCount := total / 4 / bigGroupSize
+    total -= bigRCCount * bigGroupSize
+    mediumRCCount := total / 3 / mediumGroupSize
+    total -= mediumRCCount * mediumGroupSize
+    smallRCCount := total / smallGroupSize
     return smallRCCount, mediumRCCount, bigRCCount
 }

-func generateRCConfigs(totalPods int, image string, command []string, nss []*v1.Namespace) []*testutils.RCConfig {
-    configs := make([]*testutils.RCConfig, 0)
+func generateConfigs(totalPods int, image string, command []string, nss []*v1.Namespace, kind schema.GroupKind) []testutils.RunObjectConfig {
+    configs := make([]testutils.RunObjectConfig, 0)

-    smallRCCount, mediumRCCount, bigRCCount := computeRCCounts(totalPods)
-    configs = append(configs, generateRCConfigsForGroup(nss, smallRCGroupName, smallRCSize, smallRCCount, image, command)...)
-    configs = append(configs, generateRCConfigsForGroup(nss, mediumRCGroupName, mediumRCSize, mediumRCCount, image, command)...)
-    configs = append(configs, generateRCConfigsForGroup(nss, bigRCGroupName, bigRCSize, bigRCCount, image, command)...)
+    smallRCCount, mediumRCCount, bigRCCount := computePodCounts(totalPods)
+    configs = append(configs, generateConfigsForGroup(nss, smallGroupName, smallGroupSize, smallRCCount, image, command, kind)...)
+    configs = append(configs, generateConfigsForGroup(nss, mediumGroupName, mediumGroupSize, mediumRCCount, image, command, kind)...)
+    configs = append(configs, generateConfigsForGroup(nss, bigGroupName, bigGroupSize, bigRCCount, image, command, kind)...)

     // Create a number of clients to better simulate real usecase
     // where not everyone is using exactly the same client.
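Note: the arithmetic in the comment checks out: big = 3000/4/250 = 3 groups
(750 pods), then medium = 2250/3/30 = 25 groups (750 pods), then
small = 1500/5 = 300 groups (1500 pods). A hypothetical same-package example
test pinning that down:

    func ExampleComputePodCounts() {
        small, medium, big := computePodCounts(3000)
        fmt.Println(small, medium, big)
        // Output: 300 25 3
    }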
@@ -288,18 +293,18 @@ func generateRCConfigs(totalPods int, image string, command []string, nss []*v1.
     framework.ExpectNoError(err)

     for i := 0; i < len(configs); i++ {
-        configs[i].Client = clients[i%len(clients)]
-        configs[i].InternalClient = internalClients[i%len(internalClients)]
+        configs[i].SetClient(clients[i%len(clients)])
+        configs[i].SetInternalClient(internalClients[i%len(internalClients)])
     }

     return configs
 }

-func generateRCConfigsForGroup(
-    nss []*v1.Namespace, groupName string, size, count int, image string, command []string) []*testutils.RCConfig {
-    configs := make([]*testutils.RCConfig, 0, count)
+func generateConfigsForGroup(
+    nss []*v1.Namespace, groupName string, size, count int, image string, command []string, kind schema.GroupKind) []testutils.RunObjectConfig {
+    configs := make([]testutils.RunObjectConfig, 0, count)
     for i := 1; i <= count; i++ {
-        config := &testutils.RCConfig{
+        baseConfig := &testutils.RCConfig{
             Client:         nil, // this will be overwritten later
             InternalClient: nil, // this will be overwritten later
             Name:           groupName + "-" + strconv.Itoa(i),
|
|||||||
CpuRequest: 10, // 0.01 core
|
CpuRequest: 10, // 0.01 core
|
||||||
MemRequest: 26214400, // 25MB
|
MemRequest: 26214400, // 25MB
|
||||||
}
|
}
|
||||||
|
var config testutils.RunObjectConfig
|
||||||
|
switch kind {
|
||||||
|
case api.Kind("ReplicationController"):
|
||||||
|
config = baseConfig
|
||||||
|
case extensions.Kind("ReplicaSet"):
|
||||||
|
config = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
|
||||||
|
case extensions.Kind("Deployment"):
|
||||||
|
config = &testutils.DeploymentConfig{RCConfig: *baseConfig}
|
||||||
|
default:
|
||||||
|
framework.Failf("Unsupported kind for config creation: %v", kind)
|
||||||
|
}
|
||||||
configs = append(configs, config)
|
configs = append(configs, config)
|
||||||
}
|
}
|
||||||
return configs
|
return configs
|
||||||
}
|
}
|
||||||
|
|
||||||
func generateServicesForConfigs(configs []*testutils.RCConfig) []*v1.Service {
|
func generateServicesForConfigs(configs []testutils.RunObjectConfig) []*v1.Service {
|
||||||
services := make([]*v1.Service, 0, len(configs))
|
services := make([]*v1.Service, 0, len(configs))
|
||||||
for _, config := range configs {
|
for _, config := range configs {
|
||||||
serviceName := config.Name + "-svc"
|
serviceName := config.GetName() + "-svc"
|
||||||
labels := map[string]string{"name": config.Name}
|
labels := map[string]string{"name": config.GetName()}
|
||||||
service := &v1.Service{
|
service := &v1.Service{
|
||||||
ObjectMeta: v1.ObjectMeta{
|
ObjectMeta: v1.ObjectMeta{
|
||||||
Name: serviceName,
|
Name: serviceName,
|
||||||
Namespace: config.Namespace,
|
Namespace: config.GetNamespace(),
|
||||||
},
|
},
|
||||||
Spec: v1.ServiceSpec{
|
Spec: v1.ServiceSpec{
|
||||||
Selector: labels,
|
Selector: labels,
|
||||||
@@ -343,69 +359,75 @@ func sleepUpTo(d time.Duration) {
     time.Sleep(time.Duration(rand.Int63n(d.Nanoseconds())))
 }

-func createAllRC(configs []*testutils.RCConfig, creatingTime time.Duration) {
+func createAllResources(configs []testutils.RunObjectConfig, creatingTime time.Duration) {
     var wg sync.WaitGroup
     wg.Add(len(configs))
     for _, config := range configs {
-        go createRC(&wg, config, creatingTime)
+        go createResource(&wg, config, creatingTime)
     }
     wg.Wait()
 }

-func createRC(wg *sync.WaitGroup, config *testutils.RCConfig, creatingTime time.Duration) {
+func createResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, creatingTime time.Duration) {
     defer GinkgoRecover()
     defer wg.Done()

     sleepUpTo(creatingTime)
-    framework.ExpectNoError(framework.RunRC(*config), fmt.Sprintf("creating rc %s", config.Name))
+    framework.ExpectNoError(config.Run(), fmt.Sprintf("creating %v %s", config.GetKind(), config.GetName()))
 }

-func scaleAllRC(configs []*testutils.RCConfig, scalingTime time.Duration) {
+func scaleAllResources(configs []testutils.RunObjectConfig, scalingTime time.Duration) {
     var wg sync.WaitGroup
     wg.Add(len(configs))
     for _, config := range configs {
-        go scaleRC(&wg, config, scalingTime)
+        go scaleResource(&wg, config, scalingTime)
     }
     wg.Wait()
 }

 // Scales RC to a random size within [0.5*size, 1.5*size] and lists all the pods afterwards.
 // Scaling happens always based on original size, not the current size.
-func scaleRC(wg *sync.WaitGroup, config *testutils.RCConfig, scalingTime time.Duration) {
+func scaleResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, scalingTime time.Duration) {
     defer GinkgoRecover()
     defer wg.Done()

     sleepUpTo(scalingTime)
-    newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2)
-    framework.ExpectNoError(framework.ScaleRC(config.Client, config.InternalClient, config.Namespace, config.Name, newSize, true),
-        fmt.Sprintf("scaling rc %s for the first time", config.Name))
-    selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
+    newSize := uint(rand.Intn(config.GetReplicas()) + config.GetReplicas()/2)
+    framework.ExpectNoError(framework.ScaleResource(
+        config.GetClient(), config.GetInternalClient(), config.GetNamespace(), config.GetName(), newSize, true, config.GetKind()),
+        fmt.Sprintf("scaling rc %s for the first time", config.GetName()))
+
+    selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.GetName()}))
     options := v1.ListOptions{
         LabelSelector:   selector.String(),
         ResourceVersion: "0",
     }
-    _, err := config.Client.Core().Pods(config.Namespace).List(options)
-    framework.ExpectNoError(err, fmt.Sprintf("listing pods from rc %v", config.Name))
+    _, err := config.GetClient().Core().Pods(config.GetNamespace()).List(options)
+    framework.ExpectNoError(err, fmt.Sprintf("listing pods from rc %v", config.GetName()))
 }

-func deleteAllRC(configs []*testutils.RCConfig, deletingTime time.Duration) {
+func deleteAllResources(configs []testutils.RunObjectConfig, deletingTime time.Duration) {
     var wg sync.WaitGroup
     wg.Add(len(configs))
     for _, config := range configs {
-        go deleteRC(&wg, config, deletingTime)
+        go deleteResource(&wg, config, deletingTime)
     }
     wg.Wait()
 }

-func deleteRC(wg *sync.WaitGroup, config *testutils.RCConfig, deletingTime time.Duration) {
+func deleteResource(wg *sync.WaitGroup, config testutils.RunObjectConfig, deletingTime time.Duration) {
     defer GinkgoRecover()
     defer wg.Done()

     sleepUpTo(deletingTime)
-    if framework.TestContext.GarbageCollectorEnabled {
-        framework.ExpectNoError(framework.DeleteRCAndWaitForGC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
+    if framework.TestContext.GarbageCollectorEnabled && config.GetKind() != extensions.Kind("Deployment") {
+        framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(
+            config.GetClient(), config.GetKind(), config.GetNamespace(), config.GetName()),
+            fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName()))
     } else {
-        framework.ExpectNoError(framework.DeleteRCAndPods(config.Client, config.InternalClient, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
+        framework.ExpectNoError(framework.DeleteResourceAndPods(
+            config.GetClient(), config.GetInternalClient(), config.GetKind(), config.GetNamespace(), config.GetName()),
+            fmt.Sprintf("deleting %v %s", config.GetKind(), config.GetName()))
     }
 }
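Note: with everything flowing through RunObjectConfig, the create/scale/delete
helpers above are kind-agnostic. A hedged illustration with invented names --
the real tests generate homogeneous groups via generateConfigsForGroup, and
clients would still have to be attached via SetClient/SetInternalClient before
running:

    configs := []testutils.RunObjectConfig{
        &testutils.RCConfig{Name: "demo-rc", Namespace: "e2e-ns", Replicas: 5},
        &testutils.ReplicaSetConfig{RCConfig: testutils.RCConfig{Name: "demo-rs", Namespace: "e2e-ns", Replicas: 5}},
        &testutils.DeploymentConfig{RCConfig: testutils.RCConfig{Name: "demo-dep", Namespace: "e2e-ns", Replicas: 5}},
    }
    // Each config.Run() creates its own kind; scaling and deletion dispatch
    // on config.GetKind() the same way.
    createAllResources(configs, 30*time.Second)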
@@ -104,7 +104,7 @@ func (h *haproxyControllerTester) start(namespace string) (err error) {
     if err != nil {
         return
     }
-    if err = framework.WaitForRCPodsRunning(h.client, namespace, rc.Name); err != nil {
+    if err = framework.WaitForControlledPodsRunning(h.client, namespace, rc.Name, api.Kind("ReplicationController")); err != nil {
         return
     }
     h.rcName = rc.Name
@@ -171,7 +171,7 @@ func (s *ingManager) start(namespace string) (err error) {
 		if err != nil {
 			return
 		}
-		if err = framework.WaitForRCPodsRunning(s.client, rc.Namespace, rc.Name); err != nil {
+		if err = framework.WaitForControlledPodsRunning(s.client, rc.Namespace, rc.Name, api.Kind("ReplicationController")); err != nil {
 			return
 		}
 	}
@@ -25,6 +25,7 @@ go_library(
         "//pkg/api/errors:go_default_library",
         "//pkg/api/resource:go_default_library",
         "//pkg/api/v1:go_default_library",
+        "//pkg/apis/extensions:go_default_library",
         "//pkg/apis/extensions/v1beta1:go_default_library",
         "//pkg/apis/meta/v1:go_default_library",
         "//pkg/client/cache:go_default_library",
@@ -33,6 +34,7 @@ go_library(
         "//pkg/fields:go_default_library",
         "//pkg/labels:go_default_library",
         "//pkg/runtime:go_default_library",
+        "//pkg/runtime/schema:go_default_library",
         "//pkg/util/sets:go_default_library",
         "//pkg/util/uuid:go_default_library",
         "//pkg/util/workqueue:go_default_library",
@@ -27,12 +27,14 @@ import (
 	apierrs "k8s.io/kubernetes/pkg/api/errors"
 	"k8s.io/kubernetes/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/api/v1"
+	extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
 	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
+	"k8s.io/kubernetes/pkg/runtime/schema"
 	"k8s.io/kubernetes/pkg/util/sets"
 	"k8s.io/kubernetes/pkg/util/uuid"
 	"k8s.io/kubernetes/pkg/util/workqueue"
@@ -45,6 +47,18 @@ const (
 	nonExist = "NonExist"
 )

+type RunObjectConfig interface {
+	Run() error
+	GetName() string
+	GetNamespace() string
+	GetKind() schema.GroupKind
+	GetClient() clientset.Interface
+	GetInternalClient() internalclientset.Interface
+	SetClient(clientset.Interface)
+	SetInternalClient(internalclientset.Interface)
+	GetReplicas() int
+}
+
 type RCConfig struct {
 	Client         clientset.Interface
 	InternalClient internalclientset.Interface
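A rough sketch of how a caller might obtain a RunObjectConfig for the kind under test. The newConfigForKind helper is hypothetical (this commit does not add it); it leans on the fact that DeploymentConfig and ReplicaSetConfig embed RCConfig, which is why those two types only need to override Run() and GetKind() below.

// Hypothetical helper: map a requested kind to a concrete config type.
func newConfigForKind(kind schema.GroupKind, base testutils.RCConfig) testutils.RunObjectConfig {
    switch kind {
    case extensionsinternal.Kind("Deployment"):
        return &testutils.DeploymentConfig{RCConfig: base}
    case extensionsinternal.Kind("ReplicaSet"):
        return &testutils.ReplicaSetConfig{RCConfig: base}
    default:
        return &base // ReplicationController stays the default
    }
}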
@@ -193,6 +207,14 @@ func RunDeployment(config DeploymentConfig) error {
 	return config.start()
 }

+func (config *DeploymentConfig) Run() error {
+	return RunDeployment(*config)
+}
+
+func (config *DeploymentConfig) GetKind() schema.GroupKind {
+	return extensionsinternal.Kind("Deployment")
+}
+
 func (config *DeploymentConfig) create() error {
 	deployment := &extensions.Deployment{
 		ObjectMeta: v1.ObjectMeta{
@@ -245,6 +267,14 @@ func RunReplicaSet(config ReplicaSetConfig) error {
 	return config.start()
 }

+func (config *ReplicaSetConfig) Run() error {
+	return RunReplicaSet(*config)
+}
+
+func (config *ReplicaSetConfig) GetKind() schema.GroupKind {
+	return extensionsinternal.Kind("ReplicaSet")
+}
+
 func (config *ReplicaSetConfig) create() error {
 	rs := &extensions.ReplicaSet{
 		ObjectMeta: v1.ObjectMeta{
@@ -297,6 +327,42 @@ func RunRC(config RCConfig) error {
 	return config.start()
 }

+func (config *RCConfig) Run() error {
+	return RunRC(*config)
+}
+
+func (config *RCConfig) GetName() string {
+	return config.Name
+}
+
+func (config *RCConfig) GetNamespace() string {
+	return config.Namespace
+}
+
+func (config *RCConfig) GetKind() schema.GroupKind {
+	return api.Kind("ReplicationController")
+}
+
+func (config *RCConfig) GetClient() clientset.Interface {
+	return config.Client
+}
+
+func (config *RCConfig) GetInternalClient() internalclientset.Interface {
+	return config.InternalClient
+}
+
+func (config *RCConfig) SetClient(c clientset.Interface) {
+	config.Client = c
+}
+
+func (config *RCConfig) SetInternalClient(c internalclientset.Interface) {
+	config.InternalClient = c
+}
+
+func (config *RCConfig) GetReplicas() int {
+	return config.Replicas
+}
+
 func (config *RCConfig) create() error {
 	dnsDefault := v1.DNSDefault
 	if config.DNSPolicy == nil {
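With these accessors in place, a call site needs only the interface. A minimal sketch of the intended flow, assuming a kind value chosen by test configuration and the hypothetical newConfigForKind helper sketched earlier:

// Sketch only: start and scale a workload without knowing its controller type.
config := newConfigForKind(kind, baseRC)
framework.ExpectNoError(config.Run()) // dispatches to RunRC, RunDeployment, or RunReplicaSet
framework.ExpectNoError(framework.ScaleResource(
    config.GetClient(), config.GetInternalClient(), config.GetNamespace(),
    config.GetName(), uint(config.GetReplicas()*2), true, config.GetKind()),
    fmt.Sprintf("scaling %v %s", config.GetKind(), config.GetName()))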