Merge pull request #8596 from andronat/fix_8319

Kubectl command renaming (run-container to run and resize to scale)
This commit is contained in:
Tim Hockin
2015-05-27 15:37:54 -07:00
51 changed files with 478 additions and 462 deletions

View File

@@ -88,10 +88,10 @@ var _ = Describe("kubectl", func() {
runKubectl("create", "-f", nautilusPath, fmt.Sprintf("--namespace=%v", ns))
validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("scaling down the replication controller")
runKubectl("resize", "rc", "update-demo-nautilus", "--replicas=1", fmt.Sprintf("--namespace=%v", ns))
runKubectl("scale", "rc", "update-demo-nautilus", "--replicas=1", fmt.Sprintf("--namespace=%v", ns))
validateController(c, nautilusImage, 1, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
By("scaling up the replication controller")
runKubectl("resize", "rc", "update-demo-nautilus", "--replicas=2", fmt.Sprintf("--namespace=%v", ns))
runKubectl("scale", "rc", "update-demo-nautilus", "--replicas=2", fmt.Sprintf("--namespace=%v", ns))
validateController(c, nautilusImage, 2, "update-demo", updateDemoSelector, getUDData("nautilus.jpg", ns), ns)
})

View File

@@ -112,20 +112,20 @@ func computeRCCounts(total int) (int, int, int) {
return smallRCCount, mediumRCCount, bigRCCount
}
// The function creates a RC and then every few second resize it and with 0.1 probability deletes it.
// The function creates an RC, then every few seconds scales it, and with probability 0.1 deletes it.
func playWithRC(c *client.Client, wg *sync.WaitGroup, ns, name string, size int) {
defer GinkgoRecover()
defer wg.Done()
rcExist := false
// Once every 1-2 minutes perform resize of RC.
// Once every 1-2 minutes, scale the RC.
for start := time.Now(); time.Since(start) < simulationTime; time.Sleep(time.Duration(60+rand.Intn(60)) * time.Second) {
if !rcExist {
expectNoError(RunRC(c, name, ns, image, size), fmt.Sprintf("creating rc %s in namespace %s", name, ns))
rcExist = true
}
// Resize RC to a random size between 0.5x and 1.5x of the original size.
// Scale RC to a random size between 0.5x and 1.5x of the original size.
newSize := uint(rand.Intn(size+1) + size/2)
expectNoError(ResizeRC(c, ns, name, newSize), fmt.Sprintf("resizing rc %s in namespace %s", name, ns))
expectNoError(ScaleRC(c, ns, name, newSize), fmt.Sprintf("scaling rc %s in namespace %s", name, ns))
// With probability 0.1 remove this RC.
if rand.Intn(10) == 0 {
expectNoError(DeleteRC(c, ns, name), fmt.Sprintf("deleting rc %s in namespace %s", name, ns))

View File

@@ -792,14 +792,14 @@ func RunRC(c *client.Client, name string, ns, image string, replicas int) error
return nil
}
func ResizeRC(c *client.Client, ns, name string, size uint) error {
By(fmt.Sprintf("Resizing replication controller %s in namespace %s to %d", name, ns, size))
resizer, err := kubectl.ResizerFor("ReplicationController", kubectl.NewResizerClient(c))
func ScaleRC(c *client.Client, ns, name string, size uint) error {
By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
scaler, err := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(c))
if err != nil {
return err
}
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
if err = resizer.Resize(ns, name, size, nil, nil, waitForReplicas); err != nil {
if err = scaler.Scale(ns, name, size, nil, nil, waitForReplicas); err != nil {
return err
}
return waitForRCPodsRunning(c, ns, name)

View File

@@ -42,7 +42,7 @@ import (
)
const (
// Timeout used in benchmarks, to eg: resize an rc
// Timeout used in benchmarks, e.g. to scale an rc
DefaultTimeout = 30 * time.Minute
// Rc manifest used to create pods for benchmarks.
@@ -191,26 +191,26 @@ func StopRC(rc *api.ReplicationController, restClient *client.Client) error {
return nil
}
// ResizeRC resizes the given rc to the given replicas.
func ResizeRC(name, ns string, replicas int, restClient *client.Client) (*api.ReplicationController, error) {
resizer, err := kubectl.ResizerFor("ReplicationController", kubectl.NewResizerClient(restClient))
// ScaleRC scales the given rc to the given replicas.
func ScaleRC(name, ns string, replicas int, restClient *client.Client) (*api.ReplicationController, error) {
scaler, err := kubectl.ScalerFor("ReplicationController", kubectl.NewScalerClient(restClient))
if err != nil {
return nil, err
}
retry := &kubectl.RetryParams{50 * time.Millisecond, DefaultTimeout}
waitForReplicas := &kubectl.RetryParams{50 * time.Millisecond, DefaultTimeout}
err = resizer.Resize(ns, name, uint(replicas), nil, retry, waitForReplicas)
err = scaler.Scale(ns, name, uint(replicas), nil, retry, waitForReplicas)
if err != nil {
return nil, err
}
resized, err := restClient.ReplicationControllers(ns).Get(name)
scaled, err := restClient.ReplicationControllers(ns).Get(name)
if err != nil {
return nil, err
}
return resized, nil
return scaled, nil
}
// StartRC creates given rc if it doesn't already exist, then updates it via kubectl's resizer.
// StartRC creates given rc if it doesn't already exist, then updates it via kubectl's scaler.
func StartRC(controller *api.ReplicationController, restClient *client.Client) (*api.ReplicationController, error) {
created, err := restClient.ReplicationControllers(controller.Namespace).Get(controller.Name)
if err != nil {
@@ -221,11 +221,11 @@ func StartRC(controller *api.ReplicationController, restClient *client.Client) (
}
}
// If we just created an rc, wait till it creates its replicas.
return ResizeRC(created.Name, created.Namespace, controller.Spec.Replicas, restClient)
return ScaleRC(created.Name, created.Namespace, controller.Spec.Replicas, restClient)
}
// StartPods check for numPods in TestNS. If they exist, it no-ops, otherwise it starts up
// a temp rc, resizes it to match numPods, then deletes the rc leaving behind the pods.
// a temp rc, scales it to match numPods, then deletes the rc leaving behind the pods.
func StartPods(numPods int, host string, restClient *client.Client) error {
start := time.Now()
defer func() {