Merge pull request #7235 from rrati/rc-functions-refactor-7234

Move RunRC, DeleteRC, and ListPods into the utility module and refactor

Commit 35e9ad1747
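The net effect of the refactor: RunRC and DeleteRC move into test/e2e/util.go and now return errors instead of failing the Ginkgo spec directly, so callers wrap them with expectNoError. A minimal sketch of the resulting call pattern, not part of the diff itself (the wrapper function name and replica count are illustrative; RunRC, DeleteRC, expectNoError, Logf, and util.NewUUID all come from the e2e test package shown below):

// Illustrative caller, mirroring the call sites updated in this PR.
// runPauseRC is a hypothetical helper name used only for this sketch.
func runPauseRC(c *client.Client, ns string, replicas int) {
	name := "my-short-lived-pod" + string(util.NewUUID())
	// RunRC now returns an error; the caller decides how to fail the spec.
	expectNoError(RunRC(c, name, ns, "gcr.io/google_containers/pause:go", replicas))
	Logf("Launched %v pods", replicas)
	// DeleteRC scales the controller to zero, waits, then deletes it.
	expectNoError(DeleteRC(c, ns, name))
}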
@@ -30,175 +30,12 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/util/wait"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
 
-	"github.com/golang/glog"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 )
 
-// Convenient wrapper around listing pods supporting retries.
-func listPods(c *client.Client, namespace string, label labels.Selector, field fields.Selector) (*api.PodList, error) {
-	maxRetries := 4
-	pods, err := c.Pods(namespace).List(label, field)
-	for i := 0; i < maxRetries; i++ {
-		if err == nil {
-			return pods, nil
-		}
-		pods, err = c.Pods(namespace).List(label, field)
-	}
-	return pods, err
-}
-
-// Delete a Replication Controller and all pods it spawned
-func DeleteRC(c *client.Client, ns, name string) error {
-	rc, err := c.ReplicationControllers(ns).Get(name)
-	if err != nil {
-		return fmt.Errorf("Failed to find replication controller %s in namespace %s: %v", name, ns, err)
-	}
-
-	rc.Spec.Replicas = 0
-
-	if _, err := c.ReplicationControllers(ns).Update(rc); err != nil {
-		return fmt.Errorf("Failed to resize replication controller %s to zero: %v", name, err)
-	}
-
-	// Wait up to 20 minutes until all replicas are killed.
-	endTime := time.Now().Add(time.Minute * 20)
-	for {
-		if time.Now().After(endTime) {
-			return fmt.Errorf("Timeout while waiting for replication controller %s replicas to 0", name)
-		}
-		remainingTime := endTime.Sub(time.Now())
-		err := wait.Poll(time.Second, remainingTime, client.ControllerHasDesiredReplicas(c, rc))
-		if err != nil {
-			glog.Errorf("Error while waiting for replication controller %s replicas to read 0: %v", name, err)
-		} else {
-			break
-		}
-	}
-
-	// Delete the replication controller.
-	if err := c.ReplicationControllers(ns).Delete(name); err != nil {
-		return fmt.Errorf("Failed to delete replication controller %s: %v", name, err)
-	}
-	return nil
-}
-
-// Launch a Replication Controller and wait for all pods it spawns
-// to become running. The controller will need to be cleaned up external
-// to this method
-func RunRC(c *client.Client, name string, ns, image string, replicas int) {
-	defer GinkgoRecover()
-
-	var last int
-	current := 0
-	same := 0
-
-	By(fmt.Sprintf("Creating replication controller %s", name))
-	_, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
-		ObjectMeta: api.ObjectMeta{
-			Name: name,
-		},
-		Spec: api.ReplicationControllerSpec{
-			Replicas: replicas,
-			Selector: map[string]string{
-				"name": name,
-			},
-			Template: &api.PodTemplateSpec{
-				ObjectMeta: api.ObjectMeta{
-					Labels: map[string]string{"name": name},
-				},
-				Spec: api.PodSpec{
-					Containers: []api.Container{
-						{
-							Name:  name,
-							Image: image,
-							Ports: []api.ContainerPort{{ContainerPort: 80}},
-						},
-					},
-				},
-			},
-		},
-	})
-	Expect(err).NotTo(HaveOccurred())
-
-	By(fmt.Sprintf("Making sure all %d replicas exist", replicas))
-	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-	pods, err := listPods(c, ns, label, fields.Everything())
-	Expect(err).NotTo(HaveOccurred())
-	current = len(pods.Items)
-	failCount := 5
-	for same < failCount && current < replicas {
-		glog.Infof("Controller %s: Found %d pods out of %d", name, current, replicas)
-		if last < current {
-			same = 0
-		} else if last == current {
-			same++
-		} else if current < last {
-			Failf("Controller %s: Number of submitted pods dropped from %d to %d", name, last, current)
-		}
-
-		if same >= failCount {
-			glog.Infof("No pods submitted for the last %d checks", failCount)
-		}
-
-		last = current
-		time.Sleep(5 * time.Second)
-		pods, err = listPods(c, ns, label, fields.Everything())
-		Expect(err).NotTo(HaveOccurred())
-		current = len(pods.Items)
-	}
-	Expect(current).To(Equal(replicas))
-	glog.Infof("Controller %s: Found %d pods out of %d", name, current, replicas)
-
-	By("Waiting for each pod to be running")
-	same = 0
-	last = 0
-	failCount = 10
-	current = 0
-	for same < failCount && current < replicas {
-		current = 0
-		waiting := 0
-		pending := 0
-		unknown := 0
-		time.Sleep(10 * time.Second)
-
-		currentPods, listErr := listPods(c, ns, label, fields.Everything())
-		Expect(listErr).NotTo(HaveOccurred())
-		if len(currentPods.Items) != len(pods.Items) {
-			Failf("Number of reported pods changed: %d vs %d", len(currentPods.Items), len(pods.Items))
-		}
-		for _, p := range currentPods.Items {
-			if p.Status.Phase == api.PodRunning {
-				current++
-			} else if p.Status.Phase == api.PodPending {
-				if p.Spec.Host == "" {
-					waiting++
-				} else {
-					pending++
-				}
-			} else if p.Status.Phase == api.PodUnknown {
-				unknown++
-			}
-		}
-		glog.Infof("Pod States: %d running, %d pending, %d waiting, %d unknown ", current, pending, waiting, unknown)
-		if last < current {
-			same = 0
-		} else if last == current {
-			same++
-		} else if current < last {
-			Failf("Number of running pods dropped from %d to %d", last, current)
-		}
-		if same >= failCount {
-			glog.Infof("No pods started for the last %d checks", failCount)
-		}
-		last = current
-	}
-	Expect(current).To(Equal(replicas))
-}
-
 // This test suite can take a long time to run, so by default it is added to
 // the ginkgo.skip list (see driver.go).
 // To run this suite you must explicitly ask for it by setting the
@@ -296,7 +133,7 @@ var _ = Describe("Density", func() {
 		go controller.Run(stop)
 
 		// Start the replication controller
-		RunRC(c, RCName, ns, "gcr.io/google_containers/pause:go", totalPods)
+		expectNoError(RunRC(c, RCName, ns, "gcr.io/google_containers/pause:go", totalPods))
 
 		By("Waiting for all events to be recorded")
 		last := -1
@@ -359,9 +196,9 @@ var _ = Describe("Density", func() {
 				for i := 0; i < itArg.rcsPerThread; i++ {
 					name := "my-short-lived-pod" + string(util.NewUUID())
 					n := itArg.podsPerMinion * minionCount
-					RunRC(c, name, ns, "gcr.io/google_containers/pause:go", n)
+					expectNoError(RunRC(c, name, ns, "gcr.io/google_containers/pause:go", n))
 					podsLaunched += n
-					glog.Info("Launched %v pods so far...", podsLaunched)
+					Logf("Launched %v pods so far...", podsLaunched)
 					err := DeleteRC(c, ns, name)
 					expectNoError(err)
 				}
@@ -369,7 +206,7 @@ var _ = Describe("Density", func() {
 		}
 		// Wait for all the pods from all the RC's to return.
 		wg.Wait()
-		Logf("%v pods out of %v launched", podsLaunched, itArg.totalPods)
+		Logf("%v pods out of %v launched", podsLaunched, itArg.totalPods)
 	})
 }
})
test/e2e/util.go (175 lines changed)
@@ -30,6 +30,9 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/clientauth"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/util/wait"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
@@ -401,3 +404,175 @@ func testContainerOutputInNamespace(ns, scenarioName string, c *client.Client, p
 		Expect(string(logs)).To(ContainSubstring(m), "%q in container output", m)
 	}
 }
+
+// Delete a Replication Controller and all pods it spawned
+func DeleteRC(c *client.Client, ns, name string) error {
+	rc, err := c.ReplicationControllers(ns).Get(name)
+	if err != nil {
+		return fmt.Errorf("Failed to find replication controller %s in namespace %s: %v", name, ns, err)
+	}
+
+	rc.Spec.Replicas = 0
+
+	if _, err := c.ReplicationControllers(ns).Update(rc); err != nil {
+		return fmt.Errorf("Failed to resize replication controller %s to zero: %v", name, err)
+	}
+
+	// Wait up to 20 minutes until all replicas are killed.
+	endTime := time.Now().Add(time.Minute * 20)
+	for {
+		if time.Now().After(endTime) {
+			return fmt.Errorf("Timeout while waiting for replication controller %s replicas to 0", name)
+		}
+		remainingTime := endTime.Sub(time.Now())
+		err := wait.Poll(time.Second, remainingTime, client.ControllerHasDesiredReplicas(c, rc))
+		if err != nil {
+			Logf("Error while waiting for replication controller %s replicas to read 0: %v", name, err)
+		} else {
+			break
+		}
+	}
+
+	// Delete the replication controller.
+	if err := c.ReplicationControllers(ns).Delete(name); err != nil {
+		return fmt.Errorf("Failed to delete replication controller %s: %v", name, err)
+	}
+	return nil
+}
+
+// Launch a Replication Controller and wait for all pods it spawns
+// to become running. The controller will need to be cleaned up external
+// to this method
+func RunRC(c *client.Client, name string, ns, image string, replicas int) error {
+	var last int
+	current := 0
+	same := 0
+
+	By(fmt.Sprintf("Creating replication controller %s", name))
+	_, err := c.ReplicationControllers(ns).Create(&api.ReplicationController{
+		ObjectMeta: api.ObjectMeta{
+			Name: name,
+		},
+		Spec: api.ReplicationControllerSpec{
+			Replicas: replicas,
+			Selector: map[string]string{
+				"name": name,
+			},
+			Template: &api.PodTemplateSpec{
+				ObjectMeta: api.ObjectMeta{
+					Labels: map[string]string{"name": name},
+				},
+				Spec: api.PodSpec{
+					Containers: []api.Container{
+						{
+							Name:  name,
+							Image: image,
+							Ports: []api.ContainerPort{{ContainerPort: 80}},
+						},
+					},
+				},
+			},
+		},
+	})
+	if err != nil {
+		return fmt.Errorf("Error creating replication controller: %v", err)
+	}
+
+	By(fmt.Sprintf("Making sure all %d replicas exist", replicas))
+	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
+	pods, err := listPods(c, ns, label, fields.Everything())
+	if err != nil {
+		return fmt.Errorf("Error listing pods: %v", err)
+	}
+	current = len(pods.Items)
+	failCount := 5
+	for same < failCount && current < replicas {
+		Logf("Controller %s: Found %d pods out of %d", name, current, replicas)
+		if last < current {
+			same = 0
+		} else if last == current {
+			same++
+		} else if current < last {
+			return fmt.Errorf("Controller %s: Number of submitted pods dropped from %d to %d", name, last, current)
+		}
+
+		if same >= failCount {
+			return fmt.Errorf("Controller %s: No pods submitted for the last %d checks", name, failCount)
+		}
+
+		last = current
+		time.Sleep(5 * time.Second)
+		pods, err = listPods(c, ns, label, fields.Everything())
+		if err != nil {
+			return fmt.Errorf("Error listing pods: %v", err)
+		}
+		current = len(pods.Items)
+	}
+	if current != replicas {
+		return fmt.Errorf("Controller %s: Only found %d replicas out of %d", name, current, replicas)
+	}
+	Logf("Controller %s: Found %d pods out of %d", name, current, replicas)
+
+	By("Waiting for each pod to be running")
+	same = 0
+	last = 0
+	failCount = 10
+	current = 0
+	for same < failCount && current < replicas {
+		current = 0
+		waiting := 0
+		pending := 0
+		unknown := 0
+		time.Sleep(10 * time.Second)
+
+		currentPods, err := listPods(c, ns, label, fields.Everything())
+		if err != nil {
+			return fmt.Errorf("Error listing pods: %v", err)
+		}
+		if len(currentPods.Items) != len(pods.Items) {
+			return fmt.Errorf("Number of reported pods changed: %d vs %d", len(currentPods.Items), len(pods.Items))
+		}
+		for _, p := range currentPods.Items {
+			if p.Status.Phase == api.PodRunning {
+				current++
+			} else if p.Status.Phase == api.PodPending {
+				if p.Spec.Host == "" {
+					waiting++
+				} else {
+					pending++
+				}
+			} else if p.Status.Phase == api.PodUnknown {
+				unknown++
+			}
+		}
+		Logf("Pod States: %d running, %d pending, %d waiting, %d unknown ", current, pending, waiting, unknown)
+		if last < current {
+			same = 0
+		} else if last == current {
+			same++
+		} else if current < last {
+			return fmt.Errorf("Number of running pods dropped from %d to %d", last, current)
+		}
+		if same >= failCount {
+			return fmt.Errorf("No pods started for the last %d checks", failCount)
+		}
+		last = current
+	}
+	if current != replicas {
+		return fmt.Errorf("Only %d pods started out of %d", current, replicas)
+	}
+	return nil
+}
+
+// Convenient wrapper around listing pods supporting retries.
+func listPods(c *client.Client, namespace string, label labels.Selector, field fields.Selector) (*api.PodList, error) {
+	maxRetries := 4
+	pods, err := c.Pods(namespace).List(label, field)
+	for i := 0; i < maxRetries; i++ {
+		if err == nil {
+			return pods, nil
+		}
+		pods, err = c.Pods(namespace).List(label, field)
+	}
+	return pods, err
+}