Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-05 18:24:07 +00:00)
Merge pull request #10175 from wojtek-t/wait_for_namespaces
In scalability tests wait until terminating namespaces are deleted
commit bec793119b
@@ -86,6 +86,13 @@ var _ = Describe("Density", func() {
 		expectNoError(err)
 		minionCount = len(minions.Items)
 		Expect(minionCount).NotTo(BeZero())
+
+		// Terminating a namespace (deleting the remaining objects from it - which
+		// generally means events) can affect the current run. Thus we wait for all
+		// terminating namespace to be finally deleted before starting this test.
+		err = deleteTestingNS(c)
+		expectNoError(err)
+
 		nsForTesting, err := createTestingNS("density", c)
 		ns = nsForTesting.Name
 		expectNoError(err)
@@ -149,7 +156,7 @@ var _ = Describe("Density", func() {
 		// TODO: Reenable once we can measure latency only from a single test.
 		// TODO: Expose runLatencyTest as ginkgo flag.
 		{podsPerMinion: 3, skip: true, runLatencyTest: false, interval: 10 * time.Second},
-		{podsPerMinion: 30, skip: true, runLatencyTest: false, interval: 10 * time.Second},
+		{podsPerMinion: 30, skip: true, runLatencyTest: true, interval: 10 * time.Second},
 		// More than 30 pods per node is outside our v1.0 goals.
 		// We might want to enable those tests in the future.
 		{podsPerMinion: 50, skip: true, runLatencyTest: false, interval: 10 * time.Second},
@@ -158,7 +165,7 @@ var _ = Describe("Density", func() {
 
 	for _, testArg := range densityTests {
 		name := fmt.Sprintf("should allow starting %d pods per node", testArg.podsPerMinion)
-		if testArg.podsPerMinion <= 30 {
+		if testArg.podsPerMinion == 30 {
 			name = "[Performance suite] " + name
 		}
 		if testArg.skip {
@@ -63,6 +63,13 @@ var _ = Describe("Load capacity", func() {
 		expectNoError(err)
 		nodeCount = len(nodes.Items)
 		Expect(nodeCount).NotTo(BeZero())
+
+		// Terminating a namespace (deleting the remaining objects from it - which
+		// generally means events) can affect the current run. Thus we wait for all
+		// terminating namespace to be finally deleted before starting this test.
+		err = deleteTestingNS(c)
+		expectNoError(err)
+
 		nsForTesting, err := createTestingNS("load", c)
 		ns = nsForTesting.Name
 		expectNoError(err)
@@ -376,6 +376,32 @@ func createTestingNS(baseName string, c *client.Client) (*api.Namespace, error)
 	return got, nil
 }
 
+// deleteTestingNS checks whether all e2e based existing namespaces are in the Terminating state
+// and waits until they are finally deleted.
+func deleteTestingNS(c *client.Client) error {
+	Logf("Waiting for terminating namespaces to be deleted...")
+	for start := time.Now(); time.Since(start) < 30*time.Minute; time.Sleep(15 * time.Second) {
+		namespaces, err := c.Namespaces().List(labels.Everything(), fields.Everything())
+		if err != nil {
+			Logf("Listing namespaces failed: %v", err)
+			continue
+		}
+		terminating := 0
+		for _, ns := range namespaces.Items {
+			if strings.HasPrefix(ns.ObjectMeta.Name, "e2e-tests-") {
+				if ns.Status.Phase == api.NamespaceActive {
+					return fmt.Errorf("Namespace %s is active", ns)
+				}
+				terminating++
+			}
+		}
+		if terminating == 0 {
+			return nil
+		}
+	}
+	return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out")
+}
+
 func waitForPodRunningInNamespace(c *client.Client, podName string, namespace string) error {
 	return waitForPodCondition(c, namespace, podName, "running", podStartTimeout, func(pod *api.Pod) (bool, error) {
 		if pod.Status.Phase == api.PodRunning {
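To make the intent of the two test-suite hunks explicit: each scalability suite now drains any leftover e2e-tests-* namespaces before creating its own, so teardown of earlier runs (mostly event deletion) cannot skew the measurements. Below is a minimal sketch of that setup ordering, assuming the helpers from this diff (deleteTestingNS, createTestingNS, expectNoError) and the suite-level c *client.Client and ns string variables are in scope; the BeforeEach body itself is illustrative, not part of this commit.

BeforeEach(func() {
	// Wait for namespaces left over from previous tests (usually still deleting
	// their events) to disappear, so their teardown does not affect this run.
	expectNoError(deleteTestingNS(c))

	// Only then create a fresh namespace for this test to run in.
	nsForTesting, err := createTestingNS("density", c)
	expectNoError(err)
	ns = nsForTesting.Name
})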