Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-26 05:03:09 +00:00)
Increased the waiting-for-cluster-size timeout for the Autoscaling e2e tests
parent 8e2cad79ea
commit e7eccea4ac
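This commit threads an explicit timeout through waitForClusterSize instead of the 10 minutes previously hard-coded inside the helper: the Autoscaling e2e tests now allow up to 20 minutes for a scale event, while the Nodes resize tests keep the original 10 minutes. The signature change, as it appears in the diff below:

	// old: waits at most a fixed 10 minutes
	func waitForClusterSize(c *client.Client, size int) error
	// new: each caller states how long it is prepared to wait
	func waitForClusterSize(c *client.Client, size int, timeout time.Duration) error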
@@ -54,40 +54,40 @@ var _ = Describe("Autoscaling", func() {
 		setUpAutoscaler("cpu/node_utilization", 0.7, nodeCount, nodeCount+1)

 		ConsumeCpu(f, "cpu-utilization", nodeCount*coresPerNode)
-		expectNoError(waitForClusterSize(f.Client, nodeCount+1))
+		expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))

 		StopConsuming(f, "cpu-utilization")
-		expectNoError(waitForClusterSize(f.Client, nodeCount))
+		expectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))
 	})

 	It("[Skipped] should scale cluster size based on cpu reservation", func() {
 		setUpAutoscaler("cpu/node_reservation", 0.7, 1, 10)

 		ReserveCpu(f, "cpu-reservation", 800)
-		expectNoError(waitForClusterSize(f.Client, 2))
+		expectNoError(waitForClusterSize(f.Client, 2, 20*time.Minute))

 		StopConsuming(f, "cpu-reservation")
-		expectNoError(waitForClusterSize(f.Client, 1))
+		expectNoError(waitForClusterSize(f.Client, 1, 20*time.Minute))
 	})

 	It("[Skipped] should scale cluster size based on memory utilization", func() {
 		setUpAutoscaler("memory/node_utilization", 0.5, 1, 10)

 		ConsumeMemory(f, "memory-utilization", 2)
-		expectNoError(waitForClusterSize(f.Client, 2))
+		expectNoError(waitForClusterSize(f.Client, 2, 20*time.Minute))

 		StopConsuming(f, "memory-utilization")
-		expectNoError(waitForClusterSize(f.Client, 1))
+		expectNoError(waitForClusterSize(f.Client, 1, 20*time.Minute))
 	})

 	It("[Skipped] should scale cluster size based on memory reservation", func() {
 		setUpAutoscaler("memory/node_reservation", 0.5, 1, 10)

 		ReserveMemory(f, "memory-reservation", 2)
-		expectNoError(waitForClusterSize(f.Client, 2))
+		expectNoError(waitForClusterSize(f.Client, 2, 20*time.Minute))

 		StopConsuming(f, "memory-reservation")
-		expectNoError(waitForClusterSize(f.Client, 1))
+		expectNoError(waitForClusterSize(f.Client, 1, 20*time.Minute))
 	})
 })

@@ -110,8 +110,7 @@ func waitForGroupSize(size int) error {
 	return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size)
 }

-func waitForClusterSize(c *client.Client, size int) error {
-	timeout := 10 * time.Minute
+func waitForClusterSize(c *client.Client, size int, timeout time.Duration) error {
 	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
 		nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
 		if err != nil {
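For context, here is a sketch of how the whole helper plausibly reads after this hunk. Only the signature, the polling loop, the node listing, and the error check are visible in the diff; the logging calls, the size comparison, and the final error message are assumptions modeled on the waitForGroupSize error shown above:

	func waitForClusterSize(c *client.Client, size int, timeout time.Duration) error {
		// Poll every 20 seconds until the node count matches or the caller's timeout expires.
		for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
			nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
			if err != nil {
				// Assumed: tolerate transient API errors and retry on the next tick.
				Logf("Failed to list nodes: %v", err)
				continue
			}
			if len(nodes.Items) == size {
				return nil
			}
			// Assumed logging, in the style of the surrounding e2e helpers.
			Logf("Waiting for cluster size %d, current size %d", size, len(nodes.Items))
		}
		// Assumed wording, mirroring the waitForGroupSize error in the hunk above.
		return fmt.Errorf("timeout waiting %v for cluster size to be %d", timeout, size)
	}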
@@ -441,7 +440,7 @@ var _ = Describe("Nodes", func() {
 		if err := waitForGroupSize(testContext.CloudConfig.NumNodes); err != nil {
 			Failf("Couldn't restore the original node instance group size: %v", err)
 		}
-		if err := waitForClusterSize(c, testContext.CloudConfig.NumNodes); err != nil {
+		if err := waitForClusterSize(c, testContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
 			Failf("Couldn't restore the original cluster size: %v", err)
 		}
 	})
@@ -460,7 +459,7 @@ var _ = Describe("Nodes", func() {
 			Expect(err).NotTo(HaveOccurred())
 			err = waitForGroupSize(replicas - 1)
 			Expect(err).NotTo(HaveOccurred())
-			err = waitForClusterSize(c, replicas-1)
+			err = waitForClusterSize(c, replicas-1, 10*time.Minute)
 			Expect(err).NotTo(HaveOccurred())

 			By("verifying whether the pods from the removed node are recreated")
@@ -484,7 +483,7 @@ var _ = Describe("Nodes", func() {
 			Expect(err).NotTo(HaveOccurred())
 			err = waitForGroupSize(replicas + 1)
 			Expect(err).NotTo(HaveOccurred())
-			err = waitForClusterSize(c, replicas+1)
+			err = waitForClusterSize(c, replicas+1, 10*time.Minute)
 			Expect(err).NotTo(HaveOccurred())

 			By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", replicas+1))