Renamed ClusterSize and WaitForClusterSize to NumberOfReadyNodes and WaitForReadyNodes, respectively.

Author: Andrzej Wasylkowski 2017-08-29 11:42:30 +02:00
parent 9b0f4c9f7c
commit 0c1ab5597e
6 changed files with 32 additions and 32 deletions
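From a caller's point of view the rename is mechanical. Below is a minimal sketch of a before/after call site, assuming the usual e2e framework import path k8s.io/kubernetes/test/e2e/framework and clientset alias; the helper function, its arguments, and the log line are illustrative, and only the renamed framework functions come from this commit.

package e2esketch

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForOriginalSize is a hypothetical helper, not part of this commit;
// it only illustrates the renamed framework calls.
func waitForOriginalSize(c clientset.Interface, nodesNum int) {
	// Before this commit:
	//   framework.ExpectNoError(framework.WaitForClusterSize(c, nodesNum, 15*time.Minute))
	// After this commit:
	framework.ExpectNoError(framework.WaitForReadyNodes(c, nodesNum, 15*time.Minute))

	// The read-only counterpart was renamed the same way
	// (old: framework.ClusterSize, new: framework.NumberOfReadyNodes).
	ready, err := framework.NumberOfReadyNodes(c)
	framework.ExpectNoError(err)
	framework.Logf("cluster currently has %d ready nodes", ready)
}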

View File

@@ -70,7 +70,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
AfterEach(func() {
// Scale down back to only 'nodesNum' nodes, as expected at the start of the test.
framework.ExpectNoError(framework.ResizeGroup(nodeGroupName, nodesNum))
-framework.ExpectNoError(framework.WaitForClusterSize(f.ClientSet, nodesNum, 15*time.Minute))
+framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, nodesNum, 15*time.Minute))
})
Measure("takes less than 15 minutes", func(b Benchmarker) {

View File

@@ -89,7 +89,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
}
}
-framework.ExpectNoError(framework.WaitForClusterSize(c, sum, scaleUpTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(c, sum, scaleUpTimeout))
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
@@ -114,7 +114,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
AfterEach(func() {
By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
-framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount, scaleDownTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount, scaleDownTimeout))
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
s := time.Now()
@@ -216,7 +216,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
anyKey(originalSizes): totalNodes,
}
setMigSizes(newSizes)
-framework.ExpectNoError(framework.WaitForClusterSize(f.ClientSet, totalNodes, largeResizeTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
// run replicas
rcConfig := reserveMemoryRCConfig(f, "some-pod", replicas, replicas*perNodeReservation, largeScaleUpTimeout)
@@ -250,7 +250,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
}
setMigSizes(newSizes)
-framework.ExpectNoError(framework.WaitForClusterSize(f.ClientSet, totalNodes, largeResizeTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
// annotate all nodes with no-scale-down
ScaleDownDisabledKey := "cluster-autoscaler.kubernetes.io/scale-down-disabled"
@@ -304,7 +304,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
anyKey(originalSizes): totalNodes,
}
setMigSizes(newSizes)
-framework.ExpectNoError(framework.WaitForClusterSize(f.ClientSet, totalNodes, largeResizeTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, totalNodes, largeResizeTimeout))
divider := int(float64(totalNodes) * 0.7)
fullNodesCount := divider
underutilizedNodesCount := totalNodes - fullNodesCount
@@ -350,7 +350,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, podsConfig.Name)
// Ensure that no new nodes have been added so far.
-Expect(framework.ClusterSize(f.ClientSet)).To(Equal(nodeCount))
+Expect(framework.NumberOfReadyNodes(f.ClientSet)).To(Equal(nodeCount))
// Start a number of schedulable pods to ensure CA reacts.
additionalNodes := maxNodes - nodeCount
@@ -407,7 +407,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= minExpectedNodeCount }, scaleUpTimeout))
} else {
-framework.ExpectNoError(framework.WaitForClusterSize(f.ClientSet, config.expectedResult.nodes, scaleUpTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(f.ClientSet, config.expectedResult.nodes, scaleUpTimeout))
}
glog.Infof("cluster is increased")
if tolerateMissingPodCount > 0 {

View File

@@ -97,7 +97,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
sum += size
}
// Give instances time to spin up
-framework.ExpectNoError(framework.WaitForClusterSize(c, sum, scaleUpTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(c, sum, scaleUpTimeout))
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
@@ -127,7 +127,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
for _, size := range originalSizes {
expectedNodes += size
}
-framework.ExpectNoError(framework.WaitForClusterSize(c, expectedNodes, scaleDownTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(c, expectedNodes, scaleDownTimeout))
nodes, err := c.Core().Nodes().List(metav1.ListOptions{})
framework.ExpectNoError(err)
@@ -226,7 +226,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
-framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+1, resizeTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+1, resizeTimeout))
glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
By("Get memory available on new node, so we can account for it when creating RC")
@@ -253,7 +253,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
-framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+1, resizeTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+1, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2))
})
@@ -283,7 +283,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "extra-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
-framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+newPods, scaleUpTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
It("should increase cluster size if pod requesting EmptyDir volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -304,7 +304,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "extra-pod")
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
-framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+newPods, scaleUpTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -377,7 +377,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}()
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
-framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+newPods, scaleUpTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
})
It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -478,7 +478,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
-framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+1, resizeTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+1, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
@@ -489,7 +489,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
// reseting all the timers in scale down code. Adding 5 extra minutes to workaround
// this issue.
// TODO: Remove the extra time when GKE restart is fixed.
-framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+2, scaleUpTimeout+5*time.Minute))
+framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount+2, scaleUpTimeout+5*time.Minute))
})
simpleScaleDownTest := func(unready int) {
@@ -588,7 +588,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
err := framework.ResizeGroup(minMig, int32(0))
framework.ExpectNoError(err)
-framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount-minSize, resizeTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize, resizeTimeout))
By("Make remaining nodes unschedulable")
nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
@@ -628,7 +628,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
err := framework.ResizeGroup(minMig, int32(1))
framework.ExpectNoError(err)
-framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount-minSize+1, resizeTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(c, nodeCount-minSize+1, resizeTimeout))
By("Make the single node unschedulable")
allNodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
@@ -699,7 +699,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
}
testFunction()
// Give nodes time to recover from network failure
-framework.ExpectNoError(framework.WaitForClusterSize(c, len(nodes.Items), nodesRecoverTimeout))
+framework.ExpectNoError(framework.WaitForReadyNodes(c, len(nodes.Items), nodesRecoverTimeout))
})
})
@@ -937,7 +937,7 @@ func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
return nil
}
-// WaitForClusterSize waits until the cluster size matches the given function.
+// WaitForClusterSizeFunc waits until the cluster size matches the given function.
func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
return WaitForClusterSizeFuncWithUnready(c, sizeFunc, timeout, 0)
}

View File

@@ -151,7 +151,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
By("Restoring cluster size")
setMigSizes(originalSizes)
-Expect(framework.WaitForClusterSize(c, sum, scaleDownTimeout)).NotTo(HaveOccurred())
+Expect(framework.WaitForReadyNodes(c, sum, scaleDownTimeout)).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())

View File

@@ -3892,8 +3892,8 @@ func WaitForControllerManagerUp() error {
return fmt.Errorf("waiting for controller-manager timed out")
}
-// Returns cluster size (number of ready Nodes excluding Master Node).
-func ClusterSize(c clientset.Interface) (int, error) {
+// Returns number of ready Nodes excluding Master Node.
+func NumberOfReadyNodes(c clientset.Interface) (int, error) {
nodes, err := c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
@@ -3909,9 +3909,9 @@ func ClusterSize(c clientset.Interface) (int, error) {
return len(nodes.Items), nil
}
-// WaitForClusterSize waits until the cluster has desired size and there is no not-ready nodes in it.
+// WaitForReadyNodes waits until the cluster has desired size and there is no not-ready nodes in it.
// By cluster size we mean number of Nodes excluding Master Node.
-func WaitForClusterSize(c clientset.Interface, size int, timeout time.Duration) error {
+func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
@@ -3929,12 +3929,12 @@ func WaitForClusterSize(c clientset.Interface, size int, timeout time.Duration)
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
Logf("Cluster has reached the desired size %d", size)
Logf("Cluster has reached the desired number of ready nodes %d", size)
return nil
}
Logf("Waiting for cluster size %d, current size %d, not ready nodes %d", size, numNodes, numNodes-numReady)
Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for cluster size to be %d", timeout, size)
return fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size)
}
func GenerateMasterRegexp(prefix string) string {
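The WaitForReadyNodes loop above is a plain poll-until-count pattern. As a dependency-free sketch of just that loop shape (the countReady callback, the interval parameter, and the toy main below are stand-ins for illustration, not framework API):

package main

import (
	"fmt"
	"time"
)

// waitForCount polls countReady until it reports the desired count or the
// timeout elapses, mirroring the 20-second polling loop of WaitForReadyNodes.
func waitForCount(countReady func() int, want int, interval, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(interval) {
		got := countReady()
		if got == want {
			fmt.Printf("reached the desired number of ready items: %d\n", want)
			return nil
		}
		fmt.Printf("waiting for %d ready items, currently %d\n", want, got)
	}
	return fmt.Errorf("timeout waiting %v for number of ready items to be %d", timeout, want)
}

func main() {
	ready := 0
	// Toy stand-in for a node lister: one more item becomes ready per poll.
	if err := waitForCount(func() int { ready++; return ready }, 3, 10*time.Millisecond, time.Second); err != nil {
		fmt.Println(err)
	}
}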

View File

@@ -98,7 +98,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
framework.Failf("Couldn't restore the original node instance group size: %v", err)
}
-if err := framework.WaitForClusterSize(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
+if err := framework.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
framework.Failf("Couldn't restore the original cluster size: %v", err)
}
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
@@ -124,7 +124,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForGroupSize(group, replicas-1)
Expect(err).NotTo(HaveOccurred())
-err = framework.WaitForClusterSize(c, int(replicas-1), 10*time.Minute)
+err = framework.WaitForReadyNodes(c, int(replicas-1), 10*time.Minute)
Expect(err).NotTo(HaveOccurred())
By("waiting 1 minute for the watch in the podGC to catch up, remove any pods scheduled on " +
@@ -152,7 +152,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForGroupSize(group, replicas+1)
Expect(err).NotTo(HaveOccurred())
-err = framework.WaitForClusterSize(c, int(replicas+1), 10*time.Minute)
+err = framework.WaitForReadyNodes(c, int(replicas+1), 10*time.Minute)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", replicas+1))