Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 11:50:44 +00:00)
Merge pull request #48262 from MaciekPytel/fix_autoscaler_e2e_on_gke_2
Automatic merge from submit-queue (batch tested with PRs 48262, 48805)

Fix condition in autoscaler e2e

Fix an off-by-one in the cluster-autoscaler e2e test that made the test trivially pass (without actually requiring a scale-up to happen).
Commit 3d24cf057f
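For context before the diff: the test brings up an extra one-node pool before reserving memory, so the cluster already holds nodeCount+1 nodes without any autoscaling. The old condition size >= nodeCount+1 was therefore satisfied from the start. A minimal, self-contained Go sketch of the broken versus fixed condition (nodeCount and the pool size are illustrative values, not taken from the test):

package main

import "fmt"

func main() {
	// Illustrative values, not from the test: the original pool has
	// nodeCount nodes and the test adds an extra pool with one node.
	nodeCount := 3
	sizeBeforeScaleUp := nodeCount + 1 // extra pool already counted

	// Old condition: satisfied before the autoscaler does anything.
	fmt.Println(sizeBeforeScaleUp >= nodeCount+1) // true

	// Fixed condition: only satisfied once a real scale-up adds a node.
	fmt.Println(sizeBeforeScaleUp >= nodeCount+2) // false
}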
@@ -229,12 +229,20 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 		framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+1, resizeTimeout))
 		glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
 
-		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false, defaultTimeout)
+		By("Get memory available on new node, so we can account for it when creating RC")
+		nodes, err := framework.GetGroupNodes(extraPoolName)
+		framework.ExpectNoError(err)
+		Expect(len(nodes)).Should(Equal(1))
+		node, err := f.ClientSet.Core().Nodes().Get(nodes[0], metav1.GetOptions{})
+		extraMem := node.Status.Capacity[v1.ResourceMemory]
+		extraMemMb := int((&extraMem).Value() / 1024 / 1024)
+
+		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb+extraMemMb, false, defaultTimeout)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
 
 		// Verify, that cluster size is increased
 		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
-			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
+			func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
 		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
 	})
 
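The first hunk also makes the memory reservation account for the extra node's capacity, so the reservation can no longer fit on the existing nodes and a real scale-up is forced. A minimal sketch of the extraMemMb conversion, using resource.Quantity from k8s.io/apimachinery (the "7500Mi" capacity is an assumed example value, not read from a real node):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// node.Status.Capacity[v1.ResourceMemory] is a resource.Quantity;
	// here we parse an assumed capacity instead of reading a real node.
	extraMem := resource.MustParse("7500Mi")

	// Same conversion as the diff: bytes -> MB via two divisions by 1024.
	extraMemMb := int(extraMem.Value() / 1024 / 1024)
	fmt.Println(extraMemMb) // 7500
}

The diff calls (&extraMem).Value() because a value indexed out of a map is not addressable; on a local variable, as above, extraMem.Value() works directly.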
@@ -586,6 +594,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 		nodes, err := f.ClientSet.Core().Nodes().List(metav1.ListOptions{FieldSelector: fields.Set{
 			"spec.unschedulable": "false",
 		}.AsSelector().String()})
+		framework.ExpectNoError(err)
 
 		for _, node := range nodes.Items {
 			err = makeNodeUnschedulable(f.ClientSet, &node)
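The second hunk adds a missing error check after the Nodes().List call: without it, a failed List would be silently ignored and the loop would iterate over an empty or nil result. A small self-contained sketch of the pattern, with expectNoError as a hypothetical stand-in for framework.ExpectNoError:

package main

import (
	"errors"
	"fmt"
)

// expectNoError is a hypothetical stand-in for framework.ExpectNoError:
// it aborts as soon as a call returns an error.
func expectNoError(err error) {
	if err != nil {
		panic(fmt.Sprintf("unexpected error: %v", err))
	}
}

// listNodes simulates the Nodes().List call failing.
func listNodes() ([]string, error) {
	return nil, errors.New("transient API error")
}

func main() {
	nodes, err := listNodes()
	expectNoError(err) // the added check: fail here, loudly
	for _, node := range nodes {
		fmt.Println(node) // never reached when List fails
	}
}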