use allocatable instead of capacity for node memory
This commit is contained in commit f6d7b96b3a (parent 8ca1d9f19b).
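
Background for the hunks below, all of which are in the cluster size autoscaling e2e suite: Node.Status.Capacity reports the total resources present on a node, while Node.Status.Allocatable is capacity minus the memory set aside for system daemons, the kubelet, and eviction thresholds. The scheduler (and therefore the cluster autoscaler) places pods against allocatable, so sizing the test's memory reservations from capacity over-reserves and makes the scale-up expectations inexact. The sketch below is only an illustration of that difference, not part of the commit; it reuses the pattern the diff itself uses (reading the memory Quantity and truncating to MiB). The import paths are the current upstream ones and may differ from this tree's vintage, and the List call without a context argument matches older client-go (recent versions add a ctx parameter).

    // allocatable_vs_capacity.go -- illustrative sketch only, not part of this commit.
    package main

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        clientset "k8s.io/client-go/kubernetes"
    )

    // printNodeMemory lists each node's memory capacity and allocatable,
    // converted to whole MiB the same way the test does. The gap between the
    // two numbers is memory reserved for system daemons, the kubelet, and
    // eviction thresholds, which pods can never be scheduled against.
    func printNodeMemory(c clientset.Interface) error {
        nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
        if err != nil {
            return err
        }
        for _, node := range nodes.Items {
            capMem := node.Status.Capacity[v1.ResourceMemory]
            allocMem := node.Status.Allocatable[v1.ResourceMemory]
            fmt.Printf("%s: capacity=%dMi allocatable=%dMi\n",
                node.Name, capMem.Value()/1024/1024, allocMem.Value()/1024/1024)
        }
        return nil
    }
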
@@ -79,8 +79,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	f := framework.NewDefaultFramework("autoscaling")
 	var c clientset.Interface
 	var nodeCount int
-	var coresPerNode int
-	var memCapacityMb int
+	var memAllocatableMb int
 	var originalSizes map[string]int
 
 	BeforeEach(func() {
@@ -103,10 +102,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		nodeCount = len(nodes.Items)
 		By(fmt.Sprintf("Initial number of schedulable nodes: %v", nodeCount))
 		Expect(nodeCount).NotTo(BeZero())
-		cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
-		mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory]
-		coresPerNode = int((&cpu).MilliValue() / 1000)
-		memCapacityMb = int((&mem).Value() / 1024 / 1024)
+		mem := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
+		memAllocatableMb = int((&mem).Value() / 1024 / 1024)
 
 		Expect(nodeCount).Should(Equal(sum))
 
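
For reference, the conversion the new BeforeEach uses is plain integer division on the Quantity's byte value, so memAllocatableMb is whole MiB rounded down. As an illustrative figure (not taken from the commit): a node reporting 7.5Gi of allocatable memory is 8053063680 bytes, and 8053063680 / 1024 / 1024 = 7680, so memAllocatableMb would be 7680; any remainder below a full MiB is dropped.
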
@@ -150,7 +147,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 
 	It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
 		By("Creating unschedulable pod")
-		ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memCapacityMb)), false, defaultTimeout)
+		ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
 
 		By("Waiting for scale up hoping it won't happen")
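
As I read this test, the 1.1 multiplier makes the single pod request about 10% more memory than any one node can allocate, so it can never be scheduled, and since an identically-shaped new node would not fit it either, the autoscaler is expected not to scale up. Continuing the illustrative figure from above, memAllocatableMb = 7680 gives int(1.1*float64(7680)) = 8448 MB requested by one pod against 7680 MB allocatable per node. Either capacity or allocatable would produce an unschedulable pod here, but allocatable is the number the scheduler actually compares requests against, so the 10% margin now means what it says.
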
@@ -177,7 +174,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 
 	simpleScaleUpTest := func(unready int) {
-		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false, 1*time.Second)
+		ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
 
 		// Verify that cluster size is increased
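
In simpleScaleUpTest the reservation is sized differently: 100 replicas that together request nodeCount*memAllocatableMb MB, roughly the memory of the entire current cluster. Because the nodes already run system pods and per-node overhead, not all replicas can fit, some stay pending, and the autoscaler is expected to add nodes; the short 1*time.Second timeout just means the helper does not wait for the pods to become running. This reading of ReserveMemory's arguments (replica count, total megabytes to reserve across the replication controller, whether the pods are expected to run, timeout) is inferred from the call sites in this diff, not from the helper's definition.
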
@@ -200,7 +197,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		unmanagedNodes := nodeCount - status.ready
 
 		By("Schedule more pods than can fit and wait for cluster to scale-up")
-		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false, 1*time.Second)
+		ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
 
 		status, err = waitForScaleUpStatus(c, caOngoingScaleUpStatus, scaleUpTriggerTimeout)
@@ -235,7 +232,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		extraMem := nodes[0].Status.Capacity[v1.ResourceMemory]
 		extraMemMb := int((&extraMem).Value() / 1024 / 1024)
 
-		ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb+extraMemMb, false, defaultTimeout)
+		ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb+extraMemMb, false, defaultTimeout)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
 
 		// Verify, that cluster size is increased
@@ -480,7 +477,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
 
 		By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
-		ReserveMemory(f, "memory-reservation", 2, int(2.1*float64(memCapacityMb)), false, defaultTimeout)
+		ReserveMemory(f, "memory-reservation", 2, int(2.1*float64(memAllocatableMb)), false, defaultTimeout)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
 
 		// Apparently GKE master is restarted couple minutes after the node pool is added
@@ -683,7 +680,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 			nodesToBreak = nodesToBreak[1:]
 			framework.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction)
 		} else {
-			ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false, defaultTimeout)
+			ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout)
 			defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
 			time.Sleep(scaleUpTimeout)
 			currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)