Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 11:21:47 +00:00
Merge pull request #52779 from aleksandra-malinowska/autoscaling-test-fix-2
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Adjust parameter in cluster size autoscaling test

This adjusts a parameter in one of the cluster size autoscaling tests, which is failing in GKE as a result of moving to use node memory allocatable instead of capacity. It also contains comment fixes as a follow-up to #52707.

cc @wasylkowski
This commit is contained in: commit 65437de12c
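For context on the change described above, here is a minimal Go sketch (not part of this PR) of the capacity-versus-allocatable distinction the description refers to. The node object and its resource values are fabricated for illustration; on a real cluster they would be read from the API server.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Fabricated node purely for illustration: Capacity is the raw machine
	// size, Allocatable is what remains for pods after system reservations.
	node := v1.Node{
		Status: v1.NodeStatus{
			Capacity: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("2"),
				v1.ResourceMemory: resource.MustParse("7680Mi"),
			},
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("1930m"),
				v1.ResourceMemory: resource.MustParse("5960Mi"),
			},
		},
	}

	capMem := node.Status.Capacity[v1.ResourceMemory]
	allocMem := node.Status.Allocatable[v1.ResourceMemory]

	// A request sized against capacity can exceed allocatable, which is the
	// kind of mismatch the description attributes to the GKE failure.
	fmt.Printf("memory capacity:    %d MB\n", capMem.Value()/1024/1024)
	fmt.Printf("memory allocatable: %d MB\n", allocMem.Value()/1024/1024)
}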
@@ -79,7 +79,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 
 	// Calculate the CPU request of the service.
 	// This test expects that 8 pods will not fit in 'nodesNum' nodes, but will fit in >='nodesNum'+1 nodes.
-	// Make it so that 'nodesNum' pods fit perfectly per node (in practice other things take space, so less than that will fit).
+	// Make it so that 'nodesNum' pods fit perfectly per node.
 	nodeCpus := nodes.Items[0].Status.Allocatable[v1.ResourceCPU]
 	nodeCpuMillis := (&nodeCpus).MilliValue()
 	cpuRequestMillis := int64(nodeCpuMillis / nodesNum)
@@ -87,7 +87,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 	// Start the service we want to scale and wait for it to be up and running.
 	nodeMemoryBytes := nodes.Items[0].Status.Allocatable[v1.ResourceMemory]
 	nodeMemoryMB := (&nodeMemoryBytes).Value() / 1024 / 1024
-	memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's total memory.
+	memRequestMB := nodeMemoryMB / 10 // Ensure each pod takes not more than 10% of node's allocatable memory.
 	replicas := 1
 	resourceConsumer := common.NewDynamicResourceConsumer("resource-consumer", common.KindDeployment, replicas, 0, 0, 0, cpuRequestMillis, memRequestMB, f)
 	defer resourceConsumer.CleanUp()
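The two hunks above size each pod's requests from a node's allocatable resources. As a worked example (not taken from the PR), this sketch reproduces the same arithmetic with made-up allocatable values and an assumed nodesNum of 3:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Made-up allocatable values; the test itself reads them from
	// nodes.Items[0].Status.Allocatable, as shown in the hunks above.
	nodeCpus := resource.MustParse("1930m")
	nodeMemoryBytes := resource.MustParse("5960Mi")
	nodesNum := int64(3) // assumed node count, for illustration only

	// Same arithmetic as the test: each pod requests an equal share of one
	// node's allocatable CPU and 10% of its allocatable memory.
	cpuRequestMillis := nodeCpus.MilliValue() / nodesNum
	memRequestMB := nodeMemoryBytes.Value() / 1024 / 1024 / 10

	fmt.Printf("cpuRequestMillis=%d memRequestMB=%d\n", cpuRequestMillis, memRequestMB)
}

With these assumed figures the pods divide a node's allocatable CPU evenly, so basing the division on capacity instead would produce requests that no longer fit once other allocations take their share.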
@@ -477,7 +477,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
 
 	By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
-	ReserveMemory(f, "memory-reservation", 2, int(2.1*float64(memAllocatableMb)), false, defaultTimeout)
+	ReserveMemory(f, "memory-reservation", 2, int(2.5*float64(memAllocatableMb)), false, defaultTimeout)
 	defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
 
 	// Apparently GKE master is restarted couple minutes after the node pool is added
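The last hunk only raises the per-pod reservation factor from 2.1 to 2.5 times memAllocatableMb. The sketch below illustrates the scheduling condition the By(...) step describes, using fabricated per-node allocatable figures; only the 2.5 factor and the pod count come from the diff:

package main

import "fmt"

func main() {
	// Fabricated allocatable memory per node: the default pool uses smaller
	// nodes than the extra pool the test adds; values are illustrative only.
	defaultPoolAllocatableMb := 2700
	extraPoolAllocatableMb := 12000

	// Same sizing as the updated line in the hunk: each of the 2 pods asks
	// for 2.5x a default-pool node's allocatable memory.
	perPodMb := int(2.5 * float64(defaultPoolAllocatableMb))

	fmt.Printf("per-pod request: %d MB\n", perPodMb)
	fmt.Println("fits a default-pool node:", perPodMb <= defaultPoolAllocatableMb) // false
	fmt.Println("fits an extra-pool node: ", perPodMb <= extraPoolAllocatableMb)   // true
}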