Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-24 20:24:09 +00:00)
Run expendable pods tests on GKE
parent 23111ad414
commit 5d051efc30
@@ -906,8 +906,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 
 	It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
-		// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
-		framework.SkipUnlessProviderIs("gce")
 		defer createPriorityClasses(f)()
 		// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
 		cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName)
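The reservation size passed to ReserveMemoryWithPriority above is the same arithmetic every test in this diff uses: nodeCount+1 pods, each sized to 0.7 of one node's allocatable memory. A minimal standalone sketch with assumed numbers (nodeCount and memAllocatableMb here are illustrative, not values from a real cluster) shows why the extra pod cannot fit on the existing nodes:

// Worked instance of the sizing above; nodeCount and memAllocatableMb are assumed values.
package main

import "fmt"

func main() {
	nodeCount := 3           // assumed number of nodes before the test
	memAllocatableMb := 7500 // assumed allocatable memory per node, in MB

	pods := nodeCount + 1
	totalMb := int(float64(pods) * float64(0.7) * float64(memAllocatableMb)) // 21000
	perPodMb := totalMb / pods                                               // 5250, i.e. 0.7 of one node

	// Two such pods would need 1.4x a node's allocatable, so each node fits only one:
	// nodeCount nodes hold nodeCount pods and the extra pod stays pending. Whether the
	// autoscaler then adds a node is exactly what the expendable vs. high-priority tests check.
	fmt.Printf("pods=%d totalMb=%d perPodMb=%d\n", pods, totalMb, perPodMb)
}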
@@ -920,8 +918,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 
 	It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
-		// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
-		framework.SkipUnlessProviderIs("gce")
 		defer createPriorityClasses(f)()
 		// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
 		cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
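Each test installs the two priority classes up front via `defer createPriorityClasses(f)()`. That helper is not part of this diff; the sketch below is an assumed shape for it, written against plain client-go: create an expendable (low value) and a high PriorityClass and hand back a cleanup function. The class names, priority values, and clientset wiring are illustrative assumptions, not the framework's actual code.

// Sketch only: an assumed shape for a createPriorityClasses-style helper.
package main

import (
	"context"

	schedulingv1 "k8s.io/api/scheduling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createPriorityClassesSketch creates one expendable and one high PriorityClass and
// returns a cleanup function, so callers can write `defer createPriorityClassesSketch(...)()`.
func createPriorityClassesSketch(ctx context.Context, cs kubernetes.Interface) func() {
	classes := map[string]int32{
		"expendable-priority": -10,  // assumed: low enough for the autoscaler's priority cutoff to ignore
		"high-priority":       1000, // assumed: pods the autoscaler must make room for
	}
	for name, value := range classes {
		pc := &schedulingv1.PriorityClass{
			ObjectMeta: metav1.ObjectMeta{Name: name},
			Value:      value,
		}
		_, _ = cs.SchedulingV1().PriorityClasses().Create(ctx, pc, metav1.CreateOptions{})
	}
	return func() {
		for name := range classes {
			_ = cs.SchedulingV1().PriorityClasses().Delete(ctx, name, metav1.DeleteOptions{})
		}
	}
}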
@@ -932,8 +928,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 
 	It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func() {
-		// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
-		framework.SkipUnlessProviderIs("gce")
 		defer createPriorityClasses(f)()
 		// Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node.
 		cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName)
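What the preemption test above relies on is that every reserved pod carries a priorityClassName: that field is what the scheduler preempts on and what the autoscaler's priority cutoff filters on. ReserveMemoryWithPriority itself is not shown in this diff; the sketch below is a hedged guess at the kind of pod such a reservation creates, where the pause image, names, and request size are assumptions.

// Sketch only: an assumed per-pod shape for a memory reservation, not the framework helper.
package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// reservationPod builds a do-nothing pod that requests memMb of memory and runs at the
// given priority class (e.g. the expendable or the high-priority class from the tests).
func reservationPod(name, priorityClass string, memMb int64) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: corev1.PodSpec{
			PriorityClassName: priorityClass,
			Containers: []corev1.Container{{
				Name:  "memory-reservation",
				Image: "registry.k8s.io/pause:3.9", // assumed image; any idle container works
				Resources: corev1.ResourceRequirements{
					Requests: corev1.ResourceList{
						corev1.ResourceMemory: *resource.NewQuantity(memMb*1024*1024, resource.BinarySI),
					},
				},
			}},
		},
	}
}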
@@ -946,8 +940,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 
 	It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
-		// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
-		framework.SkipUnlessProviderIs("gce")
 		defer createPriorityClasses(f)()
 		increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
 		// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
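The double call in `defer createPriorityClasses(f)()` is easy to misread: the helper runs immediately, and only the cleanup function it returns is deferred. A minimal standalone illustration of that Go pattern, with printed messages standing in for the real setup and teardown:

package main

import "fmt"

// setup runs right away and hands back the matching teardown.
func setup() func() {
	fmt.Println("setup: priority classes created")
	return func() { fmt.Println("teardown: priority classes deleted") }
}

func main() {
	defer setup()() // setup() is evaluated now; the returned func runs when main returns
	fmt.Println("test body")
}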
@@ -959,8 +951,6 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 
 	It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
-		// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
-		framework.SkipUnlessProviderIs("gce")
 		defer createPriorityClasses(f)()
 		increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
 		// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
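The line every hunk deletes, `framework.SkipUnlessProviderIs("gce")`, is an allow-list gate: the test is skipped unless the configured cloud provider matches one of the listed names. Dropping it, together with the TODO above it, stops restricting these tests to GCE, which is how this commit lets them run on GKE. Below is a standalone sketch of that gating logic only; the skip callback and provider lookup are stubs for illustration, not the e2e framework's plumbing.

// Sketch only: the allow-list semantics of a SkipUnlessProviderIs-style gate.
package main

import "fmt"

// skipUnlessProviderIs calls skip unless current is one of the supported providers.
func skipUnlessProviderIs(current string, skip func(format string, args ...interface{}), supported ...string) {
	for _, p := range supported {
		if p == current {
			return
		}
	}
	skip("Only supported for providers %v (not %s)", supported, current)
}

func main() {
	markSkipped := func(format string, args ...interface{}) { fmt.Printf("SKIP: "+format+"\n", args...) }

	// With the old gate, a gke run is skipped; with the gate removed, it simply proceeds.
	skipUnlessProviderIs("gke", markSkipped, "gce")
}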