diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go
index 8efd3653003..0d5f60dc919 100644
--- a/test/e2e/autoscaling/cluster_size_autoscaling.go
+++ b/test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -866,6 +866,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 
 	It("shouldn't scale up when expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+		// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
+		framework.SkipUnlessProviderIs("gce")
 		defer createPriorityClasses(f)()
 		// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
 		cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), false, time.Second, expendablePriorityClassName)
@@ -878,6 +880,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 
 	It("should scale up when non expendable pod is created [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+		// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
+		framework.SkipUnlessProviderIs("gce")
 		defer createPriorityClasses(f)()
 		// Create nodesCountAfterResize+1 pods allocating 0.7 allocatable on present nodes. One more node will have to be created.
 		cleanupFunc := ReserveMemoryWithPriority(f, "memory-reservation", nodeCount+1, int(float64(nodeCount+1)*float64(0.7)*float64(memAllocatableMb)), true, scaleUpTimeout, highPriorityClassName)
@@ -888,6 +892,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 
 	It("shouldn't scale up when expendable pod is preempted [Feature:ClusterSizeAutoscalingScaleUp]", func() {
+		// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
+		framework.SkipUnlessProviderIs("gce")
 		defer createPriorityClasses(f)()
 		// Create nodesCountAfterResize pods allocating 0.7 allocatable on present nodes - one pod per node.
 		cleanupFunc1 := ReserveMemoryWithPriority(f, "memory-reservation1", nodeCount, int(float64(nodeCount)*float64(0.7)*float64(memAllocatableMb)), true, defaultTimeout, expendablePriorityClassName)
@@ -900,6 +906,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 
 	It("should scale down when expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
+		// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
+		framework.SkipUnlessProviderIs("gce")
 		defer createPriorityClasses(f)()
 		increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
 		// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.
@@ -911,6 +919,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})
 
 	It("shouldn't scale down when non expendable pod is running [Feature:ClusterSizeAutoscalingScaleDown]", func() {
+		// TODO(krzysztof_jastrzebski): Start running this test on GKE when Pod Priority and Preemption is in beta.
+		framework.SkipUnlessProviderIs("gce")
 		defer createPriorityClasses(f)()
 		increasedSize := manuallyIncreaseClusterSize(f, originalSizes)
 		// Create increasedSize pods allocating 0.7 allocatable on present nodes - one pod per node.