WIP: use e2eskipper package in test/e2e/autoscaling
commit c402a4bf65
parent 387e6931e5
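
The change itself is mechanical: each skip helper previously called on the framework package is now called on the dedicated e2eskipper package, and every affected file gains the corresponding import (plus the matching Bazel dependency in the BUILD hunk below). A minimal sketch of the pattern, assembled only from identifiers that appear in the hunks; the surrounding test skeleton is illustrative and not code from the repository:

package autoscaling

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/kubernetes/test/e2e/framework"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"

	"github.com/onsi/ginkgo"
)

var _ = framework.KubeDescribe("skipper usage sketch", func() {
	f := framework.NewDefaultFramework("autoscaling")

	ginkgo.BeforeEach(func() {
		// Previously framework.SkipUnlessProviderIs("gce", "gke").
		e2eskipper.SkipUnlessProviderIs("gce", "gke")

		// Previously framework.Skipf(...): skip when the Cluster Autoscaler ConfigMap is absent.
		_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
		if err != nil {
			e2eskipper.Skipf("test expects Cluster Autoscaler to be enabled")
		}
	})
})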
@@ -45,6 +45,7 @@ go_library(
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/pv:go_default_library",
         "//test/e2e/framework/rc:go_default_library",
+        "//test/e2e/framework/skipper:go_default_library",
         "//test/e2e/instrumentation/monitoring:go_default_library",
         "//test/e2e/scheduling:go_default_library",
         "//test/utils:go_default_library",
@@ -25,6 +25,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2eautoscaling "k8s.io/kubernetes/test/e2e/framework/autoscaling"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"

 	"github.com/onsi/ginkgo"
 )
@@ -37,7 +38,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 		// Check if Cloud Autoscaler is enabled by trying to get its ConfigMap.
 		_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
 		if err != nil {
-			framework.Skipf("test expects Cluster Autoscaler to be enabled")
+			e2eskipper.Skipf("test expects Cluster Autoscaler to be enabled")
 		}
 	})

@@ -50,7 +51,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 		// Make sure there is only 1 node group, otherwise this test becomes useless.
 		nodeGroups := strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",")
 		if len(nodeGroups) != 1 {
-			framework.Skipf("test expects 1 node group, found %d", len(nodeGroups))
+			e2eskipper.Skipf("test expects 1 node group, found %d", len(nodeGroups))
 		}
 		nodeGroupName = nodeGroups[0]

@@ -58,7 +59,7 @@ var _ = SIGDescribe("[Feature:ClusterSizeAutoscalingScaleUp] [Slow] Autoscaling"
 		nodeGroupSize, err := framework.GroupSize(nodeGroupName)
 		framework.ExpectNoError(err)
 		if nodeGroupSize != nodesNum {
-			framework.Skipf("test expects %d nodes, found %d", nodesNum, nodeGroupSize)
+			e2eskipper.Skipf("test expects %d nodes, found %d", nodesNum, nodeGroupSize)
 		}

 		// Make sure all nodes are schedulable, otherwise we are in some kind of a problem state.
@@ -33,6 +33,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"

@@ -67,12 +68,12 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 	var sum int

 	ginkgo.BeforeEach(func() {
-		framework.SkipUnlessProviderIs("gce", "gke", "kubemark")
+		e2eskipper.SkipUnlessProviderIs("gce", "gke", "kubemark")

 		// Check if Cloud Autoscaler is enabled by trying to get its ConfigMap.
 		_, err := f.ClientSet.CoreV1().ConfigMaps("kube-system").Get("cluster-autoscaler-status", metav1.GetOptions{})
 		if err != nil {
-			framework.Skipf("test expects Cluster Autoscaler to be enabled")
+			e2eskipper.Skipf("test expects Cluster Autoscaler to be enabled")
 		}

 		c = f.ClientSet
@@ -48,6 +48,7 @@ import (
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/scheduling"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -98,7 +99,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

 	ginkgo.BeforeEach(func() {
 		c = f.ClientSet
-		framework.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce", "gke")

 		originalSizes = make(map[string]int)
 		sum := 0
@@ -138,7 +139,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})

 	ginkgo.AfterEach(func() {
-		framework.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce", "gke")
 		ginkgo.By(fmt.Sprintf("Restoring initial size of the cluster"))
 		setMigSizes(originalSizes)
 		expectedNodes := 0
@@ -210,7 +211,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	gpuType := os.Getenv("TESTED_GPU_TYPE")

 	ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
-		framework.SkipUnlessProviderIs("gke")
+		e2eskipper.SkipUnlessProviderIs("gke")
 		if gpuType == "" {
 			framework.Failf("TEST_GPU_TYPE not defined")
 			return
@@ -237,7 +238,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})

 	ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
-		framework.SkipUnlessProviderIs("gke")
+		e2eskipper.SkipUnlessProviderIs("gke")
 		if gpuType == "" {
 			framework.Failf("TEST_GPU_TYPE not defined")
 			return
@@ -267,7 +268,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})

 	ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
-		framework.SkipUnlessProviderIs("gke")
+		e2eskipper.SkipUnlessProviderIs("gke")
 		if gpuType == "" {
 			framework.Failf("TEST_GPU_TYPE not defined")
 			return
@@ -296,7 +297,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})

 	ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
-		framework.SkipUnlessProviderIs("gke")
+		e2eskipper.SkipUnlessProviderIs("gke")
 		if gpuType == "" {
 			framework.Failf("TEST_GPU_TYPE not defined")
 			return
@@ -371,7 +372,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})

 	ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
-		framework.SkipUnlessProviderIs("gke")
+		e2eskipper.SkipUnlessProviderIs("gke")

 		ginkgo.By("Creating new node-pool with n1-standard-4 machines")
 		const extraPoolName = "extra-pool"
@@ -405,7 +406,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})

 	ginkgo.It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func() {
-		framework.SkipUnlessProviderIs("gke")
+		e2eskipper.SkipUnlessProviderIs("gke")

 		ginkgo.By("Creating new node-pool with n1-standard-4 machines")
 		const extraPoolName = "extra-pool"
@@ -467,7 +468,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})

 	ginkgo.It("should increase cluster size if pod requesting volume is pending [Feature:ClusterSizeAutoscalingScaleUp]", func() {
-		framework.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce", "gke")

 		volumeLabels := labels.Set{
 			e2epv.VolumeSelectorKey: f.Namespace.Name,
@@ -639,7 +640,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	})

 	ginkgo.It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
-		framework.SkipUnlessProviderIs("gke")
+		e2eskipper.SkipUnlessProviderIs("gke")

 		ginkgo.By("Creating new node-pool with n1-standard-4 machines")
 		const extraPoolName = "extra-pool"
@@ -689,12 +690,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {

 	ginkgo.It("should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown]",
 		func() {
-			framework.SkipUnlessSSHKeyPresent()
+			e2eskipper.SkipUnlessSSHKeyPresent()
 			e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleDownTest(1) })
 		})

 	ginkgo.It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func() {
-		framework.SkipUnlessProviderIs("gke")
+		e2eskipper.SkipUnlessProviderIs("gke")

 		increasedSize := manuallyIncreaseClusterSize(f, originalSizes)

@@ -761,7 +762,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 			defer disableAutoscaler(extraPoolName, 0, 1)
 		} else {
 			// on GCE, run only if there are already at least 2 node groups
-			framework.SkipUnlessAtLeast(len(originalSizes), 2, "At least 2 node groups are needed for scale-to-0 tests")
+			e2eskipper.SkipUnlessAtLeast(len(originalSizes), 2, "At least 2 node groups are needed for scale-to-0 tests")

			ginkgo.By("Manually scale smallest node group to 0")
			minMig := ""
@@ -877,12 +878,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		} else if len(originalSizes) >= 2 {
 			gceScaleToZero()
 		} else {
-			framework.Skipf("At least 2 node groups are needed for scale-to-0 tests")
+			e2eskipper.Skipf("At least 2 node groups are needed for scale-to-0 tests")
 		}
 	})

 	ginkgo.It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func() {
-		framework.SkipUnlessSSHKeyPresent()
+		e2eskipper.SkipUnlessSSHKeyPresent()

 		clusterSize := nodeCount
 		for clusterSize < unhealthyClusterThreshold+1 {
@@ -30,6 +30,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/instrumentation/monitoring"

 	"github.com/onsi/ginkgo"
@@ -45,7 +46,7 @@ const (

 var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: Custom Metrics from Stackdriver)", func() {
 	ginkgo.BeforeEach(func() {
-		framework.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce", "gke")
 	})

 	f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
@@ -31,6 +31,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"

 	"github.com/onsi/ginkgo"
 )
@@ -53,7 +54,7 @@ var _ = SIGDescribe("DNS horizontal autoscaling", func() {
 	var DNSParams3 DNSParamsLinear

 	ginkgo.BeforeEach(func() {
-		framework.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce", "gke")
 		c = f.ClientSet

 		nodes, err := e2enode.GetReadySchedulableNodes(c)