Added e2e test for cluster autoscaler that verifies host ports.

Jerzy Szczepkowski 2016-05-17 15:07:30 +02:00
parent 1738bbfe5f
commit 905382f7b8
2 changed files with 33 additions and 6 deletions
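
The new test leans on host-port exclusivity: the scheduler will not place two pods that request the same hostPort on one node, so an RC with nodeCount+2 host-port replicas can only be fully scheduled after the autoscaler adds nodes. As a rough sketch (not taken from this commit; it assumes the in-tree pkg/api types of this era, and the helper name hostPortPause is made up), each "host-port" replica created below boils down to a pause pod like this:

package e2e

import "k8s.io/kubernetes/pkg/api"

// hostPortPause sketches the pod that framework.RunRC builds when
// RCConfig.HostPorts is set: a pause container that pins host port 4321,
// so at most one such replica fits on any single node.
func hostPortPause(name string) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: name},
		Spec: api.PodSpec{
			Containers: []api.Container{{
				Name:  name,
				Image: "gcr.io/google_containers/pause-amd64:3.0",
				Ports: []api.ContainerPort{{
					Name:          "port1",
					ContainerPort: 4321,
					HostPort:      4321, // reserved on the node itself
				}},
			}},
		},
	}
}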


@@ -65,11 +65,38 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Feature:ClusterSizeAut
		// Verify that cluster size is increased
		framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount+1, scaleTimeout))
		framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "memory-reservation"))
		// TODO(jsz): Enable code below when scale down is implemented.
		// framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, scaleDownTimeout))
		// TODO(jsz): Disable the line below when scale down is implemented.
		framework.ExpectNoError(ResizeGroup(int32(nodeCount)))
		framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, scaleTimeout))

		By("Handling node port pods")
		CreateHostPortPods(f, "host-port", nodeCount+2, false)
		framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount+2, scaleTimeout))
		framework.ExpectNoError(framework.DeleteRC(f.Client, f.Namespace.Name, "host-port"))

		// TODO(jsz): Disable the line below when scale down is implemented.
		framework.ExpectNoError(ResizeGroup(int32(nodeCount)))
		framework.ExpectNoError(framework.WaitForClusterSize(f.Client, nodeCount, scaleTimeout))
	})
})

func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
	By(fmt.Sprintf("Running RC which reserves host port"))
	config := &framework.RCConfig{
		Client:    f.Client,
		Name:      id,
		Namespace: f.Namespace.Name,
		Timeout:   scaleTimeout,
		Image:     "gcr.io/google_containers/pause-amd64:3.0",
		Replicas:  replicas,
		HostPorts: map[string]int{"port1": 4321},
	}
	err := framework.RunRC(*config)
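	// Note on expectRunning: the host-port caller above passes false, so an
	// error from RunRC (for example, replicas still pending while the cluster
	// is scaling up) is tolerated here; the test asserts on cluster size instead.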
	if expectRunning {
		framework.ExpectNoError(err)
	}
}

func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) {
	By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
	request := int64(millicores / replicas)


@@ -53,7 +53,7 @@ const (
	testPort = 9376
)
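
// ResizeGroup resizes the cluster's node instance group. It is the former
// unexported resizeGroup, renamed to the exported form in this commit; it is
// now also called from the cluster size autoscaling test above to bring the
// cluster back to nodeCount.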
func resizeGroup(size int32) error {
func ResizeGroup(size int32) error {
	if framework.TestContext.ReportDir != "" {
		framework.CoreDump(framework.TestContext.ReportDir)
		defer framework.CoreDump(framework.TestContext.ReportDir)
@@ -371,7 +371,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
			}

			By("restoring the original node instance group size")
			if err := resizeGroup(int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
			if err := ResizeGroup(int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
				framework.Failf("Couldn't restore the original node instance group size: %v", err)
			}
			// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
@@ -410,7 +410,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
			Expect(err).NotTo(HaveOccurred())

			By(fmt.Sprintf("decreasing cluster size to %d", replicas-1))
			err = resizeGroup(replicas - 1)
			err = ResizeGroup(replicas - 1)
			Expect(err).NotTo(HaveOccurred())
			err = waitForGroupSize(replicas - 1)
			Expect(err).NotTo(HaveOccurred())
@@ -434,7 +434,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
			Expect(err).NotTo(HaveOccurred())

			By(fmt.Sprintf("increasing cluster size to %d", replicas+1))
			err = resizeGroup(replicas + 1)
			err = ResizeGroup(replicas + 1)
			Expect(err).NotTo(HaveOccurred())
			err = waitForGroupSize(replicas + 1)
			Expect(err).NotTo(HaveOccurred())