Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-01 07:47:56 +00:00)
Merge pull request #43983 from mwielgus/autoscaling_e2e_dir
Automatic merge from submit-queue. Move the autoscaling e2e tests to a separate directory, for fine-grained access control: the autoscaling team is expanding the e2e test coverage, and needing an approval for every PR is annoying. cc: @MaciekPytel @jszczepkowski @fgrzadkowski @wojtek-t
This commit is contained in commit ff40d8b408.
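Note (not part of the diff): the sketch below illustrates the layout this PR enables — a spec living in the new test/e2e/autoscaling package driving node groups through the helpers the PR promotes into the framework package. The spec name and test body are hypothetical; the helper signatures match the test/e2e/framework/size.go added below.

// Hypothetical spec in test/e2e/autoscaling; only the framework helpers
// (GroupSize, ResizeGroup, WaitForGroupSize) are taken from this PR.
package autoscaling

import (
	. "github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.KubeDescribe("Example node-group resize [Slow]", func() {
	It("grows the configured instance group by one node", func() {
		group := framework.TestContext.CloudConfig.NodeInstanceGroup
		size, err := framework.GroupSize(group)
		framework.ExpectNoError(err)

		framework.ExpectNoError(framework.ResizeGroup(group, int32(size+1)))
		framework.ExpectNoError(framework.WaitForGroupSize(group, int32(size+1)))
	})
})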
test/e2e/BUILD

@@ -14,7 +14,6 @@ go_library(
         "addon_update.go",
         "apparmor.go",
         "cadvisor.go",
-        "cluster_size_autoscaling.go",
         "cluster_upgrade.go",
         "cronjob.go",
         "daemon_restart.go",
@@ -23,7 +22,6 @@ go_library(
         "deployment.go",
         "disruption.go",
         "dns.go",
-        "dns_autoscaling.go",
         "dns_common.go",
         "dns_configmap.go",
         "e2e.go",
@@ -39,7 +37,6 @@ go_library(
         "gke_local_ssd.go",
         "gke_node_pools.go",
         "ha_master.go",
-        "horizontal_pod_autoscaling.go",
         "ingress.go",
         "initial_resources.go",
         "job.go",
@@ -115,7 +112,6 @@ go_library(
         "//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
         "//pkg/client/clientset_generated/internalclientset:go_default_library",
         "//pkg/cloudprovider:go_default_library",
-        "//pkg/cloudprovider/providers/aws:go_default_library",
         "//pkg/cloudprovider/providers/gce:go_default_library",
         "//pkg/cloudprovider/providers/vsphere:go_default_library",
         "//pkg/controller:go_default_library",
@@ -152,7 +148,6 @@ go_library(
         "//test/utils:go_default_library",
         "//vendor:github.com/aws/aws-sdk-go/aws",
         "//vendor:github.com/aws/aws-sdk-go/aws/session",
-        "//vendor:github.com/aws/aws-sdk-go/service/autoscaling",
         "//vendor:github.com/aws/aws-sdk-go/service/ec2",
         "//vendor:github.com/elazarl/goproxy",
         "//vendor:github.com/ghodss/yaml",
@@ -214,6 +209,7 @@ go_test(
         "//pkg/api/v1:go_default_library",
         "//pkg/client/clientset_generated/clientset:go_default_library",
         "//pkg/metrics:go_default_library",
+        "//test/e2e/autoscaling:go_default_library",
         "//test/e2e/cluster-logging:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/perf:go_default_library",
@@ -250,6 +246,7 @@ filegroup(
     name = "all-srcs",
     srcs = [
         ":package-srcs",
+        "//test/e2e/autoscaling:all-srcs",
         "//test/e2e/chaosmonkey:all-srcs",
         "//test/e2e/cluster-logging:all-srcs",
         "//test/e2e/common:all-srcs",
test/e2e/autoscaling/BUILD (new file, 52 lines)

@@ -0,0 +1,52 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = [
        "cluster_size_autoscaling.go",
        "dns_autoscaling.go",
        "horizontal_pod_autoscaling.go",
    ],
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/clientset:go_default_library",
        "//test/e2e/common:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/scheduling:go_default_library",
        "//test/utils:go_default_library",
        "//vendor:github.com/golang/glog",
        "//vendor:github.com/onsi/ginkgo",
        "//vendor:github.com/onsi/gomega",
        "//vendor:k8s.io/apimachinery/pkg/api/errors",
        "//vendor:k8s.io/apimachinery/pkg/api/resource",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/fields",
        "//vendor:k8s.io/apimachinery/pkg/labels",
        "//vendor:k8s.io/apimachinery/pkg/util/intstr",
        "//vendor:k8s.io/apimachinery/pkg/util/sets",
        "//vendor:k8s.io/apimachinery/pkg/util/uuid",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
        "//vendor:k8s.io/client-go/pkg/apis/policy/v1beta1",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
test/e2e/autoscaling/OWNERS (new file, 8 lines)

@@ -0,0 +1,8 @@
reviewers:
- mwielgus
- jszczepkowski
- MaciekPytel
approvers:
- mwielgus
- jszczepkowski
- MaciekPytel
cluster_size_autoscaling.go (moved from test/e2e/ to test/e2e/autoscaling/)

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package e2e
+package autoscaling

 import (
 	"bytes"
@@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 		originalSizes = make(map[string]int)
 		sum := 0
 		for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
-			size, err := GroupSize(mig)
+			size, err := framework.GroupSize(mig)
 			framework.ExpectNoError(err)
 			By(fmt.Sprintf("Initial size of %s: %d", mig, size))
 			originalSizes[mig] = size
@@ -190,7 +190,8 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 	})

 	It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
-		labels := map[string]string{"cluster-autoscaling-test.special-node": "true"}
+		labelKey := "cluster-autoscaling-test.special-node"
+		labelValue := "true"

 		By("Finding the smallest MIG")
 		minMig := ""
@@ -204,25 +205,30 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {

 		removeLabels := func(nodesToClean sets.String) {
 			By("Removing labels from nodes")
-			updateNodeLabels(c, nodesToClean, nil, labels)
+			for node := range nodesToClean {
+				framework.RemoveLabelOffNode(c, node, labelKey)
+			}
 		}

-		nodes, err := GetGroupNodes(minMig)
+		nodes, err := framework.GetGroupNodes(minMig)
 		framework.ExpectNoError(err)
 		nodesSet := sets.NewString(nodes...)
 		defer removeLabels(nodesSet)
 		By(fmt.Sprintf("Annotating nodes of the smallest MIG(%s): %v", minMig, nodes))
-		updateNodeLabels(c, nodesSet, labels, nil)
-
-		CreateNodeSelectorPods(f, "node-selector", minSize+1, labels, false)
+		for node := range nodesSet {
+			framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
+		}
+
+		CreateNodeSelectorPods(f, "node-selector", minSize+1, map[string]string{labelKey: labelValue}, false)

 		By("Waiting for new node to appear and annotating it")
-		WaitForGroupSize(minMig, int32(minSize+1))
+		framework.WaitForGroupSize(minMig, int32(minSize+1))
 		// Verify, that cluster size is increased
 		framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
 			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))

-		newNodes, err := GetGroupNodes(minMig)
+		newNodes, err := framework.GetGroupNodes(minMig)
 		framework.ExpectNoError(err)
 		newNodesSet := sets.NewString(newNodes...)
 		newNodesSet.Delete(nodes...)
@@ -264,7 +270,10 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 			}
 		}
 		By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List()))
-		updateNodeLabels(c, registeredNodes, labels, nil)
+		for node := range registeredNodes {
+			framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
+		}
+
 		defer removeLabels(registeredNodes)

 		framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
@@ -645,11 +654,11 @@ func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface) error {

 func setMigSizes(sizes map[string]int) {
 	for mig, desiredSize := range sizes {
-		currentSize, err := GroupSize(mig)
+		currentSize, err := framework.GroupSize(mig)
 		framework.ExpectNoError(err)
 		if desiredSize != currentSize {
 			By(fmt.Sprintf("Setting size of %s to %d", mig, desiredSize))
-			err = ResizeGroup(mig, int32(desiredSize))
+			err = framework.ResizeGroup(mig, int32(desiredSize))
 			framework.ExpectNoError(err)
 		}
 	}
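Note (not part of the diff): the hunks above swap the old package-local updateNodeLabels helper for the framework's per-node label calls. A minimal, hypothetical sketch of that add-then-deferred-cleanup pattern, using only the framework functions referenced above (the label key and value here are placeholders):

// Hypothetical helper, not code from the repository.
package autoscaling

import (
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	"k8s.io/kubernetes/test/e2e/framework"
)

// labelNodesForTest labels every node in the set and returns a cleanup func
// for the caller to defer, mirroring the `defer removeLabels(...)` call above.
func labelNodesForTest(c clientset.Interface, nodes sets.String) func() {
	const labelKey, labelValue = "example-test/special-node", "true"
	for node := range nodes {
		framework.AddOrUpdateLabelOnNode(c, node, labelKey, labelValue)
	}
	return func() {
		for node := range nodes {
			framework.RemoveLabelOffNode(c, node, labelKey)
		}
	}
}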
dns_autoscaling.go (moved from test/e2e/ to test/e2e/autoscaling/)

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package e2e
+package autoscaling

 import (
 	"fmt"
@@ -119,7 +119,7 @@ var _ = framework.KubeDescribe("DNS horizontal autoscaling", func() {
 		originalSizes := make(map[string]int)
 		sum := 0
 		for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
-			size, err := GroupSize(mig)
+			size, err := framework.GroupSize(mig)
 			Expect(err).NotTo(HaveOccurred())
 			By(fmt.Sprintf("Initial size of %s: %d", mig, size))
 			originalSizes[mig] = size
horizontal_pod_autoscaling.go (moved from test/e2e/ to test/e2e/autoscaling/)

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package e2e
+package autoscaling

 import (
 	"time"
test/e2e/e2e_test.go

@@ -19,6 +19,7 @@ package e2e
 import (
 	"testing"

+	_ "k8s.io/kubernetes/test/e2e/autoscaling"
 	_ "k8s.io/kubernetes/test/e2e/cluster-logging"
 	"k8s.io/kubernetes/test/e2e/framework"
 	_ "k8s.io/kubernetes/test/e2e/perf"
test/e2e/framework/BUILD

@@ -30,6 +30,7 @@ go_library(
         "pv_util.go",
         "resource_usage_gatherer.go",
         "service_util.go",
+        "size.go",
         "statefulset_utils.go",
         "test_context.go",
         "upgrade_util.go",
@@ -88,6 +89,7 @@ go_library(
         "//vendor:github.com/aws/aws-sdk-go/aws",
         "//vendor:github.com/aws/aws-sdk-go/aws/awserr",
         "//vendor:github.com/aws/aws-sdk-go/aws/session",
+        "//vendor:github.com/aws/aws-sdk-go/service/autoscaling",
         "//vendor:github.com/aws/aws-sdk-go/service/ec2",
         "//vendor:github.com/golang/glog",
         "//vendor:github.com/google/cadvisor/info/v1",
test/e2e/framework/size.go (new file, 128 lines)

@@ -0,0 +1,128 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"fmt"
	"os/exec"
	"regexp"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/autoscaling"
	awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
)

const (
	serveHostnameImage        = "gcr.io/google_containers/serve_hostname:v1.4"
	resizeNodeReadyTimeout    = 2 * time.Minute
	resizeNodeNotReadyTimeout = 2 * time.Minute
	nodeReadinessTimeout      = 3 * time.Minute
	podNotReadyTimeout        = 1 * time.Minute
	podReadyTimeout           = 2 * time.Minute
	testPort                  = 9376
)

func ResizeGroup(group string, size int32) error {
	if TestContext.ReportDir != "" {
		CoreDump(TestContext.ReportDir)
		defer CoreDump(TestContext.ReportDir)
	}
	if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
		// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
		output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "resize",
			group, fmt.Sprintf("--size=%v", size),
			"--project="+TestContext.CloudConfig.ProjectID, "--zone="+TestContext.CloudConfig.Zone).CombinedOutput()
		if err != nil {
			Logf("Failed to resize node instance group: %v", string(output))
		}
		return err
	} else if TestContext.Provider == "aws" {
		client := autoscaling.New(session.New())
		return awscloud.ResizeInstanceGroup(client, group, int(size))
	} else {
		return fmt.Errorf("Provider does not support InstanceGroups")
	}
}

func GetGroupNodes(group string) ([]string, error) {
	if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
		// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
		output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
			"list-instances", group, "--project="+TestContext.CloudConfig.ProjectID,
			"--zone="+TestContext.CloudConfig.Zone).CombinedOutput()
		if err != nil {
			return nil, err
		}
		re := regexp.MustCompile(".*RUNNING")
		lines := re.FindAllString(string(output), -1)
		for i, line := range lines {
			lines[i] = line[:strings.Index(line, " ")]
		}
		return lines, nil
	} else {
		return nil, fmt.Errorf("provider does not support InstanceGroups")
	}
}

func GroupSize(group string) (int, error) {
	if TestContext.Provider == "gce" || TestContext.Provider == "gke" {
		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
		// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
		output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
			"list-instances", group, "--project="+TestContext.CloudConfig.ProjectID,
			"--zone="+TestContext.CloudConfig.Zone).CombinedOutput()
		if err != nil {
			return -1, err
		}
		re := regexp.MustCompile("RUNNING")
		return len(re.FindAllString(string(output), -1)), nil
	} else if TestContext.Provider == "aws" {
		client := autoscaling.New(session.New())
		instanceGroup, err := awscloud.DescribeInstanceGroup(client, group)
		if err != nil {
			return -1, fmt.Errorf("error describing instance group: %v", err)
		}
		if instanceGroup == nil {
			return -1, fmt.Errorf("instance group not found: %s", group)
		}
		return instanceGroup.CurrentSize()
	} else {
		return -1, fmt.Errorf("provider does not support InstanceGroups")
	}
}

func WaitForGroupSize(group string, size int32) error {
	timeout := 30 * time.Minute
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		currentSize, err := GroupSize(group)
		if err != nil {
			Logf("Failed to get node instance group size: %v", err)
			continue
		}
		if currentSize != int(size) {
			Logf("Waiting for node instance group size %d, current size %d", size, currentSize)
			continue
		}
		Logf("Node instance group has reached the desired size %d", size)
		return nil
	}
	return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size)
}
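Note (not part of the diff): WaitForGroupSize above hand-rolls its polling loop. An equivalent formulation, sketched here only for comparison, could lean on wait.Poll from k8s.io/apimachinery/pkg/util/wait, which the e2e packages already depend on; the helper name below is hypothetical.

// Hypothetical reformulation of WaitForGroupSize using wait.Poll. The
// condition func swallows transient errors by returning (false, nil) so
// polling continues, just like the continue statements in the loop above.
package framework

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func waitForGroupSizePoll(group string, size int32) error {
	err := wait.Poll(20*time.Second, 30*time.Minute, func() (bool, error) {
		currentSize, err := GroupSize(group)
		if err != nil {
			Logf("Failed to get node instance group size: %v", err)
			return false, nil
		}
		if currentSize != int(size) {
			Logf("Waiting for node instance group size %d, current size %d", size, currentSize)
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return fmt.Errorf("node instance group %q did not reach size %d: %v", group, size, err)
	}
	return nil
}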
test/e2e/pd.go

@@ -413,7 +413,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
 	It("should be able to detach from a node which was deleted [Slow] [Disruptive] [Volume]", func() {
 		framework.SkipUnlessProviderIs("gce")

-		initialGroupSize, err := GroupSize(framework.TestContext.CloudConfig.NodeInstanceGroup)
+		initialGroupSize, err := framework.GroupSize(framework.TestContext.CloudConfig.NodeInstanceGroup)
 		framework.ExpectNoError(err, "Error getting group size")

 		By("Creating a pd")
@@ -461,13 +461,13 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
 		// The disk should be detached from host0 on it's deletion
 		By("Waiting for pd to detach from host0")
 		waitForPDDetach(diskName, host0Name)
-		framework.ExpectNoError(WaitForGroupSize(framework.TestContext.CloudConfig.NodeInstanceGroup, int32(initialGroupSize)), "Unable to get back the cluster to inital size")
+		framework.ExpectNoError(framework.WaitForGroupSize(framework.TestContext.CloudConfig.NodeInstanceGroup, int32(initialGroupSize)), "Unable to get back the cluster to inital size")
 		return
 	})

 	It("should be able to detach from a node whose api object was deleted [Slow] [Disruptive] [Volume]", func() {
 		framework.SkipUnlessProviderIs("gce")
-		initialGroupSize, err := GroupSize(framework.TestContext.CloudConfig.NodeInstanceGroup)
+		initialGroupSize, err := framework.GroupSize(framework.TestContext.CloudConfig.NodeInstanceGroup)
 		framework.ExpectNoError(err, "Error getting group size")
 		By("Creating a pd")
 		diskName, err := framework.CreatePDWithRetry()
@@ -484,7 +484,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
 			// need to set the resource version or else the Create() fails
 			_, err := nodeClient.Create(nodeToDelete)
 			framework.ExpectNoError(err, "Unable to re-create the deleted node")
-			framework.ExpectNoError(WaitForGroupSize(framework.TestContext.CloudConfig.NodeInstanceGroup, int32(initialGroupSize)), "Unable to get the node group back to the original size")
+			framework.ExpectNoError(framework.WaitForGroupSize(framework.TestContext.CloudConfig.NodeInstanceGroup, int32(initialGroupSize)), "Unable to get the node group back to the original size")
 			framework.WaitForNodeToBeReady(f.ClientSet, nodeToDelete.Name, nodeStatusTimeout)
 			if len(nodes.Items) != originalCount {
 				return fmt.Errorf("The node count is not back to original count")
test/e2e/resize_nodes.go

@@ -18,8 +18,6 @@ package e2e

 import (
 	"fmt"
-	"os/exec"
-	"regexp"
 	"strings"
 	"time"

@@ -29,11 +27,8 @@ import (
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/test/e2e/framework"

-	"github.com/aws/aws-sdk-go/aws/session"
-	"github.com/aws/aws-sdk-go/service/autoscaling"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
-	awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
 )

@@ -46,95 +41,6 @@ const (
 	testPort = 9376
 )

-func ResizeGroup(group string, size int32) error {
-	if framework.TestContext.ReportDir != "" {
-		framework.CoreDump(framework.TestContext.ReportDir)
-		defer framework.CoreDump(framework.TestContext.ReportDir)
-	}
-	if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
-		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
-		// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
-		output, err := exec.Command("gcloud", "compute", "instance-groups", "managed", "resize",
-			group, fmt.Sprintf("--size=%v", size),
-			"--project="+framework.TestContext.CloudConfig.ProjectID, "--zone="+framework.TestContext.CloudConfig.Zone).CombinedOutput()
-		if err != nil {
-			framework.Logf("Failed to resize node instance group: %v", string(output))
-		}
-		return err
-	} else if framework.TestContext.Provider == "aws" {
-		client := autoscaling.New(session.New())
-		return awscloud.ResizeInstanceGroup(client, group, int(size))
-	} else {
-		return fmt.Errorf("Provider does not support InstanceGroups")
-	}
-}
-
-func GetGroupNodes(group string) ([]string, error) {
-	if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
-		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
-		// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
-		output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
-			"list-instances", group, "--project="+framework.TestContext.CloudConfig.ProjectID,
-			"--zone="+framework.TestContext.CloudConfig.Zone).CombinedOutput()
-		if err != nil {
-			return nil, err
-		}
-		re := regexp.MustCompile(".*RUNNING")
-		lines := re.FindAllString(string(output), -1)
-		for i, line := range lines {
-			lines[i] = line[:strings.Index(line, " ")]
-		}
-		return lines, nil
-	} else {
-		return nil, fmt.Errorf("provider does not support InstanceGroups")
-	}
-}
-
-func GroupSize(group string) (int, error) {
-	if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
-		// TODO: make this hit the compute API directly instead of shelling out to gcloud.
-		// TODO: make gce/gke implement InstanceGroups, so we can eliminate the per-provider logic
-		output, err := exec.Command("gcloud", "compute", "instance-groups", "managed",
-			"list-instances", group, "--project="+framework.TestContext.CloudConfig.ProjectID,
-			"--zone="+framework.TestContext.CloudConfig.Zone).CombinedOutput()
-		if err != nil {
-			return -1, err
-		}
-		re := regexp.MustCompile("RUNNING")
-		return len(re.FindAllString(string(output), -1)), nil
-	} else if framework.TestContext.Provider == "aws" {
-		client := autoscaling.New(session.New())
-		instanceGroup, err := awscloud.DescribeInstanceGroup(client, group)
-		if err != nil {
-			return -1, fmt.Errorf("error describing instance group: %v", err)
-		}
-		if instanceGroup == nil {
-			return -1, fmt.Errorf("instance group not found: %s", group)
-		}
-		return instanceGroup.CurrentSize()
-	} else {
-		return -1, fmt.Errorf("provider does not support InstanceGroups")
-	}
-}
-
-func WaitForGroupSize(group string, size int32) error {
-	timeout := 30 * time.Minute
-	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
-		currentSize, err := GroupSize(group)
-		if err != nil {
-			framework.Logf("Failed to get node instance group size: %v", err)
-			continue
-		}
-		if currentSize != int(size) {
-			framework.Logf("Waiting for node instance group size %d, current size %d", size, currentSize)
-			continue
-		}
-		framework.Logf("Node instance group has reached the desired size %d", size)
-		return nil
-	}
-	return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size)
-}
-
 func svcByName(name string, port int) *v1.Service {
 	return &v1.Service{
 		ObjectMeta: metav1.ObjectMeta{
@@ -213,7 +119,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
 		}

 		By("restoring the original node instance group size")
-		if err := ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
+		if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 			framework.Failf("Couldn't restore the original node instance group size: %v", err)
 		}
 		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
@@ -228,7 +134,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
 			By("waiting 5 minutes for all dead tunnels to be dropped")
 			time.Sleep(5 * time.Minute)
 		}
-		if err := WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
+		if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 			framework.Failf("Couldn't restore the original node instance group size: %v", err)
 		}
 		if err := framework.WaitForClusterSize(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
@@ -253,9 +159,9 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
 			Expect(err).NotTo(HaveOccurred())

 			By(fmt.Sprintf("decreasing cluster size to %d", replicas-1))
-			err = ResizeGroup(group, replicas-1)
+			err = framework.ResizeGroup(group, replicas-1)
 			Expect(err).NotTo(HaveOccurred())
-			err = WaitForGroupSize(group, replicas-1)
+			err = framework.WaitForGroupSize(group, replicas-1)
 			Expect(err).NotTo(HaveOccurred())
 			err = framework.WaitForClusterSize(c, int(replicas-1), 10*time.Minute)
 			Expect(err).NotTo(HaveOccurred())
@@ -281,9 +187,9 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
 			Expect(err).NotTo(HaveOccurred())

 			By(fmt.Sprintf("increasing cluster size to %d", replicas+1))
-			err = ResizeGroup(group, replicas+1)
+			err = framework.ResizeGroup(group, replicas+1)
 			Expect(err).NotTo(HaveOccurred())
-			err = WaitForGroupSize(group, replicas+1)
+			err = framework.WaitForGroupSize(group, replicas+1)
 			Expect(err).NotTo(HaveOccurred())
 			err = framework.WaitForClusterSize(c, int(replicas+1), 10*time.Minute)
 			Expect(err).NotTo(HaveOccurred())