mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-10 12:32:03 +00:00)

Enabled Autoscaling e2e test for cpu utilization

commit fb44d5e045 (parent 8dcbebae5e)
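In outline, the commit touches two files: the Jenkins e2e job configuration (shell) and the autoscaling e2e test (Go). It adds "Autoscaling" to the parallel and soak-cluster skip lists, introduces a GCE_RELEASE_SKIP_TESTS list wired into the release job's --ginkgo.skip regex, and enables the cpu-utilization test by dropping its [Skipped] tag and sizing it from the cluster's observed node count and cores per node rather than hard-coded values.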
@@ -102,6 +102,7 @@ GCE_FLAKY_TESTS=(
 # Tests which are not able to be run in parallel.
 GCE_PARALLEL_SKIP_TESTS=(
     ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}}
+    "Autoscaling"
     "Etcd"
     "NetworkingNew"
     "Nodes\sNetwork"
@@ -127,6 +128,7 @@ GCE_PARALLEL_FLAKY_TESTS=(
 
 # Tests that should not run on soak cluster.
 GCE_SOAK_CONTINUOUS_SKIP_TESTS=(
+    "Autoscaling"
     "Density.*30\spods"
     "Elasticsearch"
     "Etcd.*SIGKILL"
@@ -141,6 +143,10 @@ GCE_SOAK_CONTINUOUS_SKIP_TESTS=(
     "Skipped"
 )
 
+GCE_RELEASE_SKIP_TESTS=(
+    "Autoscaling"
+)
+
 # Define environment variables based on the Jenkins project name.
 case ${JOB_NAME} in
   # Runs all non-flaky tests on GCE, sequentially.
@@ -283,6 +289,7 @@ case ${JOB_NAME} in
    : ${E2E_NETWORK:="e2e-gce-release"}
    : ${GINKGO_TEST_ARGS:="--ginkgo.skip=$(join_regex_allow_empty \
          ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
+          ${GCE_RELEASE_SKIP_TESTS[@]:+${GCE_RELEASE_SKIP_TESTS[@]}} \
          ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
          )"}
    : ${KUBE_GCE_INSTANCE_PREFIX="e2e-gce"}
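Two things are worth noting about the shell changes above. A test that resizes the whole cluster cannot share it with concurrently running tests, which is presumably why "Autoscaling" lands in the parallel and soak skip lists. And because the [Skipped] tag is dropped from the cpu-utilization test name below, the release job now relies on the new GCE_RELEASE_SKIP_TESTS list, which join_regex_allow_empty (assumed, from its name and usage here, to OR its non-empty arguments into a single regex) folds into --ginkgo.skip.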
@@ -21,29 +21,43 @@ import (
 	"os/exec"
 	"time"
 
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/fields"
+	"k8s.io/kubernetes/pkg/labels"
+	"k8s.io/kubernetes/pkg/util"
+
 	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
 )
 
 var _ = Describe("Autoscaling", func() {
 	f := NewFramework("autoscaling")
+	var nodeCount int
+	var coresPerNode int
 
 	BeforeEach(func() {
-		// Ensure cluster size is equal to 1.
-		expectNoError(waitForClusterSize(f.Client, 1))
+		SkipUnlessProviderIs("gce")
+
+		nodes, err := f.Client.Nodes().List(labels.Everything(), fields.Everything())
+		expectNoError(err)
+		nodeCount = len(nodes.Items)
+		Expect(nodeCount).NotTo(BeZero())
+		res := nodes.Items[0].Status.Capacity[api.ResourceCPU]
+		coresPerNode = int((&res).MilliValue() / 1000)
 	})
 
 	AfterEach(func() {
 		cleanUpAutoscaler()
 	})
 
-	It("[Skipped] [Autoscaling] should scale cluster size based on cpu utilization", func() {
-		setUpAutoscaler("cpu/node_utilization", 0.8, 1, 10)
+	It("[Autoscaling] should scale cluster size based on cpu utilization", func() {
+		setUpAutoscaler("cpu/node_utilization", 0.7, nodeCount, nodeCount+1)
 
-		ConsumeCpu(f, "cpu-utilization", 1)
-		expectNoError(waitForClusterSize(f.Client, 2))
+		ConsumeCpu(f, "cpu-utilization", nodeCount*coresPerNode)
+		expectNoError(waitForClusterSize(f.Client, nodeCount+1))
 
 		StopConsuming(f, "cpu-utilization")
-		expectNoError(waitForClusterSize(f.Client, 1))
+		expectNoError(waitForClusterSize(f.Client, nodeCount))
 	})
 
 	It("[Skipped] [Autoscaling] should scale cluster size based on cpu reservation", func() {
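The rewritten test's arithmetic: ConsumeCpu saturates all nodeCount*coresPerNode cores, driving per-node CPU utilization toward 1.0, above the 0.7 target, so the autoscaler should grow the cluster to its nodeCount+1 cap; after StopConsuming, utilization falls back below target and the cluster should return to nodeCount. coresPerNode itself comes from the first node's CPU capacity, a resource Quantity whose MilliValue() is in milli-cores, hence the division by 1000. A minimal sketch of that conversion (assuming the pkg/api/resource import path of this era of the tree; the capacity string is a made-up example):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	// Made-up capacity value: a node advertising 3800 milli-cores of CPU.
	q := resource.MustParse("3800m")
	fmt.Println(q.MilliValue())             // 3800
	fmt.Println(int(q.MilliValue() / 1000)) // 3 whole cores, as the test computes
}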
@@ -77,7 +91,7 @@ var _ = Describe("Autoscaling", func() {
 	})
 })
 
-func setUpAutoscaler(metric string, target float64, min, max int64) {
+func setUpAutoscaler(metric string, target float64, min, max int) {
 	// TODO: integrate with the kube-up.sh script once it supports autoscaler setup.
 	By("Setting up autoscaler to scale based on " + metric)
 	_, err := exec.Command("gcloud", "preview", "autoscaler",
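The min and max parameters of setUpAutoscaler switch from int64 to int because they are now fed straight from nodeCount. The scale-up and scale-down assertions, meanwhile, go through a waitForClusterSize helper the diff does not show; below is a sketch of its assumed semantics (the name, interface, poll interval, and timeout are all assumptions, not the framework's actual code), reusing the same List call the test itself makes:

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/labels"
)

// nodeLister is the sliver of the e2e client this sketch needs; the real
// helper presumably takes the framework's client directly.
type nodeLister interface {
	List(label labels.Selector, field fields.Selector) (*api.NodeList, error)
}

// waitForClusterSizeSketch polls the node list until it reaches the expected
// size or the (assumed) timeout elapses.
func waitForClusterSizeSketch(nodes nodeLister, size int, timeout time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
		list, err := nodes.List(labels.Everything(), fields.Everything())
		if err != nil {
			return err
		}
		if len(list.Items) == size {
			return nil
		}
	}
	return fmt.Errorf("cluster did not reach size %d within %v", size, timeout)
}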
@@ -100,7 +114,28 @@ func cleanUpAutoscaler() {
 	expectNoError(err)
 }
 
+func CreateService(f *Framework, name string) {
+	By("Running service " + name)
+	service := &api.Service{
+		ObjectMeta: api.ObjectMeta{
+			Name: name,
+		},
+		Spec: api.ServiceSpec{
+			Selector: map[string]string{
+				"name": name,
+			},
+			Ports: []api.ServicePort{{
+				Port:       8080,
+				TargetPort: util.NewIntOrStringFromInt(8080),
+			}},
+		},
+	}
+	_, err := f.Client.Services(f.Namespace.Name).Create(service)
+	Expect(err).NotTo(HaveOccurred())
+}
+
 func ConsumeCpu(f *Framework, id string, cores int) {
+	CreateService(f, id)
 	By(fmt.Sprintf("Running RC which consumes %v cores", cores))
 	config := &RCConfig{
 		Client: f.Client,

@@ -157,6 +192,9 @@ func ReserveMemory(f *Framework, id string, gigabytes int) {
 }
 
 func StopConsuming(f *Framework, id string) {
+	By("Stopping service " + id)
+	err := f.Client.Services(f.Namespace.Name).Delete(id)
+	Expect(err).NotTo(HaveOccurred())
 	By("Stopping RC " + id)
 	expectNoError(DeleteRC(f.Client, f.Namespace.Name, id))
 }
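Note the symmetry in the helpers above: CreateService fronts the CPU-consuming RC with a Service of the same name (selector name=<id>, port 8080, targetPort built with util.NewIntOrStringFromInt), ConsumeCpu creates it before starting the RC, and StopConsuming deletes the Service before tearing down the RC, so the test leaves no orphaned Service behind.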