Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-27 13:37:30 +00:00)

Merge pull request #14275 from socaa/autoscaling

Auto commit by PR queue bot

Commit 09cf38e9a8
@@ -24,7 +24,6 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
-	"k8s.io/kubernetes/pkg/util"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
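Note: the "k8s.io/kubernetes/pkg/util" import can be dropped because its only visible use in this file, util.NewIntOrStringFromInt inside the CreateService helper, is deleted further down in this commit.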
@@ -54,12 +53,14 @@ var _ = Describe("Autoscaling", func() {
 	})
 
 	It("[Skipped][Autoscaling Suite] should scale cluster size based on cpu utilization", func() {
-		setUpAutoscaler("cpu/node_utilization", 0.7, nodeCount, nodeCount+1)
+		setUpAutoscaler("cpu/node_utilization", 0.5, nodeCount, nodeCount+1)
 
-		ConsumeCpu(f, "cpu-utilization", nodeCount*coresPerNode)
+		// Consume 60% CPU
+		millicoresPerReplica := 600
+		rc := NewStaticResourceConsumer("rc", nodeCount*coresPerNode, millicoresPerReplica*nodeCount*coresPerNode, 0, int64(millicoresPerReplica), 100, f)
 		expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))
 
-		StopConsuming(f, "cpu-utilization")
+		rc.CleanUp()
 		expectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))
 	})
 
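For context on the sizing in this hunk: one replica is started per core and each replica requests 600 millicores, so the total request is 60% of cluster CPU capacity, above the 0.5 target now passed to setUpAutoscaler. A minimal standalone sketch of that arithmetic (the concrete nodeCount and coresPerNode values below are hypothetical; the real test reads them from the cluster):

package main

import "fmt"

func main() {
	// Hypothetical cluster shape; the e2e test obtains these at runtime.
	nodeCount, coresPerNode := 3, 4

	millicoresPerReplica := 600
	replicas := nodeCount * coresPerNode
	totalMillicores := millicoresPerReplica * replicas

	// Each core corresponds to 1000 millicores of capacity.
	capacityMillicores := nodeCount * coresPerNode * 1000
	fmt.Printf("replicas=%d requested=%dm capacity=%dm utilization=%.0f%%\n",
		replicas, totalMillicores, capacityMillicores,
		100*float64(totalMillicores)/float64(capacityMillicores))
	// Prints utilization=60%, above the 0.5 scale-up threshold.
}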
@@ -76,12 +77,12 @@ var _ = Describe("Autoscaling", func() {
 	It("[Skipped][Autoscaling Suite] should scale cluster size based on memory utilization", func() {
 		setUpAutoscaler("memory/node_utilization", 0.5, nodeCount, nodeCount+1)
 
-		// Consume 60% of total memory capacity in 256MB chunks.
-		chunks := memCapacityMb * nodeCount * 6 / 10 / 256
-		ConsumeMemory(f, "memory-utilization", chunks)
+		// Consume 60% of total memory capacity
+		megabytesPerReplica := int(memCapacityMb * 6 / 10 / coresPerNode)
+		rc := NewStaticResourceConsumer("rc", nodeCount*coresPerNode, 0, megabytesPerReplica*nodeCount*coresPerNode, 100, int64(megabytesPerReplica+100), f)
 		expectNoError(waitForClusterSize(f.Client, nodeCount+1, 20*time.Minute))
 
-		StopConsuming(f, "memory-utilization")
+		rc.CleanUp()
 		expectNoError(waitForClusterSize(f.Client, nodeCount, 20*time.Minute))
 	})
 
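The memory test follows the same pattern: memCapacityMb is the per-node capacity (the old code multiplied it by nodeCount to get the cluster total), each replica consumes 60% of a core's share of node memory, and one replica per core again adds up to roughly 60% of cluster memory. A minimal standalone sketch (the concrete nodeCount, coresPerNode, and memCapacityMb values are hypothetical):

package main

import "fmt"

func main() {
	// Hypothetical cluster shape; the e2e test obtains these at runtime.
	nodeCount, coresPerNode := 3, 4
	memCapacityMb := 15 * 1024 // per-node memory capacity in MB

	megabytesPerReplica := memCapacityMb * 6 / 10 / coresPerNode
	replicas := nodeCount * coresPerNode
	totalMb := megabytesPerReplica * replicas

	clusterCapacityMb := memCapacityMb * nodeCount
	fmt.Printf("replicas=%d consumed=%dMB capacity=%dMB utilization=%.0f%%\n",
		replicas, totalMb, clusterCapacityMb,
		100*float64(totalMb)/float64(clusterCapacityMb))
	// Prints utilization=60%, above the 0.5 memory/node_utilization target.
}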
@@ -120,59 +121,6 @@ func cleanUpAutoscaler() {
 	expectNoError(err, "Output: "+string(out))
 }
 
-func CreateService(f *Framework, name string) {
-	By("Running sevice" + name)
-	service := &api.Service{
-		ObjectMeta: api.ObjectMeta{
-			Name: name,
-		},
-		Spec: api.ServiceSpec{
-			Selector: map[string]string{
-				"name": name,
-			},
-			Ports: []api.ServicePort{{
-				Port:       8080,
-				TargetPort: util.NewIntOrStringFromInt(8080),
-			}},
-		},
-	}
-	_, err := f.Client.Services(f.Namespace.Name).Create(service)
-	Expect(err).NotTo(HaveOccurred())
-}
-
-func ConsumeCpu(f *Framework, id string, cores int) {
-	CreateService(f, id)
-	By(fmt.Sprintf("Running RC which consumes %v cores", cores))
-	config := &RCConfig{
-		Client:     f.Client,
-		Name:       id,
-		Namespace:  f.Namespace.Name,
-		Timeout:    10 * time.Minute,
-		Image:      "jess/stress",
-		Command:    []string{"stress", "-c", "1"},
-		Replicas:   cores,
-		CpuRequest: 500,
-		CpuLimit:   1000,
-	}
-	expectNoError(RunRC(*config))
-}
-
-// Consume <chunks> chunks of size 256MB.
-func ConsumeMemory(f *Framework, id string, chunks int) {
-	CreateService(f, id)
-	By(fmt.Sprintf("Running RC which consumes %v MB of memory in 256MB chunks", chunks*256))
-	config := &RCConfig{
-		Client:    f.Client,
-		Name:      id,
-		Namespace: f.Namespace.Name,
-		Timeout:   10 * time.Minute,
-		Image:     "jess/stress",
-		Command:   []string{"stress", "-m", "1", "--vm-hang", "0"},
-		Replicas:  chunks,
-	}
-	expectNoError(RunRC(*config))
-}
-
 func ReserveCpu(f *Framework, id string, millicores int) {
 	By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
 	config := &RCConfig{
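With this hunk the CreateService, ConsumeCpu, and ConsumeMemory helpers built around the "jess/stress" image are gone entirely; both tests now drive load through the object returned by NewStaticResourceConsumer (presumably the ResourceConsumer utility shared with the horizontal pod autoscaling e2e tests) and tear it down with rc.CleanUp(), so no stress RCs or per-test services are created directly in this file anymore.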