Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-22 19:31:44 +00:00)
Merge pull request #80261 from kubernetes/revert-79954-promote-pod-autoscaling

Revert "Promote pod autoscaling"

Commit: 7b25fd4846
Changed files:
  test/conformance/testdata/conformance.txt (vendored)
  test/e2e/autoscaling/horizontal_pod_autoscaling.go
--- a/test/conformance/testdata/conformance.txt
+++ b/test/conformance/testdata/conformance.txt
@@ -38,7 +38,6 @@ test/e2e/apps/statefulset.go: "Burst scaling should run to completion even with
 test/e2e/apps/statefulset.go: "Should recreate evicted statefulset"
 test/e2e/auth/service_accounts.go: "should mount an API token into pods"
 test/e2e/auth/service_accounts.go: "should allow opting out of API token automount"
-test/e2e/autoscaling/horizontal_pod_autoscaling.go: "should scale from 1 pod to 2 pods"
 test/e2e/common/configmap.go: "should be consumable via environment variable"
 test/e2e/common/configmap.go: "should be consumable via the environment"
 test/e2e/common/configmap.go: "should fail to create ConfigMap with empty key"
--- a/test/e2e/autoscaling/horizontal_pod_autoscaling.go
+++ b/test/e2e/autoscaling/horizontal_pod_autoscaling.go
@@ -67,14 +67,7 @@ var _ = SIGDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", fu
 	})
 
 	SIGDescribe("ReplicationController light", func() {
-		/*
-			Release : v1.16
-			Testname: Horizontal Pod Autoscaling, CPU Target Utilization Scale
-			Description: Given 1 Pod with 150mCPU usage, 200mCPU per Pod request, and
-			targeted CPU utilization of 50% the HP Autoscaler MUST create another pod,
-			spreading the 150mCPU usage across both Pods.
-		*/
-		framework.ConformanceIt("should scale from 1 pod to 2 pods", func() {
+		ginkgo.It("Should scale from 1 pod to 2 pods", func() {
 			scaleTest := &HPAScaleTest{
 				initPods:             1,
 				totalInitialCPUUsage: 150,
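For context, framework.ConformanceIt is the e2e framework wrapper that tags a spec as [Conformance], which is what places it in conformance.txt; switching the spec back to ginkgo.It is therefore what drops the test from the conformance list in the first hunk.

Below is a minimal Go sketch, not part of this PR, of the scale-up arithmetic described in the reverted conformance comment. The helper desiredReplicas and its parameter names are illustrative, not the HPA controller's actual code; the formula ceil(currentReplicas * currentUtilization / targetUtilization) is the documented HPA scaling rule.

package main

import (
	"fmt"
	"math"
)

// desiredReplicas applies the HPA ratio rule to the scenario from the reverted
// test description: 1 pod, 150m CPU usage, 200m CPU request per pod, 50% target.
func desiredReplicas(currentReplicas int, totalUsageMilli, requestPerPodMilli, targetUtilizationPercent float64) int {
	// Current utilization averaged across the existing pods, in percent.
	currentUtilization := 100 * totalUsageMilli / (requestPerPodMilli * float64(currentReplicas))
	// ceil(currentReplicas * currentUtilization / targetUtilization)
	return int(math.Ceil(float64(currentReplicas) * currentUtilization / targetUtilizationPercent))
}

func main() {
	// 150m usage against a 200m request is 75% utilization; with a 50% target
	// the autoscaler must add a second pod, spreading the usage across both.
	fmt.Println(desiredReplicas(1, 150, 200, 50)) // prints 2
}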