Make HPA controller set HPA status conditions

This commit causes the HPA controller to set a variety of status
conditions using the new `Status.Conditions` field of
autoscaling/v2alpha1.  These provide insight into the current state
of the HPA, and generally correspond to similar events being emitted.
Solly Ross 2017-05-24 17:09:47 -04:00
parent 26ef38fe89
commit 1334b81d20
2 changed files with 546 additions and 22 deletions
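For orientation, and not part of the diff below: each condition the controller sets carries the standard Kubernetes condition fields (Type, Status, Reason, Message, LastTransitionTime). The following is a minimal sketch of how a client might read those conditions back from a fetched HPA, assuming the autoscaling/v2alpha1 types used in this commit; the helper function and placeholder object are illustrative only.

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	autoscalingv2 "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1"
)

// ableToScale reports whether the AbleToScale condition is currently True,
// along with the Reason the controller recorded for it.
func ableToScale(hpa *autoscalingv2.HorizontalPodAutoscaler) (bool, string) {
	for _, cond := range hpa.Status.Conditions {
		if cond.Type == autoscalingv2.AbleToScale {
			return cond.Status == v1.ConditionTrue, cond.Reason
		}
	}
	// No condition recorded yet, e.g. before the controller's first reconcile.
	return false, ""
}

func main() {
	// Placeholder object; in practice the HPA would be fetched from the API server.
	hpa := &autoscalingv2.HorizontalPodAutoscaler{}
	if ok, reason := ableToScale(hpa); !ok {
		fmt.Printf("HPA cannot scale right now (reason: %q)\n", reason)
	}
}
```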


@ -47,13 +47,13 @@ import (
"k8s.io/kubernetes/pkg/controller"
)
const (
var (
// Usage should exceed the tolerance before we start to downscale or upscale the pods.
// TODO: make it a flag or HPA spec element.
tolerance = 0.1
scaleUpLimitFactor = 2
scaleUpLimitMinimum = 4
scaleUpLimitFactor = 2.0
scaleUpLimitMinimum = 4.0
)
func calculateScaleUpLimit(currentReplicas int32) int32 {
@ -220,6 +220,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
if len(scale.Status.Selector) == 0 && len(scale.Status.TargetSelector) == 0 {
errMsg := "selector is required"
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "the HPA target's scale is missing a selector")
return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
}
@ -234,6 +235,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
if err != nil {
errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", errMsg)
return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
}
@ -247,6 +249,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetObjectMetricReplicas(currentReplicas, metricSpec.Object.TargetValue.MilliValue(), metricSpec.Object.MetricName, hpa.Namespace, &metricSpec.Object.Target)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetObjectMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetObjectMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get object metric value: %v", err)
}
metricNameProposal = fmt.Sprintf("%s metric %s", metricSpec.Object.Target.Kind, metricSpec.Object.MetricName)
@ -262,6 +265,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
replicaCountProposal, utilizationProposal, timestampProposal, err = a.replicaCalc.GetMetricReplicas(currentReplicas, metricSpec.Pods.TargetAverageValue.MilliValue(), metricSpec.Pods.MetricName, hpa.Namespace, selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetPodsMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetPodsMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get pods metric value: %v", err)
}
metricNameProposal = fmt.Sprintf("pods metric %s", metricSpec.Pods.MetricName)
@ -278,6 +282,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
replicaCountProposal, rawProposal, timestampProposal, err = a.replicaCalc.GetRawResourceReplicas(currentReplicas, metricSpec.Resource.TargetAverageValue.MilliValue(), metricSpec.Resource.Name, hpa.Namespace, selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetResourceMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetResourceMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
}
metricNameProposal = fmt.Sprintf("%s resource", metricSpec.Resource.Name)
@ -293,6 +298,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
if metricSpec.Resource.TargetAverageUtilization == nil {
errMsg := "invalid resource metric source: neither a utilization target nor a value target was set"
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetResourceMetric", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetResourceMetric", "the HPA was unable to compute the replica count: %s", errMsg)
return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
}
@ -303,6 +309,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
replicaCountProposal, percentageProposal, rawProposal, timestampProposal, err = a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, metricSpec.Resource.Name, hpa.Namespace, selector)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetResourceMetric", err.Error())
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "FailedGetResourceMetric", "the HPA was unable to compute the replica count: %v", err)
return 0, "", nil, time.Time{}, fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
}
metricNameProposal = fmt.Sprintf("%s resource utilization (percentage of request)", metricSpec.Resource.Name)
@ -318,6 +325,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
default:
errMsg := fmt.Sprintf("unknown metric source type %q", string(metricSpec.Type))
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidMetricSourceType", errMsg)
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidMetricSourceType", "the HPA was unable to compute the replica count: %s", errMsg)
return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
}
@ -328,6 +336,7 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
}
}
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionTrue, "ValidMetricFound", "the HPA was able to succesfully calculate a replica count from %s", metric)
return replicas, metric, statuses, timestamp, nil
}
@ -368,8 +377,11 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Kind, hpa.Spec.ScaleTargetRef.Name)
if err != nil {
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
a.update(hpa)
return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
}
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededGetScale", "the HPA controller was able to get the target's current scale")
currentReplicas := scale.Status.Replicas
var metricStatuses []autoscalingv2.MetricStatus
@ -387,6 +399,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
// Autoscaling is disabled for this resource
desiredReplicas = 0
rescale = false
setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "ScalingDisabled", "scaling is disabled since the replica count of the target is zero")
} else if currentReplicas > hpa.Spec.MaxReplicas {
rescaleReason = "Current number of replicas above Spec.MaxReplicas"
desiredReplicas = hpa.Spec.MaxReplicas
@ -419,26 +432,55 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
rescaleReason = "All metrics below target"
}
if hpa.Spec.MinReplicas != nil && desiredReplicas < *hpa.Spec.MinReplicas {
desiredReplicas = *hpa.Spec.MinReplicas
}
// never scale down to 0, reserved for disabling autoscaling
if desiredReplicas == 0 {
desiredReplicas = 1
}
if desiredReplicas > hpa.Spec.MaxReplicas {
desiredReplicas = hpa.Spec.MaxReplicas
}
// Do not upscale too much to prevent incorrect rapid increase of the number of master replicas caused by
// bogus CPU usage report from heapster/kubelet (like in issue #32304).
if scaleUpLimit := calculateScaleUpLimit(currentReplicas); desiredReplicas > scaleUpLimit {
scaleUpLimit := calculateScaleUpLimit(currentReplicas)
switch {
case desiredReplicas > scaleUpLimit:
setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "ScaleUpLimit", "the desired replica count is increasing faster than the maximum scale rate")
desiredReplicas = scaleUpLimit
case desiredReplicas == 0:
// never scale down to 0, reserved for disabling autoscaling
setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooFewReplicas", "the desired replica count was zero")
desiredReplicas = 1
case hpa.Spec.MinReplicas != nil && desiredReplicas < *hpa.Spec.MinReplicas:
// make sure we aren't below our minimum
setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooFewReplicas", "the desired replica count was less than the minimum replica count")
desiredReplicas = *hpa.Spec.MinReplicas
case desiredReplicas > hpa.Spec.MaxReplicas:
// make sure we aren't above our maximum
setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionTrue, "TooManyReplicas", "the desired replica count was more than the maximum replica count")
desiredReplicas = hpa.Spec.MaxReplicas
default:
// mark that we're within acceptable limits
setCondition(hpa, autoscalingv2.ScalingLimited, v1.ConditionFalse, "DesiredWithinRange", "the desired replica count is within the acceptable range")
}
rescale = a.shouldScale(hpa, currentReplicas, desiredReplicas, timestamp)
backoffDown := false
backoffUp := false
if hpa.Status.LastScaleTime != nil {
if !hpa.Status.LastScaleTime.Add(a.downscaleForbiddenWindow).Before(timestamp) {
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "BackoffDownscale", "the time since the previous scale is still within the downscale forbidden window")
backoffDown = true
}
if !hpa.Status.LastScaleTime.Add(a.upscaleForbiddenWindow).Before(timestamp) {
backoffUp = true
if backoffDown {
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "BackoffBoth", "the time since the previous scale is still within both the downscale and upscale forbidden windows")
} else {
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "BackoffUpscale", "the time since the previous scale is still within the upscale forbidden window")
}
}
}
if !backoffDown && !backoffUp {
// mark that we're not backing off
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "ReadyForNewScale", "the last scale time was sufficiently old as to warrant a new scale")
}
}
if rescale {
@ -446,8 +488,11 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleTargetRef.Kind, scale)
if err != nil {
a.eventRecorder.Eventf(hpa, v1.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error())
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedUpdateScale", "the HPA controller was unable to update the target scale: %v", err)
a.updateCurrentReplicasInStatus(hpa, currentReplicas)
return fmt.Errorf("failed to rescale %s: %v", reference, err)
}
setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionTrue, "SucceededRescale", "the HPA controller was able to update the target scale to %d", desiredReplicas)
a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason)
glog.Infof("Successful rescale of %s, old size: %d, new size: %d, reason: %s",
hpa.Name, currentReplicas, desiredReplicas, rescaleReason)
@ -456,7 +501,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
desiredReplicas = currentReplicas
}
return a.updateStatus(hpa, currentReplicas, desiredReplicas, metricStatuses, rescale)
return a.updateStatusWithReplicas(hpa, currentReplicas, desiredReplicas, metricStatuses, rescale)
}
func (a *HorizontalController) shouldScale(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, timestamp time.Time) bool {
@ -484,18 +529,19 @@ func (a *HorizontalController) shouldScale(hpa *autoscalingv2.HorizontalPodAutos
}
func (a *HorizontalController) updateCurrentReplicasInStatus(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas int32) {
err := a.updateStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentMetrics, false)
err := a.updateStatusWithReplicas(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentMetrics, false)
if err != nil {
utilruntime.HandleError(err)
}
}
func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, metricStatuses []autoscalingv2.MetricStatus, rescale bool) error {
func (a *HorizontalController) updateStatusWithReplicas(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int32, metricStatuses []autoscalingv2.MetricStatus, rescale bool) error {
hpa.Status = autoscalingv2.HorizontalPodAutoscalerStatus{
CurrentReplicas: currentReplicas,
DesiredReplicas: desiredReplicas,
LastScaleTime: hpa.Status.LastScaleTime,
CurrentMetrics: metricStatuses,
Conditions: hpa.Status.Conditions,
}
if rescale {
@ -503,6 +549,10 @@ func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAuto
hpa.Status.LastScaleTime = &now
}
return a.update(hpa)
}
func (a *HorizontalController) update(hpa *autoscalingv2.HorizontalPodAutoscaler) error {
// convert back to autoscalingv1
hpaRaw, err := UnsafeConvertToVersionVia(hpa, autoscalingv1.SchemeGroupVersion)
if err != nil {
@ -519,3 +569,42 @@ func (a *HorizontalController) updateStatus(hpa *autoscalingv2.HorizontalPodAuto
glog.V(2).Infof("Successfully updated status for %s", hpa.Name)
return nil
}
// setCondition sets the specific condition type on the given HPA to the specified value with the given reason
// and message. The message and args are treated like a format string. The condition will be added if it is
// not present.
func setCondition(hpa *autoscalingv2.HorizontalPodAutoscaler, conditionType autoscalingv2.HorizontalPodAutoscalerConditionType, status v1.ConditionStatus, reason, message string, args ...interface{}) {
hpa.Status.Conditions = setConditionInList(hpa.Status.Conditions, conditionType, status, reason, message, args...)
}
// setConditionInList sets the specific condition type on the given HPA to the specified value with the given
// reason and message. The message and args are treated like a format string. The condition will be added if
// it is not present. The new list will be returned.
func setConditionInList(inputList []autoscalingv2.HorizontalPodAutoscalerCondition, conditionType autoscalingv2.HorizontalPodAutoscalerConditionType, status v1.ConditionStatus, reason, message string, args ...interface{}) []autoscalingv2.HorizontalPodAutoscalerCondition {
resList := inputList
var existingCond *autoscalingv2.HorizontalPodAutoscalerCondition
for i, condition := range resList {
if condition.Type == conditionType {
// can't take a pointer to an iteration variable
existingCond = &resList[i]
break
}
}
if existingCond == nil {
resList = append(resList, autoscalingv2.HorizontalPodAutoscalerCondition{
Type: conditionType,
})
existingCond = &resList[len(resList)-1]
}
if existingCond.Status != status {
existingCond.LastTransitionTime = metav1.Now()
}
existingCond.Status = status
existingCond.Reason = reason
existingCond.Message = fmt.Sprintf(message, args...)
return resList
}
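A hedged sketch of the update semantics of setConditionInList above, written as if it sat in the same podautoscaler package; the test name and the error value are illustrative only. The helper overwrites an existing condition of the same type in place and only refreshes LastTransitionTime when the status actually changes:

```go
package podautoscaler

import (
	"fmt"
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	autoscalingv2 "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1"
)

func TestSetConditionInListTransitionSketch(t *testing.T) {
	var conds []autoscalingv2.HorizontalPodAutoscalerCondition

	// First write appends the condition; the zero-valued status counts as a
	// change, so LastTransitionTime is stamped.
	conds = setConditionInList(conds, autoscalingv2.AbleToScale, v1.ConditionTrue,
		"ReadyForNewScale", "the last scale time was sufficiently old as to warrant a new scale")
	first := conds[0].LastTransitionTime

	// Same status with a different reason: updated in place, transition time unchanged.
	conds = setConditionInList(conds, autoscalingv2.AbleToScale, v1.ConditionTrue,
		"SucceededRescale", "the HPA controller was able to update the target scale to %d", 4)
	if len(conds) != 1 || !conds[0].LastTransitionTime.Time.Equal(first.Time) {
		t.Errorf("expected an in-place update without a new transition time")
	}

	// Flipping the status refreshes LastTransitionTime via metav1.Now().
	conds = setConditionInList(conds, autoscalingv2.AbleToScale, v1.ConditionFalse,
		"FailedUpdateScale", "the HPA controller was unable to update the target scale: %v",
		fmt.Errorf("conflict"))
	if conds[0].Status != v1.ConditionFalse || conds[0].Reason != "FailedUpdateScale" {
		t.Errorf("expected the existing condition to be overwritten in place")
	}
}
```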


@ -17,6 +17,7 @@ limitations under the License.
package podautoscaler
import (
"encoding/json"
"fmt"
"math"
"sync"
@ -33,6 +34,7 @@ import (
core "k8s.io/client-go/testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/autoscaling"
autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
autoscalingv2 "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
@ -52,6 +54,33 @@ import (
_ "k8s.io/kubernetes/pkg/apis/extensions/install"
)
var statusOk = []autoscalingv2.HorizontalPodAutoscalerCondition{
{Type: autoscalingv2.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
{Type: autoscalingv2.ScalingActive, Status: v1.ConditionTrue, Reason: "ValidMetricFound"},
{Type: autoscalingv2.ScalingLimited, Status: v1.ConditionFalse, Reason: "DesiredWithinRange"},
}
// statusOkWithOverrides returns the "ok" status with the given conditions applied as overrides
func statusOkWithOverrides(overrides ...autoscalingv2.HorizontalPodAutoscalerCondition) []autoscalingv1.HorizontalPodAutoscalerCondition {
resv2 := make([]autoscalingv2.HorizontalPodAutoscalerCondition, len(statusOk))
copy(resv2, statusOk)
for _, override := range overrides {
resv2 = setConditionInList(resv2, override.Type, override.Status, override.Reason, override.Message)
}
// copy to a v1 slice
resv1 := make([]autoscalingv1.HorizontalPodAutoscalerCondition, len(resv2))
for i, cond := range resv2 {
resv1[i] = autoscalingv1.HorizontalPodAutoscalerCondition{
Type: autoscalingv1.HorizontalPodAutoscalerConditionType(cond.Type),
Status: cond.Status,
Reason: cond.Reason,
}
}
return resv1
}
func alwaysReady() bool { return true }
type fakeResource struct {
@ -80,6 +109,7 @@ type testCase struct {
verifyEvents bool
useMetricsApi bool
metricsTarget []autoscalingv2.MetricSpec
expectedConditions []autoscalingv1.HorizontalPodAutoscalerCondition
// Channel with names of HPA objects which we have reconciled.
processed chan string
@ -88,6 +118,11 @@ type testCase struct {
// Last scale time
lastScaleTime *metav1.Time
// override the test clients
testClient *fake.Clientset
testMetricsClient *metricsfake.Clientset
testCMClient *cmfake.FakeCustomMetricsClient
}
// Needs to be called under a lock.
@ -106,6 +141,11 @@ func (tc *testCase) computeCPUCurrent() {
tc.CPUCurrent = int32(100 * reported / requested)
}
func init() {
// set this high so we don't accidentally run into it when testing
scaleUpLimitFactor = 8
}
func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient) {
namespace := "test-namespace"
hpaName := "test-hpa"
@ -159,12 +199,13 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
Status: autoscalingv2.HorizontalPodAutoscalerStatus{
CurrentReplicas: tc.initialReplicas,
DesiredReplicas: tc.initialReplicas,
LastScaleTime: tc.lastScaleTime,
},
},
},
}
if tc.CPUTarget > 0.0 {
if tc.CPUTarget > 0 {
obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
{
Type: autoscalingv2.ResourceMetricSourceType,
@ -350,6 +391,22 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil")
assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be as expected")
}
var actualConditions []autoscalingv1.HorizontalPodAutoscalerCondition
if err := json.Unmarshal([]byte(obj.ObjectMeta.Annotations[autoscaling.HorizontalPodAutoscalerConditionsAnnotation]), &actualConditions); err != nil {
return true, nil, err
}
// TODO: it's ok not to sort these because statusOk
// contains all the conditions, so we'll never be appending.
// Default to statusOk when missing any specific conditions
if tc.expectedConditions == nil {
tc.expectedConditions = statusOkWithOverrides()
}
// clear the message so that we can easily compare
for i := range actualConditions {
actualConditions[i].Message = ""
actualConditions[i].LastTransitionTime = metav1.Time{}
}
assert.Equal(t, tc.expectedConditions, actualConditions, "the status conditions should have been as expected")
tc.statusUpdated = true
// Every time we reconcile HPA object we are updating status.
tc.processed <- obj.Name
@ -483,6 +540,15 @@ func (tc *testCase) verifyResults(t *testing.T) {
func (tc *testCase) runTest(t *testing.T) {
testClient, testMetricsClient, testCMClient := tc.prepareTestClient(t)
if tc.testClient != nil {
testClient = tc.testClient
}
if tc.testMetricsClient != nil {
testMetricsClient = tc.testMetricsClient
}
if tc.testCMClient != nil {
testCMClient = tc.testCMClient
}
metricsClient := metrics.NewRESTMetricsClient(
testMetricsClient.MetricsV1alpha1(),
testCMClient,
@ -595,6 +661,11 @@ func TestScaleUpUnreadyNoScale(t *testing.T) {
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
useMetricsApi: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
Reason: "ReadyForNewScale",
}),
}
tc.runTest(t)
}
@ -703,6 +774,11 @@ func TestScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
reportedLevels: []uint64{50000, 15000, 30000},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
Reason: "ReadyForNewScale",
}),
}
tc.runTest(t)
}
@ -824,6 +900,11 @@ func TestTolerance(t *testing.T) {
reportedLevels: []uint64{1010, 1030, 1020},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
useMetricsApi: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
Reason: "ReadyForNewScale",
}),
}
tc.runTest(t)
}
@ -845,6 +926,11 @@ func TestToleranceCM(t *testing.T) {
},
reportedLevels: []uint64{20000, 20001, 21000},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
Reason: "ReadyForNewScale",
}),
}
tc.runTest(t)
}
@ -871,6 +957,11 @@ func TestToleranceCMObject(t *testing.T) {
},
reportedLevels: []uint64{20050},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
Reason: "ReadyForNewScale",
}),
}
tc.runTest(t)
}
@ -885,6 +976,11 @@ func TestMinReplicas(t *testing.T) {
reportedLevels: []uint64{10, 95, 10},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
useMetricsApi: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.ScalingLimited,
Status: v1.ConditionTrue,
Reason: "TooFewReplicas",
}),
}
tc.runTest(t)
}
@ -899,6 +995,10 @@ func TestZeroReplicas(t *testing.T) {
reportedLevels: []uint64{},
reportedCPURequests: []resource.Quantity{},
useMetricsApi: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "ScalingDisabled"},
},
}
tc.runTest(t)
}
@ -913,6 +1013,9 @@ func TestTooFewReplicas(t *testing.T) {
reportedLevels: []uint64{},
reportedCPURequests: []resource.Quantity{},
useMetricsApi: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
},
}
tc.runTest(t)
}
@ -927,6 +1030,9 @@ func TestTooManyReplicas(t *testing.T) {
reportedLevels: []uint64{},
reportedCPURequests: []resource.Quantity{},
useMetricsApi: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
},
}
tc.runTest(t)
}
@ -941,6 +1047,11 @@ func TestMaxReplicas(t *testing.T) {
reportedLevels: []uint64{8000, 9500, 1000},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
useMetricsApi: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.ScalingLimited,
Status: v1.ConditionTrue,
Reason: "TooManyReplicas",
}),
}
tc.runTest(t)
}
@ -955,6 +1066,11 @@ func TestSuperfluousMetrics(t *testing.T) {
reportedLevels: []uint64{4000, 9500, 3000, 7000, 3200, 2000},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.ScalingLimited,
Status: v1.ConditionTrue,
Reason: "TooManyReplicas",
}),
}
tc.runTest(t)
}
@ -983,6 +1099,10 @@ func TestEmptyMetrics(t *testing.T) {
reportedLevels: []uint64{},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "FailedGetResourceMetric"},
},
}
tc.runTest(t)
}
@ -996,6 +1116,10 @@ func TestEmptyCPURequest(t *testing.T) {
CPUTarget: 100,
reportedLevels: []uint64{200},
useMetricsApi: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: "FailedGetResourceMetric"},
},
}
tc.runTest(t)
}
@ -1026,6 +1150,11 @@ func TestEventNotCreated(t *testing.T) {
reportedCPURequests: []resource.Quantity{resource.MustParse("0.4"), resource.MustParse("0.4")},
verifyEvents: true,
useMetricsApi: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
Reason: "ReadyForNewScale",
}),
}
tc.runTest(t)
}
@ -1049,11 +1178,306 @@ func TestUpscaleCap(t *testing.T) {
minReplicas: 1,
maxReplicas: 100,
initialReplicas: 3,
desiredReplicas: 6,
desiredReplicas: 24,
CPUTarget: 10,
reportedLevels: []uint64{100, 200, 300},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.ScalingLimited,
Status: v1.ConditionTrue,
Reason: "ScaleUpLimit",
}),
}
tc.runTest(t)
}
func TestConditionInvalidSelectorMissing(t *testing.T) {
tc := testCase{
minReplicas: 1,
maxReplicas: 100,
initialReplicas: 3,
desiredReplicas: 3,
CPUTarget: 10,
reportedLevels: []uint64{100, 200, 300},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{
Type: autoscalingv1.AbleToScale,
Status: v1.ConditionTrue,
Reason: "SucceededGetScale",
},
{
Type: autoscalingv1.ScalingActive,
Status: v1.ConditionFalse,
Reason: "InvalidSelector",
},
},
}
testClient, _, _ := tc.prepareTestClient(t)
tc.testClient = testClient
testClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &extensions.Scale{
ObjectMeta: metav1.ObjectMeta{
Name: tc.resource.name,
},
Spec: extensions.ScaleSpec{
Replicas: tc.initialReplicas,
},
Status: extensions.ScaleStatus{
Replicas: tc.initialReplicas,
},
}
return true, obj, nil
})
tc.runTest(t)
}
func TestConditionInvalidSelectorUnparsable(t *testing.T) {
tc := testCase{
minReplicas: 1,
maxReplicas: 100,
initialReplicas: 3,
desiredReplicas: 3,
CPUTarget: 10,
reportedLevels: []uint64{100, 200, 300},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{
Type: autoscalingv1.AbleToScale,
Status: v1.ConditionTrue,
Reason: "SucceededGetScale",
},
{
Type: autoscalingv1.ScalingActive,
Status: v1.ConditionFalse,
Reason: "InvalidSelector",
},
},
}
testClient, _, _ := tc.prepareTestClient(t)
tc.testClient = testClient
testClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &extensions.Scale{
ObjectMeta: metav1.ObjectMeta{
Name: tc.resource.name,
},
Spec: extensions.ScaleSpec{
Replicas: tc.initialReplicas,
},
Status: extensions.ScaleStatus{
Replicas: tc.initialReplicas,
TargetSelector: "cheddar cheese",
},
}
return true, obj, nil
})
tc.runTest(t)
}
func TestConditionFailedGetMetrics(t *testing.T) {
metricsTargets := map[string][]autoscalingv2.MetricSpec{
"FailedGetResourceMetric": nil,
"FailedGetPodsMetric": {
{
Type: autoscalingv2.PodsMetricSourceType,
Pods: &autoscalingv2.PodsMetricSource{
MetricName: "qps",
TargetAverageValue: resource.MustParse("15.0"),
},
},
},
"FailedGetObjectMetric": {
{
Type: autoscalingv2.ObjectMetricSourceType,
Object: &autoscalingv2.ObjectMetricSource{
Target: autoscalingv2.CrossVersionObjectReference{
APIVersion: "extensions/v1beta1",
Kind: "Deployment",
Name: "some-deployment",
},
MetricName: "qps",
TargetValue: resource.MustParse("15.0"),
},
},
},
}
for reason, specs := range metricsTargets {
tc := testCase{
minReplicas: 1,
maxReplicas: 100,
initialReplicas: 3,
desiredReplicas: 3,
CPUTarget: 10,
reportedLevels: []uint64{100, 200, 300},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
}
_, testMetricsClient, testCMClient := tc.prepareTestClient(t)
tc.testMetricsClient = testMetricsClient
tc.testCMClient = testCMClient
testMetricsClient.PrependReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, &metricsapi.PodMetricsList{}, fmt.Errorf("something went wrong!")
})
testCMClient.PrependReactor("get", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, &cmapi.MetricValueList{}, fmt.Errorf("something went wrong!")
})
tc.expectedConditions = []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededGetScale"},
{Type: autoscalingv1.ScalingActive, Status: v1.ConditionFalse, Reason: reason},
}
if specs != nil {
tc.CPUTarget = 0
} else {
tc.CPUTarget = 10
}
tc.metricsTarget = specs
tc.runTest(t)
}
}
func TestConditionInvalidSourceType(t *testing.T) {
tc := testCase{
minReplicas: 2,
maxReplicas: 6,
initialReplicas: 3,
desiredReplicas: 3,
CPUTarget: 0,
metricsTarget: []autoscalingv2.MetricSpec{
{
Type: "CheddarCheese",
},
},
reportedLevels: []uint64{20000},
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{
Type: autoscalingv1.AbleToScale,
Status: v1.ConditionTrue,
Reason: "SucceededGetScale",
},
{
Type: autoscalingv1.ScalingActive,
Status: v1.ConditionFalse,
Reason: "InvalidMetricSourceType",
},
},
}
tc.runTest(t)
}
func TestConditionFailedGetScale(t *testing.T) {
tc := testCase{
minReplicas: 1,
maxReplicas: 100,
initialReplicas: 3,
desiredReplicas: 3,
CPUTarget: 10,
reportedLevels: []uint64{100, 200, 300},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{
Type: autoscalingv1.AbleToScale,
Status: v1.ConditionFalse,
Reason: "FailedGetScale",
},
},
}
testClient, _, _ := tc.prepareTestClient(t)
tc.testClient = testClient
testClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, &extensions.Scale{}, fmt.Errorf("something went wrong!")
})
tc.runTest(t)
}
func TestConditionFailedUpdateScale(t *testing.T) {
tc := testCase{
minReplicas: 1,
maxReplicas: 5,
initialReplicas: 3,
desiredReplicas: 3,
CPUTarget: 100,
reportedLevels: []uint64{150, 150, 150},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionFalse,
Reason: "FailedUpdateScale",
}),
}
testClient, _, _ := tc.prepareTestClient(t)
tc.testClient = testClient
testClient.PrependReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
return true, &extensions.Scale{}, fmt.Errorf("something went wrong!")
})
tc.runTest(t)
}
func TestBackoffUpscale(t *testing.T) {
time := metav1.Time{Time: time.Now()}
tc := testCase{
minReplicas: 1,
maxReplicas: 5,
initialReplicas: 3,
desiredReplicas: 3,
CPUTarget: 100,
reportedLevels: []uint64{150, 150, 150},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
lastScaleTime: &time,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
Reason: "ReadyForNewScale",
}, autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionFalse,
Reason: "BackoffBoth",
}),
}
tc.runTest(t)
}
func TestBackoffDownscale(t *testing.T) {
time := metav1.Time{Time: time.Now().Add(-4 * time.Minute)}
tc := testCase{
minReplicas: 1,
maxReplicas: 5,
initialReplicas: 4,
desiredReplicas: 4,
CPUTarget: 100,
reportedLevels: []uint64{50, 50, 50},
reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
useMetricsApi: true,
lastScaleTime: &time,
expectedConditions: statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
Reason: "ReadyForNewScale",
}, autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionFalse,
Reason: "BackoffDownscale",
}),
}
tc.runTest(t)
}
@ -1124,6 +1548,11 @@ func TestComputedToleranceAlgImplementation(t *testing.T) {
tc.CPUTarget = finalCpuPercentTarget
tc.initialReplicas = startPods
tc.desiredReplicas = startPods
tc.expectedConditions = statusOkWithOverrides(autoscalingv2.HorizontalPodAutoscalerCondition{
Type: autoscalingv2.AbleToScale,
Status: v1.ConditionTrue,
Reason: "ReadyForNewScale",
})
tc.runTest(t)
}
@ -1139,6 +1568,9 @@ func TestScaleUpRCImmediately(t *testing.T) {
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
lastScaleTime: &time,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
},
}
tc.runTest(t)
}
@ -1155,6 +1587,9 @@ func TestScaleDownRCImmediately(t *testing.T) {
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
useMetricsApi: true,
lastScaleTime: &time,
expectedConditions: []autoscalingv1.HorizontalPodAutoscalerCondition{
{Type: autoscalingv1.AbleToScale, Status: v1.ConditionTrue, Reason: "SucceededRescale"},
},
}
tc.runTest(t)
}