Make HPA controller use polymorphic scale client

This updates the HPA controller to use the polymorphic scale client from
client-go.  This should enable HPAs to work with arbitrary scalable
resources, instead of just those in the extensions API group (meaning we
can deprecate the copy of ReplicationController in extensions/v1beta1).
It also means that the HPA controller now pays attention to the
APIVersion field in `scaleTargetRef` (more specifically, the group part
of it).
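
To make the "polymorphic" part concrete, here is a minimal sketch (not part of this
commit; `bumpReplicas` is a hypothetical helper) of the Get/Update cycle the scale
client exposes. Only a group-resource is needed; the concrete kind (Deployment,
ReplicationController, something custom) never appears in the code:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	scaleclient "k8s.io/client-go/scale"
)

// bumpReplicas resizes any scalable resource through its /scale subresource,
// using the same Scales(...).Get/Update calls this commit adopts below.
func bumpReplicas(scales scaleclient.ScalesGetter, namespace, name string, gr schema.GroupResource, replicas int32) error {
	scale, err := scales.Scales(namespace).Get(gr, name)
	if err != nil {
		return fmt.Errorf("unable to fetch scale for %s %s/%s: %v", gr.String(), namespace, name, err)
	}
	scale.Spec.Replicas = replicas
	if _, err := scales.Scales(namespace).Update(gr, scale); err != nil {
		return fmt.Errorf("unable to rescale %s %s/%s: %v", gr.String(), namespace, name, err)
	}
	return nil
}
```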

Note that, currently, discovery information about which resources are available
in which API groups is only fetched once (the first time it's requested).  In
the future, we may want a refreshing discovery REST mapper.
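
As a sketch of why that matters, this condenses the wiring the commit adds in
autoscaling.go below (the `newStaticRESTMapper` name is hypothetical): the deferred
mapper reads through an in-memory discovery cache, so resources that appear in the
cluster later stay invisible until someone calls `Reset()` again:

```go
package main

import (
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/discovery"
	discocache "k8s.io/client-go/discovery/cached"
	"k8s.io/client-go/kubernetes"
)

// newStaticRESTMapper mirrors the controller-manager wiring below: discovery
// results are served from an in-memory cache that is primed once and never
// refreshed afterwards.
func newStaticRESTMapper(client kubernetes.Interface) apimeta.RESTMapper {
	cachedDiscovery := discocache.NewMemCacheClient(client.Discovery())
	restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscovery, apimeta.InterfacesForUnstructured)
	restMapper.Reset() // fetch discovery data once; nothing re-fetches it later
	return restMapper
}
```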
Author: Solly Ross
Date:   2017-10-11 14:31:04 -04:00
parent 3f51babdd1
commit d2b41120ea
7 changed files with 344 additions and 264 deletions


@@ -124,6 +124,7 @@ go_library(
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
+        "//vendor/k8s.io/client-go/scale:go_default_library",
         "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
         "//vendor/k8s.io/client-go/tools/leaderelection:go_default_library",
         "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",


@@ -21,7 +21,12 @@ limitations under the License.
 package app
 
 import (
+    apimeta "k8s.io/apimachinery/pkg/api/meta"
     "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/client-go/discovery"
+    discocache "k8s.io/client-go/discovery/cached" // Saturday Night Fever
+    "k8s.io/client-go/dynamic"
+    "k8s.io/client-go/scale"
     "k8s.io/kubernetes/pkg/controller/podautoscaler"
     "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
     resourceclient "k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1"
@@ -63,16 +68,33 @@ func startHPAControllerWithLegacyClient(ctx ControllerContext) (bool, error) {
 }
 
 func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient metrics.MetricsClient) (bool, error) {
+    hpaClientGoClient := ctx.ClientBuilder.ClientGoClientOrDie("horizontal-pod-autoscaler")
     hpaClient := ctx.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
+    hpaClientConfig := ctx.ClientBuilder.ConfigOrDie("horizontal-pod-autoscaler")
+
+    // TODO: we need something like deferred discovery REST mapper that calls invalidate
+    // on cache misses.
+    cachedDiscovery := discocache.NewMemCacheClient(hpaClientGoClient.Discovery())
+    restMapper := discovery.NewDeferredDiscoveryRESTMapper(cachedDiscovery, apimeta.InterfacesForUnstructured)
+    restMapper.Reset()
+    // we don't use cached discovery because DiscoveryScaleKindResolver does its own caching,
+    // so we want to re-fetch every time when we actually ask for it
+    scaleKindResolver := scale.NewDiscoveryScaleKindResolver(hpaClientGoClient.Discovery())
+    scaleClient, err := scale.NewForConfig(hpaClientConfig, restMapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver)
+    if err != nil {
+        return false, err
+    }
+
     replicaCalc := podautoscaler.NewReplicaCalculator(
         metricsClient,
         hpaClient.Core(),
         ctx.Options.HorizontalPodAutoscalerTolerance,
     )
     go podautoscaler.NewHorizontalController(
-        ctx.ClientBuilder.ClientGoClientOrDie("horizontal-pod-autoscaler").Core(),
-        hpaClient.Extensions(),
+        hpaClientGoClient.Core(),
+        scaleClient,
         hpaClient.Autoscaling(),
+        restMapper,
         replicaCalc,
         ctx.InformerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
         ctx.Options.HorizontalPodAutoscalerSyncPeriod.Duration,


@@ -12,7 +12,6 @@ go_library(
     deps = [
         "//pkg/apis/extensions:go_default_library",
         "//vendor/github.com/google/gofuzz:go_default_library",
-        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
     ],


@@ -24,9 +24,9 @@ go_library(
         "//vendor/k8s.io/api/autoscaling/v1:go_default_library",
         "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -39,8 +39,8 @@ go_library(
         "//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/client-go/listers/autoscaling/v1:go_default_library",
+        "//vendor/k8s.io/client-go/scale:go_default_library",
         "//vendor/k8s.io/client-go/tools/cache:go_default_library",
         "//vendor/k8s.io/client-go/tools/record:go_default_library",
         "//vendor/k8s.io/client-go/util/workqueue:go_default_library",
@@ -58,6 +58,7 @@ go_test(
     importpath = "k8s.io/kubernetes/pkg/controller/podautoscaler",
     library = ":go_default_library",
     deps = [
+        "//pkg/api/install:go_default_library",
         "//pkg/api/legacyscheme:go_default_library",
         "//pkg/apis/autoscaling:go_default_library",
         "//pkg/apis/autoscaling/install:go_default_library",
@@ -69,15 +70,16 @@ go_test(
         "//vendor/k8s.io/api/autoscaling/v1:go_default_library",
         "//vendor/k8s.io/api/autoscaling/v2beta1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
-        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//vendor/k8s.io/client-go/informers:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
+        "//vendor/k8s.io/client-go/scale/fake:go_default_library",
         "//vendor/k8s.io/client-go/testing:go_default_library",
         "//vendor/k8s.io/heapster/metrics/api/v1/types:go_default_library",
         "//vendor/k8s.io/metrics/pkg/apis/custom_metrics/v1beta1:go_default_library",


@@ -25,9 +25,9 @@ import (
     autoscalingv1 "k8s.io/api/autoscaling/v1"
     autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
     "k8s.io/api/core/v1"
-    extensions "k8s.io/api/extensions/v1beta1"
     apiequality "k8s.io/apimachinery/pkg/api/equality"
     "k8s.io/apimachinery/pkg/api/errors"
+    apimeta "k8s.io/apimachinery/pkg/api/meta"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
@@ -39,8 +39,8 @@ import (
     "k8s.io/client-go/kubernetes/scheme"
     autoscalingclient "k8s.io/client-go/kubernetes/typed/autoscaling/v1"
     v1core "k8s.io/client-go/kubernetes/typed/core/v1"
-    extensionsclient "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
     autoscalinglisters "k8s.io/client-go/listers/autoscaling/v1"
+    scaleclient "k8s.io/client-go/scale"
     "k8s.io/client-go/tools/cache"
     "k8s.io/client-go/tools/record"
     "k8s.io/client-go/util/workqueue"
@@ -57,8 +57,9 @@ var (
 // in the system with the actual deployments/replication controllers they
 // control.
 type HorizontalController struct {
-    scaleNamespacer extensionsclient.ScalesGetter
+    scaleNamespacer scaleclient.ScalesGetter
     hpaNamespacer   autoscalingclient.HorizontalPodAutoscalersGetter
+    mapper          apimeta.RESTMapper
 
     replicaCalc   *ReplicaCalculator
     eventRecorder record.EventRecorder
@@ -78,8 +79,9 @@ type HorizontalController struct {
 // NewHorizontalController creates a new HorizontalController.
 func NewHorizontalController(
     evtNamespacer v1core.EventsGetter,
-    scaleNamespacer extensionsclient.ScalesGetter,
+    scaleNamespacer scaleclient.ScalesGetter,
     hpaNamespacer autoscalingclient.HorizontalPodAutoscalersGetter,
+    mapper apimeta.RESTMapper,
     replicaCalc *ReplicaCalculator,
     hpaInformer autoscalinginformers.HorizontalPodAutoscalerInformer,
    resyncPeriod time.Duration,
@@ -99,7 +101,8 @@ func NewHorizontalController(
         hpaNamespacer:            hpaNamespacer,
         upscaleForbiddenWindow:   upscaleForbiddenWindow,
         downscaleForbiddenWindow: downscaleForbiddenWindow,
         queue:                    workqueue.NewNamedRateLimitingQueue(NewDefaultHPARateLimiter(resyncPeriod), "horizontalpodautoscaler"),
+        mapper:                   mapper,
     }
 
     hpaInformer.Informer().AddEventHandlerWithResyncPeriod(
@@ -189,7 +192,7 @@ func (a *HorizontalController) processNextWorkItem() bool {
 // Computes the desired number of replicas for the metric specifications listed in the HPA, returning the maximum
 // of the computed replica counts, a description of the associated metric, and the statuses of all metrics
 // computed.
-func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.HorizontalPodAutoscaler, scale *extensions.Scale,
+func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.HorizontalPodAutoscaler, scale *autoscalingv1.Scale,
     metricSpecs []autoscalingv2.MetricSpec) (replicas int32, metric string, statuses []autoscalingv2.MetricStatus, timestamp time.Time, err error) {
 
     currentReplicas := scale.Status.Replicas
@@ -197,21 +200,14 @@ func (a *HorizontalController) computeReplicasForMetrics(hpa *autoscalingv2.Hori
     statuses = make([]autoscalingv2.MetricStatus, len(metricSpecs))
 
     for i, metricSpec := range metricSpecs {
-        if len(scale.Status.Selector) == 0 && len(scale.Status.TargetSelector) == 0 {
+        if scale.Status.Selector == "" {
             errMsg := "selector is required"
             a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
             setCondition(hpa, autoscalingv2.ScalingActive, v1.ConditionFalse, "InvalidSelector", "the HPA target's scale is missing a selector")
             return 0, "", nil, time.Time{}, fmt.Errorf(errMsg)
         }
 
-        var selector labels.Selector
-        var err error
-        if len(scale.Status.Selector) > 0 {
-            selector = labels.SelectorFromSet(labels.Set(scale.Status.Selector))
-            err = nil
-        } else {
-            selector, err = labels.Parse(scale.Status.TargetSelector)
-        }
+        selector, err := labels.Parse(scale.Status.Selector)
         if err != nil {
             errMsg := fmt.Sprintf("couldn't convert selector into a corresponding internal selector object: %v", err)
             a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
@@ -349,7 +345,28 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
     reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleTargetRef.Kind, hpa.Namespace, hpa.Spec.ScaleTargetRef.Name)
 
-    scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Kind, hpa.Spec.ScaleTargetRef.Name)
+    targetGV, err := schema.ParseGroupVersion(hpa.Spec.ScaleTargetRef.APIVersion)
+    if err != nil {
+        a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
+        setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
+        a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
+        return fmt.Errorf("invalid API version in scale target reference: %v", err)
+    }
+
+    targetGK := schema.GroupKind{
+        Group: targetGV.Group,
+        Kind:  hpa.Spec.ScaleTargetRef.Kind,
+    }
+
+    mappings, err := a.mapper.RESTMappings(targetGK)
+    if err != nil {
+        a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
+        setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
+        a.updateStatusIfNeeded(hpaStatusOriginal, hpa)
+        return fmt.Errorf("unable to determine resource for scale target reference: %v", err)
+    }
+
+    scale, targetGR, err := a.scaleForResourceMappings(hpa.Namespace, hpa.Spec.ScaleTargetRef.Name, mappings)
     if err != nil {
         a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
         setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedGetScale", "the HPA controller was unable to get the target's current scale: %v", err)
@@ -479,7 +496,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpav1Shared *autoscalingv1.Ho
     if rescale {
         scale.Spec.Replicas = desiredReplicas
-        _, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleTargetRef.Kind, scale)
+        _, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(targetGR, scale)
         if err != nil {
             a.eventRecorder.Eventf(hpa, v1.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error())
             setCondition(hpa, autoscalingv2.AbleToScale, v1.ConditionFalse, "FailedUpdateScale", "the HPA controller was unable to update the target scale: %v", err)
@@ -526,6 +543,35 @@ func (a *HorizontalController) shouldScale(hpa *autoscalingv2.HorizontalPodAutos
     return false
 }
 
+// scaleForResourceMappings attempts to fetch the scale for the
+// resource with the given name and namespace, trying each RESTMapping
+// in turn until a working one is found.  If none work, the first error
+// is returned.  It returns both the scale, as well as the group-resource from
+// the working mapping.
+func (a *HorizontalController) scaleForResourceMappings(namespace, name string, mappings []*apimeta.RESTMapping) (*autoscalingv1.Scale, schema.GroupResource, error) {
+    var firstErr error
+    for i, mapping := range mappings {
+        targetGR := mapping.GroupVersionKind.GroupVersion().WithResource(mapping.Resource).GroupResource()
+        scale, err := a.scaleNamespacer.Scales(namespace).Get(targetGR, name)
+        if err == nil {
+            return scale, targetGR, nil
+        }
+
+        // if this is the first error, remember it,
+        // then go on and try other mappings until we find a good one
+        if i == 0 {
+            firstErr = err
+        }
+    }
+
+    // make sure we handle an empty set of mappings
+    if firstErr == nil {
+        firstErr = fmt.Errorf("unrecognized resource")
+    }
+
+    return nil, schema.GroupResource{}, firstErr
+}
+
 // setCurrentReplicasInStatus sets the current replica count in the status of the HPA.
 func (a *HorizontalController) setCurrentReplicasInStatus(hpa *autoscalingv2.HorizontalPodAutoscaler, currentReplicas int32) {
     a.setStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentMetrics, false)


@@ -27,15 +27,16 @@ import (
     autoscalingv1 "k8s.io/api/autoscaling/v1"
     autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
     "k8s.io/api/core/v1"
-    extensions "k8s.io/api/extensions/v1beta1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/watch"
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
     clientfake "k8s.io/client-go/kubernetes/fake"
+    scalefake "k8s.io/client-go/scale/fake"
     core "k8s.io/client-go/testing"
     "k8s.io/kubernetes/pkg/api/legacyscheme"
     "k8s.io/kubernetes/pkg/apis/autoscaling"
@@ -121,6 +122,7 @@ type testCase struct {
     testClient        *fake.Clientset
     testMetricsClient *metricsfake.Clientset
     testCMClient      *cmfake.FakeCustomMetricsClient
+    testScaleClient   *scalefake.FakeScaleClient
 }
 
 // Needs to be called under a lock.
@@ -144,12 +146,12 @@ func init() {
     scaleUpLimitFactor = 8
 }
 
-func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient) {
+func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient, *scalefake.FakeScaleClient) {
     namespace := "test-namespace"
     hpaName := "test-hpa"
     podNamePrefix := "test-pod"
-    // TODO: also test with TargetSelector
-    selector := map[string]string{"name": podNamePrefix}
+    labelSet := map[string]string{"name": podNamePrefix}
+    selector := labels.SelectorFromSet(labelSet).String()
 
     tc.Lock()
@@ -161,13 +163,11 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
         tc.computeCPUCurrent()
     }
 
-    // TODO(madhusudancs): HPA only supports resources in extensions/v1beta1 right now. Add
-    // tests for "v1" replicationcontrollers when HPA adds support for cross-group scale.
     if tc.resource == nil {
         tc.resource = &fakeResource{
             name:       "test-rc",
-            apiVersion: "extensions/v1beta1",
-            kind:       "replicationcontrollers",
+            apiVersion: "v1",
+            kind:       "ReplicationController",
         }
     }
     tc.Unlock()
@@ -239,66 +239,6 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
         return true, objv1, nil
     })
 
-    fakeClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        tc.Lock()
-        defer tc.Unlock()
-
-        obj := &extensions.Scale{
-            ObjectMeta: metav1.ObjectMeta{
-                Name:      tc.resource.name,
-                Namespace: namespace,
-            },
-            Spec: extensions.ScaleSpec{
-                Replicas: tc.initialReplicas,
-            },
-            Status: extensions.ScaleStatus{
-                Replicas: tc.initialReplicas,
-                Selector: selector,
-            },
-        }
-        return true, obj, nil
-    })
-
-    fakeClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        tc.Lock()
-        defer tc.Unlock()
-
-        obj := &extensions.Scale{
-            ObjectMeta: metav1.ObjectMeta{
-                Name:      tc.resource.name,
-                Namespace: namespace,
-            },
-            Spec: extensions.ScaleSpec{
-                Replicas: tc.initialReplicas,
-            },
-            Status: extensions.ScaleStatus{
-                Replicas: tc.initialReplicas,
-                Selector: selector,
-            },
-        }
-        return true, obj, nil
-    })
-
-    fakeClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        tc.Lock()
-        defer tc.Unlock()
-
-        obj := &extensions.Scale{
-            ObjectMeta: metav1.ObjectMeta{
-                Name:      tc.resource.name,
-                Namespace: namespace,
-            },
-            Spec: extensions.ScaleSpec{
-                Replicas: tc.initialReplicas,
-            },
-            Status: extensions.ScaleStatus{
-                Replicas: tc.initialReplicas,
-                Selector: selector,
-            },
-        }
-        return true, obj, nil
-    })
-
     fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
         tc.Lock()
         defer tc.Unlock()
@@ -344,39 +284,6 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
         return true, obj, nil
     })
 
-    fakeClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        tc.Lock()
-        defer tc.Unlock()
-
-        obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
-        replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
-        assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the RC should be as expected")
-        tc.scaleUpdated = true
-        return true, obj, nil
-    })
-
-    fakeClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        tc.Lock()
-        defer tc.Unlock()
-
-        obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
-        replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
-        assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the deployment should be as expected")
-        tc.scaleUpdated = true
-        return true, obj, nil
-    })
-
-    fakeClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        tc.Lock()
-        defer tc.Unlock()
-
-        obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
-        replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
-        assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the replicaset should be as expected")
-        tc.scaleUpdated = true
-        return true, obj, nil
-    })
-
     fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
         tc.Lock()
         defer tc.Unlock()
@@ -386,8 +293,9 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
         assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected")
         assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected")
         if tc.verifyCPUCurrent {
-            assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil")
-            assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected")
+            if assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil") {
+                assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected")
+            }
         }
         var actualConditions []autoscalingv1.HorizontalPodAutoscalerCondition
         if err := json.Unmarshal([]byte(obj.ObjectMeta.Annotations[autoscaling.HorizontalPodAutoscalerConditionsAnnotation]), &actualConditions); err != nil {
@@ -411,6 +319,100 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
         return true, obj, nil
     })
 
+    fakeScaleClient := &scalefake.FakeScaleClient{}
+    fakeScaleClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        tc.Lock()
+        defer tc.Unlock()
+
+        obj := &autoscalingv1.Scale{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      tc.resource.name,
+                Namespace: namespace,
+            },
+            Spec: autoscalingv1.ScaleSpec{
+                Replicas: tc.initialReplicas,
+            },
+            Status: autoscalingv1.ScaleStatus{
+                Replicas: tc.initialReplicas,
+                Selector: selector,
+            },
+        }
+        return true, obj, nil
+    })
+
+    fakeScaleClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        tc.Lock()
+        defer tc.Unlock()
+
+        obj := &autoscalingv1.Scale{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      tc.resource.name,
+                Namespace: namespace,
+            },
+            Spec: autoscalingv1.ScaleSpec{
+                Replicas: tc.initialReplicas,
+            },
+            Status: autoscalingv1.ScaleStatus{
+                Replicas: tc.initialReplicas,
+                Selector: selector,
+            },
+        }
+        return true, obj, nil
+    })
+
+    fakeScaleClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        tc.Lock()
+        defer tc.Unlock()
+
+        obj := &autoscalingv1.Scale{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      tc.resource.name,
+                Namespace: namespace,
+            },
+            Spec: autoscalingv1.ScaleSpec{
+                Replicas: tc.initialReplicas,
+            },
+            Status: autoscalingv1.ScaleStatus{
+                Replicas: tc.initialReplicas,
+                Selector: selector,
+            },
+        }
+        return true, obj, nil
+    })
+
+    fakeScaleClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        tc.Lock()
+        defer tc.Unlock()
+
+        obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
+        replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
+        assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the RC should be as expected")
+        tc.scaleUpdated = true
+        return true, obj, nil
+    })
+
+    fakeScaleClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        tc.Lock()
+        defer tc.Unlock()
+
+        obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
+        replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
+        assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the deployment should be as expected")
+        tc.scaleUpdated = true
+        return true, obj, nil
+    })
+
+    fakeScaleClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        tc.Lock()
+        defer tc.Unlock()
+
+        obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
+        replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
+        assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the replicaset should be as expected")
+        tc.scaleUpdated = true
+        return true, obj, nil
+    })
+
     fakeWatch := watch.NewFake()
     fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
@@ -427,7 +429,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
             ObjectMeta: metav1.ObjectMeta{
                 Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
                 Namespace: namespace,
-                Labels:    selector,
+                Labels:    labelSet,
             },
             Timestamp: metav1.Time{Time: time.Now()},
             Containers: []metricsapi.ContainerMetrics{
@@ -522,7 +524,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
         return true, metrics, nil
     })
 
-    return fakeClient, fakeMetricsClient, fakeCMClient
+    return fakeClient, fakeMetricsClient, fakeCMClient, fakeScaleClient
 }
 
 func (tc *testCase) verifyResults(t *testing.T) {
@@ -537,7 +539,7 @@ func (tc *testCase) verifyResults(t *testing.T) {
 }
 
 func (tc *testCase) setupController(t *testing.T) (*HorizontalController, informers.SharedInformerFactory) {
-    testClient, testMetricsClient, testCMClient := tc.prepareTestClient(t)
+    testClient, testMetricsClient, testCMClient, testScaleClient := tc.prepareTestClient(t)
     if tc.testClient != nil {
         testClient = tc.testClient
     }
@@ -547,6 +549,9 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
     if tc.testCMClient != nil {
         testCMClient = tc.testCMClient
     }
+    if tc.testScaleClient != nil {
+        testScaleClient = tc.testScaleClient
+    }
     metricsClient := metrics.NewRESTMetricsClient(
         testMetricsClient.MetricsV1beta1(),
         testCMClient,
@@ -587,8 +592,9 @@ func (tc *testCase) setupController(t *testing.T) (*HorizontalController, inform
     hpaController := NewHorizontalController(
         eventClient.Core(),
-        testClient.Extensions(),
+        testScaleClient,
         testClient.Autoscaling(),
+        legacyscheme.Registry.RESTMapper(),
         replicaCalc,
         informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
         controller.NoResyncPeriodFunc(),
@@ -692,7 +698,7 @@ func TestScaleUpDeployment(t *testing.T) {
         resource: &fakeResource{
             name:       "test-dep",
             apiVersion: "extensions/v1beta1",
-            kind:       "deployments",
+            kind:       "Deployment",
         },
     }
     tc.runTest(t)
@@ -712,7 +718,7 @@ func TestScaleUpReplicaSet(t *testing.T) {
         resource: &fakeResource{
             name:       "test-replicaset",
             apiVersion: "extensions/v1beta1",
-            kind:       "replicasets",
+            kind:       "ReplicaSet",
         },
     }
     tc.runTest(t)
@@ -1267,18 +1273,18 @@ func TestConditionInvalidSelectorMissing(t *testing.T) {
         },
     }
 
-    testClient, _, _ := tc.prepareTestClient(t)
-    tc.testClient = testClient
-    testClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        obj := &extensions.Scale{
+    _, _, _, testScaleClient := tc.prepareTestClient(t)
+    tc.testScaleClient = testScaleClient
+    testScaleClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        obj := &autoscalingv1.Scale{
             ObjectMeta: metav1.ObjectMeta{
                 Name: tc.resource.name,
             },
-            Spec: extensions.ScaleSpec{
+            Spec: autoscalingv1.ScaleSpec{
                 Replicas: tc.initialReplicas,
             },
-            Status: extensions.ScaleStatus{
+            Status: autoscalingv1.ScaleStatus{
                 Replicas: tc.initialReplicas,
             },
         }
@@ -1312,20 +1318,20 @@ func TestConditionInvalidSelectorUnparsable(t *testing.T) {
         },
     }
 
-    testClient, _, _ := tc.prepareTestClient(t)
-    tc.testClient = testClient
-    testClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        obj := &extensions.Scale{
+    _, _, _, testScaleClient := tc.prepareTestClient(t)
+    tc.testScaleClient = testScaleClient
+    testScaleClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        obj := &autoscalingv1.Scale{
             ObjectMeta: metav1.ObjectMeta{
                 Name: tc.resource.name,
             },
-            Spec: extensions.ScaleSpec{
+            Spec: autoscalingv1.ScaleSpec{
                 Replicas: tc.initialReplicas,
             },
-            Status: extensions.ScaleStatus{
-                Replicas:       tc.initialReplicas,
-                TargetSelector: "cheddar cheese",
+            Status: autoscalingv1.ScaleStatus{
+                Replicas: tc.initialReplicas,
+                Selector: "cheddar cheese",
             },
         }
         return true, obj, nil
@@ -1373,7 +1379,7 @@ func TestConditionFailedGetMetrics(t *testing.T) {
         reportedCPURequests: []resource.Quantity{resource.MustParse("0.1"), resource.MustParse("0.1"), resource.MustParse("0.1")},
         useMetricsAPI:       true,
     }
-    _, testMetricsClient, testCMClient := tc.prepareTestClient(t)
+    _, testMetricsClient, testCMClient, _ := tc.prepareTestClient(t)
     tc.testMetricsClient = testMetricsClient
     tc.testCMClient = testCMClient
@@ -1446,11 +1452,11 @@ func TestConditionFailedGetScale(t *testing.T) {
         },
     }
 
-    testClient, _, _ := tc.prepareTestClient(t)
-    tc.testClient = testClient
-    testClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        return true, &extensions.Scale{}, fmt.Errorf("something went wrong")
+    _, _, _, testScaleClient := tc.prepareTestClient(t)
+    tc.testScaleClient = testScaleClient
+    testScaleClient.PrependReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        return true, &autoscalingv1.Scale{}, fmt.Errorf("something went wrong")
     })
 
     tc.runTest(t)
@@ -1473,11 +1479,11 @@ func TestConditionFailedUpdateScale(t *testing.T) {
         }),
     }
 
-    testClient, _, _ := tc.prepareTestClient(t)
-    tc.testClient = testClient
-    testClient.PrependReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        return true, &extensions.Scale{}, fmt.Errorf("something went wrong")
+    _, _, _, testScaleClient := tc.prepareTestClient(t)
+    tc.testScaleClient = testScaleClient
+    testScaleClient.PrependReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        return true, &autoscalingv1.Scale{}, fmt.Errorf("something went wrong")
    })
 
     tc.runTest(t)
@@ -1659,7 +1665,7 @@ func TestAvoidUncessaryUpdates(t *testing.T) {
         reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
         useMetricsAPI:        true,
     }
-    testClient, _, _ := tc.prepareTestClient(t)
+    testClient, _, _, _ := tc.prepareTestClient(t)
     tc.testClient = testClient
     var savedHPA *autoscalingv1.HorizontalPodAutoscaler
     testClient.PrependReactor("list", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {


@@ -30,16 +30,18 @@ import (
     autoscalingv1 "k8s.io/api/autoscaling/v1"
     autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
     "k8s.io/api/core/v1"
-    extensions "k8s.io/api/extensions/v1beta1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/watch"
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
     clientfake "k8s.io/client-go/kubernetes/fake"
     restclient "k8s.io/client-go/rest"
+    scalefake "k8s.io/client-go/scale/fake"
     core "k8s.io/client-go/testing"
+    "k8s.io/kubernetes/pkg/api/legacyscheme"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
@@ -48,6 +50,7 @@ import (
     "github.com/stretchr/testify/assert"
 
+    _ "k8s.io/kubernetes/pkg/api/install"
     _ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
     _ "k8s.io/kubernetes/pkg/apis/extensions/install"
 )
@@ -114,12 +117,12 @@ func (tc *legacyTestCase) computeCPUCurrent() {
     tc.CPUCurrent = int32(100 * reported / requested)
 }
 
-func (tc *legacyTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
+func (tc *legacyTestCase) prepareTestClient(t *testing.T) (*fake.Clientset, *scalefake.FakeScaleClient) {
     namespace := "test-namespace"
     hpaName := "test-hpa"
     podNamePrefix := "test-pod"
-    // TODO: also test with TargetSelector
-    selector := map[string]string{"name": podNamePrefix}
+    labelSet := map[string]string{"name": podNamePrefix}
+    selector := labels.SelectorFromSet(labelSet).String()
 
     tc.Lock()
@@ -131,13 +134,11 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
         tc.computeCPUCurrent()
     }
 
-    // TODO(madhusudancs): HPA only supports resources in extensions/v1beta1 right now. Add
-    // tests for "v1" replicationcontrollers when HPA adds support for cross-group scale.
     if tc.resource == nil {
         tc.resource = &fakeResource{
             name:       "test-rc",
-            apiVersion: "extensions/v1beta1",
-            kind:       "replicationcontrollers",
+            apiVersion: "v1",
+            kind:       "ReplicationController",
         }
     }
     tc.Unlock()
@@ -208,66 +209,6 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
         return true, objv1, nil
     })
 
-    fakeClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        tc.Lock()
-        defer tc.Unlock()
-
-        obj := &extensions.Scale{
-            ObjectMeta: metav1.ObjectMeta{
-                Name:      tc.resource.name,
-                Namespace: namespace,
-            },
-            Spec: extensions.ScaleSpec{
-                Replicas: tc.initialReplicas,
-            },
-            Status: extensions.ScaleStatus{
-                Replicas: tc.initialReplicas,
-                Selector: selector,
-            },
-        }
-        return true, obj, nil
-    })
-
-    fakeClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        tc.Lock()
-        defer tc.Unlock()
-
-        obj := &extensions.Scale{
-            ObjectMeta: metav1.ObjectMeta{
-                Name:      tc.resource.name,
-                Namespace: namespace,
-            },
-            Spec: extensions.ScaleSpec{
-                Replicas: tc.initialReplicas,
-            },
-            Status: extensions.ScaleStatus{
-                Replicas: tc.initialReplicas,
-                Selector: selector,
-            },
-        }
-        return true, obj, nil
-    })
-
-    fakeClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        tc.Lock()
-        defer tc.Unlock()
-
-        obj := &extensions.Scale{
-            ObjectMeta: metav1.ObjectMeta{
-                Name:      tc.resource.name,
-                Namespace: namespace,
-            },
-            Spec: extensions.ScaleSpec{
-                Replicas: tc.initialReplicas,
-            },
-            Status: extensions.ScaleStatus{
-                Replicas: tc.initialReplicas,
-                Selector: selector,
-            },
-        }
-        return true, obj, nil
-    })
-
     fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
         tc.Lock()
         defer tc.Unlock()
@@ -386,39 +327,6 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
         return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
     })
 
-    fakeClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        tc.Lock()
-        defer tc.Unlock()
-
-        obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
-        replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
-        assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the RC should be as expected")
-        tc.scaleUpdated = true
-        return true, obj, nil
-    })
-
-    fakeClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        tc.Lock()
-        defer tc.Unlock()
-
-        obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
-        replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
-        assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the deployment should be as expected")
-        tc.scaleUpdated = true
-        return true, obj, nil
-    })
-
-    fakeClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-        tc.Lock()
-        defer tc.Unlock()
-
-        obj := action.(core.UpdateAction).GetObject().(*extensions.Scale)
-        replicas := action.(core.UpdateAction).GetObject().(*extensions.Scale).Spec.Replicas
-        assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the replicaset should be as expected")
-        tc.scaleUpdated = true
-        return true, obj, nil
-    })
-
     fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
         tc.Lock()
         defer tc.Unlock()
@@ -428,8 +336,9 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
         assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected")
         assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected")
         if tc.verifyCPUCurrent {
-            assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil")
-            assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected")
+            if assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil") {
+                assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected")
+            }
         }
         tc.statusUpdated = true
         // Every time we reconcile HPA object we are updating status.
@@ -437,10 +346,104 @@ func (tc *legacyTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
         return true, obj, nil
     })
 
+    fakeScaleClient := &scalefake.FakeScaleClient{}
+    fakeScaleClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        tc.Lock()
+        defer tc.Unlock()
+
+        obj := &autoscalingv1.Scale{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      tc.resource.name,
+                Namespace: namespace,
+            },
+            Spec: autoscalingv1.ScaleSpec{
+                Replicas: tc.initialReplicas,
+            },
+            Status: autoscalingv1.ScaleStatus{
+                Replicas: tc.initialReplicas,
+                Selector: selector,
+            },
+        }
+        return true, obj, nil
+    })
+
+    fakeScaleClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        tc.Lock()
+        defer tc.Unlock()
+
+        obj := &autoscalingv1.Scale{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      tc.resource.name,
+                Namespace: namespace,
+            },
+            Spec: autoscalingv1.ScaleSpec{
+                Replicas: tc.initialReplicas,
+            },
+            Status: autoscalingv1.ScaleStatus{
+                Replicas: tc.initialReplicas,
+                Selector: selector,
+            },
+        }
+        return true, obj, nil
+    })
+
+    fakeScaleClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        tc.Lock()
+        defer tc.Unlock()
+
+        obj := &autoscalingv1.Scale{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      tc.resource.name,
+                Namespace: namespace,
+            },
+            Spec: autoscalingv1.ScaleSpec{
+                Replicas: tc.initialReplicas,
+            },
+            Status: autoscalingv1.ScaleStatus{
+                Replicas: tc.initialReplicas,
+                Selector: selector,
+            },
+        }
+        return true, obj, nil
+    })
+
+    fakeScaleClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        tc.Lock()
+        defer tc.Unlock()
+
+        obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
+        replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
+        assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the RC should be as expected")
+        tc.scaleUpdated = true
+        return true, obj, nil
+    })
+
+    fakeScaleClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        tc.Lock()
+        defer tc.Unlock()
+
+        obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
+        replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
+        assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the deployment should be as expected")
+        tc.scaleUpdated = true
+        return true, obj, nil
+    })
+
+    fakeScaleClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        tc.Lock()
+        defer tc.Unlock()
+
+        obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
+        replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
+        assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the replicaset should be as expected")
+        tc.scaleUpdated = true
+        return true, obj, nil
+    })
+
     fakeWatch := watch.NewFake()
     fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
 
-    return fakeClient
+    return fakeClient, fakeScaleClient
 }
 
 func (tc *legacyTestCase) verifyResults(t *testing.T) {
@@ -455,7 +458,7 @@ func (tc *legacyTestCase) verifyResults(t *testing.T) {
 }
 
 func (tc *legacyTestCase) runTest(t *testing.T) {
-    testClient := tc.prepareTestClient(t)
+    testClient, testScaleClient := tc.prepareTestClient(t)
     metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
     eventClient := &clientfake.Clientset{}
@@ -493,8 +496,9 @@ func (tc *legacyTestCase) runTest(t *testing.T) {
     hpaController := NewHorizontalController(
         eventClient.Core(),
-        testClient.Extensions(),
+        testScaleClient,
         testClient.Autoscaling(),
+        legacyscheme.Registry.RESTMapper(),
         replicaCalc,
         informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
         controller.NoResyncPeriodFunc(),
@@ -584,7 +588,7 @@ func LegacyTestScaleUpDeployment(t *testing.T) {
         resource: &fakeResource{
             name:       "test-dep",
             apiVersion: "extensions/v1beta1",
-            kind:       "deployments",
+            kind:       "Deployment",
         },
     }
     tc.runTest(t)
@@ -604,7 +608,7 @@ func LegacyTestScaleUpReplicaSet(t *testing.T) {
         resource: &fakeResource{
             name:       "test-replicaset",
             apiVersion: "extensions/v1beta1",
-            kind:       "replicasets",
+            kind:       "ReplicaSet",
        },
    }
    tc.runTest(t)