Merge pull request #22345 from fgrzadkowski/hpa_events
Auto commit by PR queue bot
@@ -293,8 +293,8 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
             metrics.DefaultHeapsterService,
             metrics.DefaultHeapsterPort,
         )
-        podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient).
-            Run(s.HorizontalPodAutoscalerSyncPeriod.Duration)
+        go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient, s.HorizontalPodAutoscalerSyncPeriod.Duration).
+            Run(wait.NeverStop)
     }
 
     if containsResource(resources, "daemonsets") {
@@ -240,8 +240,8 @@ func (s *CMServer) Run(_ []string) error {
             metrics.DefaultHeapsterService,
             metrics.DefaultHeapsterPort,
         )
-        podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient).
-            Run(s.HorizontalPodAutoscalerSyncPeriod.Duration)
+        go podautoscaler.NewHorizontalController(hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient, s.HorizontalPodAutoscalerSyncPeriod.Duration).
+            Run(wait.NeverStop)
     }
 
     if containsResource(resources, "daemonsets") {
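A sketch of the new call pattern (not part of the commit, for illustration only): the sync period moves from Run into the constructor, where it becomes the informer resync period, and Run now blocks on a stop channel, so callers start the controller in a goroutine. Using the same identifiers as the two hunks above:

    // The resync period is handed to the constructor (it drives the informer's resync),
    // and Run now takes a stop channel instead of a sync period.
    hpaController := podautoscaler.NewHorizontalController(
        hpaClient.Core(), hpaClient.Extensions(), hpaClient, metricsClient,
        s.HorizontalPodAutoscalerSyncPeriod.Duration,
    )
    go hpaController.Run(wait.NeverStop) // blocks until the channel is closed; NeverStop never is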
@@ -27,11 +27,15 @@ import (
     "k8s.io/kubernetes/pkg/api/resource"
     "k8s.io/kubernetes/pkg/api/unversioned"
     "k8s.io/kubernetes/pkg/apis/extensions"
+    "k8s.io/kubernetes/pkg/client/cache"
     "k8s.io/kubernetes/pkg/client/record"
     unversionedcore "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned"
     unversionedextensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned"
+    "k8s.io/kubernetes/pkg/controller/framework"
     "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
-    "k8s.io/kubernetes/pkg/util/wait"
+    "k8s.io/kubernetes/pkg/runtime"
+    utilruntime "k8s.io/kubernetes/pkg/util/runtime"
+    "k8s.io/kubernetes/pkg/watch"
 )
 
 const (
@@ -51,33 +55,75 @@ type HorizontalController struct {
 
     metricsClient metrics.MetricsClient
     eventRecorder record.EventRecorder
+
+    // A store of HPA objects, populated by the controller.
+    store cache.Store
+    // Watches changes to all HPA objects.
+    controller *framework.Controller
 }
 
 var downscaleForbiddenWindow = 5 * time.Minute
 var upscaleForbiddenWindow = 3 * time.Minute
 
-func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedextensions.HorizontalPodAutoscalersGetter, metricsClient metrics.MetricsClient) *HorizontalController {
+func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedextensions.HorizontalPodAutoscalersGetter, metricsClient metrics.MetricsClient, resyncPeriod time.Duration) *HorizontalController {
     broadcaster := record.NewBroadcaster()
     broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{evtNamespacer.Events("")})
     recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"})
 
-    return &HorizontalController{
+    controller := &HorizontalController{
         metricsClient: metricsClient,
         eventRecorder: recorder,
         scaleNamespacer: scaleNamespacer,
         hpaNamespacer: hpaNamespacer,
     }
+
+    controller.store, controller.controller = framework.NewInformer(
+        &cache.ListWatch{
+            ListFunc: func(options api.ListOptions) (runtime.Object, error) {
+                return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).List(options)
+            },
+            WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
+                return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).Watch(options)
+            },
+        },
+        &extensions.HorizontalPodAutoscaler{},
+        resyncPeriod,
+        framework.ResourceEventHandlerFuncs{
+            AddFunc: func(obj interface{}) {
+                hpa := obj.(*extensions.HorizontalPodAutoscaler)
+                hasCPUPolicy := hpa.Spec.CPUUtilization != nil
+                _, hasCustomMetricsPolicy := hpa.Annotations[HpaCustomMetricsTargetAnnotationName]
+                if !hasCPUPolicy && !hasCustomMetricsPolicy {
+                    controller.eventRecorder.Event(hpa, api.EventTypeNormal, "DefaultPolicy", "No scaling policy specified - will use default one. See documentation for details")
+                }
+                err := controller.reconcileAutoscaler(hpa)
+                if err != nil {
+                    glog.Warningf("Failed to reconcile %s: %v", hpa.Name, err)
+                }
+            },
+            UpdateFunc: func(old, cur interface{}) {
+                hpa := cur.(*extensions.HorizontalPodAutoscaler)
+                err := controller.reconcileAutoscaler(hpa)
+                if err != nil {
+                    glog.Warningf("Failed to reconcile %s: %v", hpa.Name, err)
+                }
+            },
+            // We are not interested in deletions.
+        },
+    )
+
+    return controller
 }
 
-func (a *HorizontalController) Run(syncPeriod time.Duration) {
-    go wait.Until(func() {
-        if err := a.reconcileAutoscalers(); err != nil {
-            glog.Errorf("Couldn't reconcile horizontal pod autoscalers: %v", err)
-        }
-    }, syncPeriod, wait.NeverStop)
+func (a *HorizontalController) Run(stopCh <-chan struct{}) {
+    defer utilruntime.HandleCrash()
+    glog.Infof("Starting HPA Controller")
+    go a.controller.Run(stopCh)
+    <-stopCh
+    glog.Infof("Shutting down HPA Controller")
 }
 
-func (a *HorizontalController) computeReplicasForCPUUtilization(hpa extensions.HorizontalPodAutoscaler, scale *extensions.Scale) (int, *int, time.Time, error) {
+func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale) (int, *int, time.Time, error) {
     targetUtilization := defaultTargetCPUUtilizationPercentage
     if hpa.Spec.CPUUtilization != nil {
         targetUtilization = hpa.Spec.CPUUtilization.TargetPercentage
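Because Run now takes a stop channel, shutdown is driven by closing that channel rather than by stopping a polling loop. A minimal lifecycle sketch (not part of the commit; the stopCh name is illustrative, and controller is the value returned by NewHorizontalController):

    stopCh := make(chan struct{})
    go controller.Run(stopCh) // starts the informer and logs "Starting HPA Controller"
    // ... later, during controller-manager shutdown:
    close(stopCh) // Run unblocks, logs "Shutting down HPA Controller", and returns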
@@ -87,8 +133,8 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa extensions.H
 
     // TODO: what to do on partial errors (like metrics obtained for 75% of pods).
     if err != nil {
-        a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedGetMetrics", err.Error())
-        return 0, nil, time.Time{}, fmt.Errorf("failed to get cpu utilization: %v", err)
+        a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetMetrics", err.Error())
+        return 0, nil, time.Time{}, fmt.Errorf("failed to get CPU utilization: %v", err)
     }
 
     usageRatio := float64(*currentUtilization) / float64(targetUtilization)
@@ -101,25 +147,29 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa extensions.H
 
 // Computes the desired number of replicas based on the CustomMetrics passed in cmAnnotation as json-serialized
 // extensions.CustomMetricsTargetList.
-// Returns number of replicas, status string (also json-serialized extensions.CustomMetricsCurrentStatusList),
+// Returns number of replicas, metric which required highest number of replicas,
+// status string (also json-serialized extensions.CustomMetricsCurrentStatusList),
 // last timestamp of the metrics involved in computations or error, if occurred.
-func (a *HorizontalController) computeReplicasForCustomMetrics(hpa extensions.HorizontalPodAutoscaler, scale *extensions.Scale,
-    cmAnnotation string) (int, string, time.Time, error) {
+func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *extensions.HorizontalPodAutoscaler, scale *extensions.Scale,
+    cmAnnotation string) (replicas int, metric string, status string, timestamp time.Time, err error) {
 
     currentReplicas := scale.Status.Replicas
-    replicas := 0
-    timestamp := time.Time{}
+    replicas = 0
+    metric = ""
+    status = ""
+    timestamp = time.Time{}
+    err = nil
 
     if cmAnnotation == "" {
-        return 0, "", time.Time{}, nil
+        return
     }
 
     var targetList extensions.CustomMetricTargetList
     if err := json.Unmarshal([]byte(cmAnnotation), &targetList); err != nil {
-        return 0, "", time.Time{}, fmt.Errorf("failed to parse custom metrics annotation: %v", err)
+        return 0, "", "", time.Time{}, fmt.Errorf("failed to parse custom metrics annotation: %v", err)
     }
     if len(targetList.Items) == 0 {
-        return 0, "", time.Time{}, fmt.Errorf("no custom metrics in annotation")
+        return 0, "", "", time.Time{}, fmt.Errorf("no custom metrics in annotation")
     }
 
     statusList := extensions.CustomMetricCurrentStatusList{
@@ -130,8 +180,8 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa extensions.Ho
         value, currentTimestamp, err := a.metricsClient.GetCustomMetric(customMetricTarget.Name, hpa.Namespace, scale.Status.Selector)
         // TODO: what to do on partial errors (like metrics obtained for 75% of pods).
         if err != nil {
-            a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedGetCustomMetrics", err.Error())
-            return 0, "", time.Time{}, fmt.Errorf("failed to get custom metric value: %v", err)
+            a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetCustomMetrics", err.Error())
+            return 0, "", "", time.Time{}, fmt.Errorf("failed to get custom metric value: %v", err)
         }
         floatTarget := float64(customMetricTarget.TargetValue.MilliValue()) / 1000.0
         usageRatio := *value / floatTarget
@@ -145,10 +195,11 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa extensions.Ho
         if replicaCountProposal > replicas {
             timestamp = currentTimestamp
             replicas = replicaCountProposal
+            metric = fmt.Sprintf("Custom metric %s", customMetricTarget.Name)
         }
         quantity, err := resource.ParseQuantity(fmt.Sprintf("%.3f", *value))
         if err != nil {
-            return 0, "", time.Time{}, fmt.Errorf("failed to set custom metric value: %v", err)
+            return 0, "", "", time.Time{}, fmt.Errorf("failed to set custom metric value: %v", err)
         }
         statusList.Items = append(statusList.Items, extensions.CustomMetricCurrentStatus{
             Name: customMetricTarget.Name,
@@ -157,18 +208,18 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa extensions.Ho
     }
     byteStatusList, err := json.Marshal(statusList)
     if err != nil {
-        return 0, "", time.Time{}, fmt.Errorf("failed to serialize custom metric status: %v", err)
+        return 0, "", "", time.Time{}, fmt.Errorf("failed to serialize custom metric status: %v", err)
     }
 
-    return replicas, string(byteStatusList), timestamp, nil
+    return replicas, metric, string(byteStatusList), timestamp, nil
 }
 
-func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodAutoscaler) error {
+func (a *HorizontalController) reconcileAutoscaler(hpa *extensions.HorizontalPodAutoscaler) error {
     reference := fmt.Sprintf("%s/%s/%s", hpa.Spec.ScaleRef.Kind, hpa.Namespace, hpa.Spec.ScaleRef.Name)
 
     scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleRef.Kind, hpa.Spec.ScaleRef.Name)
     if err != nil {
-        a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedGetScale", err.Error())
+        a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetScale", err.Error())
         return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
     }
     currentReplicas := scale.Status.Replicas
@@ -178,17 +229,22 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
     cpuTimestamp := time.Time{}
 
     cmDesiredReplicas := 0
+    cmMetric := ""
     cmStatus := ""
     cmTimestamp := time.Time{}
 
     desiredReplicas := 0
+    rescaleReason := ""
     timestamp := time.Now()
 
     if currentReplicas > hpa.Spec.MaxReplicas {
+        rescaleReason = "Current number of replicas above Spec.MaxReplicas"
         desiredReplicas = hpa.Spec.MaxReplicas
     } else if hpa.Spec.MinReplicas != nil && currentReplicas < *hpa.Spec.MinReplicas {
+        rescaleReason = "Current number of replicas below Spec.MinReplicas"
         desiredReplicas = *hpa.Spec.MinReplicas
     } else if currentReplicas == 0 {
+        rescaleReason = "Current number of replicas must be greater than 0"
         desiredReplicas = 1
     } else {
         // All basic scenarios covered, the state should be sane, lets use metrics.
@@ -198,27 +254,35 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
         cpuDesiredReplicas, cpuCurrentUtilization, cpuTimestamp, err = a.computeReplicasForCPUUtilization(hpa, scale)
         if err != nil {
             a.updateCurrentReplicasInStatus(hpa, currentReplicas)
-            a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedComputeReplicas", err.Error())
+            a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedComputeReplicas", err.Error())
             return fmt.Errorf("failed to compute desired number of replicas based on CPU utilization for %s: %v", reference, err)
         }
     }
 
     if cmAnnotationFound {
-        cmDesiredReplicas, cmStatus, cmTimestamp, err = a.computeReplicasForCustomMetrics(hpa, scale, cmAnnotation)
+        cmDesiredReplicas, cmMetric, cmStatus, cmTimestamp, err = a.computeReplicasForCustomMetrics(hpa, scale, cmAnnotation)
         if err != nil {
             a.updateCurrentReplicasInStatus(hpa, currentReplicas)
-            a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedComputeCMReplicas", err.Error())
+            a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedComputeCMReplicas", err.Error())
             return fmt.Errorf("failed to compute desired number of replicas based on Custom Metrics for %s: %v", reference, err)
         }
     }
 
+    rescaleMetric := ""
     if cpuDesiredReplicas > desiredReplicas {
         desiredReplicas = cpuDesiredReplicas
         timestamp = cpuTimestamp
+        rescaleMetric = "CPU utilization"
     }
     if cmDesiredReplicas > desiredReplicas {
         desiredReplicas = cmDesiredReplicas
         timestamp = cmTimestamp
+        rescaleMetric = cmMetric
+    }
+    if desiredReplicas > currentReplicas {
+        rescaleReason = fmt.Sprintf("%s above target", rescaleMetric)
+    } else if desiredReplicas < currentReplicas {
+        rescaleReason = "All metrics below target"
     }
 
     if hpa.Spec.MinReplicas != nil && desiredReplicas < *hpa.Spec.MinReplicas {
@@ -240,12 +304,12 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
         scale.Spec.Replicas = desiredReplicas
         _, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
         if err != nil {
-            a.eventRecorder.Eventf(&hpa, api.EventTypeWarning, "FailedRescale", "New size: %d; error: %v", desiredReplicas, err.Error())
+            a.eventRecorder.Eventf(hpa, api.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error())
             return fmt.Errorf("failed to rescale %s: %v", reference, err)
         }
-        a.eventRecorder.Eventf(&hpa, api.EventTypeNormal, "SuccessfulRescale", "New size: %d", desiredReplicas)
-        glog.Infof("Successfull rescale of %s, old size: %d, new size: %d",
-            hpa.Name, currentReplicas, desiredReplicas)
+        a.eventRecorder.Eventf(hpa, api.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason)
+        glog.Infof("Successfull rescale of %s, old size: %d, new size: %d, reason: %s",
+            hpa.Name, currentReplicas, desiredReplicas, rescaleReason)
     } else {
         desiredReplicas = currentReplicas
     }
@@ -253,7 +317,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa extensions.HorizontalPodA
     return a.updateStatus(hpa, currentReplicas, desiredReplicas, cpuCurrentUtilization, cmStatus, rescale)
 }
 
-func shouldScale(hpa extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, timestamp time.Time) bool {
+func shouldScale(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, timestamp time.Time) bool {
     if desiredReplicas != currentReplicas {
         // Going down only if the usageRatio dropped significantly below the target
         // and there was no rescaling in the last downscaleForbiddenWindow.
@@ -274,14 +338,14 @@ func shouldScale(hpa extensions.HorizontalPodAutoscaler, currentReplicas, desire
     return false
 }
 
-func (a *HorizontalController) updateCurrentReplicasInStatus(hpa extensions.HorizontalPodAutoscaler, currentReplicas int) {
+func (a *HorizontalController) updateCurrentReplicasInStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas int) {
     err := a.updateStatus(hpa, currentReplicas, hpa.Status.DesiredReplicas, hpa.Status.CurrentCPUUtilizationPercentage, hpa.Annotations[HpaCustomMetricsStatusAnnotationName], false)
     if err != nil {
         glog.Errorf("%v", err)
     }
 }
 
-func (a *HorizontalController) updateStatus(hpa extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, cpuCurrentUtilization *int, cmStatus string, rescale bool) error {
+func (a *HorizontalController) updateStatus(hpa *extensions.HorizontalPodAutoscaler, currentReplicas, desiredReplicas int, cpuCurrentUtilization *int, cmStatus string, rescale bool) error {
     hpa.Status = extensions.HorizontalPodAutoscalerStatus{
         CurrentReplicas: currentReplicas,
         DesiredReplicas: desiredReplicas,
@@ -297,25 +361,11 @@ func (a *HorizontalController) updateStatus(hpa extensions.HorizontalPodAutoscal
         hpa.Status.LastScaleTime = &now
     }
 
-    _, err := a.hpaNamespacer.HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(&hpa)
+    _, err := a.hpaNamespacer.HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(hpa)
     if err != nil {
-        a.eventRecorder.Event(&hpa, api.EventTypeWarning, "FailedUpdateStatus", err.Error())
+        a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedUpdateStatus", err.Error())
         return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
     }
-    return nil
-}
-
-func (a *HorizontalController) reconcileAutoscalers() error {
-    ns := api.NamespaceAll
-    list, err := a.hpaNamespacer.HorizontalPodAutoscalers(ns).List(api.ListOptions{})
-    if err != nil {
-        return fmt.Errorf("error listing nodes: %v", err)
-    }
-    for _, hpa := range list.Items {
-        err := a.reconcileAutoscaler(hpa)
-        if err != nil {
-            glog.Warningf("Failed to reconcile %s: %v", hpa.Name, err)
-        }
-    }
+    glog.V(2).Infof("Successfully updated status for %s", hpa.Name)
     return nil
 }
@@ -33,6 +33,7 @@ import (
     "k8s.io/kubernetes/pkg/client/unversioned/testclient"
     "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
     "k8s.io/kubernetes/pkg/runtime"
+    "k8s.io/kubernetes/pkg/watch"
 
     heapster "k8s.io/heapster/api/v1/types"
 
@@ -71,6 +72,8 @@ type testCase struct {
     statusUpdated bool
     eventCreated bool
    verifyEvents bool
+    // Channel with names of HPA objects which we have reconciled.
+    processed chan string
 }
 
 func (tc *testCase) computeCPUCurrent() {
@@ -97,6 +100,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
     tc.scaleUpdated = false
     tc.statusUpdated = false
     tc.eventCreated = false
+    tc.processed = make(chan string, 100)
     tc.computeCPUCurrent()
 
     fakeClient := &fake.Clientset{}
@@ -215,11 +219,13 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
         assert.Equal(t, namespace, obj.Namespace)
         assert.Equal(t, hpaName, obj.Name)
         assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas)
-        tc.statusUpdated = true
         if tc.verifyCPUCurrent {
             assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage)
             assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage)
         }
+        tc.statusUpdated = true
+        // Every time we reconcile HPA object we are updating status.
+        tc.processed <- obj.Name
         return true, obj, nil
     })
 
@@ -227,12 +233,15 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
         obj := action.(testclient.CreateAction).GetObject().(*api.Event)
         if tc.verifyEvents {
             assert.Equal(t, "SuccessfulRescale", obj.Reason)
-            assert.Equal(t, fmt.Sprintf("New size: %d", tc.desiredReplicas), obj.Message)
+            assert.Equal(t, fmt.Sprintf("New size: %d; reason: CPU utilization above target", tc.desiredReplicas), obj.Message)
         }
         tc.eventCreated = true
         return true, obj, nil
     })
 
+    fakeWatch := watch.NewFake()
+    fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
+
     return fakeClient
 }
 
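For context (an illustrative sketch, not additional code from this commit): the fake watch registered above is what feeds the informer-driven controller in tests, since the informer lists HPAs through the fake client's reactors and then receives changes through the fake watch.

    fakeWatch := watch.NewFake()
    fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
    // Injecting an object, e.g. fakeWatch.Add(hpaObject), would fire the controller's AddFunc.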
@@ -247,13 +256,16 @@ func (tc *testCase) verifyResults(t *testing.T) {
 func (tc *testCase) runTest(t *testing.T) {
     testClient := tc.prepareTestClient(t)
     metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
-    hpaController := NewHorizontalController(testClient.Core(), testClient.Extensions(), testClient.Extensions(), metricsClient)
-    err := hpaController.reconcileAutoscalers()
-    assert.Equal(t, nil, err)
+    hpaController := NewHorizontalController(testClient.Core(), testClient.Extensions(), testClient.Extensions(), metricsClient, 0)
+    stop := make(chan struct{})
+    defer close(stop)
+    go hpaController.Run(stop)
     if tc.verifyEvents {
         // We need to wait for events to be broadcasted (sleep for longer than record.sleepDuration).
         time.Sleep(12 * time.Second)
     }
+    // Wait for HPA to be processed.
+    <-tc.processed
     tc.verifyResults(t)
 }
 