Implementation of HorizontalPodAutoscaler
@@ -17,16 +17,21 @@ limitations under the License.
 package autoscalercontroller
 
 import (
+	"encoding/json"
 	"fmt"
 	"strings"
 	"time"
 
 	"github.com/golang/glog"
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/resource"
 	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/expapi"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/labels"
+	"k8s.io/kubernetes/pkg/util"
+
+	heapster "k8s.io/heapster/api/v1/types"
 )
 
 const (
@@ -34,16 +39,38 @@ const (
 	heapsterService = "monitoring-heapster"
 )
 
-var resourceToMetric = map[api.ResourceName]string{
-	api.ResourceCPU: "cpu-usage",
-}
-var heapsterQueryStart, _ = time.ParseDuration("-20m")
-
 type HorizontalPodAutoscalerController struct {
 	client    *client.Client
 	expClient client.ExperimentalInterface
 }
 
+// Aggregates results into ResourceConsumption. Also returns number of
+// pods included in the aggregation.
+type metricAggregator func(heapster.MetricResultList) (expapi.ResourceConsumption, int)
+
+type metricDefinition struct {
+	name       string
+	aggregator metricAggregator
+}
+
+var resourceDefinitions = map[api.ResourceName]metricDefinition{
+	//TODO: add memory
+	api.ResourceCPU: {"cpu-usage",
+		func(metrics heapster.MetricResultList) (expapi.ResourceConsumption, int) {
+			sum, count := calculateSumFromLatestSample(metrics)
+			value := "0"
+			if count > 0 {
+				// assumes that cpu usage is in millis
+				value = fmt.Sprintf("%dm", sum/uint64(count))
+			}
+			return expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse(value)}, count
+		}},
+}
+
+var heapsterQueryStart, _ = time.ParseDuration("-5m")
+var downscaleForbiddenWindow, _ = time.ParseDuration("20m")
+var upscaleForbiddenWindow, _ = time.ParseDuration("3m")
+
 func New(client *client.Client, expClient client.ExperimentalInterface) *HorizontalPodAutoscalerController {
 	//TODO: switch to client.Interface
 	return &HorizontalPodAutoscalerController{
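To illustrate the aggregator introduced above: it averages the newest sample from each pod and renders the result in millicores. A minimal standalone sketch of that flow, with simplified stand-in types instead of the real heapster.MetricResultList (the sample values are hypothetical):

package main

import "fmt"

// Simplified stand-ins for heapster.MetricResult / MetricPoint, keeping
// only what the cpu-usage aggregator needs (values in millicores, per
// the "assumes that cpu usage is in millis" comment above).
type metricPoint struct{ value uint64 }
type metricResult struct{ points []metricPoint }

// Average the newest sample of each pod and format it as a quantity
// string such as "650m", mirroring the aggregator in resourceDefinitions.
func aggregate(items []metricResult) (string, int) {
	sum, count := uint64(0), 0
	for _, r := range items {
		if len(r.points) > 0 {
			sum += r.points[len(r.points)-1].value // newest sample last, for brevity
			count++
		}
	}
	if count == 0 {
		return "0", 0
	}
	return fmt.Sprintf("%dm", sum/uint64(count)), count
}

func main() {
	// Two pods whose newest samples are 600m and 700m -> "650m", 2 pods.
	items := []metricResult{
		{points: []metricPoint{{550}, {600}}},
		{points: []metricPoint{{700}}},
	}
	fmt.Println(aggregate(items))
}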
@@ -86,16 +113,18 @@ func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error {
 			podNames = append(podNames, pod.Name)
 		}
 
-		metric, metricDefined := resourceToMetric[hpa.Spec.Target.Resource]
+		metricSpec, metricDefined := resourceDefinitions[hpa.Spec.Target.Resource]
 		if !metricDefined {
 			glog.Warningf("Heapster metric not defined for %s %v", reference, hpa.Spec.Target.Resource)
 			continue
 		}
-		startTime := time.Now().Add(heapsterQueryStart)
+		now := time.Now()
+
+		startTime := now.Add(heapsterQueryStart)
 		metricPath := fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s",
 			hpa.Spec.ScaleRef.Namespace,
 			strings.Join(podNames, ","),
-			metric)
+			metricSpec.name)
 
 		resultRaw, err := a.client.
 			Get().
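The metricPath above targets Heapster's model API. For example, with namespace default and pods p1 and p2 (hypothetical values), it yields /api/v1/model/namespaces/default/pod-list/p1,p2/metrics/cpu-usage; the controller fetches it through the API server's service proxy to the monitoring-heapster service. A one-liner to confirm the shape:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical inputs: namespace "default", pods p1 and p2.
	fmt.Println(fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s",
		"default", strings.Join([]string{"p1", "p2"}, ","), "cpu-usage"))
	// /api/v1/model/namespaces/default/pod-list/p1,p2/metrics/cpu-usage
}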
@@ -113,7 +142,90 @@ func (a *HorizontalPodAutoscalerController) reconcileAutoscalers() error {
 			continue
 		}
 
+		var metrics heapster.MetricResultList
+		err = json.Unmarshal(resultRaw, &metrics)
+		if err != nil {
+			glog.Warningf("Failed to unmarshal heapster response: %v", err)
+			continue
+		}
+
 		glog.Infof("Metrics available for %s: %s", reference, string(resultRaw))
+
+		currentConsumption, count := metricSpec.aggregator(metrics)
+		if count != len(podList.Items) {
+			glog.Warningf("Metrics obtained for %d/%d of pods", count, len(podList.Items))
+			continue
+		}
+
+		// if the ratio is 1.2 we want to have 2 replicas
+		desiredReplicas := 1 + int((currentConsumption.Quantity.MilliValue()*int64(count))/hpa.Spec.Target.Quantity.MilliValue())
+
+		if desiredReplicas < hpa.Spec.MinCount {
+			desiredReplicas = hpa.Spec.MinCount
+		}
+		if desiredReplicas > hpa.Spec.MaxCount {
+			desiredReplicas = hpa.Spec.MaxCount
+		}
+
+		rescale := false
+
+		if desiredReplicas != count {
+			// Going down
+			if desiredReplicas < count && (hpa.Status.LastScaleTimestamp == nil ||
+				hpa.Status.LastScaleTimestamp.Add(downscaleForbiddenWindow).Before(now)) {
+				rescale = true
+			}
+
+			// Going up
+			if desiredReplicas > count && (hpa.Status.LastScaleTimestamp == nil ||
+				hpa.Status.LastScaleTimestamp.Add(upscaleForbiddenWindow).Before(now)) {
+				rescale = true
+			}
+
+			if rescale {
+				scale.Spec.Replicas = desiredReplicas
+				_, err = a.expClient.Scales(hpa.Namespace).Update(hpa.Spec.ScaleRef.Kind, scale)
+				if err != nil {
+					glog.Warningf("Failed to rescale %s: %v", reference, err)
+					continue
+				}
+			}
+		}
+
+		hpa.Status = expapi.HorizontalPodAutoscalerStatus{
+			CurrentReplicas:    count,
+			DesiredReplicas:    desiredReplicas,
+			CurrentConsumption: currentConsumption,
+		}
+		if rescale {
+			now := util.NewTime(now)
+			hpa.Status.LastScaleTimestamp = &now
+		}
+
+		_, err = a.expClient.HorizontalPodAutoscalers(hpa.Namespace).Update(&hpa)
+		if err != nil {
+			glog.Warningf("Failed to update HorizontalPodAutoscaler %s: %v", hpa.Name, err)
+			continue
+		}
 	}
 	return nil
 }
+
+func calculateSumFromLatestSample(metrics heapster.MetricResultList) (uint64, int) {
+	sum := uint64(0)
+	count := 0
+	for _, result := range metrics.Items {
+		var newest *heapster.MetricPoint
+		for _, metricPoint := range result.Metrics {
+			if newest == nil || newest.Timestamp.Before(metricPoint.Timestamp) {
+				// Copy before taking the address: &metricPoint would alias
+				// the loop variable, which is overwritten on each iteration.
+				p := metricPoint
+				newest = &p
+			}
+		}
+		if newest != nil {
+			sum += newest.Value
+			count++
+		}
+	}
+	return sum, count
+}
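A worked example of the replica arithmetic above, using the numbers from the test file whose hunks follow: an average consumption of 650m over 1 pod against a 0.3-CPU (300m) target gives desiredReplicas = 1 + floor(650*1/300) = 3, inside the [1, 5] bounds, which is exactly the DesiredReplicas value the test asserts. A self-contained sketch of the calculation:

package main

import "fmt"

// Replica count rule from the controller: one replica plus the floor of
// (average consumption * pod count) / target, clamped to [min, max].
func desiredReplicas(avgMilli, pods, targetMilli int64, min, max int) int {
	d := 1 + int((avgMilli*pods)/targetMilli)
	if d < min {
		d = min
	}
	if d > max {
		d = max
	}
	return d
}

func main() {
	// Numbers from the test below: 650m average over 1 pod,
	// target 0.3 CPU (300m), bounds [1, 5] -> 3 replicas.
	fmt.Println(desiredReplicas(650, 1, 300, 1, 5)) // 3
	// The comment's example: ratio 1.2 -> 1 + floor(1.2) = 2 replicas.
	fmt.Println(desiredReplicas(1200, 1, 1000, 1, 5)) // 2
}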
@@ -17,10 +17,12 @@ limitations under the License.
 package autoscalercontroller
 
 import (
+	"encoding/json"
 	"fmt"
 	"net/http"
 	"net/http/httptest"
 	"testing"
+	"time"
 
 	"k8s.io/kubernetes/pkg/api"
 	_ "k8s.io/kubernetes/pkg/api/latest"
@@ -32,6 +34,9 @@ import (
 	"k8s.io/kubernetes/pkg/util"
 
+	"github.com/golang/glog"
 	"github.com/stretchr/testify/assert"
+
+	heapster "k8s.io/heapster/api/v1/types"
 )
 
 const (
@@ -39,20 +44,23 @@ const (
 	rcName       = "app-rc"
 	podNameLabel = "app"
 	podName      = "p1"
-)
+	hpaName      = "foo"
 
-var target = expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.8")}
+	hpaListHandler   = "HpaList"
+	scaleHandler     = "Scale"
+	podListHandler   = "PodList"
+	heapsterHandler  = "Heapster"
+	updateHpaHandler = "HpaUpdate"
+)
 
 type serverResponse struct {
 	statusCode int
 	obj        interface{}
 }
 
-func makeTestServer(t *testing.T, hpaResponse serverResponse,
-	scaleResponse serverResponse, podListResponse serverResponse,
-	heapsterResponse serverResponse) (*httptest.Server, []*util.FakeHandler) {
+func makeTestServer(t *testing.T, responses map[string]*serverResponse) (*httptest.Server, map[string]*util.FakeHandler) {
 
-	handlers := []*util.FakeHandler{}
+	handlers := map[string]*util.FakeHandler{}
 	mux := http.NewServeMux()
 
 	mkHandler := func(url string, response serverResponse) *util.FakeHandler {
@@ -75,13 +83,29 @@ func makeTestServer(t *testing.T, hpaResponse serverResponse,
 		return &handler
 	}
 
-	handlers = append(handlers, mkHandler("/experimental/v1/horizontalpodautoscalers", hpaResponse))
-	handlers = append(handlers, mkHandler(
-		fmt.Sprintf("/experimental/v1/namespaces/%s/replicationcontrollers/%s/scale", namespace, rcName), scaleResponse))
-	handlers = append(handlers, mkHandler(fmt.Sprintf("/api/v1/namespaces/%s/pods", namespace), podListResponse))
-	handlers = append(handlers, mkRawHandler(
-		fmt.Sprintf("/api/v1/proxy/namespaces/kube-system/services/monitoring-heapster/api/v1/model/namespaces/%s/pod-list/%s/metrics/cpu-usage",
-			namespace, podName), heapsterResponse))
+	if responses[hpaListHandler] != nil {
+		handlers[hpaListHandler] = mkHandler("/experimental/v1/horizontalpodautoscalers", *responses[hpaListHandler])
+	}
+
+	if responses[scaleHandler] != nil {
+		handlers[scaleHandler] = mkHandler(
+			fmt.Sprintf("/experimental/v1/namespaces/%s/replicationcontrollers/%s/scale", namespace, rcName), *responses[scaleHandler])
+	}
+
+	if responses[podListHandler] != nil {
+		handlers[podListHandler] = mkHandler(fmt.Sprintf("/api/v1/namespaces/%s/pods", namespace), *responses[podListHandler])
+	}
+
+	if responses[heapsterHandler] != nil {
+		handlers[heapsterHandler] = mkRawHandler(
+			fmt.Sprintf("/api/v1/proxy/namespaces/kube-system/services/monitoring-heapster/api/v1/model/namespaces/%s/pod-list/%s/metrics/cpu-usage",
+				namespace, podName), *responses[heapsterHandler])
+	}
+
+	if responses[updateHpaHandler] != nil {
+		handlers[updateHpaHandler] = mkHandler(fmt.Sprintf("/experimental/v1/namespaces/%s/horizontalpodautoscalers/%s", namespace, hpaName),
+			*responses[updateHpaHandler])
+	}
 
 	mux.HandleFunc("/", func(res http.ResponseWriter, req *http.Request) {
 		t.Errorf("unexpected request: %v", req.RequestURI)
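The registration block above wires up only the endpoints a test supplies, so different tests can exercise different subsets of the API and then assert request counts per named handler. A schematic sketch of the pattern with simplified stand-in types (serverResponse is from this diff; fakeHandler stands in for util.FakeHandler):

package main

import "fmt"

type serverResponse struct {
	statusCode int
	obj        interface{}
}

// Stand-in for util.FakeHandler: just remembers its URL here.
type fakeHandler struct{ url string }

// Register handlers only for the responses provided, keyed by name,
// mirroring makeTestServer's conditional wiring above.
func wire(responses map[string]*serverResponse) map[string]*fakeHandler {
	handlers := map[string]*fakeHandler{}
	if responses["HpaList"] != nil {
		handlers["HpaList"] = &fakeHandler{url: "/experimental/v1/horizontalpodautoscalers"}
	}
	if responses["Heapster"] != nil {
		handlers["Heapster"] = &fakeHandler{url: "/api/v1/proxy/..."}
	}
	return handlers
}

func main() {
	// A test that only needs the HPA list endpoint registers just that.
	handlers := wire(map[string]*serverResponse{"HpaList": {200, nil}})
	for name, h := range handlers {
		fmt.Println(name, h.url)
	}
}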
@@ -96,7 +120,7 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
 		Items: []expapi.HorizontalPodAutoscaler{
 			{
 				ObjectMeta: api.ObjectMeta{
-					Name:      "foo",
+					Name:      hpaName,
 					Namespace: namespace,
 				},
 				Spec: expapi.HorizontalPodAutoscalerSpec{
@@ -108,20 +132,20 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
 				},
 				MinCount: 1,
 				MaxCount: 5,
-				Target:   target,
+				Target:   expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.3")},
 			},
 		}}}}
 
 	scaleResponse := serverResponse{http.StatusOK, &expapi.Scale{
 		ObjectMeta: api.ObjectMeta{
-			Name:      "rcName",
+			Name:      rcName,
 			Namespace: namespace,
 		},
 		Spec: expapi.ScaleSpec{
-			Replicas: 5,
+			Replicas: 1,
 		},
 		Status: expapi.ScaleStatus{
-			Replicas: 2,
+			Replicas: 1,
 			Selector: map[string]string{"name": podNameLabel},
 		},
 	}}
@@ -134,11 +158,49 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
 			Namespace: namespace,
 		},
 	}}}}
+	timestamp := time.Now()
+	metrics := heapster.MetricResultList{
+		Items: []heapster.MetricResult{{
+			Metrics:         []heapster.MetricPoint{{timestamp, 650}},
+			LatestTimestamp: timestamp,
+		}}}
 
-	heapsterRawResponse := "UPADTE ME"
-	heapsterResponse := serverResponse{http.StatusOK, &heapsterRawResponse}
+	updateHpaResponse := serverResponse{http.StatusOK, &expapi.HorizontalPodAutoscaler{
+
+		ObjectMeta: api.ObjectMeta{
+			Name:      hpaName,
+			Namespace: namespace,
+		},
+		Spec: expapi.HorizontalPodAutoscalerSpec{
+			ScaleRef: &expapi.SubresourceReference{
+				Kind:        "replicationController",
+				Name:        rcName,
+				Namespace:   namespace,
+				Subresource: "scale",
+			},
+			MinCount: 1,
+			MaxCount: 5,
+			Target:   expapi.ResourceConsumption{Resource: api.ResourceCPU, Quantity: resource.MustParse("0.3")},
+		},
+		Status: expapi.HorizontalPodAutoscalerStatus{
+			CurrentReplicas: 1,
+			DesiredReplicas: 3,
+		},
+	}}
+
+	heapsterRawResponse, _ := json.Marshal(&metrics)
+	heapsterStrResponse := string(heapsterRawResponse)
+	heapsterResponse := serverResponse{http.StatusOK, &heapsterStrResponse}
+
+	testServer, handlers := makeTestServer(t,
+		map[string]*serverResponse{
+			hpaListHandler:   &hpaResponse,
+			scaleHandler:     &scaleResponse,
+			podListHandler:   &podListResponse,
+			heapsterHandler:  &heapsterResponse,
+			updateHpaHandler: &updateHpaResponse,
+		})
 
-	testServer, handlers := makeTestServer(t, hpaResponse, scaleResponse, podListResponse, heapsterResponse)
 	defer testServer.Close()
 	kubeClient := client.NewOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
 	expClient := client.NewExperimentalOrDie(&client.Config{Host: testServer.URL, Version: testapi.Version()})
@@ -146,9 +208,18 @@ func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
 	hpaController := New(kubeClient, expClient)
 	err := hpaController.reconcileAutoscalers()
 	if err != nil {
-		t.Fatal("Failed to reconcile %v", err)
+		t.Fatalf("Failed to reconcile: %v", err)
 	}
 	for _, h := range handlers {
 		h.ValidateRequestCount(t, 1)
 	}
+	obj, err := expClient.Codec.Decode([]byte(handlers[updateHpaHandler].RequestBody))
+	if err != nil {
+		t.Fatalf("Failed to decode: %v", err)
+	}
+	hpa, _ := obj.(*expapi.HorizontalPodAutoscaler)
+
+	assert.Equal(t, 3, hpa.Status.DesiredReplicas)
+	assert.Equal(t, int64(650), hpa.Status.CurrentConsumption.Quantity.MilliValue())
+	assert.NotNil(t, hpa.Status.LastScaleTimestamp)
 }