Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-09-20 01:23:48 +00:00)
Remove legacy metrics client from podautoscaler
@@ -35,10 +35,6 @@ type HPAControllerConfiguration struct {
	// horizontalPodAutoscalerTolerance is the tolerance for when
	// resource usage suggests upscaling/downscaling
	HorizontalPodAutoscalerTolerance float64
	// HorizontalPodAutoscalerUseRESTClients causes the HPA controller to use REST clients
	// through the kube-aggregator when enabled, instead of using the legacy metrics client
	// through the API server proxy.
	HorizontalPodAutoscalerUseRESTClients bool
	// HorizontalPodAutoscalerCPUInitializationPeriod is the period after pod start when CPU samples
	// might be skipped.
	HorizontalPodAutoscalerCPUInitializationPeriod metav1.Duration
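For context on the HorizontalPodAutoscalerTolerance field above, a minimal sketch (not part of this diff; the function name and parameters are illustrative, and it assumes the standard "math" package) of how such a tolerance is typically applied when deciding whether an observed/target utilization ratio justifies a resize:

// withinTolerance reports whether the observed utilization is close enough to
// the target that the controller should leave the replica count unchanged.
func withinTolerance(currentUtilization, targetUtilization, tolerance float64) bool {
	usageRatio := currentUtilization / targetUtilization
	return math.Abs(1.0-usageRatio) <= tolerance
}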
@@ -21,7 +21,6 @@ import (

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubectrlmgrconfigv1alpha1 "k8s.io/kube-controller-manager/config/v1alpha1"
	utilpointer "k8s.io/utils/pointer"
)

// RecommendedDefaultHPAControllerConfiguration defaults a pointer to a
@@ -35,9 +34,6 @@ import (
// run it in your wrapper struct of this type in its `SetDefaults_` method.
func RecommendedDefaultHPAControllerConfiguration(obj *kubectrlmgrconfigv1alpha1.HPAControllerConfiguration) {
	zero := metav1.Duration{}
	if obj.HorizontalPodAutoscalerUseRESTClients == nil {
		obj.HorizontalPodAutoscalerUseRESTClients = utilpointer.BoolPtr(true)
	}
	if obj.HorizontalPodAutoscalerSyncPeriod == zero {
		obj.HorizontalPodAutoscalerSyncPeriod = metav1.Duration{Duration: 15 * time.Second}
	}
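A brief illustration of how the defaulting function shown above behaves (hypothetical caller code, not from this diff): an unset pointer field is filled in, while an explicitly set sync period is left alone.

cfg := &kubectrlmgrconfigv1alpha1.HPAControllerConfiguration{
	HorizontalPodAutoscalerSyncPeriod: metav1.Duration{Duration: 30 * time.Second},
}
RecommendedDefaultHPAControllerConfiguration(cfg)
// cfg.HorizontalPodAutoscalerUseRESTClients now points at true;
// the explicit 30s sync period is preserved because it is non-zero.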
@@ -86,9 +86,6 @@ func autoConvert_v1alpha1_HPAControllerConfiguration_To_config_HPAControllerConf
	out.HorizontalPodAutoscalerDownscaleStabilizationWindow = in.HorizontalPodAutoscalerDownscaleStabilizationWindow
	out.HorizontalPodAutoscalerDownscaleForbiddenWindow = in.HorizontalPodAutoscalerDownscaleForbiddenWindow
	out.HorizontalPodAutoscalerTolerance = in.HorizontalPodAutoscalerTolerance
	if err := v1.Convert_Pointer_bool_To_bool(&in.HorizontalPodAutoscalerUseRESTClients, &out.HorizontalPodAutoscalerUseRESTClients, s); err != nil {
		return err
	}
	out.HorizontalPodAutoscalerCPUInitializationPeriod = in.HorizontalPodAutoscalerCPUInitializationPeriod
	out.HorizontalPodAutoscalerInitialReadinessDelay = in.HorizontalPodAutoscalerInitialReadinessDelay
	return nil
@@ -100,9 +97,6 @@ func autoConvert_config_HPAControllerConfiguration_To_v1alpha1_HPAControllerConf
	out.HorizontalPodAutoscalerDownscaleForbiddenWindow = in.HorizontalPodAutoscalerDownscaleForbiddenWindow
	out.HorizontalPodAutoscalerDownscaleStabilizationWindow = in.HorizontalPodAutoscalerDownscaleStabilizationWindow
	out.HorizontalPodAutoscalerTolerance = in.HorizontalPodAutoscalerTolerance
	if err := v1.Convert_bool_To_Pointer_bool(&in.HorizontalPodAutoscalerUseRESTClients, &out.HorizontalPodAutoscalerUseRESTClients, s); err != nil {
		return err
	}
	out.HorizontalPodAutoscalerCPUInitializationPeriod = in.HorizontalPodAutoscalerCPUInitializationPeriod
	out.HorizontalPodAutoscalerInitialReadinessDelay = in.HorizontalPodAutoscalerInitialReadinessDelay
	return nil
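The generated conversions above rely on helpers such as Convert_Pointer_bool_To_bool. As a sketch of what such a pointer-to-value conversion typically does (the helper name below is hypothetical, not the generated apimachinery code itself):

// convertPointerBoolToBool copies *in into *out, treating a nil pointer as false.
func convertPointerBoolToBool(in **bool, out *bool) error {
	if *in != nil {
		*out = **in
	} else {
		*out = false
	}
	return nil
}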
@@ -1,802 +0,0 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package podautoscaler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
autoscalingv1 "k8s.io/api/autoscaling/v1"
|
||||
autoscalingv2 "k8s.io/api/autoscaling/v2beta1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
scalefake "k8s.io/client-go/scale/fake"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
|
||||
|
||||
heapster "k8s.io/heapster/metrics/api/v1/types"
|
||||
metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
_ "k8s.io/kubernetes/pkg/apis/apps/install"
|
||||
_ "k8s.io/kubernetes/pkg/apis/autoscaling/install"
|
||||
_ "k8s.io/kubernetes/pkg/apis/core/install"
|
||||
)
|
||||
|
||||
func (w fakeResponseWrapper) DoRaw(context.Context) ([]byte, error) {
|
||||
return w.raw, nil
|
||||
}
|
||||
|
||||
func (w fakeResponseWrapper) Stream(context.Context) (io.ReadCloser, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func newFakeResponseWrapper(raw []byte) fakeResponseWrapper {
|
||||
return fakeResponseWrapper{raw: raw}
|
||||
}
|
||||
|
||||
type fakeResponseWrapper struct {
|
||||
raw []byte
|
||||
}
|
||||
|
||||
type legacyTestCase struct {
|
||||
sync.Mutex
|
||||
minReplicas int32
|
||||
maxReplicas int32
|
||||
initialReplicas int32
|
||||
desiredReplicas int32
|
||||
|
||||
// CPU target utilization as a percentage of the requested resources.
|
||||
CPUTarget int32
|
||||
CPUCurrent int32
|
||||
verifyCPUCurrent bool
|
||||
reportedLevels []uint64
|
||||
reportedCPURequests []resource.Quantity
|
||||
reportedPodReadiness []v1.ConditionStatus
|
||||
scaleUpdated bool
|
||||
statusUpdated bool
|
||||
eventCreated bool
|
||||
verifyEvents bool
|
||||
useMetricsAPI bool
|
||||
metricsTarget []autoscalingv2.MetricSpec
|
||||
// Channel with names of HPA objects which we have reconciled.
|
||||
processed chan string
|
||||
|
||||
// Target resource information.
|
||||
resource *fakeResource
|
||||
|
||||
// Last scale time
|
||||
lastScaleTime *metav1.Time
|
||||
recommendations []timestampedRecommendation
|
||||
|
||||
finished bool
|
||||
}
|
||||
|
||||
// Needs to be called under a lock.
func (tc *legacyTestCase) computeCPUCurrent() {
	if len(tc.reportedLevels) != len(tc.reportedCPURequests) || len(tc.reportedLevels) == 0 {
		return
	}
	reported := 0
	for _, r := range tc.reportedLevels {
		reported += int(r)
	}
	requested := 0
	for _, req := range tc.reportedCPURequests {
		requested += int(req.MilliValue())
	}
	tc.CPUCurrent = int32(100 * reported / requested)
}
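As a worked example of computeCPUCurrent: with reportedLevels of {300, 500, 700} milli-CPU and three 1.0-CPU (1000m) requests, reported = 1500 and requested = 3000, so CPUCurrent = 100 * 1500 / 3000 = 50 percent. This is the utilization figure the scale-up tests below compare against their 30 percent target.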
|
||||
|
||||
func (tc *legacyTestCase) prepareTestClient(t *testing.T) (*fake.Clientset, *scalefake.FakeScaleClient) {
|
||||
namespace := "test-namespace"
|
||||
hpaName := "test-hpa"
|
||||
podNamePrefix := "test-pod"
|
||||
labelSet := map[string]string{"name": podNamePrefix}
|
||||
selector := labels.SelectorFromSet(labelSet).String()
|
||||
|
||||
tc.Lock()
|
||||
|
||||
tc.scaleUpdated = false
|
||||
tc.statusUpdated = false
|
||||
tc.eventCreated = false
|
||||
tc.processed = make(chan string, 100)
|
||||
if tc.CPUCurrent == 0 {
|
||||
tc.computeCPUCurrent()
|
||||
}
|
||||
|
||||
if tc.resource == nil {
|
||||
tc.resource = &fakeResource{
|
||||
name: "test-rc",
|
||||
apiVersion: "v1",
|
||||
kind: "ReplicationController",
|
||||
}
|
||||
}
|
||||
tc.Unlock()
|
||||
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.AddReactor("list", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
tc.Lock()
|
||||
defer tc.Unlock()
|
||||
|
||||
obj := &autoscalingv2.HorizontalPodAutoscalerList{
|
||||
Items: []autoscalingv2.HorizontalPodAutoscaler{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: hpaName,
|
||||
Namespace: namespace,
|
||||
SelfLink: "experimental/v1/namespaces/" + namespace + "/horizontalpodautoscalers/" + hpaName,
|
||||
},
|
||||
Spec: autoscalingv2.HorizontalPodAutoscalerSpec{
|
||||
ScaleTargetRef: autoscalingv2.CrossVersionObjectReference{
|
||||
Kind: tc.resource.kind,
|
||||
Name: tc.resource.name,
|
||||
APIVersion: tc.resource.apiVersion,
|
||||
},
|
||||
MinReplicas: &tc.minReplicas,
|
||||
MaxReplicas: tc.maxReplicas,
|
||||
},
|
||||
Status: autoscalingv2.HorizontalPodAutoscalerStatus{
|
||||
CurrentReplicas: tc.initialReplicas,
|
||||
DesiredReplicas: tc.initialReplicas,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if tc.CPUTarget > 0.0 {
|
||||
obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
|
||||
{
|
||||
Type: autoscalingv2.ResourceMetricSourceType,
|
||||
Resource: &autoscalingv2.ResourceMetricSource{
|
||||
Name: v1.ResourceCPU,
|
||||
TargetAverageUtilization: &tc.CPUTarget,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
if len(tc.metricsTarget) > 0 {
|
||||
obj.Items[0].Spec.Metrics = append(obj.Items[0].Spec.Metrics, tc.metricsTarget...)
|
||||
}
|
||||
|
||||
if len(obj.Items[0].Spec.Metrics) == 0 {
|
||||
// manually add in the defaulting logic
|
||||
obj.Items[0].Spec.Metrics = []autoscalingv2.MetricSpec{
|
||||
{
|
||||
Type: autoscalingv2.ResourceMetricSourceType,
|
||||
Resource: &autoscalingv2.ResourceMetricSource{
|
||||
Name: v1.ResourceCPU,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// and... convert to autoscaling v1 to return the right type
|
||||
objv1, err := unsafeConvertToVersionVia(obj, autoscalingv1.SchemeGroupVersion)
|
||||
if err != nil {
|
||||
return true, nil, err
|
||||
}
|
||||
|
||||
return true, objv1, nil
|
||||
})
|
||||
|
||||
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
tc.Lock()
|
||||
defer tc.Unlock()
|
||||
|
||||
obj := &v1.PodList{}
|
||||
for i := 0; i < len(tc.reportedCPURequests); i++ {
|
||||
podReadiness := v1.ConditionTrue
|
||||
if tc.reportedPodReadiness != nil {
|
||||
podReadiness = tc.reportedPodReadiness[i]
|
||||
}
|
||||
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
|
||||
pod := v1.Pod{
|
||||
Status: v1.PodStatus{
|
||||
StartTime: &metav1.Time{Time: time.Now().Add(-3 * time.Minute)},
|
||||
Phase: v1.PodRunning,
|
||||
Conditions: []v1.PodCondition{
|
||||
{
|
||||
Type: v1.PodReady,
|
||||
Status: podReadiness,
|
||||
},
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Namespace: namespace,
|
||||
Labels: map[string]string{
|
||||
"name": podNamePrefix,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: tc.reportedCPURequests[i],
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
obj.Items = append(obj.Items, pod)
|
||||
}
|
||||
return true, obj, nil
|
||||
})
|
||||
|
||||
fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
|
||||
tc.Lock()
|
||||
defer tc.Unlock()
|
||||
|
||||
var heapsterRawMemResponse []byte
|
||||
|
||||
if tc.useMetricsAPI {
|
||||
metrics := metricsapi.PodMetricsList{}
|
||||
for i, cpu := range tc.reportedLevels {
|
||||
podMetric := metricsapi.PodMetrics{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s-%d", podNamePrefix, i),
|
||||
Namespace: namespace,
|
||||
},
|
||||
Timestamp: metav1.Time{Time: time.Now()},
|
||||
Containers: []metricsapi.ContainerMetrics{
|
||||
{
|
||||
Name: "container",
|
||||
Usage: v1.ResourceList{
|
||||
v1.ResourceCPU: *resource.NewMilliQuantity(
|
||||
int64(cpu),
|
||||
resource.DecimalSI),
|
||||
v1.ResourceMemory: *resource.NewQuantity(
|
||||
int64(1024*1024),
|
||||
resource.BinarySI),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
metrics.Items = append(metrics.Items, podMetric)
|
||||
}
|
||||
heapsterRawMemResponse, _ = json.Marshal(&metrics)
|
||||
} else {
|
||||
// only return the pods that we actually asked for
|
||||
proxyAction := action.(core.ProxyGetAction)
|
||||
pathParts := strings.Split(proxyAction.GetPath(), "/")
|
||||
// pathParts should look like [ api, v1, model, namespaces, $NS, pod-list, $PODS, metrics, $METRIC... ]
|
||||
if len(pathParts) < 9 {
|
||||
return true, nil, fmt.Errorf("invalid heapster path %q", proxyAction.GetPath())
|
||||
}
|
||||
|
||||
podNames := strings.Split(pathParts[7], ",")
|
||||
podPresent := make([]bool, len(tc.reportedLevels))
|
||||
for _, name := range podNames {
|
||||
if len(name) <= len(podNamePrefix)+1 {
|
||||
return true, nil, fmt.Errorf("unknown pod %q", name)
|
||||
}
|
||||
num, err := strconv.Atoi(name[len(podNamePrefix)+1:])
|
||||
if err != nil {
|
||||
return true, nil, fmt.Errorf("unknown pod %q", name)
|
||||
}
|
||||
podPresent[num] = true
|
||||
}
|
||||
|
||||
timestamp := time.Now()
|
||||
metrics := heapster.MetricResultList{}
|
||||
for i, level := range tc.reportedLevels {
|
||||
if !podPresent[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
metric := heapster.MetricResult{
|
||||
Metrics: []heapster.MetricPoint{{Timestamp: timestamp, Value: level, FloatValue: nil}},
|
||||
LatestTimestamp: timestamp,
|
||||
}
|
||||
metrics.Items = append(metrics.Items, metric)
|
||||
}
|
||||
heapsterRawMemResponse, _ = json.Marshal(&metrics)
|
||||
}
|
||||
|
||||
return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
|
||||
})
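To make the proxy reactor above concrete: given the path layout noted in its comment, [ api, v1, model, namespaces, $NS, pod-list, $PODS, metrics, $METRIC... ], a request for the qps metric of the first two test pods would use a path along the lines of api/v1/model/namespaces/test-namespace/pod-list/test-pod-0,test-pod-1/metrics/qps (illustrative; the exact path is built by the Heapster metrics client, not by this test).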
|
||||
|
||||
fakeClient.AddReactor("update", "horizontalpodautoscalers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
obj := func() *autoscalingv1.HorizontalPodAutoscaler {
|
||||
tc.Lock()
|
||||
defer tc.Unlock()
|
||||
|
||||
obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.HorizontalPodAutoscaler)
|
||||
assert.Equal(t, namespace, obj.Namespace, "the HPA namespace should be as expected")
|
||||
assert.Equal(t, hpaName, obj.Name, "the HPA name should be as expected")
|
||||
assert.Equal(t, tc.desiredReplicas, obj.Status.DesiredReplicas, "the desired replica count reported in the object status should be as expected")
|
||||
if tc.verifyCPUCurrent {
|
||||
if assert.NotNil(t, obj.Status.CurrentCPUUtilizationPercentage, "the reported CPU utilization percentage should be non-nil") {
|
||||
assert.Equal(t, tc.CPUCurrent, *obj.Status.CurrentCPUUtilizationPercentage, "the report CPU utilization percentage should be as expected")
|
||||
}
|
||||
}
|
||||
tc.statusUpdated = true
|
||||
return obj
|
||||
}()
|
||||
// Every time we reconcile HPA object we are updating status.
|
||||
tc.processed <- obj.Name
|
||||
return true, obj, nil
|
||||
})
|
||||
|
||||
fakeScaleClient := &scalefake.FakeScaleClient{}
|
||||
fakeScaleClient.AddReactor("get", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
tc.Lock()
|
||||
defer tc.Unlock()
|
||||
|
||||
obj := &autoscalingv1.Scale{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: tc.resource.name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: autoscalingv1.ScaleSpec{
|
||||
Replicas: tc.initialReplicas,
|
||||
},
|
||||
Status: autoscalingv1.ScaleStatus{
|
||||
Replicas: tc.initialReplicas,
|
||||
Selector: selector,
|
||||
},
|
||||
}
|
||||
return true, obj, nil
|
||||
})
|
||||
|
||||
fakeScaleClient.AddReactor("get", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
tc.Lock()
|
||||
defer tc.Unlock()
|
||||
|
||||
obj := &autoscalingv1.Scale{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: tc.resource.name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: autoscalingv1.ScaleSpec{
|
||||
Replicas: tc.initialReplicas,
|
||||
},
|
||||
Status: autoscalingv1.ScaleStatus{
|
||||
Replicas: tc.initialReplicas,
|
||||
Selector: selector,
|
||||
},
|
||||
}
|
||||
return true, obj, nil
|
||||
})
|
||||
|
||||
fakeScaleClient.AddReactor("get", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
tc.Lock()
|
||||
defer tc.Unlock()
|
||||
|
||||
obj := &autoscalingv1.Scale{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: tc.resource.name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: autoscalingv1.ScaleSpec{
|
||||
Replicas: tc.initialReplicas,
|
||||
},
|
||||
Status: autoscalingv1.ScaleStatus{
|
||||
Replicas: tc.initialReplicas,
|
||||
Selector: selector,
|
||||
},
|
||||
}
|
||||
return true, obj, nil
|
||||
})
|
||||
|
||||
fakeScaleClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
tc.Lock()
|
||||
defer tc.Unlock()
|
||||
|
||||
obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
|
||||
replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
|
||||
assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the RC should be as expected")
|
||||
tc.scaleUpdated = true
|
||||
return true, obj, nil
|
||||
})
|
||||
|
||||
fakeScaleClient.AddReactor("update", "deployments", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
tc.Lock()
|
||||
defer tc.Unlock()
|
||||
|
||||
obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
|
||||
replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
|
||||
assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the deployment should be as expected")
|
||||
tc.scaleUpdated = true
|
||||
return true, obj, nil
|
||||
})
|
||||
|
||||
fakeScaleClient.AddReactor("update", "replicasets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
tc.Lock()
|
||||
defer tc.Unlock()
|
||||
|
||||
obj := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale)
|
||||
replicas := action.(core.UpdateAction).GetObject().(*autoscalingv1.Scale).Spec.Replicas
|
||||
assert.Equal(t, tc.desiredReplicas, replicas, "the replica count of the replicaset should be as expected")
|
||||
tc.scaleUpdated = true
|
||||
return true, obj, nil
|
||||
})
|
||||
|
||||
fakeWatch := watch.NewFake()
|
||||
fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))
|
||||
|
||||
return fakeClient, fakeScaleClient
|
||||
}
|
||||
|
||||
func (tc *legacyTestCase) verifyResults(t *testing.T) {
|
||||
tc.Lock()
|
||||
defer tc.Unlock()
|
||||
|
||||
assert.Equal(t, tc.initialReplicas != tc.desiredReplicas, tc.scaleUpdated, "the scale should only be updated if we expected a change in replicas")
|
||||
assert.True(t, tc.statusUpdated, "the status should have been updated")
|
||||
if tc.verifyEvents {
|
||||
assert.Equal(t, tc.initialReplicas != tc.desiredReplicas, tc.eventCreated, "an event should have been created only if we expected a change in replicas")
|
||||
}
|
||||
}
|
||||
|
||||
func (tc *legacyTestCase) runTest(t *testing.T) {
|
||||
testClient, testScaleClient := tc.prepareTestClient(t)
|
||||
metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
|
||||
eventClient := &fake.Clientset{}
|
||||
eventClient.AddReactor("*", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
tc.Lock()
|
||||
defer tc.Unlock()
|
||||
|
||||
if tc.finished {
|
||||
return true, &v1.Event{}, nil
|
||||
}
|
||||
create, ok := action.(core.CreateAction)
|
||||
if !ok {
|
||||
return false, nil, nil
|
||||
}
|
||||
obj := create.GetObject().(*v1.Event)
|
||||
if tc.verifyEvents {
|
||||
switch obj.Reason {
|
||||
case "SuccessfulRescale":
|
||||
assert.Equal(t, fmt.Sprintf("New size: %d; reason: cpu resource utilization (percentage of request) above target", tc.desiredReplicas), obj.Message)
|
||||
case "DesiredReplicasComputed":
|
||||
assert.Equal(t, fmt.Sprintf(
|
||||
"Computed the desired num of replicas: %d (avgCPUutil: %d, current replicas: %d)",
|
||||
tc.desiredReplicas,
|
||||
(int64(tc.reportedLevels[0])*100)/tc.reportedCPURequests[0].MilliValue(), tc.initialReplicas), obj.Message)
|
||||
default:
|
||||
assert.False(t, true, fmt.Sprintf("Unexpected event: %s / %s", obj.Reason, obj.Message))
|
||||
}
|
||||
}
|
||||
tc.eventCreated = true
|
||||
return true, obj, nil
|
||||
})
|
||||
|
||||
informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
|
||||
defaultDownscaleStabilisationWindow := 5 * time.Minute
|
||||
|
||||
hpaController := NewHorizontalController(
|
||||
eventClient.CoreV1(),
|
||||
testScaleClient,
|
||||
testClient.AutoscalingV1(),
|
||||
testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme),
|
||||
metricsClient,
|
||||
informerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
|
||||
informerFactory.Core().V1().Pods(),
|
||||
controller.NoResyncPeriodFunc(),
|
||||
defaultDownscaleStabilisationWindow,
|
||||
defaultTestingTolerance,
|
||||
defaultTestingCPUInitializationPeriod,
|
||||
defaultTestingDelayOfInitialReadinessStatus,
|
||||
)
|
||||
hpaController.hpaListerSynced = alwaysReady
|
||||
|
||||
if tc.recommendations != nil {
|
||||
hpaController.recommendations["test-namespace/test-hpa"] = tc.recommendations
|
||||
}
|
||||
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
informerFactory.Start(stop)
|
||||
go hpaController.Run(stop)
|
||||
|
||||
// Wait for HPA to be processed.
|
||||
<-tc.processed
|
||||
tc.Lock()
|
||||
tc.finished = true
|
||||
if tc.verifyEvents {
|
||||
tc.Unlock()
|
||||
// We need to wait for events to be broadcasted (sleep for longer than record.sleepDuration).
|
||||
time.Sleep(2 * time.Second)
|
||||
} else {
|
||||
tc.Unlock()
|
||||
}
|
||||
tc.verifyResults(t)
|
||||
|
||||
}
|
||||
|
||||
func TestLegacyScaleUp(t *testing.T) {
	tc := legacyTestCase{
		minReplicas:         2,
		maxReplicas:         6,
		initialReplicas:     3,
		desiredReplicas:     5,
		CPUTarget:           30,
		verifyCPUCurrent:    true,
		reportedLevels:      []uint64{300, 500, 700},
		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
		useMetricsAPI:       true,
	}
	tc.runTest(t)
}
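The expected replica count in this case follows the usual HPA proportionality: observed utilization is 50 percent (see computeCPUCurrent above) against a 30 percent target, so the desired count is ceil(3 * 50 / 30) = 5, matching desiredReplicas.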
|
||||
|
||||
func TestLegacyScaleUpUnreadyLessScale(t *testing.T) {
|
||||
tc := legacyTestCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 3,
|
||||
desiredReplicas: 4,
|
||||
CPUTarget: 30,
|
||||
verifyCPUCurrent: false,
|
||||
reportedLevels: []uint64{300, 500, 700},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
|
||||
useMetricsAPI: true,
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyScaleUpUnreadyNoScale(t *testing.T) {
|
||||
tc := legacyTestCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 3,
|
||||
desiredReplicas: 3,
|
||||
CPUTarget: 30,
|
||||
CPUCurrent: 40,
|
||||
verifyCPUCurrent: true,
|
||||
reportedLevels: []uint64{400, 500, 700},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
|
||||
useMetricsAPI: true,
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyScaleUpDeployment(t *testing.T) {
|
||||
tc := legacyTestCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 3,
|
||||
desiredReplicas: 5,
|
||||
CPUTarget: 30,
|
||||
verifyCPUCurrent: true,
|
||||
reportedLevels: []uint64{300, 500, 700},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
useMetricsAPI: true,
|
||||
resource: &fakeResource{
|
||||
name: "test-dep",
|
||||
apiVersion: "apps/v1",
|
||||
kind: "Deployment",
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyScaleUpReplicaSet(t *testing.T) {
|
||||
tc := legacyTestCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 3,
|
||||
desiredReplicas: 5,
|
||||
CPUTarget: 30,
|
||||
verifyCPUCurrent: true,
|
||||
reportedLevels: []uint64{300, 500, 700},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
useMetricsAPI: true,
|
||||
resource: &fakeResource{
|
||||
name: "test-replicaset",
|
||||
apiVersion: "apps/v1",
|
||||
kind: "ReplicaSet",
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyScaleUpCM(t *testing.T) {
|
||||
tc := legacyTestCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 3,
|
||||
desiredReplicas: 4,
|
||||
CPUTarget: 0,
|
||||
metricsTarget: []autoscalingv2.MetricSpec{
|
||||
{
|
||||
Type: autoscalingv2.PodsMetricSourceType,
|
||||
Pods: &autoscalingv2.PodsMetricSource{
|
||||
MetricName: "qps",
|
||||
TargetAverageValue: resource.MustParse("15.0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
reportedLevels: []uint64{20, 10, 30},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyScaleUpCMUnreadyNoLessScale(t *testing.T) {
|
||||
tc := legacyTestCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 3,
|
||||
desiredReplicas: 6,
|
||||
CPUTarget: 0,
|
||||
metricsTarget: []autoscalingv2.MetricSpec{
|
||||
{
|
||||
Type: autoscalingv2.PodsMetricSourceType,
|
||||
Pods: &autoscalingv2.PodsMetricSource{
|
||||
MetricName: "qps",
|
||||
TargetAverageValue: resource.MustParse("15.0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
reportedLevels: []uint64{50, 10, 30},
|
||||
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
|
||||
tc := legacyTestCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 3,
|
||||
desiredReplicas: 6,
|
||||
CPUTarget: 0,
|
||||
metricsTarget: []autoscalingv2.MetricSpec{
|
||||
{
|
||||
Type: autoscalingv2.PodsMetricSourceType,
|
||||
Pods: &autoscalingv2.PodsMetricSource{
|
||||
MetricName: "qps",
|
||||
TargetAverageValue: resource.MustParse("15.0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
reportedLevels: []uint64{50, 15, 30},
|
||||
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyScaleDown(t *testing.T) {
|
||||
tc := legacyTestCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 5,
|
||||
desiredReplicas: 3,
|
||||
CPUTarget: 50,
|
||||
verifyCPUCurrent: true,
|
||||
reportedLevels: []uint64{100, 300, 500, 250, 250},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
useMetricsAPI: true,
|
||||
recommendations: []timestampedRecommendation{},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyScaleDownCM(t *testing.T) {
|
||||
tc := legacyTestCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 5,
|
||||
desiredReplicas: 3,
|
||||
CPUTarget: 0,
|
||||
metricsTarget: []autoscalingv2.MetricSpec{
|
||||
{
|
||||
Type: autoscalingv2.PodsMetricSourceType,
|
||||
Pods: &autoscalingv2.PodsMetricSource{
|
||||
MetricName: "qps",
|
||||
TargetAverageValue: resource.MustParse("20.0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
reportedLevels: []uint64{12, 12, 12, 12, 12},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
recommendations: []timestampedRecommendation{},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyScaleDownIgnoresUnreadyPods(t *testing.T) {
|
||||
tc := legacyTestCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 5,
|
||||
desiredReplicas: 2,
|
||||
CPUTarget: 50,
|
||||
CPUCurrent: 30,
|
||||
verifyCPUCurrent: true,
|
||||
reportedLevels: []uint64{100, 300, 500, 250, 250},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
useMetricsAPI: true,
|
||||
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
|
||||
recommendations: []timestampedRecommendation{},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacySuperfluousMetrics(t *testing.T) {
|
||||
tc := legacyTestCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 4,
|
||||
desiredReplicas: 6,
|
||||
CPUTarget: 100,
|
||||
reportedLevels: []uint64{4000, 9500, 3000, 7000, 3200, 2000},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
useMetricsAPI: true,
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyScaleUpRCImmediately(t *testing.T) {
|
||||
time := metav1.Time{Time: time.Now()}
|
||||
tc := legacyTestCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 6,
|
||||
initialReplicas: 1,
|
||||
desiredReplicas: 2,
|
||||
verifyCPUCurrent: false,
|
||||
reportedLevels: []uint64{0, 0, 0, 0},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
useMetricsAPI: true,
|
||||
lastScaleTime: &time,
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyScaleDownRCImmediately(t *testing.T) {
|
||||
time := metav1.Time{Time: time.Now()}
|
||||
tc := legacyTestCase{
|
||||
minReplicas: 2,
|
||||
maxReplicas: 5,
|
||||
initialReplicas: 6,
|
||||
desiredReplicas: 5,
|
||||
CPUTarget: 50,
|
||||
reportedLevels: []uint64{8000, 9500, 1000},
|
||||
reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
|
||||
useMetricsAPI: true,
|
||||
lastScaleTime: &time,
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
// TODO: add more tests
|
@@ -1,670 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package podautoscaler
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math"
|
||||
"strconv"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
core "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
|
||||
|
||||
heapster "k8s.io/heapster/metrics/api/v1/types"
|
||||
metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type legacyReplicaCalcTestCase struct {
|
||||
currentReplicas int32
|
||||
expectedReplicas int32
|
||||
expectedError error
|
||||
|
||||
timestamp time.Time
|
||||
|
||||
resource *resourceInfo
|
||||
metric *metricInfo
|
||||
|
||||
podReadiness []v1.ConditionStatus
|
||||
}
|
||||
|
||||
func (tc *legacyReplicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
|
||||
|
||||
fakeClient := &fake.Clientset{}
|
||||
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||
obj := &v1.PodList{}
|
||||
for i := 0; i < int(tc.currentReplicas); i++ {
|
||||
podReadiness := v1.ConditionTrue
|
||||
if tc.podReadiness != nil {
|
||||
podReadiness = tc.podReadiness[i]
|
||||
}
|
||||
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
|
||||
pod := v1.Pod{
|
||||
Status: v1.PodStatus{
|
||||
Phase: v1.PodRunning,
|
||||
StartTime: &metav1.Time{Time: time.Now().Add(-3 * time.Minute)},
|
||||
Conditions: []v1.PodCondition{
|
||||
{
|
||||
Type: v1.PodReady,
|
||||
Status: podReadiness,
|
||||
},
|
||||
},
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Namespace: testNamespace,
|
||||
Labels: map[string]string{
|
||||
"name": podNamePrefix,
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{}, {}},
|
||||
},
|
||||
}
|
||||
|
||||
if tc.resource != nil && i < len(tc.resource.requests) {
|
||||
pod.Spec.Containers[0].Resources = v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
tc.resource.name: tc.resource.requests[i],
|
||||
},
|
||||
}
|
||||
pod.Spec.Containers[1].Resources = v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
tc.resource.name: tc.resource.requests[i],
|
||||
},
|
||||
}
|
||||
}
|
||||
obj.Items = append(obj.Items, pod)
|
||||
}
|
||||
return true, obj, nil
|
||||
})
|
||||
|
||||
fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
|
||||
var heapsterRawMemResponse []byte
|
||||
|
||||
if tc.resource != nil {
|
||||
metrics := metricsapi.PodMetricsList{}
|
||||
for i, resValue := range tc.resource.levels {
|
||||
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
|
||||
if len(tc.resource.podNames) > i {
|
||||
podName = tc.resource.podNames[i]
|
||||
}
|
||||
podMetric := metricsapi.PodMetrics{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Namespace: testNamespace,
|
||||
},
|
||||
Timestamp: metav1.Time{Time: tc.timestamp},
|
||||
Containers: make([]metricsapi.ContainerMetrics, numContainersPerPod),
|
||||
}
|
||||
for i, m := range resValue {
|
||||
podMetric.Containers[i] = metricsapi.ContainerMetrics{
|
||||
Name: fmt.Sprintf("container%v", i),
|
||||
Usage: v1.ResourceList{
|
||||
tc.resource.name: *resource.NewMilliQuantity(m, resource.DecimalSI),
|
||||
},
|
||||
}
|
||||
}
|
||||
metrics.Items = append(metrics.Items, podMetric)
|
||||
}
|
||||
heapsterRawMemResponse, _ = json.Marshal(&metrics)
|
||||
} else {
|
||||
// only return the pods that we actually asked for
|
||||
proxyAction := action.(core.ProxyGetAction)
|
||||
pathParts := strings.Split(proxyAction.GetPath(), "/")
|
||||
// pathParts should look like [ api, v1, model, namespaces, $NS, pod-list, $PODS, metrics, $METRIC... ]
|
||||
if len(pathParts) < 9 {
|
||||
return true, nil, fmt.Errorf("invalid heapster path %q", proxyAction.GetPath())
|
||||
}
|
||||
|
||||
podNames := strings.Split(pathParts[7], ",")
|
||||
podPresent := make([]bool, len(tc.metric.levels))
|
||||
for _, name := range podNames {
|
||||
if len(name) <= len(podNamePrefix)+1 {
|
||||
return true, nil, fmt.Errorf("unknown pod %q", name)
|
||||
}
|
||||
num, err := strconv.Atoi(name[len(podNamePrefix)+1:])
|
||||
if err != nil {
|
||||
return true, nil, fmt.Errorf("unknown pod %q", name)
|
||||
}
|
||||
podPresent[num] = true
|
||||
}
|
||||
|
||||
timestamp := tc.timestamp
|
||||
metrics := heapster.MetricResultList{}
|
||||
for i, level := range tc.metric.levels {
|
||||
if !podPresent[i] {
|
||||
continue
|
||||
}
|
||||
|
||||
floatVal := float64(tc.metric.levels[i]) / 1000.0
|
||||
metric := heapster.MetricResult{
|
||||
Metrics: []heapster.MetricPoint{{Timestamp: timestamp, Value: uint64(level), FloatValue: &floatVal}},
|
||||
LatestTimestamp: timestamp,
|
||||
}
|
||||
metrics.Items = append(metrics.Items, metric)
|
||||
}
|
||||
heapsterRawMemResponse, _ = json.Marshal(&metrics)
|
||||
}
|
||||
|
||||
return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
|
||||
})
|
||||
|
||||
return fakeClient
|
||||
}
|
||||
|
||||
func (tc *legacyReplicaCalcTestCase) runTest(t *testing.T) {
|
||||
testClient := tc.prepareTestClient(t)
|
||||
metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
|
||||
|
||||
informerFactory := informers.NewSharedInformerFactory(testClient, controller.NoResyncPeriodFunc())
|
||||
informer := informerFactory.Core().V1().Pods()
|
||||
|
||||
replicaCalc := NewReplicaCalculator(metricsClient, informer.Lister(), defaultTestingTolerance, defaultTestingCPUInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
|
||||
|
||||
stop := make(chan struct{})
|
||||
defer close(stop)
|
||||
informerFactory.Start(stop)
|
||||
if !cache.WaitForNamedCacheSync("HPA", stop, informer.Informer().HasSynced) {
|
||||
return
|
||||
}
|
||||
|
||||
selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{"name": podNamePrefix},
|
||||
})
|
||||
if err != nil {
|
||||
require.Nil(t, err, "something went horribly wrong...")
|
||||
}
|
||||
|
||||
if tc.resource != nil {
|
||||
outReplicas, outUtilization, outRawValue, outTimestamp, err := replicaCalc.GetResourceReplicas(tc.currentReplicas, tc.resource.targetUtilization, tc.resource.name, testNamespace, selector, "")
|
||||
|
||||
if tc.expectedError != nil {
|
||||
require.Error(t, err, "there should be an error calculating the replica count")
|
||||
assert.Contains(t, err.Error(), tc.expectedError.Error(), "the error message should have contained the expected error message")
|
||||
return
|
||||
}
|
||||
require.NoError(t, err, "there should not have been an error calculating the replica count")
|
||||
assert.Equal(t, tc.expectedReplicas, outReplicas, "replicas should be as expected")
|
||||
assert.Equal(t, tc.resource.expectedUtilization, outUtilization, "utilization should be as expected")
|
||||
assert.Equal(t, tc.resource.expectedValue, outRawValue, "raw value should be as expected")
|
||||
assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
|
||||
|
||||
} else {
|
||||
outReplicas, outUtilization, outTimestamp, err := replicaCalc.GetMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, selector, nil)
|
||||
|
||||
if tc.expectedError != nil {
|
||||
require.Error(t, err, "there should be an error calculating the replica count")
|
||||
assert.Contains(t, err.Error(), tc.expectedError.Error(), "the error message should have contained the expected error message")
|
||||
return
|
||||
}
|
||||
require.NoError(t, err, "there should not have been an error calculating the replica count")
|
||||
assert.Equal(t, tc.expectedReplicas, outReplicas, "replicas should be as expected")
|
||||
assert.Equal(t, tc.metric.expectedUtilization, outUtilization, "utilization should be as expected")
|
||||
assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcDisjointResourcesMetrics(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 1,
|
||||
expectedError: fmt.Errorf("no metrics returned matched known pods"),
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(100),
|
||||
podNames: []string{"an-older-pod-name"},
|
||||
|
||||
targetUtilization: 100,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcScaleUp(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
expectedReplicas: 5,
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(300, 500, 700),
|
||||
|
||||
targetUtilization: 30,
|
||||
expectedUtilization: 50,
|
||||
expectedValue: numContainersPerPod * 500,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
expectedReplicas: 4,
|
||||
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(300, 500, 700),
|
||||
|
||||
targetUtilization: 30,
|
||||
expectedUtilization: 60,
|
||||
expectedValue: numContainersPerPod * 600,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
expectedReplicas: 3,
|
||||
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(400, 500, 700),
|
||||
|
||||
targetUtilization: 30,
|
||||
expectedUtilization: 40,
|
||||
expectedValue: numContainersPerPod * 400,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcScaleUpCM(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
expectedReplicas: 4,
|
||||
metric: &metricInfo{
|
||||
name: "qps",
|
||||
levels: []int64{20000, 10000, 30000},
|
||||
targetUtilization: 15000,
|
||||
expectedUtilization: 20000,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcScaleUpCMUnreadyNoLessScale(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
expectedReplicas: 6,
|
||||
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
|
||||
metric: &metricInfo{
|
||||
name: "qps",
|
||||
levels: []int64{50000, 10000, 30000},
|
||||
targetUtilization: 15000,
|
||||
expectedUtilization: 30000,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcScaleUpCMUnreadyScale(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
expectedReplicas: 7,
|
||||
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
|
||||
metric: &metricInfo{
|
||||
name: "qps",
|
||||
levels: []int64{50000, 15000, 30000},
|
||||
targetUtilization: 15000,
|
||||
expectedUtilization: 31666,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcScaleDown(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 5,
|
||||
expectedReplicas: 3,
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(100, 300, 500, 250, 250),
|
||||
|
||||
targetUtilization: 50,
|
||||
expectedUtilization: 28,
|
||||
expectedValue: numContainersPerPod * 280,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcScaleDownCM(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 5,
|
||||
expectedReplicas: 3,
|
||||
metric: &metricInfo{
|
||||
name: "qps",
|
||||
levels: []int64{12000, 12000, 12000, 12000, 12000},
|
||||
targetUtilization: 20000,
|
||||
expectedUtilization: 12000,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 5,
|
||||
expectedReplicas: 2,
|
||||
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(100, 300, 500, 250, 250),
|
||||
|
||||
targetUtilization: 50,
|
||||
expectedUtilization: 30,
|
||||
expectedValue: numContainersPerPod * 300,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcTolerance(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
expectedReplicas: 3,
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
|
||||
levels: makePodMetricLevels(1010, 1030, 1020),
|
||||
|
||||
targetUtilization: 100,
|
||||
expectedUtilization: 102,
|
||||
expectedValue: numContainersPerPod * 1020,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcToleranceCM(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
expectedReplicas: 3,
|
||||
metric: &metricInfo{
|
||||
name: "qps",
|
||||
levels: []int64{20000, 21000, 21000},
|
||||
targetUtilization: 20000,
|
||||
expectedUtilization: 20666,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcSuperfluousMetrics(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 4,
|
||||
expectedReplicas: 24,
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(4000, 9500, 3000, 7000, 3200, 2000),
|
||||
targetUtilization: 100,
|
||||
expectedUtilization: 587,
|
||||
expectedValue: numContainersPerPod * 5875,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcMissingMetrics(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 4,
|
||||
expectedReplicas: 3,
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(400, 95),
|
||||
|
||||
targetUtilization: 100,
|
||||
expectedUtilization: 24,
|
||||
expectedValue: 495, // numContainersPerPod * 247, for sufficiently large values of 247
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcEmptyMetrics(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 4,
|
||||
expectedError: fmt.Errorf("unable to get metrics for resource cpu: no metrics returned from heapster"),
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(),
|
||||
|
||||
targetUtilization: 100,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcEmptyCPURequest(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 1,
|
||||
expectedError: fmt.Errorf("missing request for"),
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{},
|
||||
levels: makePodMetricLevels(200),
|
||||
|
||||
targetUtilization: 100,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 2,
|
||||
expectedReplicas: 2,
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(1000),
|
||||
|
||||
targetUtilization: 100,
|
||||
expectedUtilization: 100,
|
||||
expectedValue: numContainersPerPod * 1000,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcMissingMetricsNoChangeGt(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 2,
|
||||
expectedReplicas: 2,
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(1900),
|
||||
|
||||
targetUtilization: 100,
|
||||
expectedUtilization: 190,
|
||||
expectedValue: numContainersPerPod * 1900,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 2,
|
||||
expectedReplicas: 2,
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(600),
|
||||
|
||||
targetUtilization: 100,
|
||||
expectedUtilization: 60,
|
||||
expectedValue: numContainersPerPod * 600,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
expectedReplicas: 3,
|
||||
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(100, 450),
|
||||
|
||||
targetUtilization: 50,
|
||||
expectedUtilization: 45,
|
||||
expectedValue: numContainersPerPod * 450,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
expectedReplicas: 4,
|
||||
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(100, 2000),
|
||||
|
||||
targetUtilization: 50,
|
||||
expectedUtilization: 200,
|
||||
expectedValue: numContainersPerPod * 2000,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestLegacyReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: 4,
|
||||
expectedReplicas: 3,
|
||||
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue},
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(100, 100, 100),
|
||||
|
||||
targetUtilization: 50,
|
||||
expectedUtilization: 10,
|
||||
expectedValue: numContainersPerPod * 100,
|
||||
},
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
// TestComputedToleranceAlgImplementation is a regression test which
|
||||
// back-calculates a minimal percentage for downscaling based on a small percentage
|
||||
// increase in pod utilization which is calibrated against the tolerance value.
|
||||
func TestLegacyReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
|
||||
|
||||
startPods := int32(10)
|
||||
// 150 mCPU per pod.
|
||||
totalUsedCPUOfAllPods := int64(startPods * 150)
|
||||
// Each pod starts out asking for 2X what is really needed.
|
||||
// This means we will have a 50% ratio of used/requested
|
||||
totalRequestedCPUOfAllPods := int32(2 * totalUsedCPUOfAllPods)
|
||||
requestedToUsed := float64(totalRequestedCPUOfAllPods / int32(totalUsedCPUOfAllPods))
|
||||
// Spread the amount we ask over 10 pods. We can add some jitter later in reportedLevels.
|
||||
perPodRequested := totalRequestedCPUOfAllPods / startPods
|
||||
|
||||
// Force a minimal scaling event by satisfying (tolerance < 1 - resourcesUsedRatio).
|
||||
target := math.Abs(1/(requestedToUsed*(1-defaultTestingTolerance))) + .01
|
||||
finalCPUPercentTarget := int32(target * 100)
|
||||
resourcesUsedRatio := float64(totalUsedCPUOfAllPods) / float64(float64(totalRequestedCPUOfAllPods)*target)
|
||||
|
||||
// i.e. .60 * 20 -> scaled down expectation.
|
||||
finalPods := int32(math.Ceil(resourcesUsedRatio * float64(startPods)))
|
||||
|
||||
// To breach tolerance we will create a utilization ratio difference of tolerance to usageRatioToleranceValue)
|
||||
tc := legacyReplicaCalcTestCase{
|
||||
currentReplicas: startPods,
|
||||
expectedReplicas: finalPods,
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
levels: makePodMetricLevels(
|
||||
totalUsedCPUOfAllPods/10,
|
||||
totalUsedCPUOfAllPods/10,
|
||||
totalUsedCPUOfAllPods/10,
|
||||
totalUsedCPUOfAllPods/10,
|
||||
totalUsedCPUOfAllPods/10,
|
||||
totalUsedCPUOfAllPods/10,
|
||||
totalUsedCPUOfAllPods/10,
|
||||
totalUsedCPUOfAllPods/10,
|
||||
totalUsedCPUOfAllPods/10,
|
||||
totalUsedCPUOfAllPods/10,
|
||||
),
|
||||
requests: []resource.Quantity{
|
||||
resource.MustParse(fmt.Sprint(perPodRequested+100) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested-100) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested+10) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested-10) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested+2) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested-2) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested+1) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested-1) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
|
||||
resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
|
||||
},
|
||||
|
||||
targetUtilization: finalCPUPercentTarget,
|
||||
expectedUtilization: int32(totalUsedCPUOfAllPods*100) / totalRequestedCPUOfAllPods,
|
||||
expectedValue: numContainersPerPod * totalUsedCPUOfAllPods / 10,
|
||||
},
|
||||
}
|
||||
|
||||
tc.runTest(t)
|
||||
|
||||
// Reuse the data structure above, now testing "unscaling".
|
||||
// Now, we test that no scaling happens if we are in a very close margin to the tolerance
|
||||
target = math.Abs(1/(requestedToUsed*(1-defaultTestingTolerance))) + .004
|
||||
finalCPUPercentTarget = int32(target * 100)
|
||||
tc.resource.targetUtilization = finalCPUPercentTarget
|
||||
tc.currentReplicas = startPods
|
||||
tc.expectedReplicas = startPods
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
// TODO: add more tests
|
@@ -42,6 +42,14 @@ import (
    "github.com/stretchr/testify/assert"
)

var fixedTimestamp = time.Date(2015, time.November, 10, 12, 30, 0, 0, time.UTC)

// timestamp is used for establishing order on metricPoints
type metricPoint struct {
    level     uint64
    timestamp int
}

type restClientTestCase struct {
    desiredMetricValues PodMetricsInfo
    desiredError        error
@@ -424,3 +432,7 @@ func TestRESTClientContainerCPUEmptyMetricsForOnePod(t *testing.T) {
    }
    tc.runTest(t)
}

func offsetTimestampBy(t int) time.Time {
    return fixedTimestamp.Add(time.Duration(t) * time.Minute)
}
@@ -1,229 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
    "context"
    "encoding/json"
    "fmt"
    "strings"
    "time"

    heapster "k8s.io/heapster/metrics/api/v1/types"
    "k8s.io/klog/v2"
    metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"

    autoscaling "k8s.io/api/autoscaling/v2beta2"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    clientset "k8s.io/client-go/kubernetes"
    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
)

const (
    DefaultHeapsterNamespace = "kube-system"
    DefaultHeapsterScheme    = "http"
    DefaultHeapsterService   = "heapster"
    DefaultHeapsterPort      = "" // use the first exposed port on the service
    heapsterDefaultMetricWindow = time.Minute
)

var heapsterQueryStart = -5 * time.Minute
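// heapsterQueryStart bounds the lookback window used by GetRawMetric below: raw
// samples are requested starting five minutes before the time of the query.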

type HeapsterMetricsClient struct {
    services        v1core.ServiceInterface
    podsGetter      v1core.PodsGetter
    heapsterScheme  string
    heapsterService string
    heapsterPort    string
}

func NewHeapsterMetricsClient(client clientset.Interface, namespace, scheme, service, port string) MetricsClient {
    return &HeapsterMetricsClient{
        services:        client.CoreV1().Services(namespace),
        podsGetter:      client.CoreV1(),
        heapsterScheme:  scheme,
        heapsterService: service,
        heapsterPort:    port,
    }
}
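
// Example wiring (a minimal sketch; kubeClient stands in for an existing
// clientset.Interface and is not defined in this file):
//
//     metricsClient := NewHeapsterMetricsClient(kubeClient,
//         DefaultHeapsterNamespace, DefaultHeapsterScheme,
//         DefaultHeapsterService, DefaultHeapsterPort)
//
// The returned client reaches Heapster through the API server's service proxy,
// as GetResourceMetric and GetRawMetric below show.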

func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector, container string) (PodMetricsInfo, time.Time, error) {
    metricPath := fmt.Sprintf("/apis/metrics/v1alpha1/namespaces/%s/pods", namespace)
    params := map[string]string{"labelSelector": selector.String()}

    resultRaw, err := h.services.
        ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, params).
        DoRaw(context.TODO())
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to get pod resource metrics: %v", err)
    }

    klog.V(8).Infof("Heapster metrics result: %s", string(resultRaw))

    metrics := metricsapi.PodMetricsList{}
    err = json.Unmarshal(resultRaw, &metrics)
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
    }

    if len(metrics.Items) == 0 {
        return nil, time.Time{}, fmt.Errorf("no metrics returned from heapster")
    }

    res := make(PodMetricsInfo, len(metrics.Items))

    for _, m := range metrics.Items {
        podSum := int64(0)
        missing := len(m.Containers) == 0
        for _, c := range m.Containers {
            if container == "" || container == c.Name {
                resValue, found := c.Usage[v1.ResourceName(resource)]
                if !found {
                    missing = true
                    klog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
                    continue
                }
                podSum += resValue.MilliValue()
            }
        }

        if !missing {
            res[m.Name] = PodMetric{
                Timestamp: m.Timestamp.Time,
                Window:    m.Window.Duration,
                Value:     int64(podSum),
            }
        }
    }

    timestamp := metrics.Items[0].Timestamp.Time

    return res, timestamp, nil
}
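
// GetResourceMetric (above) sums the requested resource's milli-value across each
// pod's matching containers; a pod is dropped from the result if any of its
// containers is missing the metric, and the returned timestamp is taken from the
// first item Heapster reports.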

func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector, metricSelector labels.Selector) (PodMetricsInfo, time.Time, error) {
    podList, err := h.podsGetter.Pods(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to get pod list while fetching metrics: %v", err)
    }

    if len(podList.Items) == 0 {
        return nil, time.Time{}, fmt.Errorf("no pods matched the provided selector")
    }

    podNames := make([]string, len(podList.Items))
    for i, pod := range podList.Items {
        podNames[i] = pod.Name
    }

    now := time.Now()

    startTime := now.Add(heapsterQueryStart)
    metricPath := fmt.Sprintf("/api/v1/model/namespaces/%s/pod-list/%s/metrics/%s",
        namespace,
        strings.Join(podNames, ","),
        metricName)

    resultRaw, err := h.services.
        ProxyGet(h.heapsterScheme, h.heapsterService, h.heapsterPort, metricPath, map[string]string{"start": startTime.Format(time.RFC3339)}).
        DoRaw(context.TODO())
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to get pod metrics: %v", err)
    }

    var metrics heapster.MetricResultList
    err = json.Unmarshal(resultRaw, &metrics)
    if err != nil {
        return nil, time.Time{}, fmt.Errorf("failed to unmarshal heapster response: %v", err)
    }

    klog.V(4).Infof("Heapster metrics result: %s", string(resultRaw))

    if len(metrics.Items) != len(podNames) {
        // if we get too many metrics or too few metrics, we have no way of knowing which metric goes to which pod
        // (note that Heapster returns *empty* metric items when a pod does not exist or does not have that metric, so this
        // does not cover the "missing metric entry" case)
        return nil, time.Time{}, fmt.Errorf("requested metrics for %v pods, got metrics for %v", len(podNames), len(metrics.Items))
    }

    var timestamp *time.Time
    res := make(PodMetricsInfo, len(metrics.Items))
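    // Results are keyed by podNames[i]: this relies on Heapster returning one
    // (possibly empty) MetricResult per requested pod, in the same order as the
    // pod-list segment of the URL; the length check above only enforces the count.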
    for i, podMetrics := range metrics.Items {
        val, podTimestamp, hadMetrics := collapseTimeSamples(podMetrics, time.Minute)
        if hadMetrics {
            res[podNames[i]] = PodMetric{
                Timestamp: podTimestamp,
                Window:    heapsterDefaultMetricWindow,
                Value:     int64(val),
            }

            if timestamp == nil || podTimestamp.Before(*timestamp) {
                timestamp = &podTimestamp
            }
        }
    }

    if timestamp == nil {
        timestamp = &time.Time{}
    }

    return res, *timestamp, nil
}

func (h *HeapsterMetricsClient) GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference, metricSelector labels.Selector) (int64, time.Time, error) {
    return 0, time.Time{}, fmt.Errorf("object metrics are not yet supported")
}

func (h *HeapsterMetricsClient) GetExternalMetric(metricName, namespace string, selector labels.Selector) ([]int64, time.Time, error) {
    return nil, time.Time{}, fmt.Errorf("external metrics aren't supported")
}

func collapseTimeSamples(metrics heapster.MetricResult, duration time.Duration) (int64, time.Time, bool) {
    floatSum := float64(0)
    intSum := int64(0)
    intSumCount := 0
    floatSumCount := 0

    var newest *heapster.MetricPoint // creation time of the newest sample for this pod
    for i, metricPoint := range metrics.Metrics {
        if newest == nil || newest.Timestamp.Before(metricPoint.Timestamp) {
            newest = &metrics.Metrics[i]
        }
    }
    if newest != nil {
        for _, metricPoint := range metrics.Metrics {
            if metricPoint.Timestamp.Add(duration).After(newest.Timestamp) {
                intSum += int64(metricPoint.Value)
                intSumCount++
                if metricPoint.FloatValue != nil {
                    floatSum += *metricPoint.FloatValue
                    floatSumCount++
                }
            }
        }

        if newest.FloatValue != nil {
            return int64(floatSum / float64(floatSumCount) * 1000), newest.Timestamp, true
        } else {
            return (intSum * 1000) / int64(intSumCount), newest.Timestamp, true
        }
    }

    return 0, time.Time{}, false
}
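
// Worked example (illustrative): with integer samples 10, 20 and 30 recorded one
// minute apart and duration == time.Minute, only the newest point (30) satisfies
// timestamp+duration > newest, so the collapsed value is 30*1000 = 30000 milli-units,
// stamped with the newest sample's timestamp.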
@@ -1,395 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
    "context"
    "encoding/json"
    "fmt"
    "io"
    "testing"
    "time"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/kubernetes/fake"
    restclient "k8s.io/client-go/rest"
    core "k8s.io/client-go/testing"

    heapster "k8s.io/heapster/metrics/api/v1/types"
    metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"

    "github.com/stretchr/testify/assert"
)

var fixedTimestamp = time.Date(2015, time.November, 10, 12, 30, 0, 0, time.UTC)

func (w fakeResponseWrapper) DoRaw(context.Context) ([]byte, error) {
    return w.raw, nil
}

func (w fakeResponseWrapper) Stream(context.Context) (io.ReadCloser, error) {
    return nil, nil
}

func newFakeResponseWrapper(raw []byte) fakeResponseWrapper {
    return fakeResponseWrapper{raw: raw}
}

type fakeResponseWrapper struct {
    raw []byte
}

// timestamp is used for establishing order on metricPoints
type metricPoint struct {
    level     uint64
    timestamp int
}

type testCase struct {
    desiredMetricValues PodMetricsInfo
    desiredError        error

    replicas              int
    targetTimestamp       int
    window                time.Duration
    reportedMetricsPoints [][]metricPoint
    reportedPodMetrics    [][]int64

    namespace      string
    selector       labels.Selector
    metricSelector labels.Selector
    resourceName   v1.ResourceName
    metricName     string
}

func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
    namespace := "test-namespace"
    tc.namespace = namespace
    podNamePrefix := "test-pod"
    podLabels := map[string]string{"name": podNamePrefix}
    tc.selector = labels.SelectorFromSet(podLabels)

    // it's a resource test if we have a resource name
    isResource := len(tc.resourceName) > 0

    fakeClient := &fake.Clientset{}

    fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
        obj := &v1.PodList{}
        for i := 0; i < tc.replicas; i++ {
            podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
            pod := buildPod(namespace, podName, podLabels, v1.PodRunning, "1024")
            obj.Items = append(obj.Items, pod)
        }
        return true, obj, nil
    })

    if isResource {
        fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
            metrics := metricsapi.PodMetricsList{}
            for i, containers := range tc.reportedPodMetrics {
                metric := metricsapi.PodMetrics{
                    ObjectMeta: metav1.ObjectMeta{
                        Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
                        Namespace: namespace,
                    },
                    Timestamp:  metav1.Time{Time: offsetTimestampBy(tc.targetTimestamp)},
                    Window:     metav1.Duration{Duration: tc.window},
                    Containers: []metricsapi.ContainerMetrics{},
                }
                for j, cpu := range containers {
                    cm := metricsapi.ContainerMetrics{
                        Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j),
                        Usage: v1.ResourceList{
                            v1.ResourceCPU: *resource.NewMilliQuantity(
                                cpu,
                                resource.DecimalSI),
                            v1.ResourceMemory: *resource.NewQuantity(
                                int64(1024*1024),
                                resource.BinarySI),
                        },
                    }
                    metric.Containers = append(metric.Containers, cm)
                }
                metrics.Items = append(metrics.Items, metric)
            }
            heapsterRawMemResponse, _ := json.Marshal(&metrics)
            return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
        })
    } else {
        fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
            metrics := heapster.MetricResultList{}
            var latestTimestamp time.Time
            for _, reportedMetricPoints := range tc.reportedMetricsPoints {
                var heapsterMetricPoints []heapster.MetricPoint
                for _, reportedMetricPoint := range reportedMetricPoints {
                    timestamp := offsetTimestampBy(reportedMetricPoint.timestamp)
                    if latestTimestamp.Before(timestamp) {
                        latestTimestamp = timestamp
                    }
                    heapsterMetricPoint := heapster.MetricPoint{Timestamp: timestamp, Value: reportedMetricPoint.level, FloatValue: nil}
                    heapsterMetricPoints = append(heapsterMetricPoints, heapsterMetricPoint)
                }
                metric := heapster.MetricResult{
                    Metrics:         heapsterMetricPoints,
                    LatestTimestamp: latestTimestamp,
                }
                metrics.Items = append(metrics.Items, metric)
            }
            heapsterRawMemResponse, _ := json.Marshal(&metrics)
            return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
        })
    }

    return fakeClient
}
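
// The fake client above answers the pod "list" action with tc.replicas synthetic
// pods and intercepts the "services" proxy action that HeapsterMetricsClient issues,
// returning canned Heapster JSON; which payload is faked depends on whether the
// case is a resource test or a raw-metric test.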

func buildPod(namespace, podName string, podLabels map[string]string, phase v1.PodPhase, request string) v1.Pod {
    return v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      podName,
            Namespace: namespace,
            Labels:    podLabels,
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Resources: v1.ResourceRequirements{
                        Requests: v1.ResourceList{
                            v1.ResourceCPU: resource.MustParse(request),
                        },
                    },
                },
            },
        },
        Status: v1.PodStatus{
            Phase: phase,
            Conditions: []v1.PodCondition{
                {
                    Type:   v1.PodReady,
                    Status: v1.ConditionTrue,
                },
            },
        },
    }
}

func (tc *testCase) verifyResults(t *testing.T, metrics PodMetricsInfo, timestamp time.Time, err error) {
    if tc.desiredError != nil {
        assert.Error(t, err, "there should be an error retrieving the metrics")
        assert.Contains(t, fmt.Sprintf("%v", err), fmt.Sprintf("%v", tc.desiredError), "the error message should be as expected")
        return
    }
    assert.NoError(t, err, "there should be no error retrieving the metrics")
    assert.NotNil(t, metrics, "there should be metrics returned")
    if len(metrics) != len(tc.desiredMetricValues) {
        t.Errorf("Not equal:\nexpected: %v\nactual: %v", tc.desiredMetricValues, metrics)
    } else {
        for k, m := range metrics {
            if !m.Timestamp.Equal(tc.desiredMetricValues[k].Timestamp) ||
                m.Window != tc.desiredMetricValues[k].Window ||
                m.Value != tc.desiredMetricValues[k].Value {
                t.Errorf("Not equal:\nexpected: %v\nactual: %v", tc.desiredMetricValues, metrics)
                break
            }
        }
    }

    targetTimestamp := offsetTimestampBy(tc.targetTimestamp)
    assert.True(t, targetTimestamp.Equal(timestamp), fmt.Sprintf("the timestamp should be as expected (%s) but was %s", targetTimestamp, timestamp))
}

func (tc *testCase) runTest(t *testing.T) {
    testClient := tc.prepareTestClient(t)
    metricsClient := NewHeapsterMetricsClient(testClient, DefaultHeapsterNamespace, DefaultHeapsterScheme, DefaultHeapsterService, DefaultHeapsterPort)
    isResource := len(tc.resourceName) > 0
    if isResource {
        info, timestamp, err := metricsClient.GetResourceMetric(tc.resourceName, tc.namespace, tc.selector, "")
        tc.verifyResults(t, info, timestamp, err)
    } else {
        info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector, tc.metricSelector)
        tc.verifyResults(t, info, timestamp, err)
    }
}

func TestCPU(t *testing.T) {
    targetTimestamp := 1
    window := 30 * time.Second
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-1": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-2": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
        },
        resourceName:       v1.ResourceCPU,
        targetTimestamp:    targetTimestamp,
        window:             window,
        reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
    }
    tc.runTest(t)
}

func TestQPS(t *testing.T) {
    targetTimestamp := 1
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": PodMetric{Value: 10000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
            "test-pod-1": PodMetric{Value: 20000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
            "test-pod-2": PodMetric{Value: 10000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
        },
        metricName:            "qps",
        targetTimestamp:       targetTimestamp,
        reportedMetricsPoints: [][]metricPoint{{{10, 1}}, {{20, 1}}, {{10, 1}}},
    }
    tc.runTest(t)
}

func TestQpsSumEqualZero(t *testing.T) {
    targetTimestamp := 0
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": PodMetric{Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
            "test-pod-1": PodMetric{Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
            "test-pod-2": PodMetric{Value: 0, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
        },
        metricName:            "qps",
        targetTimestamp:       targetTimestamp,
        reportedMetricsPoints: [][]metricPoint{{{0, 0}}, {{0, 0}}, {{0, 0}}},
    }
    tc.runTest(t)
}

func TestCPUMoreMetrics(t *testing.T) {
    targetTimestamp := 10
    window := 30 * time.Second
    tc := testCase{
        replicas: 5,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-1": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-2": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-3": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-4": PodMetric{Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
        },
        resourceName:       v1.ResourceCPU,
        targetTimestamp:    targetTimestamp,
        window:             window,
        reportedPodMetrics: [][]int64{{1000, 2000, 2000}, {5000}, {1000, 1000, 1000, 2000}, {4000, 1000}, {5000}},
    }
    tc.runTest(t)
}

func TestCPUMissingMetrics(t *testing.T) {
    targetTimestamp := 0
    window := 30 * time.Second
    tc := testCase{
        replicas: 3,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": PodMetric{Value: 4000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
        },
        resourceName:       v1.ResourceCPU,
        targetTimestamp:    targetTimestamp,
        window:             window,
        reportedPodMetrics: [][]int64{{4000}},
    }
    tc.runTest(t)
}

func TestQpsMissingMetrics(t *testing.T) {
    tc := testCase{
        replicas:              3,
        desiredError:          fmt.Errorf("requested metrics for 3 pods, got metrics for 1"),
        metricName:            "qps",
        targetTimestamp:       1,
        reportedMetricsPoints: [][]metricPoint{{{4000, 4}}},
    }
    tc.runTest(t)
}

func TestQpsSuperfluousMetrics(t *testing.T) {
    tc := testCase{
        replicas:              3,
        desiredError:          fmt.Errorf("requested metrics for 3 pods, got metrics for 6"),
        metricName:            "qps",
        reportedMetricsPoints: [][]metricPoint{{{1000, 1}}, {{2000, 4}}, {{2000, 1}}, {{4000, 5}}, {{2000, 1}}, {{4000, 4}}},
    }
    tc.runTest(t)
}

func TestCPUEmptyMetrics(t *testing.T) {
    tc := testCase{
        replicas:              3,
        resourceName:          v1.ResourceCPU,
        desiredError:          fmt.Errorf("no metrics returned from heapster"),
        reportedMetricsPoints: [][]metricPoint{},
        reportedPodMetrics:    [][]int64{},
    }
    tc.runTest(t)
}

func TestQpsEmptyEntries(t *testing.T) {
    targetTimestamp := 4
    tc := testCase{
        replicas:   3,
        metricName: "qps",
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": PodMetric{Value: 4000000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
            "test-pod-2": PodMetric{Value: 2000000, Timestamp: offsetTimestampBy(targetTimestamp), Window: heapsterDefaultMetricWindow},
        },
        targetTimestamp:       targetTimestamp,
        reportedMetricsPoints: [][]metricPoint{{{4000, 4}}, {}, {{2000, 4}}},
    }
    tc.runTest(t)
}

func TestCPUZeroReplicas(t *testing.T) {
    tc := testCase{
        replicas:           0,
        resourceName:       v1.ResourceCPU,
        desiredError:       fmt.Errorf("no metrics returned from heapster"),
        reportedPodMetrics: [][]int64{},
    }
    tc.runTest(t)
}

func TestCPUEmptyMetricsForOnePod(t *testing.T) {
    targetTimestamp := 0
    window := 30 * time.Second
    tc := testCase{
        replicas:     3,
        resourceName: v1.ResourceCPU,
        desiredMetricValues: PodMetricsInfo{
            "test-pod-0": PodMetric{Value: 100, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
            "test-pod-1": PodMetric{Value: 700, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
        },
        targetTimestamp:    targetTimestamp,
        window:             window,
        reportedPodMetrics: [][]int64{{100}, {300, 400}, {}},
    }
    tc.runTest(t)
}

func offsetTimestampBy(t int) time.Time {
    return fixedTimestamp.Add(time.Duration(t) * time.Minute)
}