HPA Controller: Use Custom Metrics API
This commit switches the HPA controller over to the custom metrics API, and also converts it to use the generated client in k8s.io/metrics for the resource metrics API.

To enable this support, pass `--horizontal-pod-autoscaler-use-rest-clients` to the controller-manager; this switches the HPA controller's MetricsClient implementation over to the standard REST clients for both custom metrics and resource metrics. It requires that, at minimum, the resource metrics API is registered with kube-aggregator, and that the controller-manager is pointed at kube-aggregator. For this to work, Heapster must be serving the new-style API server (`--api-server=true`).
parent 2249550b57
commit d6fe1e8764
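The core of the change is composing a resource metrics client and a custom metrics client into the controller's single MetricsClient. A minimal sketch of that composition, using only the constructors this diff introduces (obtaining the rest.Config is elided, and the wrapper function name is hypothetical):

package example

import (
    restclient "k8s.io/client-go/rest"

    "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
    resourceclient "k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1"
    "k8s.io/metrics/pkg/client/custom_metrics"
)

// newHPAMetricsClient mirrors startHPAControllerWithRESTClient below:
// one generated client for the resource metrics API and one client for
// the custom metrics API, wrapped into the HPA controller's
// MetricsClient interface.
func newHPAMetricsClient(cfg *restclient.Config) metrics.MetricsClient {
    return metrics.NewRESTMetricsClient(
        resourceclient.NewForConfigOrDie(cfg),
        custom_metrics.NewForConfigOrDie(cfg),
    )
}

Because both clients are built from the same config, the legacy Heapster proxy path and the new aggregated-API path can be selected behind a single interface, which is exactly how the controller-manager wiring below is structured.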
@@ -107,6 +107,10 @@ go_library(
         "//vendor:k8s.io/client-go/tools/clientcmd",
         "//vendor:k8s.io/client-go/tools/record",
         "//vendor:k8s.io/client-go/util/cert",
+        "//vendor:k8s.io/metrics/pkg/apis/custom_metrics/install",
+        "//vendor:k8s.io/metrics/pkg/apis/metrics/install",
+        "//vendor:k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1",
+        "//vendor:k8s.io/metrics/pkg/client/custom_metrics",
     ],
 )
@@ -24,12 +24,37 @@ import (
     "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/kubernetes/pkg/controller/podautoscaler"
     "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
+    resourceclient "k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1"
+    "k8s.io/metrics/pkg/client/custom_metrics"
+
+    // install the APIs so that they're registered with the scheme for the clients
+    _ "k8s.io/metrics/pkg/apis/custom_metrics/install"
+    _ "k8s.io/metrics/pkg/apis/metrics/install"
 )

 func startHPAController(ctx ControllerContext) (bool, error) {
     if !ctx.AvailableResources[schema.GroupVersionResource{Group: "autoscaling", Version: "v1", Resource: "horizontalpodautoscalers"}] {
         return false, nil
     }

+    if ctx.Options.HorizontalPodAutoscalerUseRESTClients {
+        // use the new-style clients if support for custom metrics is enabled
+        return startHPAControllerWithRESTClient(ctx)
+    }
+
+    return startHPAControllerWithLegacyClient(ctx)
+}
+
+func startHPAControllerWithRESTClient(ctx ControllerContext) (bool, error) {
+    clientConfig := ctx.ClientBuilder.ConfigOrDie("horizontal-pod-autoscaler")
+    metricsClient := metrics.NewRESTMetricsClient(
+        resourceclient.NewForConfigOrDie(clientConfig),
+        custom_metrics.NewForConfigOrDie(clientConfig),
+    )
+    return startHPAControllerWithMetricsClient(ctx, metricsClient)
+}
+
+func startHPAControllerWithLegacyClient(ctx ControllerContext) (bool, error) {
     hpaClient := ctx.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
     metricsClient := metrics.NewHeapsterMetricsClient(
         hpaClient,
@@ -38,6 +63,11 @@ func startHPAController(ctx ControllerContext) (bool, error) {
         metrics.DefaultHeapsterService,
         metrics.DefaultHeapsterPort,
     )
+    return startHPAControllerWithMetricsClient(ctx, metricsClient)
+}
+
+func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient metrics.MetricsClient) (bool, error) {
+    hpaClient := ctx.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
     replicaCalc := podautoscaler.NewReplicaCalculator(metricsClient, hpaClient.Core())
     go podautoscaler.NewHorizontalController(
         ctx.ClientBuilder.ClientGoClientOrDie("horizontal-pod-autoscaler").Core(),
@@ -94,18 +94,19 @@ func NewCMServer() *CMServer {
             },
             FlexVolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/",
         },
         ContentType:              "application/vnd.kubernetes.protobuf",
         KubeAPIQPS:               20.0,
         KubeAPIBurst:             30,
         LeaderElection:           leaderelection.DefaultLeaderElectionConfiguration(),
         ControllerStartInterval:  metav1.Duration{Duration: 0 * time.Second},
         EnableGarbageCollector:   true,
         ConcurrentGCSyncs:        20,
         ClusterSigningCertFile:   "/etc/kubernetes/ca/ca.pem",
         ClusterSigningKeyFile:    "/etc/kubernetes/ca/ca.key",
         ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 60 * time.Second},
         EnableTaintManager:       true,
         UseTaintBasedEvictions:   false,
+        HorizontalPodAutoscalerUseRESTClients: false,
     },
 }
 s.LeaderElection.LeaderElect = true
@@ -200,6 +201,7 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet, allControllers []string, disabled
     fs.DurationVar(&s.ReconcilerSyncLoopPeriod.Duration, "attach-detach-reconcile-sync-period", s.ReconcilerSyncLoopPeriod.Duration, "The reconciler sync wait time between volume attach detach. This duration must be larger than one second, and increasing this value from the default may allow for volumes to be mismatched with pods.")
     fs.BoolVar(&s.EnableTaintManager, "enable-taint-manager", s.EnableTaintManager, "WARNING: Beta feature. If set to true enables NoExecute Taints and will evict all not-tolerating Pod running on Nodes tainted with this kind of Taints.")
     fs.BoolVar(&s.UseTaintBasedEvictions, "use-taint-based-evictions", s.UseTaintBasedEvictions, "WARNING: Alpha feature. If set to true NodeController will use taints to evict Pods from notReady and unreachable Nodes.")
+    fs.BoolVar(&s.HorizontalPodAutoscalerUseRESTClients, "horizontal-pod-autoscaler-use-rest-clients", s.HorizontalPodAutoscalerUseRESTClients, "WARNING: alpha feature. If set to true, causes the horizontal pod autoscaler controller to use REST clients through the kube-aggregator, instead of using the legacy metrics client through the API server proxy. This is required for custom metrics support in the horizonal pod autoscaler.")

     leaderelection.BindFlags(&s.LeaderElection, fs)
@@ -691,3 +691,4 @@ www-prefix
 zone-id
 zone-name

+horizontal-pod-autoscaler-use-rest-clients
@@ -810,6 +810,10 @@ type KubeControllerManagerConfiguration struct {
     EnableTaintManager bool
     // If set to true NodeController will use taints to evict Pods from notReady and unreachable Nodes.
     UseTaintBasedEvictions bool
+    // HorizontalPodAutoscalerUseRESTClients causes the HPA controller to use REST clients
+    // through the kube-aggregator when enabled, instead of using the legacy metrics client
+    // through the API server proxy.
+    HorizontalPodAutoscalerUseRESTClients bool
 }

 // VolumeConfiguration contains *all* enumerated flags meant to configure all volume
@@ -47,6 +47,8 @@ go_test(
     name = "go_default_test",
     srcs = [
         "horizontal_test.go",
+        "legacy_horizontal_test.go",
+        "legacy_replica_calculator_test.go",
         "replica_calculator_test.go",
     ],
     library = ":go_default_library",
@@ -68,13 +70,19 @@ go_test(
         "//vendor:k8s.io/apimachinery/pkg/api/resource",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/runtime",
+        "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
         "//vendor:k8s.io/apimachinery/pkg/watch",
         "//vendor:k8s.io/client-go/kubernetes/fake",
+        "//vendor:k8s.io/client-go/pkg/api",
         "//vendor:k8s.io/client-go/pkg/api/v1",
         "//vendor:k8s.io/client-go/rest",
         "//vendor:k8s.io/client-go/testing",
         "//vendor:k8s.io/heapster/metrics/api/v1/types",
         "//vendor:k8s.io/heapster/metrics/apis/metrics/v1alpha1",
+        "//vendor:k8s.io/metrics/pkg/apis/custom_metrics/v1alpha1",
+        "//vendor:k8s.io/metrics/pkg/apis/metrics/v1alpha1",
+        "//vendor:k8s.io/metrics/pkg/client/clientset_generated/clientset/fake",
+        "//vendor:k8s.io/metrics/pkg/client/custom_metrics/fake",
     ],
 )
@@ -17,12 +17,8 @@ limitations under the License.
 package podautoscaler

 import (
-    "encoding/json"
     "fmt"
-    "io"
     "math"
-    "strconv"
-    "strings"
     "sync"
     "testing"
     "time"
@@ -30,12 +26,12 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/runtime/schema"
     "k8s.io/apimachinery/pkg/watch"
     clientfake "k8s.io/client-go/kubernetes/fake"
+    "k8s.io/client-go/pkg/api"
     clientv1 "k8s.io/client-go/pkg/api/v1"
-    restclient "k8s.io/client-go/rest"
     core "k8s.io/client-go/testing"
-    "k8s.io/kubernetes/pkg/api/unversioned"
     "k8s.io/kubernetes/pkg/api/v1"
     autoscalingv1 "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
     autoscalingv2 "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1"
@@ -44,9 +40,11 @@ import (
     informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
     "k8s.io/kubernetes/pkg/controller"
     "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
+    metricsfake "k8s.io/metrics/pkg/client/clientset_generated/clientset/fake"
+    cmfake "k8s.io/metrics/pkg/client/custom_metrics/fake"

-    heapster "k8s.io/heapster/metrics/api/v1/types"
-    metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
+    cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1alpha1"
+    metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"

     "github.com/stretchr/testify/assert"
@@ -56,22 +54,6 @@ import (

 func alwaysReady() bool { return true }

-func (w fakeResponseWrapper) DoRaw() ([]byte, error) {
-    return w.raw, nil
-}
-
-func (w fakeResponseWrapper) Stream() (io.ReadCloser, error) {
-    return nil, nil
-}
-
-func newFakeResponseWrapper(raw []byte) fakeResponseWrapper {
-    return fakeResponseWrapper{raw: raw}
-}
-
-type fakeResponseWrapper struct {
-    raw []byte
-}
-
 type fakeResource struct {
     name       string
     apiVersion string
@@ -124,7 +106,7 @@ func (tc *testCase) computeCPUCurrent() {
     tc.CPUCurrent = int32(100 * reported / requested)
 }

-func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
+func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient) {
     namespace := "test-namespace"
     hpaName := "test-hpa"
     podNamePrefix := "test-pod"
@@ -323,79 +305,6 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
         return true, obj, nil
     })

-    fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
-        tc.Lock()
-        defer tc.Unlock()
-
-        var heapsterRawMemResponse []byte
-
-        if tc.useMetricsApi {
-            metrics := metricsapi.PodMetricsList{}
-            for i, cpu := range tc.reportedLevels {
-                podMetric := metricsapi.PodMetrics{
-                    ObjectMeta: v1.ObjectMeta{
-                        Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
-                        Namespace: namespace,
-                    },
-                    Timestamp: unversioned.Time{Time: time.Now()},
-                    Containers: []metricsapi.ContainerMetrics{
-                        {
-                            Name: "container",
-                            Usage: v1.ResourceList{
-                                v1.ResourceCPU: *resource.NewMilliQuantity(
-                                    int64(cpu),
-                                    resource.DecimalSI),
-                                v1.ResourceMemory: *resource.NewQuantity(
-                                    int64(1024*1024),
-                                    resource.BinarySI),
-                            },
-                        },
-                    },
-                }
-                metrics.Items = append(metrics.Items, podMetric)
-            }
-            heapsterRawMemResponse, _ = json.Marshal(&metrics)
-        } else {
-            // only return the pods that we actually asked for
-            proxyAction := action.(core.ProxyGetAction)
-            pathParts := strings.Split(proxyAction.GetPath(), "/")
-            // pathParts should look like [ api, v1, model, namespaces, $NS, pod-list, $PODS, metrics, $METRIC... ]
-            if len(pathParts) < 9 {
-                return true, nil, fmt.Errorf("invalid heapster path %q", proxyAction.GetPath())
-            }
-
-            podNames := strings.Split(pathParts[7], ",")
-            podPresent := make([]bool, len(tc.reportedLevels))
-            for _, name := range podNames {
-                if len(name) <= len(podNamePrefix)+1 {
-                    return true, nil, fmt.Errorf("unknown pod %q", name)
-                }
-                num, err := strconv.Atoi(name[len(podNamePrefix)+1:])
-                if err != nil {
-                    return true, nil, fmt.Errorf("unknown pod %q", name)
-                }
-                podPresent[num] = true
-            }
-
-            timestamp := time.Now()
-            metrics := heapster.MetricResultList{}
-            for i, level := range tc.reportedLevels {
-                if !podPresent[i] {
-                    continue
-                }
-
-                metric := heapster.MetricResult{
-                    Metrics:         []heapster.MetricPoint{{Timestamp: timestamp, Value: level, FloatValue: nil}},
-                    LatestTimestamp: timestamp,
-                }
-                metrics.Items = append(metrics.Items, metric)
-            }
-            heapsterRawMemResponse, _ = json.Marshal(&metrics)
-        }
-
-        return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
-    })
-
     fakeClient.AddReactor("update", "replicationcontrollers", func(action core.Action) (handled bool, ret runtime.Object, err error) {
         tc.Lock()
         defer tc.Unlock()
@@ -450,7 +359,116 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
     fakeWatch := watch.NewFake()
     fakeClient.AddWatchReactor("*", core.DefaultWatchReactor(fakeWatch, nil))

-    return fakeClient
+    fakeMetricsClient := &metricsfake.Clientset{}
+    // NB: we have to sound like Gollum due to gengo's inability to handle already-plural resource names
+    fakeMetricsClient.AddReactor("list", "podmetricses", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        tc.Lock()
+        defer tc.Unlock()
+
+        metrics := &metricsapi.PodMetricsList{}
+        for i, cpu := range tc.reportedLevels {
+            // NB: the list reactor actually does label selector filtering for us,
+            // so we have to make sure our results match the label selector
+            podMetric := metricsapi.PodMetrics{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
+                    Namespace: namespace,
+                    Labels:    selector,
+                },
+                Timestamp: metav1.Time{Time: time.Now()},
+                Containers: []metricsapi.ContainerMetrics{
+                    {
+                        Name: "container",
+                        Usage: clientv1.ResourceList{
+                            clientv1.ResourceCPU: *resource.NewMilliQuantity(
+                                int64(cpu),
+                                resource.DecimalSI),
+                            clientv1.ResourceMemory: *resource.NewQuantity(
+                                int64(1024*1024),
+                                resource.BinarySI),
+                        },
+                    },
+                },
+            }
+            metrics.Items = append(metrics.Items, podMetric)
+        }
+
+        return true, metrics, nil
+    })
+
+    fakeCMClient := &cmfake.FakeCustomMetricsClient{}
+    fakeCMClient.AddReactor("get", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+        tc.Lock()
+        defer tc.Unlock()
+
+        getForAction, wasGetFor := action.(cmfake.GetForAction)
+        if !wasGetFor {
+            return true, nil, fmt.Errorf("expected a get-for action, got %v instead", action)
+        }
+
+        if getForAction.GetName() == "*" {
+            metrics := &cmapi.MetricValueList{}
+
+            // multiple objects
+            assert.Equal(t, "pods", getForAction.GetResource().Resource, "the type of object that we requested multiple metrics for should have been pods")
+            assert.Equal(t, "qps", getForAction.GetMetricName(), "the metric name requested should have been qps, as specified in the metric spec")
+
+            for i, level := range tc.reportedLevels {
+                podMetric := cmapi.MetricValue{
+                    DescribedObject: clientv1.ObjectReference{
+                        Kind:      "Pod",
+                        Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
+                        Namespace: namespace,
+                    },
+                    Timestamp:  metav1.Time{Time: time.Now()},
+                    MetricName: "qps",
+                    Value:      *resource.NewMilliQuantity(int64(level), resource.DecimalSI),
+                }
+                metrics.Items = append(metrics.Items, podMetric)
+            }
+
+            return true, metrics, nil
+        } else {
+            name := getForAction.GetName()
+            mapper := api.Registry.RESTMapper()
+            metrics := &cmapi.MetricValueList{}
+            var matchedTarget *autoscalingv2.MetricSpec
+            for i, target := range tc.metricsTarget {
+                if target.Type == autoscalingv2.ObjectMetricSourceType && name == target.Object.Target.Name {
+                    gk := schema.FromAPIVersionAndKind(target.Object.Target.APIVersion, target.Object.Target.Kind).GroupKind()
+                    mapping, err := mapper.RESTMapping(gk)
+                    if err != nil {
+                        t.Logf("unable to get mapping for %s: %v", gk.String(), err)
+                        continue
+                    }
+                    groupResource := schema.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}
+
+                    if getForAction.GetResource().Resource == groupResource.String() {
+                        matchedTarget = &tc.metricsTarget[i]
+                    }
+                }
+            }
+            assert.NotNil(t, matchedTarget, "this request should have matched one of the metric specs")
+            assert.Equal(t, "qps", getForAction.GetMetricName(), "the metric name requested should have been qps, as specified in the metric spec")
+
+            metrics.Items = []cmapi.MetricValue{
+                {
+                    DescribedObject: clientv1.ObjectReference{
+                        Kind:       matchedTarget.Object.Target.Kind,
+                        APIVersion: matchedTarget.Object.Target.APIVersion,
+                        Name:       name,
+                    },
+                    Timestamp:  metav1.Time{Time: time.Now()},
+                    MetricName: "qps",
+                    Value:      *resource.NewMilliQuantity(int64(tc.reportedLevels[0]), resource.DecimalSI),
+                },
+            }
+
+            return true, metrics, nil
+        }
+    })
+
+    return fakeClient, fakeMetricsClient, fakeCMClient
 }

 func (tc *testCase) verifyResults(t *testing.T) {
@@ -465,8 +483,11 @@ func (tc *testCase) verifyResults(t *testing.T) {
 }

 func (tc *testCase) runTest(t *testing.T) {
-    testClient := tc.prepareTestClient(t)
-    metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
+    testClient, testMetricsClient, testCMClient := tc.prepareTestClient(t)
+    metricsClient := metrics.NewRESTMetricsClient(
+        testMetricsClient.MetricsV1alpha1(),
+        testCMClient,
+    )

     eventClient := &clientfake.Clientset{}
     eventClient.AddReactor("*", "events", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@@ -631,7 +652,7 @@ func TestScaleUpCM(t *testing.T) {
                 },
             },
         },
-        reportedLevels:      []uint64{20, 10, 30},
+        reportedLevels:      []uint64{20000, 10000, 30000},
         reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
     }
     tc.runTest(t)
@@ -653,7 +674,7 @@ func TestScaleUpCMUnreadyLessScale(t *testing.T) {
                 },
             },
         },
-        reportedLevels:       []uint64{50, 10, 30},
+        reportedLevels:       []uint64{50000, 10000, 30000},
         reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
         reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
     }
@@ -676,13 +697,39 @@ func TestScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
                 },
             },
         },
-        reportedLevels:       []uint64{50, 15, 30},
+        reportedLevels:       []uint64{50000, 15000, 30000},
         reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
         reportedCPURequests:  []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
     }
     tc.runTest(t)
 }

+func TestScaleUpCMObject(t *testing.T) {
+    tc := testCase{
+        minReplicas:     2,
+        maxReplicas:     6,
+        initialReplicas: 3,
+        desiredReplicas: 4,
+        CPUTarget:       0,
+        metricsTarget: []autoscalingv2.MetricSpec{
+            {
+                Type: autoscalingv2.ObjectMetricSourceType,
+                Object: &autoscalingv2.ObjectMetricSource{
+                    Target: autoscalingv2.CrossVersionObjectReference{
+                        APIVersion: "extensions/v1beta1",
+                        Kind:       "Deployment",
+                        Name:       "some-deployment",
+                    },
+                    MetricName:  "qps",
+                    TargetValue: resource.MustParse("15.0"),
+                },
+            },
+        },
+        reportedLevels: []uint64{20000},
+    }
+    tc.runTest(t)
+}
+
 func TestScaleDown(t *testing.T) {
     tc := testCase{
         minReplicas: 2,
@@ -714,7 +761,34 @@ func TestScaleDownCM(t *testing.T) {
                 },
             },
         },
-        reportedLevels:      []uint64{12, 12, 12, 12, 12},
+        reportedLevels:      []uint64{12000, 12000, 12000, 12000, 12000},
+        reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+    }
+    tc.runTest(t)
+}
+
+func TestScaleDownCMObject(t *testing.T) {
+    tc := testCase{
+        minReplicas:     2,
+        maxReplicas:     6,
+        initialReplicas: 5,
+        desiredReplicas: 3,
+        CPUTarget:       0,
+        metricsTarget: []autoscalingv2.MetricSpec{
+            {
+                Type: autoscalingv2.ObjectMetricSourceType,
+                Object: &autoscalingv2.ObjectMetricSource{
+                    Target: autoscalingv2.CrossVersionObjectReference{
+                        APIVersion: "extensions/v1beta1",
+                        Kind:       "Deployment",
+                        Name:       "some-deployment",
+                    },
+                    MetricName:  "qps",
+                    TargetValue: resource.MustParse("20.0"),
+                },
+            },
+        },
+        reportedLevels: []uint64{12000},
         reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
     }
     tc.runTest(t)
@@ -766,7 +840,33 @@ func TestToleranceCM(t *testing.T) {
                 },
             },
         },
-        reportedLevels:      []uint64{20, 21, 21},
+        reportedLevels:      []uint64{20000, 20001, 21000},
+        reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
+    }
+    tc.runTest(t)
+}
+
+func TestToleranceCMObject(t *testing.T) {
+    tc := testCase{
+        minReplicas:     1,
+        maxReplicas:     5,
+        initialReplicas: 3,
+        desiredReplicas: 3,
+        metricsTarget: []autoscalingv2.MetricSpec{
+            {
+                Type: autoscalingv2.ObjectMetricSourceType,
+                Object: &autoscalingv2.ObjectMetricSource{
+                    Target: autoscalingv2.CrossVersionObjectReference{
+                        APIVersion: "extensions/v1beta1",
+                        Kind:       "Deployment",
+                        Name:       "some-deployment",
+                    },
+                    MetricName:  "qps",
+                    TargetValue: resource.MustParse("20.0"),
+                },
+            },
+        },
+        reportedLevels: []uint64{20050},
         reportedCPURequests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
     }
     tc.runTest(t)
pkg/controller/podautoscaler/legacy_horizontal_test.go (new file, 1051 lines; diff suppressed because it is too large)

pkg/controller/podautoscaler/legacy_replica_calculator_test.go (new file, 663 lines):
@ -0,0 +1,663 @@
|
|||||||
|
/*
|
||||||
|
Copyright 2016 The Kubernetes Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
package podautoscaler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"k8s.io/apimachinery/pkg/api/resource"
|
||||||
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
|
restclient "k8s.io/client-go/rest"
|
||||||
|
core "k8s.io/client-go/testing"
|
||||||
|
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||||
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
|
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
|
||||||
|
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
|
||||||
|
|
||||||
|
heapster "k8s.io/heapster/metrics/api/v1/types"
|
||||||
|
metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
)
|
||||||
|
|
||||||
|
type legacyReplicaCalcTestCase struct {
|
||||||
|
currentReplicas int32
|
||||||
|
expectedReplicas int32
|
||||||
|
expectedError error
|
||||||
|
|
||||||
|
timestamp time.Time
|
||||||
|
|
||||||
|
resource *resourceInfo
|
||||||
|
metric *metricInfo
|
||||||
|
|
||||||
|
podReadiness []v1.ConditionStatus
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tc *legacyReplicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
|
||||||
|
|
||||||
|
fakeClient := &fake.Clientset{}
|
||||||
|
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
|
||||||
|
obj := &v1.PodList{}
|
||||||
|
for i := 0; i < int(tc.currentReplicas); i++ {
|
||||||
|
podReadiness := v1.ConditionTrue
|
||||||
|
if tc.podReadiness != nil {
|
||||||
|
podReadiness = tc.podReadiness[i]
|
||||||
|
}
|
||||||
|
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
|
||||||
|
pod := v1.Pod{
|
||||||
|
Status: v1.PodStatus{
|
||||||
|
Phase: v1.PodRunning,
|
||||||
|
Conditions: []v1.PodCondition{
|
||||||
|
{
|
||||||
|
Type: v1.PodReady,
|
||||||
|
Status: podReadiness,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
|
Name: podName,
|
||||||
|
Namespace: testNamespace,
|
||||||
|
Labels: map[string]string{
|
||||||
|
"name": podNamePrefix,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
Spec: v1.PodSpec{
|
||||||
|
Containers: []v1.Container{{}, {}},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
if tc.resource != nil && i < len(tc.resource.requests) {
|
||||||
|
pod.Spec.Containers[0].Resources = v1.ResourceRequirements{
|
||||||
|
Requests: v1.ResourceList{
|
||||||
|
tc.resource.name: tc.resource.requests[i],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
pod.Spec.Containers[1].Resources = v1.ResourceRequirements{
|
||||||
|
Requests: v1.ResourceList{
|
||||||
|
tc.resource.name: tc.resource.requests[i],
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
obj.Items = append(obj.Items, pod)
|
||||||
|
}
|
||||||
|
return true, obj, nil
|
||||||
|
})
|
||||||
|
|
||||||
|
fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
|
||||||
|
var heapsterRawMemResponse []byte
|
||||||
|
|
||||||
|
if tc.resource != nil {
|
||||||
|
metrics := metricsapi.PodMetricsList{}
|
||||||
|
for i, resValue := range tc.resource.levels {
|
||||||
|
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
|
||||||
|
if len(tc.resource.podNames) > i {
|
||||||
|
podName = tc.resource.podNames[i]
|
||||||
|
}
|
||||||
|
podMetric := metricsapi.PodMetrics{
|
||||||
|
ObjectMeta: v1.ObjectMeta{
|
||||||
|
Name: podName,
|
||||||
|
Namespace: testNamespace,
|
||||||
|
},
|
||||||
|
Timestamp: unversioned.Time{Time: tc.timestamp},
|
||||||
|
Containers: make([]metricsapi.ContainerMetrics, numContainersPerPod),
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < numContainersPerPod; i++ {
|
||||||
|
podMetric.Containers[i] = metricsapi.ContainerMetrics{
|
||||||
|
Name: fmt.Sprintf("container%v", i),
|
||||||
|
Usage: v1.ResourceList{
|
||||||
|
v1.ResourceName(tc.resource.name): *resource.NewMilliQuantity(
|
||||||
|
int64(resValue),
|
||||||
|
resource.DecimalSI),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
metrics.Items = append(metrics.Items, podMetric)
|
||||||
|
}
|
||||||
|
heapsterRawMemResponse, _ = json.Marshal(&metrics)
|
||||||
|
} else {
|
||||||
|
// only return the pods that we actually asked for
|
||||||
|
proxyAction := action.(core.ProxyGetAction)
|
||||||
|
pathParts := strings.Split(proxyAction.GetPath(), "/")
|
||||||
|
// pathParts should look like [ api, v1, model, namespaces, $NS, pod-list, $PODS, metrics, $METRIC... ]
|
||||||
|
if len(pathParts) < 9 {
|
||||||
|
return true, nil, fmt.Errorf("invalid heapster path %q", proxyAction.GetPath())
|
||||||
|
}
|
||||||
|
|
||||||
|
podNames := strings.Split(pathParts[7], ",")
|
||||||
|
podPresent := make([]bool, len(tc.metric.levels))
|
||||||
|
for _, name := range podNames {
|
||||||
|
if len(name) <= len(podNamePrefix)+1 {
|
||||||
|
return true, nil, fmt.Errorf("unknown pod %q", name)
|
||||||
|
}
|
||||||
|
num, err := strconv.Atoi(name[len(podNamePrefix)+1:])
|
||||||
|
if err != nil {
|
||||||
|
return true, nil, fmt.Errorf("unknown pod %q", name)
|
||||||
|
}
|
||||||
|
podPresent[num] = true
|
||||||
|
}
|
||||||
|
|
||||||
|
timestamp := tc.timestamp
|
||||||
|
metrics := heapster.MetricResultList{}
|
||||||
|
for i, level := range tc.metric.levels {
|
||||||
|
if !podPresent[i] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
floatVal := float64(tc.metric.levels[i]) / 1000.0
|
||||||
|
metric := heapster.MetricResult{
|
||||||
|
Metrics: []heapster.MetricPoint{{Timestamp: timestamp, Value: uint64(level), FloatValue: &floatVal}},
|
||||||
|
LatestTimestamp: timestamp,
|
||||||
|
}
|
||||||
|
metrics.Items = append(metrics.Items, metric)
|
||||||
|
}
|
||||||
|
heapsterRawMemResponse, _ = json.Marshal(&metrics)
|
||||||
|
}
|
||||||
|
|
||||||
|
return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
|
||||||
|
})
|
||||||
|
|
||||||
|
return fakeClient
|
||||||
|
}
|
||||||
|
|
||||||
|
func (tc *legacyReplicaCalcTestCase) runTest(t *testing.T) {
|
||||||
|
testClient := tc.prepareTestClient(t)
|
||||||
|
metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
|
||||||
|
|
||||||
|
replicaCalc := &ReplicaCalculator{
|
||||||
|
metricsClient: metricsClient,
|
||||||
|
podsGetter: testClient.Core(),
|
||||||
|
}
|
||||||
|
|
||||||
|
selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
|
||||||
|
MatchLabels: map[string]string{"name": podNamePrefix},
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
require.Nil(t, err, "something went horribly wrong...")
|
||||||
|
}
|
||||||
|
|
||||||
|
if tc.resource != nil {
|
||||||
|
outReplicas, outUtilization, outRawValue, outTimestamp, err := replicaCalc.GetResourceReplicas(tc.currentReplicas, tc.resource.targetUtilization, tc.resource.name, testNamespace, selector)
|
||||||
|
|
||||||
|
if tc.expectedError != nil {
|
||||||
|
require.Error(t, err, "there should be an error calculating the replica count")
|
||||||
|
assert.Contains(t, err.Error(), tc.expectedError.Error(), "the error message should have contained the expected error message")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
require.NoError(t, err, "there should not have been an error calculating the replica count")
|
||||||
|
assert.Equal(t, tc.expectedReplicas, outReplicas, "replicas should be as expected")
|
||||||
|
assert.Equal(t, tc.resource.expectedUtilization, outUtilization, "utilization should be as expected")
|
||||||
|
assert.Equal(t, tc.resource.expectedValue, outRawValue, "raw value should be as expected")
|
||||||
|
assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
|
||||||
|
|
||||||
|
} else {
|
||||||
|
outReplicas, outUtilization, outTimestamp, err := replicaCalc.GetMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, selector)
|
||||||
|
|
||||||
|
if tc.expectedError != nil {
|
||||||
|
require.Error(t, err, "there should be an error calculating the replica count")
|
||||||
|
assert.Contains(t, err.Error(), tc.expectedError.Error(), "the error message should have contained the expected error message")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
require.NoError(t, err, "there should not have been an error calculating the replica count")
|
||||||
|
assert.Equal(t, tc.expectedReplicas, outReplicas, "replicas should be as expected")
|
||||||
|
assert.Equal(t, tc.metric.expectedUtilization, outUtilization, "utilization should be as expected")
|
||||||
|
assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcDisjointResourcesMetrics(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 1,
|
||||||
|
expectedError: fmt.Errorf("no metrics returned matched known pods"),
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("1.0")},
|
||||||
|
levels: []int64{100},
|
||||||
|
podNames: []string{"an-older-pod-name"},
|
||||||
|
|
||||||
|
targetUtilization: 100,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcScaleUp(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 3,
|
||||||
|
expectedReplicas: 5,
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||||
|
levels: []int64{300, 500, 700},
|
||||||
|
|
||||||
|
targetUtilization: 30,
|
||||||
|
expectedUtilization: 50,
|
||||||
|
expectedValue: numContainersPerPod * 500,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 3,
|
||||||
|
expectedReplicas: 4,
|
||||||
|
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||||
|
levels: []int64{300, 500, 700},
|
||||||
|
|
||||||
|
targetUtilization: 30,
|
||||||
|
expectedUtilization: 60,
|
||||||
|
expectedValue: numContainersPerPod * 600,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 3,
|
||||||
|
expectedReplicas: 3,
|
||||||
|
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||||
|
levels: []int64{400, 500, 700},
|
||||||
|
|
||||||
|
targetUtilization: 30,
|
||||||
|
expectedUtilization: 40,
|
||||||
|
expectedValue: numContainersPerPod * 400,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcScaleUpCM(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 3,
|
||||||
|
expectedReplicas: 4,
|
||||||
|
metric: &metricInfo{
|
||||||
|
name: "qps",
|
||||||
|
levels: []int64{20000, 10000, 30000},
|
||||||
|
targetUtilization: 15000,
|
||||||
|
expectedUtilization: 20000,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 3,
|
||||||
|
expectedReplicas: 4,
|
||||||
|
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
|
||||||
|
metric: &metricInfo{
|
||||||
|
name: "qps",
|
||||||
|
levels: []int64{50000, 10000, 30000},
|
||||||
|
targetUtilization: 15000,
|
||||||
|
expectedUtilization: 30000,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 3,
|
||||||
|
expectedReplicas: 3,
|
||||||
|
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
|
||||||
|
metric: &metricInfo{
|
||||||
|
name: "qps",
|
||||||
|
levels: []int64{50000, 15000, 30000},
|
||||||
|
targetUtilization: 15000,
|
||||||
|
expectedUtilization: 15000,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcScaleDown(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 5,
|
||||||
|
expectedReplicas: 3,
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||||
|
levels: []int64{100, 300, 500, 250, 250},
|
||||||
|
|
||||||
|
targetUtilization: 50,
|
||||||
|
expectedUtilization: 28,
|
||||||
|
expectedValue: numContainersPerPod * 280,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcScaleDownCM(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 5,
|
||||||
|
expectedReplicas: 3,
|
||||||
|
metric: &metricInfo{
|
||||||
|
name: "qps",
|
||||||
|
levels: []int64{12000, 12000, 12000, 12000, 12000},
|
||||||
|
targetUtilization: 20000,
|
||||||
|
expectedUtilization: 12000,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 5,
|
||||||
|
expectedReplicas: 2,
|
||||||
|
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||||
|
levels: []int64{100, 300, 500, 250, 250},
|
||||||
|
|
||||||
|
targetUtilization: 50,
|
||||||
|
expectedUtilization: 30,
|
||||||
|
expectedValue: numContainersPerPod * 300,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcTolerance(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 3,
|
||||||
|
expectedReplicas: 3,
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
|
||||||
|
levels: []int64{1010, 1030, 1020},
|
||||||
|
|
||||||
|
targetUtilization: 100,
|
||||||
|
expectedUtilization: 102,
|
||||||
|
expectedValue: numContainersPerPod * 1020,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcToleranceCM(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 3,
|
||||||
|
expectedReplicas: 3,
|
||||||
|
metric: &metricInfo{
|
||||||
|
name: "qps",
|
||||||
|
levels: []int64{20000, 21000, 21000},
|
||||||
|
targetUtilization: 20000,
|
||||||
|
expectedUtilization: 20666,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcSuperfluousMetrics(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 4,
|
||||||
|
expectedReplicas: 24,
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||||
|
levels: []int64{4000, 9500, 3000, 7000, 3200, 2000},
|
||||||
|
targetUtilization: 100,
|
||||||
|
expectedUtilization: 587,
|
||||||
|
expectedValue: numContainersPerPod * 5875,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcMissingMetrics(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 4,
|
||||||
|
expectedReplicas: 3,
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||||
|
levels: []int64{400, 95},
|
||||||
|
|
||||||
|
targetUtilization: 100,
|
||||||
|
expectedUtilization: 24,
|
||||||
|
expectedValue: 495, // numContainersPerPod * 247, for sufficiently large values of 247
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcEmptyMetrics(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 4,
|
||||||
|
expectedError: fmt.Errorf("unable to get metrics for resource cpu: no metrics returned from heapster"),
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||||
|
levels: []int64{},
|
||||||
|
|
||||||
|
targetUtilization: 100,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcEmptyCPURequest(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 1,
|
||||||
|
expectedError: fmt.Errorf("missing request for"),
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{},
|
||||||
|
levels: []int64{200},
|
||||||
|
|
||||||
|
targetUtilization: 100,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 2,
|
||||||
|
expectedReplicas: 2,
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||||
|
levels: []int64{1000},
|
||||||
|
|
||||||
|
targetUtilization: 100,
|
||||||
|
expectedUtilization: 100,
|
||||||
|
expectedValue: numContainersPerPod * 1000,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcMissingMetricsNoChangeGt(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 2,
|
||||||
|
expectedReplicas: 2,
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||||
|
levels: []int64{1900},
|
||||||
|
|
||||||
|
targetUtilization: 100,
|
||||||
|
expectedUtilization: 190,
|
||||||
|
expectedValue: numContainersPerPod * 1900,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 2,
|
||||||
|
expectedReplicas: 2,
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||||
|
levels: []int64{600},
|
||||||
|
|
||||||
|
targetUtilization: 100,
|
||||||
|
expectedUtilization: 60,
|
||||||
|
expectedValue: numContainersPerPod * 600,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 3,
|
||||||
|
expectedReplicas: 3,
|
||||||
|
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||||
|
levels: []int64{100, 450},
|
||||||
|
|
||||||
|
targetUtilization: 50,
|
||||||
|
expectedUtilization: 45,
|
||||||
|
expectedValue: numContainersPerPod * 450,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 3,
|
||||||
|
expectedReplicas: 4,
|
||||||
|
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||||
|
levels: []int64{100, 2000},
|
||||||
|
|
||||||
|
targetUtilization: 50,
|
||||||
|
expectedUtilization: 200,
|
||||||
|
expectedValue: numContainersPerPod * 2000,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func LegacyTestReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
|
||||||
|
tc := legacyReplicaCalcTestCase{
|
||||||
|
currentReplicas: 4,
|
||||||
|
expectedReplicas: 3,
|
||||||
|
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue},
|
||||||
|
resource: &resourceInfo{
|
||||||
|
name: v1.ResourceCPU,
|
||||||
|
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||||
|
levels: []int64{100, 100, 100},
|
||||||
|
|
||||||
|
targetUtilization: 50,
|
||||||
|
expectedUtilization: 10,
|
||||||
|
expectedValue: numContainersPerPod * 100,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
tc.runTest(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
// LegacyTestReplicaCalcComputedToleranceAlgImplementation is a regression test which
// back-calculates a minimal percentage for downscaling based on a small percentage
// increase in pod utilization which is calibrated against the tolerance value.
func LegacyTestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {

	startPods := int32(10)
	// 150 mCPU per pod.
	totalUsedCPUOfAllPods := int64(startPods * 150)
	// Each pod starts out asking for 2X what is really needed.
	// This means we will have a 50% ratio of used/requested.
	totalRequestedCPUOfAllPods := int32(2 * totalUsedCPUOfAllPods)
	requestedToUsed := float64(totalRequestedCPUOfAllPods / int32(totalUsedCPUOfAllPods))
	// Spread the amount we ask over 10 pods. We can add some jitter later in reportedLevels.
	perPodRequested := totalRequestedCPUOfAllPods / startPods

	// Force a minimal scaling event by satisfying (tolerance < 1 - resourcesUsedRatio).
	target := math.Abs(1/(requestedToUsed*(1-tolerance))) + .01
	finalCpuPercentTarget := int32(target * 100)
	resourcesUsedRatio := float64(totalUsedCPUOfAllPods) / float64(float64(totalRequestedCPUOfAllPods)*target)

	// i.e. .60 * 20 -> scaled down expectation.
	finalPods := int32(math.Ceil(resourcesUsedRatio * float64(startPods)))

	// To breach tolerance we will create a utilization ratio difference of (tolerance to usageRatioToleranceValue).
	tc := legacyReplicaCalcTestCase{
		currentReplicas:  startPods,
		expectedReplicas: finalPods,
		resource: &resourceInfo{
			name: v1.ResourceCPU,
			levels: []int64{
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
			},
			requests: []resource.Quantity{
				resource.MustParse(fmt.Sprint(perPodRequested+100) + "m"),
				resource.MustParse(fmt.Sprint(perPodRequested-100) + "m"),
				resource.MustParse(fmt.Sprint(perPodRequested+10) + "m"),
				resource.MustParse(fmt.Sprint(perPodRequested-10) + "m"),
				resource.MustParse(fmt.Sprint(perPodRequested+2) + "m"),
				resource.MustParse(fmt.Sprint(perPodRequested-2) + "m"),
				resource.MustParse(fmt.Sprint(perPodRequested+1) + "m"),
				resource.MustParse(fmt.Sprint(perPodRequested-1) + "m"),
				resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
				resource.MustParse(fmt.Sprint(perPodRequested) + "m"),
			},

			targetUtilization:   finalCpuPercentTarget,
			expectedUtilization: int32(totalUsedCPUOfAllPods*100) / totalRequestedCPUOfAllPods,
			expectedValue:       numContainersPerPod * totalUsedCPUOfAllPods / 10,
		},
	}

	tc.runTest(t)

	// Reuse the data structure above, now testing "unscaling".
	// Now, we test that no scaling happens if we are in a very close margin to the tolerance.
	target = math.Abs(1/(requestedToUsed*(1-tolerance))) + .004
	finalCpuPercentTarget = int32(target * 100)
	tc.resource.targetUtilization = finalCpuPercentTarget
	tc.currentReplicas = startPods
	tc.expectedReplicas = startPods
	tc.runTest(t)
}

// TODO: add more tests
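
To see the back-calculation concretely, here is a standalone sketch of the same arithmetic, assuming the package's tolerance constant is 0.1 (the HPA default); the helper and its printed numbers are illustrative only, not part of this change.

package main

import (
	"fmt"
	"math"
)

func main() {
	const tolerance = 0.1 // assumed value of the package constant used by the test

	startPods := 10.0
	totalUsed := startPods * 150.0  // 1500 mCPU actually used across all pods
	totalRequested := 2 * totalUsed // 3000 mCPU requested, i.e. 50% utilization
	requestedToUsed := totalRequested / totalUsed

	// Nudge the target just past the tolerance boundary so a minimal scaling event must fire.
	target := math.Abs(1/(requestedToUsed*(1-tolerance))) + .01
	usedRatio := totalUsed / (totalRequested * target)
	finalPods := math.Ceil(usedRatio * startPods)

	fmt.Printf("target=%.4f usedRatio=%.3f finalPods=%.0f\n", target, usedRatio, finalPods)
	// Prints roughly: target=0.5656 usedRatio=0.884 finalPods=9 (a minimal 10 -> 9 scale-down)
}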
@@ -11,7 +11,9 @@ load(
 go_library(
     name = "go_default_library",
     srcs = [
-        "metrics_client.go",
+        "interfaces.go",
+        "legacy_metrics_client.go",
+        "rest_metrics_client.go",
         "utilization.go",
     ],
     tags = ["automanaged"],
@@ -23,29 +25,47 @@ go_library(
         "//vendor:github.com/golang/glog",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/labels",
+        "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
+        "//vendor:k8s.io/client-go/pkg/api/v1",
         "//vendor:k8s.io/heapster/metrics/api/v1/types",
         "//vendor:k8s.io/heapster/metrics/apis/metrics/v1alpha1",
+        "//vendor:k8s.io/metrics/pkg/apis/custom_metrics/v1alpha1",
+        "//vendor:k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1",
+        "//vendor:k8s.io/metrics/pkg/client/custom_metrics",
     ],
 )
 
 go_test(
     name = "go_default_test",
-    srcs = ["metrics_client_test.go"],
+    srcs = [
+        "legacy_metrics_client_test.go",
+        "rest_metrics_client_test.go",
+    ],
     library = ":go_default_library",
     tags = ["automanaged"],
     deps = [
         "//pkg/api/unversioned:go_default_library",
         "//pkg/api/v1:go_default_library",
+        "//pkg/apis/autoscaling/v2alpha1:go_default_library",
         "//pkg/client/clientset_generated/clientset/fake:go_default_library",
         "//vendor:github.com/stretchr/testify/assert",
         "//vendor:k8s.io/apimachinery/pkg/api/resource",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/labels",
         "//vendor:k8s.io/apimachinery/pkg/runtime",
+        "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
+        "//vendor:k8s.io/client-go/pkg/api",
+        "//vendor:k8s.io/client-go/pkg/api/install",
+        "//vendor:k8s.io/client-go/pkg/api/v1",
+        "//vendor:k8s.io/client-go/pkg/apis/extensions/install",
         "//vendor:k8s.io/client-go/rest",
         "//vendor:k8s.io/client-go/testing",
         "//vendor:k8s.io/heapster/metrics/api/v1/types",
         "//vendor:k8s.io/heapster/metrics/apis/metrics/v1alpha1",
+        "//vendor:k8s.io/metrics/pkg/apis/custom_metrics/v1alpha1",
+        "//vendor:k8s.io/metrics/pkg/apis/metrics/v1alpha1",
+        "//vendor:k8s.io/metrics/pkg/client/clientset_generated/clientset/fake",
+        "//vendor:k8s.io/metrics/pkg/client/custom_metrics/fake",
     ],
 )
pkg/controller/podautoscaler/metrics/interfaces.go (new file, 45 lines)

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/pkg/api/v1"
	autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1"
)

// PodMetricsInfo contains pod metric values as a map from pod names to
// metric values (the metric values are expected to be the metric as a milli-value)
type PodMetricsInfo map[string]int64

// MetricsClient knows how to query a remote interface to retrieve container-level
// resource metrics as well as pod-level arbitrary metrics
type MetricsClient interface {
	// GetResourceMetric gets the given resource metric (and an associated oldest timestamp)
	// for all pods matching the specified selector in the given namespace
	GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error)

	// GetRawMetric gets the given metric (and an associated oldest timestamp)
	// for all pods matching the specified selector in the given namespace
	GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error)

	// GetObjectMetric gets the given metric (and an associated timestamp) for the given
	// object in the given namespace
	GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference) (int64, time.Time, error)
}
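
Since PodMetricsInfo values are plain milli-values keyed by pod name, aggregation is ordinary integer math. Below is a hypothetical helper (not part of this change) sketching how a consumer of the interface might average them:

package metrics

// averageMilliValue is a hypothetical helper, not part of this diff, showing
// how PodMetricsInfo aggregates: keys are pod names, values are milli-units
// of the metric (e.g. milliCPU for a resource metric).
func averageMilliValue(info PodMetricsInfo) int64 {
	if len(info) == 0 {
		return 0
	}
	sum := int64(0)
	for _, v := range info {
		sum += v
	}
	return sum / int64(len(info))
}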
@@ -34,26 +34,6 @@ import (
 	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
 )
 
-// PodMetricsInfo contains pod metric values as a map from pod names to
-// metric values (the metric values are expected to be the metric as a milli-value)
-type PodMetricsInfo map[string]int64
-
-// MetricsClient knows how to query a remote interface to retrieve container-level
-// resource metrics as well as pod-level arbitrary metrics
-type MetricsClient interface {
-	// GetResourceMetric gets the given resource metric (and an associated oldest timestamp)
-	// for all pods matching the specified selector in the given namespace
-	GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error)
-
-	// GetRawMetric gets the given metric (and an associated oldest timestamp)
-	// for all pods matching the specified selector in the given namespace
-	GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error)
-
-	// GetObjectMetric gets the given metric (and an associated timestamp) for the given
-	// object in the given namespace
-	GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference) (int64, time.Time, error)
-}
-
 const (
 	DefaultHeapsterNamespace = "kube-system"
 	DefaultHeapsterScheme    = "http"
pkg/controller/podautoscaler/metrics/rest_metrics_client.go (new file, 142 lines)

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
	"fmt"
	"time"

	"github.com/golang/glog"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"
	clientv1 "k8s.io/client-go/pkg/api/v1"
	"k8s.io/kubernetes/pkg/api/v1"
	autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1"
	customapi "k8s.io/metrics/pkg/apis/custom_metrics/v1alpha1"
	resourceclient "k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1alpha1"
	customclient "k8s.io/metrics/pkg/client/custom_metrics"
)

func NewRESTMetricsClient(resourceClient resourceclient.PodMetricsesGetter, customClient customclient.CustomMetricsClient) MetricsClient {
	return &restMetricsClient{
		&resourceMetricsClient{resourceClient},
		&customMetricsClient{customClient},
	}
}

// restMetricsClient is a client which supports fetching
// metrics from both the resource metrics API and the
// custom metrics API.
type restMetricsClient struct {
	*resourceMetricsClient
	*customMetricsClient
}

// resourceMetricsClient implements the resource-metrics-related parts of MetricsClient,
// using data from the resource metrics API.
type resourceMetricsClient struct {
	client resourceclient.PodMetricsesGetter
}

// GetResourceMetric gets the given resource metric (and an associated oldest timestamp)
// for all pods matching the specified selector in the given namespace
func (c *resourceMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
	metrics, err := c.client.PodMetricses(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
	if err != nil {
		return nil, time.Time{}, fmt.Errorf("unable to fetch metrics from API: %v", err)
	}

	if len(metrics.Items) == 0 {
		return nil, time.Time{}, fmt.Errorf("no metrics returned from heapster")
	}

	res := make(PodMetricsInfo, len(metrics.Items))

	for _, m := range metrics.Items {
		podSum := int64(0)
		missing := len(m.Containers) == 0
		for _, c := range m.Containers {
			resValue, found := c.Usage[clientv1.ResourceName(resource)]
			if !found {
				missing = true
				glog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
				break // containers loop
			}
			podSum += resValue.MilliValue()
		}

		if !missing {
			res[m.Name] = int64(podSum)
		}
	}

	timestamp := metrics.Items[0].Timestamp.Time

	return res, timestamp, nil
}

// customMetricsClient implements the custom-metrics-related parts of MetricsClient,
// using data from the custom metrics API.
type customMetricsClient struct {
	client customclient.CustomMetricsClient
}

// GetRawMetric gets the given metric (and an associated oldest timestamp)
// for all pods matching the specified selector in the given namespace
func (c *customMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
	metrics, err := c.client.NamespacedMetrics(namespace).GetForObjects(schema.GroupKind{Kind: "Pod"}, selector, metricName)
	if err != nil {
		return nil, time.Time{}, fmt.Errorf("unable to fetch metrics from API: %v", err)
	}

	if len(metrics.Items) == 0 {
		return nil, time.Time{}, fmt.Errorf("no metrics returned from custom metrics API")
	}

	res := make(PodMetricsInfo, len(metrics.Items))
	for _, m := range metrics.Items {
		res[m.DescribedObject.Name] = m.Value.MilliValue()
	}

	timestamp := metrics.Items[0].Timestamp.Time

	return res, timestamp, nil
}

// GetObjectMetric gets the given metric (and an associated timestamp) for the given
// object in the given namespace
func (c *customMetricsClient) GetObjectMetric(metricName string, namespace string, objectRef *autoscaling.CrossVersionObjectReference) (int64, time.Time, error) {
	gvk := schema.FromAPIVersionAndKind(objectRef.APIVersion, objectRef.Kind)
	var metricValue *customapi.MetricValue
	var err error
	if gvk.Kind == "Namespace" && gvk.Group == "" {
		// handle namespace separately
		// NB: we ignore namespace name here, since CrossVersionObjectReference isn't
		// supposed to allow you to escape your namespace
		metricValue, err = c.client.RootScopedMetrics().GetForObject(gvk.GroupKind(), namespace, metricName)
	} else {
		metricValue, err = c.client.NamespacedMetrics(namespace).GetForObject(gvk.GroupKind(), objectRef.Name, metricName)
	}

	if err != nil {
		return 0, time.Time{}, fmt.Errorf("unable to fetch metrics from API: %v", err)
	}

	return metricValue.Value.MilliValue(), metricValue.Timestamp.Time, nil
}
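
A minimal sketch of a call site for GetObjectMetric, assuming a client built by NewRESTMetricsClient; the metric name, namespace, and object reference below are invented for illustration. A reference with Kind "Namespace" and an empty group would take the root-scoped branch above instead.

package metrics

import (
	"fmt"

	autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1"
)

// demoObjectMetric is a hypothetical call site, not part of this diff.
// A Deployment reference dispatches through the namespaced branch of
// GetObjectMetric; a Namespace reference would use the root-scoped branch.
func demoObjectMetric(metricsClient MetricsClient) error {
	ref := &autoscaling.CrossVersionObjectReference{
		Kind:       "Deployment",
		APIVersion: "extensions/v1beta1",
		Name:       "frontend", // illustrative object name
	}
	value, ts, err := metricsClient.GetObjectMetric("queue-length", "prod", ref)
	if err != nil {
		return err
	}
	// value is a milli-value, e.g. 10000 means a queue length of 10
	fmt.Printf("queue-length = %d (milli) at %s\n", value, ts)
	return nil
}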
pkg/controller/podautoscaler/metrics/rest_metrics_client_test.go (new file, 275 lines)

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package metrics

import (
	"fmt"
	"testing"
	"time"

	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/pkg/api"
	"k8s.io/client-go/pkg/api/v1"
	core "k8s.io/client-go/testing"
	kv1 "k8s.io/kubernetes/pkg/api/v1"
	autoscalingapi "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1"
	metricsfake "k8s.io/metrics/pkg/client/clientset_generated/clientset/fake"
	cmfake "k8s.io/metrics/pkg/client/custom_metrics/fake"

	cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1alpha1"
	metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"

	"github.com/stretchr/testify/assert"

	// we need the API types for rest mapping lookup
	_ "k8s.io/client-go/pkg/api/install"
	_ "k8s.io/client-go/pkg/apis/extensions/install"
)

type restClientTestCase struct {
	desiredMetricValues PodMetricsInfo
	desiredError        error

	// "timestamps" here are actually the offset in minutes from a base timestamp
	targetTimestamp      int
	reportedMetricPoints []metricPoint
	reportedPodMetrics   [][]int64
	singleObject         *autoscalingapi.CrossVersionObjectReference

	namespace    string
	selector     labels.Selector
	resourceName v1.ResourceName
	metricName   string
}

func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clientset, *cmfake.FakeCustomMetricsClient) {
	namespace := "test-namespace"
	tc.namespace = namespace
	podNamePrefix := "test-pod"
	podLabels := map[string]string{"name": podNamePrefix}
	tc.selector = labels.SelectorFromSet(podLabels)

	// it's a resource test if we have a resource name
	isResource := len(tc.resourceName) > 0

	fakeMetricsClient := &metricsfake.Clientset{}
	fakeCMClient := &cmfake.FakeCustomMetricsClient{}

	if isResource {
		fakeMetricsClient.AddReactor("list", "podmetricses", func(action core.Action) (handled bool, ret runtime.Object, err error) {
			metrics := &metricsapi.PodMetricsList{}
			for i, containers := range tc.reportedPodMetrics {
				metric := metricsapi.PodMetrics{
					ObjectMeta: metav1.ObjectMeta{
						Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
						Namespace: namespace,
						Labels:    podLabels,
					},
					Timestamp:  metav1.Time{Time: fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)},
					Containers: []metricsapi.ContainerMetrics{},
				}
				for j, cpu := range containers {
					cm := metricsapi.ContainerMetrics{
						Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j),
						Usage: v1.ResourceList{
							v1.ResourceCPU: *resource.NewMilliQuantity(
								cpu,
								resource.DecimalSI),
							v1.ResourceMemory: *resource.NewQuantity(
								int64(1024*1024),
								resource.BinarySI),
						},
					}
					metric.Containers = append(metric.Containers, cm)
				}
				metrics.Items = append(metrics.Items, metric)
			}
			return true, metrics, nil
		})
	} else {
		fakeCMClient.AddReactor("get", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
			getForAction := action.(cmfake.GetForAction)
			assert.Equal(t, tc.metricName, getForAction.GetMetricName(), "the metric requested should have matched the one specified")

			if getForAction.GetName() == "*" {
				// multiple objects
				metrics := cmapi.MetricValueList{}
				assert.Equal(t, "pods", getForAction.GetResource().Resource, "type of object that we requested multiple metrics for should have been pods")

				for i, metricPoint := range tc.reportedMetricPoints {
					timestamp := fixedTimestamp.Add(time.Duration(metricPoint.timestamp) * time.Minute)
					metric := cmapi.MetricValue{
						DescribedObject: v1.ObjectReference{
							Kind:       "Pod",
							APIVersion: "v1",
							Name:       fmt.Sprintf("%s-%d", podNamePrefix, i),
						},
						Value:      *resource.NewMilliQuantity(int64(metricPoint.level), resource.DecimalSI),
						Timestamp:  metav1.Time{Time: timestamp},
						MetricName: tc.metricName,
					}

					metrics.Items = append(metrics.Items, metric)
				}

				return true, &metrics, nil
			} else {
				name := getForAction.GetName()
				mapper := api.Registry.RESTMapper()
				assert.NotNil(t, tc.singleObject, "should have only requested a single-object metric when we asked for metrics for a single object")
				gk := schema.FromAPIVersionAndKind(tc.singleObject.APIVersion, tc.singleObject.Kind).GroupKind()
				mapping, err := mapper.RESTMapping(gk)
				if err != nil {
					return true, nil, fmt.Errorf("unable to get mapping for %s: %v", gk.String(), err)
				}
				groupResource := schema.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}

				assert.Equal(t, groupResource.String(), getForAction.GetResource().Resource, "should have requested metrics for the resource matching the GroupKind passed in")
				assert.Equal(t, tc.singleObject.Name, name, "should have requested metrics for the object matching the name passed in")
				metricPoint := tc.reportedMetricPoints[0]
				timestamp := fixedTimestamp.Add(time.Duration(metricPoint.timestamp) * time.Minute)

				metrics := &cmapi.MetricValueList{
					Items: []cmapi.MetricValue{
						{
							DescribedObject: v1.ObjectReference{
								Kind:       tc.singleObject.Kind,
								APIVersion: tc.singleObject.APIVersion,
								Name:       tc.singleObject.Name,
							},
							Timestamp:  metav1.Time{Time: timestamp},
							MetricName: tc.metricName,
							Value:      *resource.NewMilliQuantity(int64(metricPoint.level), resource.DecimalSI),
						},
					},
				}

				return true, metrics, nil
			}
		})
	}

	return fakeMetricsClient, fakeCMClient
}

func (tc *restClientTestCase) verifyResults(t *testing.T, metrics PodMetricsInfo, timestamp time.Time, err error) {
	if tc.desiredError != nil {
		assert.Error(t, err, "there should be an error retrieving the metrics")
		assert.Contains(t, fmt.Sprintf("%v", err), fmt.Sprintf("%v", tc.desiredError), "the error message should be as expected")
		return
	}
	assert.NoError(t, err, "there should be no error retrieving the metrics")
	assert.NotNil(t, metrics, "there should be metrics returned")

	assert.Equal(t, tc.desiredMetricValues, metrics, "the metrics values should be as expected")

	targetTimestamp := fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)
	assert.True(t, targetTimestamp.Equal(timestamp), fmt.Sprintf("the timestamp should be as expected (%s) but was %s", targetTimestamp, timestamp))
}

func (tc *restClientTestCase) runTest(t *testing.T) {
	testMetricsClient, testCMClient := tc.prepareTestClient(t)
	metricsClient := NewRESTMetricsClient(testMetricsClient.MetricsV1alpha1(), testCMClient)
	isResource := len(tc.resourceName) > 0
	if isResource {
		info, timestamp, err := metricsClient.GetResourceMetric(kv1.ResourceName(tc.resourceName), tc.namespace, tc.selector)
		tc.verifyResults(t, info, timestamp, err)
	} else if tc.singleObject == nil {
		info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector)
		tc.verifyResults(t, info, timestamp, err)
	} else {
		val, timestamp, err := metricsClient.GetObjectMetric(tc.metricName, tc.namespace, tc.singleObject)
		info := PodMetricsInfo{tc.singleObject.Name: val}
		tc.verifyResults(t, info, timestamp, err)
	}
}

func TestRESTClientCPU(t *testing.T) {
	tc := restClientTestCase{
		desiredMetricValues: PodMetricsInfo{
			"test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
		},
		resourceName:       v1.ResourceCPU,
		targetTimestamp:    1,
		reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
	}
	tc.runTest(t)
}

func TestRESTClientQPS(t *testing.T) {
	tc := restClientTestCase{
		desiredMetricValues: PodMetricsInfo{
			"test-pod-0": 10000, "test-pod-1": 20000, "test-pod-2": 10000,
		},
		metricName:           "qps",
		targetTimestamp:      1,
		reportedMetricPoints: []metricPoint{{10000, 1}, {20000, 1}, {10000, 1}},
	}
	tc.runTest(t)
}

func TestRESTClientSingleObject(t *testing.T) {
	tc := restClientTestCase{
		desiredMetricValues:  PodMetricsInfo{"some-dep": 10},
		metricName:           "queue-length",
		targetTimestamp:      1,
		reportedMetricPoints: []metricPoint{{10, 1}},
		singleObject: &autoscalingapi.CrossVersionObjectReference{
			APIVersion: "extensions/v1beta1",
			Kind:       "Deployment",
			Name:       "some-dep",
		},
	}
	tc.runTest(t)
}

func TestRESTClientQpsSumEqualZero(t *testing.T) {
	tc := restClientTestCase{
		desiredMetricValues: PodMetricsInfo{
			"test-pod-0": 0, "test-pod-1": 0, "test-pod-2": 0,
		},
		metricName:           "qps",
		targetTimestamp:      0,
		reportedMetricPoints: []metricPoint{{0, 0}, {0, 0}, {0, 0}},
	}
	tc.runTest(t)
}

func TestRESTClientCPUEmptyMetrics(t *testing.T) {
	tc := restClientTestCase{
		resourceName:         v1.ResourceCPU,
		desiredError:         fmt.Errorf("no metrics returned from heapster"),
		reportedMetricPoints: []metricPoint{},
		reportedPodMetrics:   [][]int64{},
	}
	tc.runTest(t)
}

func TestRESTClientCPUEmptyMetricsForOnePod(t *testing.T) {
	tc := restClientTestCase{
		resourceName: v1.ResourceCPU,
		desiredMetricValues: PodMetricsInfo{
			"test-pod-0": 100, "test-pod-1": 700,
		},
		reportedPodMetrics: [][]int64{{100}, {300, 400}, {}},
	}
	tc.runTest(t)
}
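
Stripped of the assertions, the reactor wiring above reduces to the pattern below. This is a hedged sketch reusing the fake packages this file imports; the helper name and the fixed 10000 milli-value are invented for illustration.

package metrics

import (
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
	core "k8s.io/client-go/testing"
	cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1alpha1"
	cmfake "k8s.io/metrics/pkg/client/custom_metrics/fake"
)

// newStubCMClient is a hypothetical helper distilling the reactor pattern:
// every "get" against the fake custom metrics client is answered with a
// single fixed MetricValue for whatever metric name was requested.
func newStubCMClient() *cmfake.FakeCustomMetricsClient {
	fakeCMClient := &cmfake.FakeCustomMetricsClient{}
	fakeCMClient.AddReactor("get", "*", func(action core.Action) (bool, runtime.Object, error) {
		getForAction := action.(cmfake.GetForAction)
		return true, &cmapi.MetricValueList{
			Items: []cmapi.MetricValue{{
				MetricName: getForAction.GetMetricName(),
				Value:      *resource.NewMilliQuantity(10000, resource.DecimalSI), // illustrative value
			}},
		}, nil
	})
	return fakeCMClient
}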
@@ -17,26 +17,27 @@ limitations under the License.
 package podautoscaler
 
 import (
-	"encoding/json"
 	"fmt"
 	"math"
-	"strconv"
-	"strings"
 	"testing"
 	"time"
 
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	restclient "k8s.io/client-go/rest"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/pkg/api"
+	clientv1 "k8s.io/client-go/pkg/api/v1"
 	core "k8s.io/client-go/testing"
-	"k8s.io/kubernetes/pkg/api/unversioned"
 	"k8s.io/kubernetes/pkg/api/v1"
+	autoscalingv2 "k8s.io/kubernetes/pkg/apis/autoscaling/v2alpha1"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
 	"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
+	metricsfake "k8s.io/metrics/pkg/client/clientset_generated/clientset/fake"
+	cmfake "k8s.io/metrics/pkg/client/custom_metrics/fake"
 
-	heapster "k8s.io/heapster/metrics/api/v1/types"
-	metricsapi "k8s.io/heapster/metrics/apis/metrics/v1alpha1"
+	cmapi "k8s.io/metrics/pkg/apis/custom_metrics/v1alpha1"
+	metricsapi "k8s.io/metrics/pkg/apis/metrics/v1alpha1"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -55,8 +56,9 @@ type resourceInfo struct {
 }
 
 type metricInfo struct {
 	name   string
-	levels []float64
+	levels []int64
+	singleObject *autoscalingv2.CrossVersionObjectReference
 
 	targetUtilization   int64
 	expectedUtilization int64
@@ -81,7 +83,7 @@ const (
 	numContainersPerPod = 2
 )
 
-func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
+func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfake.Clientset, *cmfake.FakeCustomMetricsClient) {
 	fakeClient := &fake.Clientset{}
 	fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
@@ -131,30 +133,33 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
 		return true, obj, nil
 	})
 
-	fakeClient.AddProxyReactor("services", func(action core.Action) (handled bool, ret restclient.ResponseWrapper, err error) {
-		var heapsterRawMemResponse []byte
+	fakeMetricsClient := &metricsfake.Clientset{}
+	// NB: we have to sound like Gollum due to gengo's inability to handle already-plural resource names
+	fakeMetricsClient.AddReactor("list", "podmetricses", func(action core.Action) (handled bool, ret runtime.Object, err error) {
 		if tc.resource != nil {
-			metrics := metricsapi.PodMetricsList{}
+			metrics := &metricsapi.PodMetricsList{}
 			for i, resValue := range tc.resource.levels {
 				podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
 				if len(tc.resource.podNames) > i {
 					podName = tc.resource.podNames[i]
 				}
+				// NB: the list reactor actually does label selector filtering for us,
+				// so we have to make sure our results match the label selector
 				podMetric := metricsapi.PodMetrics{
-					ObjectMeta: v1.ObjectMeta{
+					ObjectMeta: metav1.ObjectMeta{
 						Name:      podName,
 						Namespace: testNamespace,
+						Labels:    map[string]string{"name": podNamePrefix},
 					},
-					Timestamp:  unversioned.Time{Time: tc.timestamp},
+					Timestamp:  metav1.Time{Time: tc.timestamp},
 					Containers: make([]metricsapi.ContainerMetrics, numContainersPerPod),
 				}
 
 				for i := 0; i < numContainersPerPod; i++ {
 					podMetric.Containers[i] = metricsapi.ContainerMetrics{
 						Name: fmt.Sprintf("container%v", i),
-						Usage: v1.ResourceList{
-							v1.ResourceName(tc.resource.name): *resource.NewMilliQuantity(
+						Usage: clientv1.ResourceList{
+							clientv1.ResourceName(tc.resource.name): *resource.NewMilliQuantity(
 								int64(resValue),
 								resource.DecimalSI),
 						},
@@ -162,54 +167,84 @@
 				}
 				metrics.Items = append(metrics.Items, podMetric)
 			}
-			heapsterRawMemResponse, _ = json.Marshal(&metrics)
-		} else {
-			// only return the pods that we actually asked for
-			proxyAction := action.(core.ProxyGetAction)
-			pathParts := strings.Split(proxyAction.GetPath(), "/")
-			// pathParts should look like [ api, v1, model, namespaces, $NS, pod-list, $PODS, metrics, $METRIC... ]
-			if len(pathParts) < 9 {
-				return true, nil, fmt.Errorf("invalid heapster path %q", proxyAction.GetPath())
-			}
-
-			podNames := strings.Split(pathParts[7], ",")
-			podPresent := make([]bool, len(tc.metric.levels))
-			for _, name := range podNames {
-				if len(name) <= len(podNamePrefix)+1 {
-					return true, nil, fmt.Errorf("unknown pod %q", name)
-				}
-				num, err := strconv.Atoi(name[len(podNamePrefix)+1:])
-				if err != nil {
-					return true, nil, fmt.Errorf("unknown pod %q", name)
-				}
-				podPresent[num] = true
-			}
-
-			timestamp := tc.timestamp
-			metrics := heapster.MetricResultList{}
-			for i, level := range tc.metric.levels {
-				if !podPresent[i] {
-					continue
-				}
-
-				metric := heapster.MetricResult{
-					Metrics:         []heapster.MetricPoint{{Timestamp: timestamp, Value: uint64(level), FloatValue: &tc.metric.levels[i]}},
-					LatestTimestamp: timestamp,
-				}
-				metrics.Items = append(metrics.Items, metric)
-			}
-			heapsterRawMemResponse, _ = json.Marshal(&metrics)
+			return true, metrics, nil
 		}
 
-		return true, newFakeResponseWrapper(heapsterRawMemResponse), nil
+		return true, nil, fmt.Errorf("no pod resource metrics specified in test client")
 	})
 
-	return fakeClient
+	fakeCMClient := &cmfake.FakeCustomMetricsClient{}
+	fakeCMClient.AddReactor("get", "*", func(action core.Action) (handled bool, ret runtime.Object, err error) {
+		getForAction, wasGetFor := action.(cmfake.GetForAction)
+		if !wasGetFor {
+			return true, nil, fmt.Errorf("expected a get-for action, got %v instead", action)
+		}
+
+		if tc.metric == nil {
+			return true, nil, fmt.Errorf("no custom metrics specified in test client")
+		}
+
+		assert.Equal(t, tc.metric.name, getForAction.GetMetricName(), "the metric requested should have matched the one specified")
+
+		if getForAction.GetName() == "*" {
+			metrics := cmapi.MetricValueList{}
+
+			// multiple objects
+			assert.Equal(t, "pods", getForAction.GetResource().Resource, "the type of object that we requested multiple metrics for should have been pods")
+
+			for i, level := range tc.metric.levels {
+				podMetric := cmapi.MetricValue{
+					DescribedObject: clientv1.ObjectReference{
+						Kind:      "Pod",
+						Name:      fmt.Sprintf("%s-%d", podNamePrefix, i),
+						Namespace: testNamespace,
+					},
+					Timestamp:  metav1.Time{Time: tc.timestamp},
+					MetricName: tc.metric.name,
+					Value:      *resource.NewMilliQuantity(level, resource.DecimalSI),
+				}
+				metrics.Items = append(metrics.Items, podMetric)
+			}
+
+			return true, &metrics, nil
+		} else {
+			name := getForAction.GetName()
+			mapper := api.Registry.RESTMapper()
+			metrics := &cmapi.MetricValueList{}
+			assert.NotNil(t, tc.metric.singleObject, "should have only requested a single-object metric when calling GetObjectMetricReplicas")
+			gk := schema.FromAPIVersionAndKind(tc.metric.singleObject.APIVersion, tc.metric.singleObject.Kind).GroupKind()
+			mapping, err := mapper.RESTMapping(gk)
+			if err != nil {
+				return true, nil, fmt.Errorf("unable to get mapping for %s: %v", gk.String(), err)
+			}
+			groupResource := schema.GroupResource{Group: mapping.GroupVersionKind.Group, Resource: mapping.Resource}
+
+			assert.Equal(t, groupResource.String(), getForAction.GetResource().Resource, "should have requested metrics for the resource matching the GroupKind passed in")
+			assert.Equal(t, tc.metric.singleObject.Name, name, "should have requested metrics for the object matching the name passed in")
+
+			metrics.Items = []cmapi.MetricValue{
+				{
+					DescribedObject: clientv1.ObjectReference{
+						Kind:       tc.metric.singleObject.Kind,
+						APIVersion: tc.metric.singleObject.APIVersion,
+						Name:       name,
+					},
+					Timestamp:  metav1.Time{Time: tc.timestamp},
+					MetricName: tc.metric.name,
+					Value:      *resource.NewMilliQuantity(int64(tc.metric.levels[0]), resource.DecimalSI),
+				},
+			}
+
+			return true, metrics, nil
+		}
+	})
+
+	return fakeClient, fakeMetricsClient, fakeCMClient
 }
 
 func (tc *replicaCalcTestCase) runTest(t *testing.T) {
-	testClient := tc.prepareTestClient(t)
-	metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
+	testClient, testMetricsClient, testCMClient := tc.prepareTestClient(t)
+	metricsClient := metrics.NewRESTMetricsClient(testMetricsClient.MetricsV1alpha1(), testCMClient)
 
 	replicaCalc := &ReplicaCalculator{
 		metricsClient: metricsClient,
@@ -238,7 +273,15 @@ func (tc *replicaCalcTestCase) runTest(t *testing.T) {
 		assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
 
 	} else {
-		outReplicas, outUtilization, outTimestamp, err := replicaCalc.GetMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, selector)
+		var outReplicas int32
+		var outUtilization int64
+		var outTimestamp time.Time
+		var err error
+		if tc.metric.singleObject != nil {
+			outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetObjectMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.singleObject)
+		} else {
+			outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, selector)
+		}
 
 		if tc.expectedError != nil {
 			require.Error(t, err, "there should be an error calculating the replica count")
@@ -327,7 +370,7 @@ func TestReplicaCalcScaleUpCM(t *testing.T) {
 		expectedReplicas: 4,
 		metric: &metricInfo{
 			name:                "qps",
-			levels:              []float64{20.0, 10.0, 30.0},
+			levels:              []int64{20000, 10000, 30000},
 			targetUtilization:   15000,
 			expectedUtilization: 20000,
 		},
@@ -342,7 +385,7 @@ func TestReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) {
 		podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
 		metric: &metricInfo{
 			name:                "qps",
-			levels:              []float64{50.0, 10.0, 30.0},
+			levels:              []int64{50000, 10000, 30000},
 			targetUtilization:   15000,
 			expectedUtilization: 30000,
 		},
@@ -357,7 +400,7 @@ func TestReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
 		podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
 		metric: &metricInfo{
 			name:                "qps",
-			levels:              []float64{50.0, 15.0, 30.0},
+			levels:              []int64{50000, 15000, 30000},
 			targetUtilization:   15000,
 			expectedUtilization: 15000,
 		},
@@ -365,6 +408,25 @@
 	tc.runTest(t)
 }
 
+func TestReplicaCalcScaleUpCMObject(t *testing.T) {
+	tc := replicaCalcTestCase{
+		currentReplicas:  3,
+		expectedReplicas: 4,
+		metric: &metricInfo{
+			name:                "qps",
+			levels:              []int64{20000},
+			targetUtilization:   15000,
+			expectedUtilization: 20000,
+			singleObject: &autoscalingv2.CrossVersionObjectReference{
+				Kind:       "Deployment",
+				APIVersion: "extensions/v1beta1",
+				Name:       "some-deployment",
+			},
+		},
+	}
+	tc.runTest(t)
+}
+
 func TestReplicaCalcScaleDown(t *testing.T) {
 	tc := replicaCalcTestCase{
 		currentReplicas: 5,
@@ -388,7 +450,7 @@ func TestReplicaCalcScaleDownCM(t *testing.T) {
 		expectedReplicas: 3,
 		metric: &metricInfo{
 			name:                "qps",
-			levels:              []float64{12.0, 12.0, 12.0, 12.0, 12.0},
+			levels:              []int64{12000, 12000, 12000, 12000, 12000},
 			targetUtilization:   20000,
 			expectedUtilization: 12000,
 		},
@@ -396,6 +458,25 @@
 	tc.runTest(t)
 }
 
+func TestReplicaCalcScaleDownCMObject(t *testing.T) {
+	tc := replicaCalcTestCase{
+		currentReplicas:  5,
+		expectedReplicas: 3,
+		metric: &metricInfo{
+			name:                "qps",
+			levels:              []int64{12000},
+			targetUtilization:   20000,
+			expectedUtilization: 12000,
+			singleObject: &autoscalingv2.CrossVersionObjectReference{
+				Kind:       "Deployment",
+				APIVersion: "extensions/v1beta1",
+				Name:       "some-deployment",
+			},
+		},
+	}
+	tc.runTest(t)
+}
+
 func TestReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
 	tc := replicaCalcTestCase{
 		currentReplicas: 5,
@@ -437,7 +518,7 @@ func TestReplicaCalcToleranceCM(t *testing.T) {
 		expectedReplicas: 3,
 		metric: &metricInfo{
 			name:                "qps",
-			levels:              []float64{20.0, 21.0, 21.0},
+			levels:              []int64{20000, 21000, 21000},
 			targetUtilization:   20000,
 			expectedUtilization: 20666,
 		},
@@ -445,6 +526,25 @@
 	tc.runTest(t)
 }
 
+func TestReplicaCalcToleranceCMObject(t *testing.T) {
+	tc := replicaCalcTestCase{
+		currentReplicas:  3,
+		expectedReplicas: 3,
+		metric: &metricInfo{
+			name:                "qps",
+			levels:              []int64{20666},
+			targetUtilization:   20000,
+			expectedUtilization: 20666,
+			singleObject: &autoscalingv2.CrossVersionObjectReference{
+				Kind:       "Deployment",
+				APIVersion: "extensions/v1beta1",
+				Name:       "some-deployment",
+			},
+		},
+	}
+	tc.runTest(t)
+}
+
 func TestReplicaCalcSuperfluousMetrics(t *testing.T) {
 	tc := replicaCalcTestCase{
 		currentReplicas: 4,