Added functionality and API for pod autoscaling based on container resources
Signed-off-by: Arjun Naik <anaik@redhat.com>
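The commit teaches the HorizontalPodAutoscaler to scale on the resource usage of one named container instead of the pod as a whole. As an illustrative sketch of the resulting API surface (not part of the commit; it mirrors the ContainerResourceMetricSource fields exercised by the test fixtures below, and the container name "application" is invented), a metric spec of the new ContainerResource type looks roughly like this:

package main

import (
	"fmt"

	autoscalingv2 "k8s.io/api/autoscaling/v2beta2"
	v1 "k8s.io/api/core/v1"
	utilpointer "k8s.io/utils/pointer"
)

func main() {
	// Target 30% average CPU utilization of the "application" container
	// only, rather than of every container in the pod summed together.
	metric := autoscalingv2.MetricSpec{
		Type: autoscalingv2.ContainerResourceMetricSourceType,
		ContainerResource: &autoscalingv2.ContainerResourceMetricSource{
			Name:      v1.ResourceCPU,
			Container: "application", // hypothetical container name
			Target: autoscalingv2.MetricTarget{
				Type:               autoscalingv2.UtilizationMetricType,
				AverageUtilization: utilpointer.Int32Ptr(30),
			},
		},
	}
	fmt.Printf("%+v\n", metric)
}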
@@ -329,6 +329,11 @@ func (a *HorizontalController) computeReplicasForMetric(hpa *autoscalingv2.Horiz
 		if err != nil {
 			return 0, "", time.Time{}, condition, err
 		}
+	case autoscalingv2.ContainerResourceMetricSourceType:
+		replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForContainerResourceMetric(specReplicas, spec, hpa, selector, status)
+		if err != nil {
+			return 0, "", time.Time{}, condition, err
+		}
 	case autoscalingv2.ExternalMetricSourceType:
 		replicaCountProposal, timestampProposal, metricNameProposal, condition, err = a.computeStatusForExternalMetric(specReplicas, statusReplicas, spec, hpa, selector, status)
 		if err != nil {
@@ -435,51 +440,80 @@ func (a *HorizontalController) computeStatusForPodsMetric(currentReplicas int32,
 	return replicaCountProposal, timestampProposal, fmt.Sprintf("pods metric %s", metricSpec.Pods.Metric.Name), autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
 }
 
-// computeStatusForResourceMetric computes the desired number of replicas for the specified metric of type ResourceMetricSourceType.
-func (a *HorizontalController) computeStatusForResourceMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler, selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time, metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
-	if metricSpec.Resource.Target.AverageValue != nil {
+func (a *HorizontalController) computeStatusForResourceMetricGeneric(currentReplicas int32, target autoscalingv2.MetricTarget,
+	resourceName v1.ResourceName, namespace string, container string, selector labels.Selector) (replicaCountProposal int32,
+	metricStatus *autoscalingv2.MetricValueStatus, timestampProposal time.Time, metricNameProposal string,
+	condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
+	if target.AverageValue != nil {
 		var rawProposal int64
-		replicaCountProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetRawResourceReplicas(currentReplicas, metricSpec.Resource.Target.AverageValue.MilliValue(), metricSpec.Resource.Name, hpa.Namespace, selector)
+		replicaCountProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetRawResourceReplicas(currentReplicas, target.AverageValue.MilliValue(), resourceName, namespace, selector, container)
 		if err != nil {
-			condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetResourceMetric", err)
-			return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
+			return 0, nil, time.Time{}, "", condition, fmt.Errorf("failed to get %s utilization: %v", resourceName, err)
 		}
-		metricNameProposal = fmt.Sprintf("%s resource", metricSpec.Resource.Name)
-		*status = autoscalingv2.MetricStatus{
-			Type: autoscalingv2.ResourceMetricSourceType,
-			Resource: &autoscalingv2.ResourceMetricStatus{
-				Name: metricSpec.Resource.Name,
-				Current: autoscalingv2.MetricValueStatus{
-					AverageValue: resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
-				},
-			},
+		metricNameProposal = fmt.Sprintf("%s resource", resourceName.String())
+		status := autoscalingv2.MetricValueStatus{
+			AverageValue: resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
 		}
-		return replicaCountProposal, timestampProposal, metricNameProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
+		return replicaCountProposal, &status, timestampProposal, metricNameProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
 	}
-	if metricSpec.Resource.Target.AverageUtilization == nil {
+
+	if target.AverageUtilization == nil {
 		errMsg := "invalid resource metric source: neither a utilization target nor a value target was set"
 		err = fmt.Errorf(errMsg)
-		condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetResourceMetric", err)
-		return 0, time.Time{}, "", condition, fmt.Errorf(errMsg)
+		return 0, nil, time.Time{}, "", condition, fmt.Errorf(errMsg)
 	}
-	targetUtilization := *metricSpec.Resource.Target.AverageUtilization
-	replicaCountProposal, percentageProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, metricSpec.Resource.Name, hpa.Namespace, selector)
+
+	targetUtilization := *target.AverageUtilization
+	replicaCountProposal, percentageProposal, rawProposal, timestampProposal, err := a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, resourceName, namespace, selector, container)
 	if err != nil {
+		return 0, nil, time.Time{}, "", condition, fmt.Errorf("failed to get %s utilization: %v", resourceName, err)
 	}
+
+	metricNameProposal = fmt.Sprintf("%s resource utilization (percentage of request)", resourceName)
+	status := autoscalingv2.MetricValueStatus{
+		AverageUtilization: &percentageProposal,
+		AverageValue:       resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
+	}
+	return replicaCountProposal, &status, timestampProposal, metricNameProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
+}
+
+// computeStatusForResourceMetric computes the desired number of replicas for the specified metric of type ResourceMetricSourceType.
+func (a *HorizontalController) computeStatusForResourceMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler,
+	selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time,
+	metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
+	replicaCountProposal, metricValueStatus, timestampProposal, metricNameProposal, condition, err := a.computeStatusForResourceMetricGeneric(currentReplicas, metricSpec.Resource.Target, metricSpec.Resource.Name, hpa.Namespace, "", selector)
 	if err != nil {
 		condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetResourceMetric", err)
-		return 0, time.Time{}, "", condition, fmt.Errorf("failed to get %s utilization: %v", metricSpec.Resource.Name, err)
+		return replicaCountProposal, timestampProposal, metricNameProposal, condition, err
 	}
-	metricNameProposal = fmt.Sprintf("%s resource utilization (percentage of request)", metricSpec.Resource.Name)
 	*status = autoscalingv2.MetricStatus{
 		Type: autoscalingv2.ResourceMetricSourceType,
 		Resource: &autoscalingv2.ResourceMetricStatus{
-			Name: metricSpec.Resource.Name,
-			Current: autoscalingv2.MetricValueStatus{
-				AverageUtilization: &percentageProposal,
-				AverageValue:       resource.NewMilliQuantity(rawProposal, resource.DecimalSI),
-			},
+			Name:    metricSpec.Resource.Name,
+			Current: *metricValueStatus,
 		},
 	}
-	return replicaCountProposal, timestampProposal, metricNameProposal, autoscalingv2.HorizontalPodAutoscalerCondition{}, nil
+	return replicaCountProposal, timestampProposal, metricNameProposal, condition, nil
 }
+
+// computeStatusForContainerResourceMetric computes the desired number of replicas for the specified metric of type ResourceMetricSourceType.
+func (a *HorizontalController) computeStatusForContainerResourceMetric(currentReplicas int32, metricSpec autoscalingv2.MetricSpec, hpa *autoscalingv2.HorizontalPodAutoscaler,
+	selector labels.Selector, status *autoscalingv2.MetricStatus) (replicaCountProposal int32, timestampProposal time.Time,
+	metricNameProposal string, condition autoscalingv2.HorizontalPodAutoscalerCondition, err error) {
+	replicaCountProposal, metricValueStatus, timestampProposal, metricNameProposal, condition, err := a.computeStatusForResourceMetricGeneric(currentReplicas, metricSpec.ContainerResource.Target, metricSpec.ContainerResource.Name, hpa.Namespace, metricSpec.ContainerResource.Container, selector)
+	if err != nil {
+		condition = a.getUnableComputeReplicaCountCondition(hpa, "FailedGetContainerResourceMetric", err)
+		return replicaCountProposal, timestampProposal, metricNameProposal, condition, err
+	}
+	*status = autoscalingv2.MetricStatus{
+		Type: autoscalingv2.ContainerResourceMetricSourceType,
+		ContainerResource: &autoscalingv2.ContainerResourceMetricStatus{
+			Name:      metricSpec.ContainerResource.Name,
+			Container: metricSpec.ContainerResource.Container,
+			Current:   *metricValueStatus,
+		},
+	}
+	return replicaCountProposal, timestampProposal, metricNameProposal, condition, nil
+}
 
 // computeStatusForExternalMetric computes the desired number of replicas for the specified metric of type ExternalMetricSourceType.
@@ -783,7 +817,7 @@ func getReplicasChangePerPeriod(periodSeconds int32, scaleEvents []timestampedSc
 
 }
 
-func (a *HorizontalController) getUnableComputeReplicaCountCondition(hpa *autoscalingv2.HorizontalPodAutoscaler, reason string, err error) (condition autoscalingv2.HorizontalPodAutoscalerCondition) {
+func (a *HorizontalController) getUnableComputeReplicaCountCondition(hpa runtime.Object, reason string, err error) (condition autoscalingv2.HorizontalPodAutoscalerCondition) {
 	a.eventRecorder.Event(hpa, v1.EventTypeWarning, reason, err.Error())
 	return autoscalingv2.HorizontalPodAutoscalerCondition{
 		Type: autoscalingv2.ScalingActive,
@@ -336,9 +336,18 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
 				Spec: v1.PodSpec{
 					Containers: []v1.Container{
 						{
 							Name: "container1",
 							Resources: v1.ResourceRequirements{
 								Requests: v1.ResourceList{
-									v1.ResourceCPU: reportedCPURequest,
+									v1.ResourceCPU: *resource.NewMilliQuantity(reportedCPURequest.MilliValue()/2, resource.DecimalSI),
 								},
 							},
 						},
+						{
+							Name: "container2",
+							Resources: v1.ResourceRequirements{
+								Requests: v1.ResourceList{
+									v1.ResourceCPU: *resource.NewMilliQuantity(reportedCPURequest.MilliValue()/2, resource.DecimalSI),
+								},
+							},
+						},
@@ -509,13 +518,24 @@ func (tc *testCase) prepareTestClient(t *testing.T) (*fake.Clientset, *metricsfa
 				Window: metav1.Duration{Duration: time.Minute},
 				Containers: []metricsapi.ContainerMetrics{
 					{
-						Name: "container",
+						Name: "container1",
 						Usage: v1.ResourceList{
 							v1.ResourceCPU: *resource.NewMilliQuantity(
-								int64(cpu),
+								int64(cpu/2),
 								resource.DecimalSI),
 							v1.ResourceMemory: *resource.NewQuantity(
-								int64(1024*1024),
+								int64(1024*1024/2),
 								resource.BinarySI),
 						},
 					},
+					{
+						Name: "container2",
+						Usage: v1.ResourceList{
+							v1.ResourceCPU: *resource.NewMilliQuantity(
+								int64(cpu/2),
+								resource.DecimalSI),
+							v1.ResourceMemory: *resource.NewQuantity(
+								int64(1024*1024/2),
+								resource.BinarSI),
+						},
+					},
@@ -773,6 +793,31 @@ func TestScaleUp(t *testing.T) {
 	tc.runTest(t)
 }
 
+func TestScaleUpContainer(t *testing.T) {
+	tc := testCase{
+		minReplicas:             2,
+		maxReplicas:             6,
+		specReplicas:            3,
+		statusReplicas:          3,
+		expectedDesiredReplicas: 5,
+		metricsTarget: []autoscalingv2.MetricSpec{{
+			Type: autoscalingv2.ContainerResourceMetricSourceType,
+			ContainerResource: &autoscalingv2.ContainerResourceMetricSource{
+				Name: v1.ResourceCPU,
+				Target: autoscalingv2.MetricTarget{
+					Type:               autoscalingv2.UtilizationMetricType,
+					AverageUtilization: utilpointer.Int32Ptr(30),
+				},
+				Container: "container1",
+			},
+		}},
+		reportedLevels:      []uint64{300, 500, 700},
+		reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		useMetricsAPI:       true,
+	}
+	tc.runTest(t)
+}
+
 func TestScaleUpUnreadyLessScale(t *testing.T) {
 	tc := testCase{
 		minReplicas:             2,
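As a sanity check on TestScaleUpContainer above (illustrative only, not part of the commit): the fixture reports CPU levels of 300m, 500m and 700m against 1000m requests, and the test client splits usage and requests evenly across the two containers, so container1's average utilization equals the pod-level 50%. Plugging that into the standard HPA proportional formula, desiredReplicas = ceil(currentReplicas * currentUtilization / targetUtilization), reproduces the expected 5 replicas:

package main

import (
	"fmt"
	"math"
)

func main() {
	// 3 replicas at 300m, 500m, 700m CPU against 1000m requests.
	currentReplicas := 3.0
	avgUtilization := (300.0 + 500.0 + 700.0) / 3.0 / 1000.0 * 100.0 // 50%
	targetUtilization := 30.0

	// desiredReplicas = ceil(currentReplicas * currentUtilization / target)
	desired := math.Ceil(currentReplicas * avgUtilization / targetUtilization)
	fmt.Println(desired) // 5, matching expectedDesiredReplicas
}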
@@ -1269,6 +1314,32 @@ func TestScaleDown(t *testing.T) {
 	tc.runTest(t)
 }
 
+func TestScaleDownContainerResource(t *testing.T) {
+	tc := testCase{
+		minReplicas:             2,
+		maxReplicas:             6,
+		specReplicas:            5,
+		statusReplicas:          5,
+		expectedDesiredReplicas: 3,
+		reportedLevels:          []uint64{100, 300, 500, 250, 250},
+		reportedCPURequests:     []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+		metricsTarget: []autoscalingv2.MetricSpec{{
+			Type: autoscalingv2.ContainerResourceMetricSourceType,
+			ContainerResource: &autoscalingv2.ContainerResourceMetricSource{
+				Container: "container2",
+				Name:      v1.ResourceCPU,
+				Target: autoscalingv2.MetricTarget{
+					Type:               autoscalingv2.UtilizationMetricType,
+					AverageUtilization: utilpointer.Int32Ptr(50),
+				},
+			},
+		}},
+		useMetricsAPI:   true,
+		recommendations: []timestampedRecommendation{},
+	}
+	tc.runTest(t)
+}
+
 func TestScaleDownWithScalingRules(t *testing.T) {
 	tc := testCase{
 		minReplicas:             2,
@@ -2809,7 +2880,7 @@ func TestScaleDownRCImmediately(t *testing.T) {
 	tc.runTest(t)
 }
 
-func TestAvoidUncessaryUpdates(t *testing.T) {
+func TestAvoidUnnecessaryUpdates(t *testing.T) {
 	now := metav1.Time{Time: time.Now().Add(-time.Hour)}
 	tc := testCase{
 		minReplicas:             2,
@@ -126,14 +126,11 @@ func (tc *legacyReplicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clien
 			Timestamp:  metav1.Time{Time: tc.timestamp},
 			Containers: make([]metricsapi.ContainerMetrics, numContainersPerPod),
 		}
 
-		for i := 0; i < numContainersPerPod; i++ {
+		for i, m := range resValue {
 			podMetric.Containers[i] = metricsapi.ContainerMetrics{
 				Name: fmt.Sprintf("container%v", i),
 				Usage: v1.ResourceList{
-					v1.ResourceName(tc.resource.name): *resource.NewMilliQuantity(
-						int64(resValue),
-						resource.DecimalSI),
+					tc.resource.name: *resource.NewMilliQuantity(m, resource.DecimalSI),
 				},
 			}
 		}
@@ -209,7 +206,7 @@ func (tc *legacyReplicaCalcTestCase) runTest(t *testing.T) {
 	}
 
 	if tc.resource != nil {
-		outReplicas, outUtilization, outRawValue, outTimestamp, err := replicaCalc.GetResourceReplicas(tc.currentReplicas, tc.resource.targetUtilization, tc.resource.name, testNamespace, selector)
+		outReplicas, outUtilization, outRawValue, outTimestamp, err := replicaCalc.GetResourceReplicas(tc.currentReplicas, tc.resource.targetUtilization, tc.resource.name, testNamespace, selector, "")
 
 		if tc.expectedError != nil {
 			require.Error(t, err, "there should be an error calculating the replica count")
@@ -244,7 +241,7 @@ func TestLegacyReplicaCalcDisjointResourcesMetrics(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("1.0")},
-			levels:   []int64{100},
+			levels:   makePodMetricLevels(100),
 			podNames: []string{"an-older-pod-name"},
 
 			targetUtilization: 100,
@@ -260,7 +257,7 @@ func TestLegacyReplicaCalcScaleUp(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-			levels:   []int64{300, 500, 700},
+			levels:   makePodMetricLevels(300, 500, 700),
 
 			targetUtilization:   30,
 			expectedUtilization: 50,
@@ -278,7 +275,7 @@ func TestLegacyReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-			levels:   []int64{300, 500, 700},
+			levels:   makePodMetricLevels(300, 500, 700),
 
 			targetUtilization:   30,
 			expectedUtilization: 60,
@@ -296,7 +293,7 @@ func TestLegacyReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-			levels:   []int64{400, 500, 700},
+			levels:   makePodMetricLevels(400, 500, 700),
 
 			targetUtilization:   30,
 			expectedUtilization: 40,
@@ -357,7 +354,7 @@ func TestLegacyReplicaCalcScaleDown(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-			levels:   []int64{100, 300, 500, 250, 250},
+			levels:   makePodMetricLevels(100, 300, 500, 250, 250),
 
 			targetUtilization:   50,
 			expectedUtilization: 28,
@@ -389,7 +386,7 @@ func TestLegacyReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-			levels:   []int64{100, 300, 500, 250, 250},
+			levels:   makePodMetricLevels(100, 300, 500, 250, 250),
 
 			targetUtilization:   50,
 			expectedUtilization: 30,
@@ -406,7 +403,7 @@ func TestLegacyReplicaCalcTolerance(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
-			levels:   []int64{1010, 1030, 1020},
+			levels:   makePodMetricLevels(1010, 1030, 1020),
 
 			targetUtilization:   100,
 			expectedUtilization: 102,
@@ -437,7 +434,7 @@ func TestLegacyReplicaCalcSuperfluousMetrics(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-			levels:   []int64{4000, 9500, 3000, 7000, 3200, 2000},
+			levels:   makePodMetricLevels(4000, 9500, 3000, 7000, 3200, 2000),
 			targetUtilization:   100,
 			expectedUtilization: 587,
 			expectedValue:       numContainersPerPod * 5875,
@@ -453,7 +450,7 @@ func TestLegacyReplicaCalcMissingMetrics(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-			levels:   []int64{400, 95},
+			levels:   makePodMetricLevels(400, 95),
 
 			targetUtilization:   100,
 			expectedUtilization: 24,
@@ -470,7 +467,7 @@ func TestLegacyReplicaCalcEmptyMetrics(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-			levels:   []int64{},
+			levels:   makePodMetricLevels(),
 
 			targetUtilization: 100,
 		},
@@ -485,7 +482,7 @@ func TestLegacyReplicaCalcEmptyCPURequest(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{},
-			levels:   []int64{200},
+			levels:   makePodMetricLevels(200),
 
 			targetUtilization: 100,
 		},
@@ -500,7 +497,7 @@ func TestLegacyReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
-			levels:   []int64{1000},
+			levels:   makePodMetricLevels(1000),
 
 			targetUtilization:   100,
 			expectedUtilization: 100,
@@ -517,7 +514,7 @@ func TestLegacyReplicaCalcMissingMetricsNoChangeGt(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
-			levels:   []int64{1900},
+			levels:   makePodMetricLevels(1900),
 
 			targetUtilization:   100,
 			expectedUtilization: 190,
@@ -534,7 +531,7 @@ func TestLegacyReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
-			levels:   []int64{600},
+			levels:   makePodMetricLevels(600),
 
 			targetUtilization:   100,
 			expectedUtilization: 60,
@@ -552,7 +549,7 @@ func TestLegacyReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-			levels:   []int64{100, 450},
+			levels:   makePodMetricLevels(100, 450),
 
 			targetUtilization:   50,
 			expectedUtilization: 45,
@@ -570,7 +567,7 @@ func TestLegacyReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-			levels:   []int64{100, 2000},
+			levels:   makePodMetricLevels(100, 2000),
 
 			targetUtilization:   50,
 			expectedUtilization: 200,
@@ -588,7 +585,7 @@ func TestLegacyReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
 		resource: &resourceInfo{
 			name:     v1.ResourceCPU,
 			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
-			levels:   []int64{100, 100, 100},
+			levels:   makePodMetricLevels(100, 100, 100),
 
 			targetUtilization:   50,
 			expectedUtilization: 10,
@@ -627,18 +624,18 @@ func TestLegacyReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
 		expectedReplicas: finalPods,
 		resource: &resourceInfo{
 			name: v1.ResourceCPU,
-			levels: []int64{
-				totalUsedCPUOfAllPods / 10,
-				totalUsedCPUOfAllPods / 10,
-				totalUsedCPUOfAllPods / 10,
-				totalUsedCPUOfAllPods / 10,
-				totalUsedCPUOfAllPods / 10,
-				totalUsedCPUOfAllPods / 10,
-				totalUsedCPUOfAllPods / 10,
-				totalUsedCPUOfAllPods / 10,
-				totalUsedCPUOfAllPods / 10,
-				totalUsedCPUOfAllPods / 10,
-			},
+			levels: makePodMetricLevels(
+				totalUsedCPUOfAllPods/10,
+				totalUsedCPUOfAllPods/10,
+				totalUsedCPUOfAllPods/10,
+				totalUsedCPUOfAllPods/10,
+				totalUsedCPUOfAllPods/10,
+				totalUsedCPUOfAllPods/10,
+				totalUsedCPUOfAllPods/10,
+				totalUsedCPUOfAllPods/10,
+				totalUsedCPUOfAllPods/10,
+				totalUsedCPUOfAllPods/10,
+			),
 			requests: []resource.Quantity{
 				resource.MustParse(fmt.Sprint(perPodRequested+100) + "m"),
 				resource.MustParse(fmt.Sprint(perPodRequested-100) + "m"),
@@ -20,6 +20,7 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
         "//staging/src/k8s.io/metrics/pkg/apis/custom_metrics/v1beta2:go_default_library",
         "//staging/src/k8s.io/metrics/pkg/apis/metrics/v1alpha1:go_default_library",
+        "//staging/src/k8s.io/metrics/pkg/apis/metrics/v1beta1:go_default_library",
        "//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1:go_default_library",
         "//staging/src/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
         "//staging/src/k8s.io/metrics/pkg/client/external_metrics:go_default_library",
@@ -38,8 +38,9 @@ type PodMetricsInfo map[string]PodMetric
 // resource metrics as well as pod-level arbitrary metrics
 type MetricsClient interface {
 	// GetResourceMetric gets the given resource metric (and an associated oldest timestamp)
-	// for all pods matching the specified selector in the given namespace
-	GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error)
+	// for the specified named container in all pods matching the specified selector in the given namespace and when
+	// the container is an empty string it returns the sum of all the container metrics.
+	GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector, container string) (PodMetricsInfo, time.Time, error)
 
 	// GetRawMetric gets the given metric (and an associated oldest timestamp)
 	// for all pods matching the specified selector in the given namespace
@@ -63,7 +63,7 @@ func NewHeapsterMetricsClient(client clientset.Interface, namespace, scheme, ser
 	}
 }
 
-func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
+func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector, container string) (PodMetricsInfo, time.Time, error) {
 	metricPath := fmt.Sprintf("/apis/metrics/v1alpha1/namespaces/%s/pods", namespace)
 	params := map[string]string{"labelSelector": selector.String()}
 
@@ -92,13 +92,15 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, name
 		podSum := int64(0)
 		missing := len(m.Containers) == 0
 		for _, c := range m.Containers {
-			resValue, found := c.Usage[v1.ResourceName(resource)]
-			if !found {
-				missing = true
-				klog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
-				continue
+			if container == "" || container == c.Name {
+				resValue, found := c.Usage[v1.ResourceName(resource)]
+				if !found {
+					missing = true
+					klog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
+					continue
+				}
+				podSum += resValue.MilliValue()
 			}
-			podSum += resValue.MilliValue()
 		}
 
 		if !missing {
@@ -222,7 +222,7 @@ func (tc *testCase) runTest(t *testing.T) {
 	metricsClient := NewHeapsterMetricsClient(testClient, DefaultHeapsterNamespace, DefaultHeapsterScheme, DefaultHeapsterService, DefaultHeapsterPort)
 	isResource := len(tc.resourceName) > 0
 	if isResource {
-		info, timestamp, err := metricsClient.GetResourceMetric(tc.resourceName, tc.namespace, tc.selector)
+		info, timestamp, err := metricsClient.GetResourceMetric(tc.resourceName, tc.namespace, tc.selector, "")
 		tc.verifyResults(t, info, timestamp, err)
 	} else {
 		info, timestamp, err := metricsClient.GetRawMetric(tc.metricName, tc.namespace, tc.selector, tc.metricSelector)
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
customapi "k8s.io/metrics/pkg/apis/custom_metrics/v1beta2"
|
||||
metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1"
|
||||
resourceclient "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
|
||||
customclient "k8s.io/metrics/pkg/client/custom_metrics"
|
||||
externalclient "k8s.io/metrics/pkg/client/external_metrics"
|
||||
@@ -63,7 +64,7 @@ type resourceMetricsClient struct {
|
||||
|
||||
// GetResourceMetric gets the given resource metric (and an associated oldest timestamp)
|
||||
// for all pods matching the specified selector in the given namespace
|
||||
func (c *resourceMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
|
||||
func (c *resourceMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector, container string) (PodMetricsInfo, time.Time, error) {
|
||||
metrics, err := c.client.PodMetricses(namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
|
||||
if err != nil {
|
||||
return nil, time.Time{}, fmt.Errorf("unable to fetch metrics from resource metrics API: %v", err)
|
||||
@@ -72,34 +73,66 @@ func (c *resourceMetricsClient) GetResourceMetric(resource v1.ResourceName, name
|
||||
if len(metrics.Items) == 0 {
|
||||
return nil, time.Time{}, fmt.Errorf("no metrics returned from resource metrics API")
|
||||
}
|
||||
var res PodMetricsInfo
|
||||
if container != "" {
|
||||
res, err = getContainerMetrics(metrics.Items, resource, container)
|
||||
if err != nil {
|
||||
return nil, time.Time{}, fmt.Errorf("failed to get container metrics: %v", err)
|
||||
}
|
||||
} else {
|
||||
res = getPodMetrics(metrics.Items, resource)
|
||||
}
|
||||
timestamp := metrics.Items[0].Timestamp.Time
|
||||
return res, timestamp, nil
|
||||
}
|
||||
|
||||
res := make(PodMetricsInfo, len(metrics.Items))
|
||||
func getContainerMetrics(rawMetrics []metricsapi.PodMetrics, resource v1.ResourceName, container string) (PodMetricsInfo, error) {
|
||||
res := make(PodMetricsInfo, len(rawMetrics))
|
||||
for _, m := range rawMetrics {
|
||||
containerFound := false
|
||||
for _, c := range m.Containers {
|
||||
if c.Name == container {
|
||||
containerFound = true
|
||||
if val, resFound := c.Usage[resource]; resFound {
|
||||
res[m.Name] = PodMetric{
|
||||
Timestamp: m.Timestamp.Time,
|
||||
Window: m.Window.Duration,
|
||||
Value: val.MilliValue(),
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
if !containerFound {
|
||||
return nil, fmt.Errorf("container %s not present in metrics for pod %s/%s", container, m.Namespace, m.Name)
|
||||
}
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
for _, m := range metrics.Items {
|
||||
func getPodMetrics(rawMetrics []metricsapi.PodMetrics, resource v1.ResourceName) PodMetricsInfo {
|
||||
res := make(PodMetricsInfo, len(rawMetrics))
|
||||
for _, m := range rawMetrics {
|
||||
podSum := int64(0)
|
||||
missing := len(m.Containers) == 0
|
||||
for _, c := range m.Containers {
|
||||
resValue, found := c.Usage[v1.ResourceName(resource)]
|
||||
resValue, found := c.Usage[resource]
|
||||
if !found {
|
||||
missing = true
|
||||
klog.V(2).Infof("missing resource metric %v for container %s in pod %s/%s", resource, c.Name, namespace, m.Name)
|
||||
break // containers loop
|
||||
klog.V(2).Infof("missing resource metric %v for %s/%s", resource, m.Namespace, m.Name)
|
||||
break
|
||||
}
|
||||
podSum += resValue.MilliValue()
|
||||
}
|
||||
|
||||
if !missing {
|
||||
res[m.Name] = PodMetric{
|
||||
Timestamp: m.Timestamp.Time,
|
||||
Window: m.Window.Duration,
|
||||
Value: int64(podSum),
|
||||
Value: podSum,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
timestamp := metrics.Items[0].Timestamp.Time
|
||||
|
||||
return res, timestamp, nil
|
||||
return res
|
||||
}
|
||||
|
||||
// customMetricsClient implements the custom-metrics-related parts of MetricsClient,
|
||||
|
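To make the aggregation rule concrete, here is a small self-contained sketch (not part of the commit; the pod and container names are invented) of the summing behavior that getPodMetrics and getContainerMetrics implement above: an empty container name sums the usage of every container in the pod, while a named container contributes alone.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metricsapi "k8s.io/metrics/pkg/apis/metrics/v1beta1"
)

// sumPodCPU mirrors the aggregation choice in the client code: with
// container == "" every container's usage is summed; otherwise only the
// named container contributes.
func sumPodCPU(m metricsapi.PodMetrics, container string) int64 {
	var sum int64
	for _, c := range m.Containers {
		if container == "" || container == c.Name {
			if val, ok := c.Usage[v1.ResourceCPU]; ok {
				sum += val.MilliValue()
			}
		}
	}
	return sum
}

func main() {
	m := metricsapi.PodMetrics{
		Containers: []metricsapi.ContainerMetrics{
			{Name: "app", Usage: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(400, resource.DecimalSI)}},
			{Name: "sidecar", Usage: v1.ResourceList{v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI)}},
		},
	}
	fmt.Println(sumPodCPU(m, ""))    // 500 (pod-level sum)
	fmt.Println(sumPodCPU(m, "app")) // 400 (single container)
}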
@@ -50,12 +50,13 @@ type restClientTestCase struct {
 	targetTimestamp      int
 	window               time.Duration
 	reportedMetricPoints []metricPoint
-	reportedPodMetrics   [][]int64
+	reportedPodMetrics   []map[string]int64
 	singleObject         *autoscalingapi.CrossVersionObjectReference
 
 	namespace           string
 	selector            labels.Selector
 	resourceName        v1.ResourceName
+	container           string
 	metricName          string
 	metricSelector      *metav1.LabelSelector
 	metricLabelSelector labels.Selector
@@ -91,9 +92,9 @@ func (tc *restClientTestCase) prepareTestClient(t *testing.T) (*metricsfake.Clie
 				Window:     metav1.Duration{Duration: tc.window},
 				Containers: []metricsapi.ContainerMetrics{},
 			}
-			for j, cpu := range containers {
+			for containerName, cpu := range containers {
 				cm := metricsapi.ContainerMetrics{
-					Name: fmt.Sprintf("%s-%d-container-%d", podNamePrefix, i, j),
+					Name: containerName,
 					Usage: v1.ResourceList{
 						v1.ResourceCPU: *resource.NewMilliQuantity(
 							cpu,
@@ -230,7 +231,7 @@ func (tc *restClientTestCase) runTest(t *testing.T) {
 	isResource := len(tc.resourceName) > 0
 	isExternal := tc.metricSelector != nil
 	if isResource {
-		info, timestamp, err := metricsClient.GetResourceMetric(v1.ResourceName(tc.resourceName), tc.namespace, tc.selector)
+		info, timestamp, err := metricsClient.GetResourceMetric(v1.ResourceName(tc.resourceName), tc.namespace, tc.selector, tc.container)
 		tc.verifyResults(t, info, timestamp, err)
 	} else if isExternal {
 		tc.metricLabelSelector, err = metav1.LabelSelectorAsSelector(tc.metricSelector)
@@ -253,7 +254,7 @@ func (tc *restClientTestCase) runTest(t *testing.T) {
 	}
 }
 
-func TestRESTClientCPU(t *testing.T) {
+func TestRESTClientPodCPU(t *testing.T) {
 	targetTimestamp := 1
 	window := 30 * time.Second
 	tc := restClientTestCase{
@@ -265,7 +266,25 @@ func TestRESTClientCPU(t *testing.T) {
 		resourceName:    v1.ResourceCPU,
 		targetTimestamp: targetTimestamp,
 		window:          window,
-		reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
+		reportedPodMetrics: []map[string]int64{{"test": 5000}, {"test": 5000}, {"test": 5000}},
 	}
 	tc.runTest(t)
 }
+
+func TestRESTClientContainerCPU(t *testing.T) {
+	targetTimestamp := 1
+	window := 30 * time.Second
+	tc := restClientTestCase{
+		desiredMetricValues: PodMetricsInfo{
+			"test-pod-0": {Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
+			"test-pod-1": {Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
+			"test-pod-2": {Value: 5000, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
+		},
+		container:       "test-1",
+		resourceName:    v1.ResourceCPU,
+		targetTimestamp: targetTimestamp,
+		window:          window,
+		reportedPodMetrics: []map[string]int64{{"test-1": 5000, "test-2": 500}, {"test-1": 5000, "test-2": 500}, {"test-1": 5000, "test-2": 500}},
+	}
+	tc.runTest(t)
+}
@@ -362,17 +381,17 @@ func TestRESTClientExternalEmptyMetrics(t *testing.T) {
 	tc.runTest(t)
 }
 
-func TestRESTClientCPUEmptyMetrics(t *testing.T) {
+func TestRESTClientPodCPUEmptyMetrics(t *testing.T) {
 	tc := restClientTestCase{
 		resourceName:         v1.ResourceCPU,
 		desiredError:         fmt.Errorf("no metrics returned from resource metrics API"),
 		reportedMetricPoints: []metricPoint{},
-		reportedPodMetrics:   [][]int64{},
+		reportedPodMetrics:   []map[string]int64{},
 	}
 	tc.runTest(t)
 }
 
-func TestRESTClientCPUEmptyMetricsForOnePod(t *testing.T) {
+func TestRESTClientPodCPUEmptyMetricsForOnePod(t *testing.T) {
 	targetTimestamp := 1
 	window := 30 * time.Second
 	tc := restClientTestCase{
@@ -383,7 +402,25 @@ func TestRESTClientCPUEmptyMetricsForOnePod(t *testing.T) {
 		},
 		targetTimestamp: targetTimestamp,
 		window:          window,
-		reportedPodMetrics: [][]int64{{100}, {300, 400}, {}},
+		reportedPodMetrics: []map[string]int64{{"test-1": 100}, {"test-1": 300, "test-2": 400}, {}},
 	}
 	tc.runTest(t)
 }
+
+func TestRESTClientContainerCPUEmptyMetricsForOnePod(t *testing.T) {
+	targetTimestamp := 1
+	window := 30 * time.Second
+	tc := restClientTestCase{
+		resourceName: v1.ResourceCPU,
+		desiredMetricValues: PodMetricsInfo{
+			"test-pod-0": {Value: 100, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
+			"test-pod-1": {Value: 300, Timestamp: offsetTimestampBy(targetTimestamp), Window: window},
+		},
+		container:       "test-1",
+		targetTimestamp: targetTimestamp,
+		window:          window,
+		desiredError:    fmt.Errorf("failed to get container metrics"),
+		reportedPodMetrics: []map[string]int64{{"test-1": 100}, {"test-1": 300, "test-2": 400}, {}},
+	}
+	tc.runTest(t)
+}
@@ -61,8 +61,8 @@ func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podLister c
 
 // GetResourceReplicas calculates the desired replica count based on a target resource utilization percentage
 // of the given resource for pods matching the given selector in the given namespace, and the current replica count
-func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUtilization int32, resource v1.ResourceName, namespace string, selector labels.Selector) (replicaCount int32, utilization int32, rawUtilization int64, timestamp time.Time, err error) {
-	metrics, timestamp, err := c.metricsClient.GetResourceMetric(resource, namespace, selector)
+func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUtilization int32, resource v1.ResourceName, namespace string, selector labels.Selector, container string) (replicaCount int32, utilization int32, rawUtilization int64, timestamp time.Time, err error) {
+	metrics, timestamp, err := c.metricsClient.GetResourceMetric(resource, namespace, selector, container)
 	if err != nil {
 		return 0, 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err)
 	}
@@ -79,7 +79,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
 	readyPodCount, unreadyPods, missingPods, ignoredPods := groupPods(podList, metrics, resource, c.cpuInitializationPeriod, c.delayOfInitialReadinessStatus)
 	removeMetricsForPods(metrics, ignoredPods)
 	removeMetricsForPods(metrics, unreadyPods)
-	requests, err := calculatePodRequests(podList, resource)
+	requests, err := calculatePodRequests(podList, container, resource)
 	if err != nil {
 		return 0, 0, 0, time.Time{}, err
 	}
@@ -150,8 +150,8 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
 
 // GetRawResourceReplicas calculates the desired replica count based on a target resource utilization (as a raw milli-value)
 // for pods matching the given selector in the given namespace, and the current replica count
-func (c *ReplicaCalculator) GetRawResourceReplicas(currentReplicas int32, targetUtilization int64, resource v1.ResourceName, namespace string, selector labels.Selector) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
-	metrics, timestamp, err := c.metricsClient.GetResourceMetric(resource, namespace, selector)
+func (c *ReplicaCalculator) GetRawResourceReplicas(currentReplicas int32, targetUtilization int64, resource v1.ResourceName, namespace string, selector labels.Selector, container string) (replicaCount int32, utilization int64, timestamp time.Time, err error) {
+	metrics, timestamp, err := c.metricsClient.GetResourceMetric(resource, namespace, selector, container)
 	if err != nil {
 		return 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err)
 	}
@@ -414,15 +414,17 @@ func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1
 	return
 }
 
-func calculatePodRequests(pods []*v1.Pod, resource v1.ResourceName) (map[string]int64, error) {
+func calculatePodRequests(pods []*v1.Pod, container string, resource v1.ResourceName) (map[string]int64, error) {
 	requests := make(map[string]int64, len(pods))
 	for _, pod := range pods {
 		podSum := int64(0)
-		for _, container := range pod.Spec.Containers {
-			if containerRequest, ok := container.Resources.Requests[resource]; ok {
-				podSum += containerRequest.MilliValue()
-			} else {
-				return nil, fmt.Errorf("missing request for %s", resource)
+		for _, c := range pod.Spec.Containers {
+			if container == "" || container == c.Name {
+				if containerRequest, ok := c.Resources.Requests[resource]; ok {
+					podSum += containerRequest.MilliValue()
+				} else {
+					return nil, fmt.Errorf("missing request for %s", resource)
+				}
 			}
 		}
 		requests[pod.Name] = podSum
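Since utilization is the ratio of usage to requests, the container filter has to be applied on both sides, which is what the calculatePodRequests change above does for the denominator. A standalone sketch of that behavior (not part of the commit; the pod and container names are invented):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// podRequestMilli is an analog of calculatePodRequests: with container == ""
// it sums the requests of every container in the pod; with a name it counts
// only that container, and a missing request is still an error.
func podRequestMilli(pod v1.Pod, container string, res v1.ResourceName) (int64, error) {
	var sum int64
	for _, c := range pod.Spec.Containers {
		if container == "" || container == c.Name {
			req, ok := c.Resources.Requests[res]
			if !ok {
				return 0, fmt.Errorf("missing request for %s", res)
			}
			sum += req.MilliValue()
		}
	}
	return sum, nil
}

func main() {
	pod := v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{
		{Name: "app", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("1")}}},
		{Name: "sidecar", Resources: v1.ResourceRequirements{Requests: v1.ResourceList{v1.ResourceCPU: resource.MustParse("500m")}}},
	}}}
	whole, _ := podRequestMilli(pod, "", v1.ResourceCPU)  // 1500
	app, _ := podRequestMilli(pod, "app", v1.ResourceCPU) // 1000
	fmt.Println(whole, app)
}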
@@ -52,7 +52,7 @@ import (
 type resourceInfo struct {
 	name     v1.ResourceName
 	requests []resource.Quantity
-	levels   []int64
+	levels   [][]int64
 	// only applies to pod names returned from "heapster"
 	podNames []string
 
@@ -93,6 +93,7 @@ type replicaCalcTestCase struct {
 	resource            *resourceInfo
 	metric              *metricInfo
 	metricLabelSelector labels.Selector
+	container           string
 
 	podReadiness []v1.ConditionStatus
 	podStartTime []metav1.Time
@@ -152,7 +153,7 @@ func (tc *replicaCalcTestCase) prepareTestClientSet() *fake.Clientset {
 				},
 			},
 			Spec: v1.PodSpec{
-				Containers: []v1.Container{{}, {}},
+				Containers: []v1.Container{{Name: "container1"}, {Name: "container2"}},
 			},
 		}
 		if podDeletionTimestamp {
@@ -202,13 +203,11 @@ func (tc *replicaCalcTestCase) prepareTestMetricsClient() *metricsfake.Clientset
 			Containers: make([]metricsapi.ContainerMetrics, numContainersPerPod),
 		}
 
-		for i := 0; i < numContainersPerPod; i++ {
+		for i, m := range resValue {
 			podMetric.Containers[i] = metricsapi.ContainerMetrics{
-				Name: fmt.Sprintf("container%v", i),
+				Name: fmt.Sprintf("container%v", i+1),
 				Usage: v1.ResourceList{
-					v1.ResourceName(tc.resource.name): *resource.NewMilliQuantity(
-						int64(resValue),
-						resource.DecimalSI),
+					tc.resource.name: *resource.NewMilliQuantity(m, resource.DecimalSI),
 				},
 			}
 		}
@@ -362,7 +361,7 @@ func (tc *replicaCalcTestCase) runTest(t *testing.T) {
 	}
 
 	if tc.resource != nil {
-		outReplicas, outUtilization, outRawValue, outTimestamp, err := replicaCalc.GetResourceReplicas(tc.currentReplicas, tc.resource.targetUtilization, tc.resource.name, testNamespace, selector)
+		outReplicas, outUtilization, outRawValue, outTimestamp, err := replicaCalc.GetResourceReplicas(tc.currentReplicas, tc.resource.targetUtilization, tc.resource.name, testNamespace, selector, tc.container)
 
 		if tc.expectedError != nil {
 			require.Error(t, err, "there should be an error calculating the replica count")
@@ -424,7 +423,16 @@ func (tc *replicaCalcTestCase) runTest(t *testing.T) {
 	assert.Equal(t, tc.metric.expectedUtilization, outUtilization, "utilization should be as expected")
 	assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
 }
+
+func makePodMetricLevels(containerMetric ...int64) [][]int64 {
+	metrics := make([][]int64, len(containerMetric))
+	for i := 0; i < len(containerMetric); i++ {
+		metrics[i] = make([]int64, numContainersPerPod)
+		for j := 0; j < numContainersPerPod; j++ {
+			metrics[i][j] = containerMetric[i]
+		}
+	}
+	return metrics
+}
 func TestReplicaCalcDisjointResourcesMetrics(t *testing.T) {
 	tc := replicaCalcTestCase{
 		currentReplicas: 1,
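For reference (not part of the commit): since these tests run with numContainersPerPod set to 2, a call such as makePodMetricLevels(300, 500, 700) expands to [][]int64{{300, 300}, {500, 500}, {700, 700}}, one inner slice per pod with the same level repeated for each container, which keeps the pre-existing pod-level test expectations unchanged after the levels field became per-container.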
@@ -432,7 +440,7 @@ func TestReplicaCalcDisjointResourcesMetrics(t *testing.T) {
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0")},
|
||||
levels: []int64{100},
|
||||
levels: makePodMetricLevels(100),
|
||||
podNames: []string{"an-older-pod-name"},
|
||||
|
||||
targetUtilization: 100,
|
||||
@@ -441,6 +449,20 @@ func TestReplicaCalcDisjointResourcesMetrics(t *testing.T) {
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestReplicaCalcMissingContainerMetricError(t *testing.T) {
|
||||
tc := replicaCalcTestCase{
|
||||
currentReplicas: 1,
|
||||
expectedError: fmt.Errorf("container container2 not present in metrics for pod test-namespace/test-pod-0"),
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0")},
|
||||
levels: [][]int64{{0}},
|
||||
},
|
||||
container: "container2",
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestReplicaCalcScaleUp(t *testing.T) {
|
||||
tc := replicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
@@ -448,7 +470,7 @@ func TestReplicaCalcScaleUp(t *testing.T) {
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: []int64{300, 500, 700},
|
||||
levels: makePodMetricLevels(300, 500, 700),
|
||||
|
||||
targetUtilization: 30,
|
||||
expectedUtilization: 50,
|
||||
@@ -458,6 +480,24 @@ func TestReplicaCalcScaleUp(t *testing.T) {
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestReplicaCalcContainerScaleUp(t *testing.T) {
|
||||
tc := replicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
expectedReplicas: 5,
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: [][]int64{{1000, 300}, {1000, 500}, {1000, 700}},
|
||||
|
||||
targetUtilization: 30,
|
||||
expectedUtilization: 50,
|
||||
expectedValue: 500,
|
||||
},
|
||||
container: "container2",
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
|
||||
tc := replicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
@@ -466,7 +506,7 @@ func TestReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: []int64{300, 500, 700},
|
||||
levels: makePodMetricLevels(300, 500, 700),
|
||||
|
||||
targetUtilization: 30,
|
||||
expectedUtilization: 60,
|
||||
@@ -476,6 +516,25 @@ func TestReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestReplicaCalcScaleUpContainerHotCpuLessScale(t *testing.T) {
|
||||
tc := replicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
expectedReplicas: 4,
|
||||
podStartTime: []metav1.Time{hotCPUCreationTime(), coolCPUCreationTime(), coolCPUCreationTime()},
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: [][]int64{{0, 300}, {0, 500}, {0, 700}},
|
||||
|
||||
targetUtilization: 30,
|
||||
expectedUtilization: 60,
|
||||
expectedValue: 600,
|
||||
},
|
||||
container: "container2",
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestReplicaCalcScaleUpHotCpuLessScale(t *testing.T) {
|
||||
tc := replicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
@@ -484,7 +543,7 @@ func TestReplicaCalcScaleUpHotCpuLessScale(t *testing.T) {
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: []int64{300, 500, 700},
|
||||
levels: makePodMetricLevels(300, 500, 700),
|
||||
|
||||
targetUtilization: 30,
|
||||
expectedUtilization: 60,
|
||||
@@ -502,7 +561,7 @@ func TestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: []int64{400, 500, 700},
|
||||
levels: makePodMetricLevels(400, 500, 700),
|
||||
|
||||
targetUtilization: 30,
|
||||
expectedUtilization: 40,
|
||||
@@ -521,7 +580,7 @@ func TestReplicaCalcScaleHotCpuNoScale(t *testing.T) {
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: []int64{400, 500, 700},
|
||||
levels: makePodMetricLevels(400, 500, 700),
|
||||
|
||||
targetUtilization: 30,
|
||||
expectedUtilization: 40,
|
||||
@@ -540,7 +599,7 @@ func TestReplicaCalcScaleUpIgnoresFailedPods(t *testing.T) {
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: []int64{500, 700},
|
||||
levels: makePodMetricLevels(500, 700),
|
||||
|
||||
targetUtilization: 30,
|
||||
expectedUtilization: 60,
|
||||
@@ -550,6 +609,26 @@ func TestReplicaCalcScaleUpIgnoresFailedPods(t *testing.T) {
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestReplicaCalcScaleUpContainerIgnoresFailedPods(t *testing.T) {
|
||||
tc := replicaCalcTestCase{
|
||||
currentReplicas: 2,
|
||||
expectedReplicas: 4,
|
||||
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
|
||||
podPhase: []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: [][]int64{{1000, 500}, {9000, 700}},
|
||||
|
||||
targetUtilization: 30,
|
||||
expectedUtilization: 60,
|
||||
expectedValue: 600,
|
||||
},
|
||||
container: "container2",
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestReplicaCalcScaleUpIgnoresDeletionPods(t *testing.T) {
|
||||
tc := replicaCalcTestCase{
|
||||
currentReplicas: 2,
|
||||
@@ -560,7 +639,7 @@ func TestReplicaCalcScaleUpIgnoresDeletionPods(t *testing.T) {
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: []int64{500, 700},
|
||||
levels: makePodMetricLevels(500, 700),
|
||||
|
||||
targetUtilization: 30,
|
||||
expectedUtilization: 60,
|
||||
@@ -570,6 +649,27 @@ func TestReplicaCalcScaleUpIgnoresDeletionPods(t *testing.T) {
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestReplicaCalcScaleUpContainerIgnoresDeletionPods(t *testing.T) {
|
||||
tc := replicaCalcTestCase{
|
||||
currentReplicas: 2,
|
||||
expectedReplicas: 4,
|
||||
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
|
||||
podPhase: []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning},
|
||||
podDeletionTimestamp: []bool{false, false, true, true},
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: makePodMetricLevels(500, 700), // TODO: This test is broken and works only because of missing metrics
|
||||
|
||||
targetUtilization: 30,
|
||||
expectedUtilization: 60,
|
||||
expectedValue: 600,
|
||||
},
|
||||
container: "container1",
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestReplicaCalcScaleUpCM(t *testing.T) {
|
||||
tc := replicaCalcTestCase{
|
||||
currentReplicas: 3,
|
||||
@@ -749,7 +849,7 @@ func TestReplicaCalcScaleDown(t *testing.T) {
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: []int64{100, 300, 500, 250, 250},
|
||||
levels: makePodMetricLevels(100, 300, 500, 250, 250),
|
||||
|
||||
targetUtilization: 50,
|
||||
expectedUtilization: 28,
|
||||
@@ -759,6 +859,24 @@ func TestReplicaCalcScaleDown(t *testing.T) {
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestReplicaCalcContainerScaleDown(t *testing.T) {
|
||||
tc := replicaCalcTestCase{
|
||||
currentReplicas: 5,
|
||||
expectedReplicas: 3,
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: [][]int64{{1000, 100}, {1000, 300}, {1000, 500}, {1000, 250}, {1000, 250}},
|
||||
|
||||
targetUtilization: 50,
|
||||
expectedUtilization: 28,
|
||||
expectedValue: 280,
|
||||
},
|
||||
container: "container2",
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestReplicaCalcScaleDownCM(t *testing.T) {
|
||||
tc := replicaCalcTestCase{
|
||||
currentReplicas: 5,
|
||||
@@ -845,7 +963,7 @@ func TestReplicaCalcScaleDownPerPodCMExternal(t *testing.T) {
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestReplicaCalcScaleDownIncludeUnreadyPods(t *testing.T) {
|
||||
func TestReplicaCalcScaleDownExcludeUnreadyPods(t *testing.T) {
|
||||
tc := replicaCalcTestCase{
|
||||
currentReplicas: 5,
|
||||
expectedReplicas: 2,
|
||||
@@ -853,7 +971,7 @@ func TestReplicaCalcScaleDownIncludeUnreadyPods(t *testing.T) {
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: []int64{100, 300, 500, 250, 250},
|
||||
levels: makePodMetricLevels(100, 300, 500, 250, 250),
|
||||
|
||||
targetUtilization: 50,
|
||||
expectedUtilization: 30,
|
||||
@@ -863,6 +981,25 @@ func TestReplicaCalcScaleDownIncludeUnreadyPods(t *testing.T) {
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestReplicaCalcScaleDownContainerExcludeUnreadyPods(t *testing.T) {
|
||||
tc := replicaCalcTestCase{
|
||||
currentReplicas: 5,
|
||||
expectedReplicas: 2,
|
||||
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: [][]int64{{1000, 100}, {1000, 300}, {1000, 500}, {1000, 250}, {1000, 250}},
|
||||
|
||||
targetUtilization: 50,
|
||||
expectedUtilization: 30,
|
||||
expectedValue: 300,
|
||||
},
|
||||
container: "container2",
|
||||
}
|
||||
tc.runTest(t)
|
||||
}
|
||||
|
||||
func TestReplicaCalcScaleDownExcludeUnscheduledPods(t *testing.T) {
|
||||
tc := replicaCalcTestCase{
|
||||
currentReplicas: 5,
|
||||
@@ -872,7 +1009,7 @@ func TestReplicaCalcScaleDownExcludeUnscheduledPods(t *testing.T) {
|
||||
resource: &resourceInfo{
|
||||
name: v1.ResourceCPU,
|
||||
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
|
||||
levels: []int64{100},
|
||||
levels: makePodMetricLevels(100),
|
||||
|
||||
targetUtilization: 50,
|
||||
expectedUtilization: 10,
|
||||
@@ -882,6 +1019,26 @@ func TestReplicaCalcScaleDownExcludeUnscheduledPods(t *testing.T) {
|
||||
tc.runTest(t)
|
||||
}

func TestReplicaCalcScaleDownContainerExcludeUnscheduledPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 1,
		podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse, v1.ConditionFalse, v1.ConditionFalse},
		podPhase:         []v1.PodPhase{v1.PodRunning, v1.PodPending, v1.PodPending, v1.PodPending, v1.PodPending},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   [][]int64{{1000, 100}, {1000, 300}, {1000, 500}, {1000, 250}, {1000, 250}},
			targetUtilization:   50,
			expectedUtilization: 10,
			expectedValue:       100,
		},
		container: "container2",
	}
	tc.runTest(t)
}
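// Rough check of the case above (illustrative only): only the single
// Running pod is counted; its container2 level is 100m of a 1000m request,
// so utilization is 10% and ceil(1 * 10/50) = 1 replica.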

func TestReplicaCalcScaleDownIgnoreHotCpuPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas: 5,
@@ -890,7 +1047,7 @@ func TestReplicaCalcScaleDownIgnoreHotCpuPods(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{100, 300, 500, 250, 250},
			levels:   makePodMetricLevels(100, 300, 500, 250, 250),
			targetUtilization:   50,
			expectedUtilization: 30,
@@ -900,6 +1057,25 @@ func TestReplicaCalcScaleDownIgnoreHotCpuPods(t *testing.T) {
	tc.runTest(t)
}

func TestReplicaCalcScaleDownContainerIgnoreHotCpuPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 2,
		podStartTime:     []metav1.Time{coolCPUCreationTime(), coolCPUCreationTime(), coolCPUCreationTime(), hotCPUCreationTime(), hotCPUCreationTime()},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   [][]int64{{1000, 100}, {1000, 300}, {1000, 500}, {1000, 1000}, {1000, 1000}},
			targetUtilization:   50,
			expectedUtilization: 30,
			expectedValue:       300,
		},
		container: "container2",
	}
	tc.runTest(t)
}
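// Rough check of the case above (illustrative only): the two pods created
// at hotCPUCreationTime() are ignored as recently started, leaving container2
// levels of 100, 300 and 500 -- an average of 300m, 30% of the request, and
// ceil(3 * 30/50) = 2 replicas.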

func TestReplicaCalcScaleDownIgnoresFailedPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas: 5,
@@ -909,7 +1085,7 @@ func TestReplicaCalcScaleDownIgnoresFailedPods(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{100, 300, 500, 250, 250},
			levels:   makePodMetricLevels(100, 300, 500, 250, 250),
			targetUtilization:   50,
			expectedUtilization: 28,
@@ -919,6 +1095,26 @@ func TestReplicaCalcScaleDownIgnoresFailedPods(t *testing.T) {
	tc.runTest(t)
}

func TestReplicaCalcScaleDownContainerIgnoresFailedPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas:  5,
		expectedReplicas: 3,
		podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
		podPhase:         []v1.PodPhase{v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodRunning, v1.PodFailed, v1.PodFailed},
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   [][]int64{{1000, 100}, {1000, 300}, {1000, 500}, {1000, 250}, {1000, 250}}, //TODO: Test is broken
			targetUtilization:   50,
			expectedUtilization: 28,
			expectedValue:       280,
		},
		container: "container2",
	}
	tc.runTest(t)
}

func TestReplicaCalcScaleDownIgnoresDeletionPods(t *testing.T) {
	tc := replicaCalcTestCase{
		currentReplicas: 5,
@@ -929,7 +1125,7 @@ func TestReplicaCalcScaleDownIgnoresDeletionPods(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{100, 300, 500, 250, 250},
			levels:   makePodMetricLevels(100, 300, 500, 250, 250),
			targetUtilization:   50,
			expectedUtilization: 28,
@@ -950,12 +1146,13 @@ func TestReplicaCalcScaleDownIgnoresDeletionPods_StillRunning(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{100, 300, 500, 250, 250, 0, 0},
			levels:   [][]int64{{1000, 100}, {1000, 300}, {1000, 500}, {1000, 250}, {1000, 250}},
			targetUtilization:   50,
			expectedUtilization: 28,
			expectedValue: numContainersPerPod * 280,
			expectedValue: 280,
		},
		container: "container2",
	}
	tc.runTest(t)
}
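// Note on the expectedValue change above: with a container target, the
// calculator reads usage for the one named container ("container2") instead
// of summing every container in the pod, which is presumably why the
// numContainersPerPod multiplier is dropped here.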
@@ -967,7 +1164,7 @@ func TestReplicaCalcTolerance(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
			levels:   []int64{1010, 1030, 1020},
			levels:   makePodMetricLevels(1010, 1030, 1020),
			targetUtilization:   100,
			expectedUtilization: 102,
@@ -1070,7 +1267,7 @@ func TestReplicaCalcSuperfluousMetrics(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{4000, 9500, 3000, 7000, 3200, 2000},
			levels:   makePodMetricLevels(4000, 9500, 3000, 7000, 3200, 2000),
			targetUtilization:   100,
			expectedUtilization: 587,
			expectedValue:       numContainersPerPod * 5875,
@@ -1086,7 +1283,7 @@ func TestReplicaCalcMissingMetrics(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{400, 95},
			levels:   makePodMetricLevels(400, 95),
			targetUtilization:   100,
			expectedUtilization: 24,
@@ -1103,7 +1300,7 @@ func TestReplicaCalcEmptyMetrics(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{},
			levels:   makePodMetricLevels(),
			targetUtilization: 100,
		},
@@ -1118,7 +1315,7 @@ func TestReplicaCalcEmptyCPURequest(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{},
			levels:   []int64{200},
			levels:   makePodMetricLevels(200),
			targetUtilization: 100,
		},
@@ -1133,7 +1330,7 @@ func TestReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{1000},
			levels:   makePodMetricLevels(1000),
			targetUtilization:   100,
			expectedUtilization: 100,
@@ -1150,7 +1347,7 @@ func TestReplicaCalcMissingMetricsNoChangeGt(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{1900},
			levels:   makePodMetricLevels(1900),
			targetUtilization:   100,
			expectedUtilization: 190,
@@ -1167,7 +1364,7 @@ func TestReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{600},
			levels:   makePodMetricLevels(600),
			targetUtilization:   100,
			expectedUtilization: 60,
@@ -1185,7 +1382,7 @@ func TestReplicaCalcMissingMetricsUnreadyChange(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{100, 450},
			levels:   makePodMetricLevels(100, 450),
			targetUtilization:   50,
			expectedUtilization: 45,
@@ -1203,7 +1400,7 @@ func TestReplicaCalcMissingMetricsHotCpuNoChange(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{100, 450},
			levels:   makePodMetricLevels(100, 450),
			targetUtilization:   50,
			expectedUtilization: 45,
@@ -1221,7 +1418,7 @@ func TestReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{100, 2000},
			levels:   makePodMetricLevels(100, 2000),
			targetUtilization:   50,
			expectedUtilization: 200,
@@ -1240,7 +1437,7 @@ func TestReplicaCalcMissingMetricsHotCpuScaleUp(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{100, 2000},
			levels:   makePodMetricLevels(100, 2000),
			targetUtilization:   50,
			expectedUtilization: 200,
@@ -1258,7 +1455,7 @@ func TestReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{100, 100, 100},
			levels:   makePodMetricLevels(100, 100, 100),
			targetUtilization:   50,
			expectedUtilization: 10,
@@ -1276,7 +1473,7 @@ func TestReplicaCalcDuringRollingUpdateWithMaxSurge(t *testing.T) {
		resource: &resourceInfo{
			name:     v1.ResourceCPU,
			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
			levels:   []int64{100, 100},
			levels:   makePodMetricLevels(100, 100),
			targetUtilization:   50,
			expectedUtilization: 10,
@@ -1315,18 +1512,18 @@ func TestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
		expectedReplicas: finalPods,
		resource: &resourceInfo{
			name: v1.ResourceCPU,
			levels: []int64{
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
				totalUsedCPUOfAllPods / 10,
			},
			levels: makePodMetricLevels(
				totalUsedCPUOfAllPods/10,
				totalUsedCPUOfAllPods/10,
				totalUsedCPUOfAllPods/10,
				totalUsedCPUOfAllPods/10,
				totalUsedCPUOfAllPods/10,
				totalUsedCPUOfAllPods/10,
				totalUsedCPUOfAllPods/10,
				totalUsedCPUOfAllPods/10,
				totalUsedCPUOfAllPods/10,
				totalUsedCPUOfAllPods/10,
			),
			requests: []resource.Quantity{
				resource.MustParse(fmt.Sprint(perPodRequested+100) + "m"),
				resource.MustParse(fmt.Sprint(perPodRequested-100) + "m"),