Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-29 22:46:12 +00:00
Merge pull request #65907 from jbartosik/hpa-improv-refactor-run-test
Automatic merge from submit-queue (batch tested with PRs 64681, 65907). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Make runTest easier to understand: fewer nested conditions, and more checks for test cases that look incorrect.

**What this PR does / why we need it**: Makes HPA tests easier to understand.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
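To make the shape of the refactor concrete, here is a minimal, self-contained sketch of the pattern in question, assuming nothing from the PR itself: the identifiers `metricKind`, `testCase`, and `run` are illustrative stand-ins, not the PR's own code. Test cases carry an explicit kind tag, and the harness switches on that tag, rejecting malformed cases up front instead of inferring the code path from which optional fields happen to be set.

```go
// Sketch of the "explicit kind tag + switch" pattern for table-driven tests.
// Names here are illustrative only and do not come from the PR.
package main

import "fmt"

type metricKind int

const (
	externalKind metricKind = iota
	podKind
)

type testCase struct {
	kind     metricKind
	selector string // empty string stands in for "selector not set"
	target   int64
}

// run dispatches on the explicit tag. With nested nil-checks, a case with a
// selector but no positive target could silently exercise no code path at
// all; here it fails loudly instead.
func run(tc testCase) error {
	switch tc.kind {
	case externalKind:
		if tc.selector == "" {
			return fmt.Errorf("externalKind case has no selector")
		}
		if tc.target <= 0 {
			return fmt.Errorf("externalKind case has target %d which is <= 0", tc.target)
		}
		fmt.Println("would call the external-metric path")
	case podKind:
		fmt.Println("would call the pod-metric path")
	default:
		return fmt.Errorf("unknown metric kind: %d", tc.kind)
	}
	return nil
}

func main() {
	cases := []testCase{
		{kind: externalKind, selector: "label=value", target: 4400}, // well-formed
		{kind: externalKind, target: 0},                             // malformed; caught up front
	}
	for _, tc := range cases {
		if err := run(tc); err != nil {
			fmt.Println("bad test case:", err)
		}
	}
}
```

The design advantage over nested nil-checks is that an inconsistent case can no longer slip through untested; it either matches a case arm and is validated there, or hits the default arm and fails.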
This commit is contained in: 1e3d23c5c3
```diff
@@ -56,11 +56,21 @@ type resourceInfo struct {
 	expectedValue       int64
 }
 
+type metricType int
+
+const (
+	objectMetric metricType = iota
+	externalMetric
+	externalPerPodMetric
+	podMetric
+)
+
 type metricInfo struct {
 	name         string
 	levels       []int64
 	singleObject *autoscalingv2.CrossVersionObjectReference
 	selector     *metav1.LabelSelector
+	metricType   metricType
 
 	targetUtilization       int64
 	perPodTargetUtilization int64
```
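The first hunk introduces the new metricType enum and adds a metricType field to metricInfo, so each table-driven test case below can state explicitly which replica-calculation path it exercises; the remaining hunks update every affected test case to set that field.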
```diff
@@ -332,34 +342,50 @@ func (tc *replicaCalcTestCase) runTest(t *testing.T) {
 		assert.Equal(t, tc.resource.expectedUtilization, outUtilization, "utilization should be as expected")
 		assert.Equal(t, tc.resource.expectedValue, outRawValue, "raw value should be as expected")
 		assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
-
-	} else {
-		var outReplicas int32
-		var outUtilization int64
-		var outTimestamp time.Time
-		var err error
-		if tc.metric.singleObject != nil {
-			outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetObjectMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.singleObject, selector)
-		} else if tc.metric.selector != nil {
-			if tc.metric.targetUtilization > 0 {
-				outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetExternalMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.selector, selector)
-			} else if tc.metric.perPodTargetUtilization > 0 {
-				outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetExternalPerPodMetricReplicas(tc.currentReplicas, tc.metric.perPodTargetUtilization, tc.metric.name, testNamespace, tc.metric.selector)
-			}
-		} else {
-			outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, selector)
-		}
-
-		if tc.expectedError != nil {
-			require.Error(t, err, "there should be an error calculating the replica count")
-			assert.Contains(t, err.Error(), tc.expectedError.Error(), "the error message should have contained the expected error message")
-			return
-		}
-		require.NoError(t, err, "there should not have been an error calculating the replica count")
-		assert.Equal(t, tc.expectedReplicas, outReplicas, "replicas should be as expected")
-		assert.Equal(t, tc.metric.expectedUtilization, outUtilization, "utilization should be as expected")
-		assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
-		return
-	}
+		return
+	}
+
+	var outReplicas int32
+	var outUtilization int64
+	var outTimestamp time.Time
+	switch tc.metric.metricType {
+	case objectMetric:
+		if tc.metric.singleObject == nil {
+			t.Fatal("Metric specified as objectMetric but metric.singleObject is nil.")
+		}
+		outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetObjectMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.singleObject, selector)
+	case externalMetric:
+		if tc.metric.selector == nil {
+			t.Fatal("Metric specified as externalMetric but metric.selector is nil.")
+		}
+		if tc.metric.targetUtilization <= 0 {
+			t.Fatalf("Metric specified as externalMetric but metric.targetUtilization is %d which is <=0.", tc.metric.targetUtilization)
+		}
+		outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetExternalMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, tc.metric.selector, selector)
+	case externalPerPodMetric:
+		if tc.metric.selector == nil {
+			t.Fatal("Metric specified as externalPerPodMetric but metric.selector is nil.")
+		}
+		if tc.metric.perPodTargetUtilization <= 0 {
+			t.Fatalf("Metric specified as externalPerPodMetric but metric.perPodTargetUtilization is %d which is <=0.", tc.metric.perPodTargetUtilization)
+		}
+
+		outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetExternalPerPodMetricReplicas(tc.currentReplicas, tc.metric.perPodTargetUtilization, tc.metric.name, testNamespace, tc.metric.selector)
+	case podMetric:
+		outReplicas, outUtilization, outTimestamp, err = replicaCalc.GetMetricReplicas(tc.currentReplicas, tc.metric.targetUtilization, tc.metric.name, testNamespace, selector)
+	default:
+		t.Fatalf("Unknown metric type: %d", tc.metric.metricType)
+	}
+
+	if tc.expectedError != nil {
+		require.Error(t, err, "there should be an error calculating the replica count")
+		assert.Contains(t, err.Error(), tc.expectedError.Error(), "the error message should have contained the expected error message")
+		return
+	}
+	require.NoError(t, err, "there should not have been an error calculating the replica count")
+	assert.Equal(t, tc.expectedReplicas, outReplicas, "replicas should be as expected")
+	assert.Equal(t, tc.metric.expectedUtilization, outUtilization, "utilization should be as expected")
+	assert.True(t, tc.timestamp.Equal(outTimestamp), "timestamp should be as expected")
 }
 
 func TestReplicaCalcDisjointResourcesMetrics(t *testing.T) {
```
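A design note on what the switch buys here: in the old nested-if version, a test case with a selector but neither targetUtilization nor perPodTargetUtilization greater than zero fell through without calling any replica-calculation function, so the test would compare against zero-valued outputs and a nil error. The new switch plus t.Fatal/t.Fatalf checks turn such an inconsistent case into an immediate, clearly attributed test failure.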
```diff
@@ -459,6 +485,7 @@ func TestReplicaCalcScaleUpCM(t *testing.T) {
 			levels:              []int64{20000, 10000, 30000},
 			targetUtilization:   15000,
 			expectedUtilization: 20000,
+			metricType:          podMetric,
 		},
 	}
 	tc.runTest(t)
```
```diff
@@ -474,6 +501,7 @@ func TestReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) {
 			levels:              []int64{50000, 10000, 30000},
 			targetUtilization:   15000,
 			expectedUtilization: 30000,
+			metricType:          podMetric,
 		},
 	}
 	tc.runTest(t)
```
```diff
@@ -489,6 +517,7 @@ func TestReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
 			levels:              []int64{50000, 15000, 30000},
 			targetUtilization:   15000,
 			expectedUtilization: 15000,
+			metricType:          podMetric,
 		},
 	}
 	tc.runTest(t)
```
```diff
@@ -543,6 +572,7 @@ func TestReplicaCalcScaleUpCMExternal(t *testing.T) {
 			targetUtilization:   4400,
 			expectedUtilization: 8600,
 			selector:            &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
+			metricType:          podMetric,
 		},
 	}
 	tc.runTest(t)
```
```diff
@@ -559,6 +589,7 @@ func TestReplicaCalcScaleUpCMExternalIgnoresUnreadyPods(t *testing.T) {
 			targetUtilization:   4400,
 			expectedUtilization: 8600,
 			selector:            &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
+			metricType:          externalMetric,
 		},
 	}
 	tc.runTest(t)
```
```diff
@@ -573,6 +604,7 @@ func TestReplicaCalcScaleUpCMExternalNoLabels(t *testing.T) {
 			levels:              []int64{8600},
 			targetUtilization:   4400,
 			expectedUtilization: 8600,
+			metricType:          podMetric,
 		},
 	}
 	tc.runTest(t)
```
```diff
@@ -588,6 +620,7 @@ func TestReplicaCalcScaleUpPerPodCMExternal(t *testing.T) {
 			perPodTargetUtilization: 2150,
 			expectedUtilization:     2867,
 			selector:                &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
+			metricType:              externalPerPodMetric,
 		},
 	}
 	tc.runTest(t)
```
```diff
@@ -619,6 +652,7 @@ func TestReplicaCalcScaleDownCM(t *testing.T) {
 			levels:              []int64{12000, 12000, 12000, 12000, 12000},
 			targetUtilization:   20000,
 			expectedUtilization: 12000,
+			metricType:          podMetric,
 		},
 	}
 	tc.runTest(t)
```
```diff
@@ -653,6 +687,7 @@ func TestReplicaCalcScaleDownCMExternal(t *testing.T) {
 			targetUtilization:   14334,
 			expectedUtilization: 8600,
 			selector:            &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
+			metricType:          externalMetric,
 		},
 	}
 	tc.runTest(t)
```
```diff
@@ -668,6 +703,7 @@ func TestReplicaCalcScaleDownPerPodCMExternal(t *testing.T) {
 			perPodTargetUtilization: 2867,
 			expectedUtilization:     1720,
 			selector:                &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
+			metricType:              externalPerPodMetric,
 		},
 	}
 	tc.runTest(t)
```
```diff
@@ -736,6 +772,7 @@ func TestReplicaCalcToleranceCM(t *testing.T) {
 			levels:              []int64{20000, 21000, 21000},
 			targetUtilization:   20000,
 			expectedUtilization: 20666,
+			metricType:          podMetric,
 		},
 	}
 	tc.runTest(t)
```
```diff
@@ -770,6 +807,7 @@ func TestReplicaCalcToleranceCMExternal(t *testing.T) {
 			targetUtilization:   8888,
 			expectedUtilization: 8600,
 			selector:            &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
+			metricType:          externalMetric,
 		},
 	}
 	tc.runTest(t)
```
```diff
@@ -785,6 +823,7 @@ func TestReplicaCalcTolerancePerPodCMExternal(t *testing.T) {
 			perPodTargetUtilization: 2900,
 			expectedUtilization:     2867,
 			selector:                &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
+			metricType:              externalPerPodMetric,
 		},
 	}
 	tc.runTest(t)
```