Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #33735 from jszczepkowski/hpa-pending-fix
Automatic merge from submit-queue. Fixes in HPA: consider only running pods; proper denominator in avg request calculations.
commit 24031f50d6
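For orientation before the diff: the client-side change amounts to two things. Pods that are not running are skipped entirely, and the request average is taken over the pods that were actually counted (podNames) rather than over every pod returned by the list. Below is a minimal, self-contained sketch of that logic; the pod/podPhase types and the averageRequestMillis helper are local stand-ins introduced here, not the real api.Pod or HeapsterMetricsClient code.

package main

import (
	"errors"
	"fmt"
)

// Local stand-ins for the only pod fields this sketch needs; the real code
// iterates over api.Pod values and compares Status.Phase to api.PodRunning.
type podPhase string

const (
	podRunning podPhase = "Running"
	podPending podPhase = "Pending"
)

type pod struct {
	phase         podPhase
	requestMillis int64
}

// averageRequestMillis mirrors the two behaviors the PR changes: any pod that
// is not running is skipped (previously only pending pods were), and the
// denominator is the number of pods that contributed to the sum, not the
// length of the whole pod list.
func averageRequestMillis(pods []pod) (int64, error) {
	requestSum := int64(0)
	counted := 0
	for _, p := range pods {
		if p.phase != podRunning {
			// Count only running pods.
			continue
		}
		requestSum += p.requestMillis
		counted++
	}
	if counted == 0 {
		return 0, errors.New("no running pods to average over")
	}
	return requestSum / int64(counted), nil
}

func main() {
	pods := []pod{{podRunning, 500}, {podRunning, 700}, {podPending, 900}}
	avg, _ := averageRequestMillis(pods)
	fmt.Println(avg) // 600: the pending pod affects neither the sum nor the denominator
}

With the old denominator the same input would have produced (500+700)/3 = 400, the understated average that the second hunk below corrects.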
@@ -123,8 +123,8 @@ func (h *HeapsterMetricsClient) GetCpuConsumptionAndRequestInMillis(namespace st
 	requestSum := int64(0)
 	missing := false
 	for _, pod := range podList.Items {
-		if pod.Status.Phase == api.PodPending {
-			// Skip pending pods.
+		if pod.Status.Phase != api.PodRunning {
+			// Count only running pods.
 			continue
 		}
@@ -144,7 +144,7 @@ func (h *HeapsterMetricsClient) GetCpuConsumptionAndRequestInMillis(namespace st
 		return 0, 0, time.Time{}, fmt.Errorf("some pods do not have request for cpu")
 	}
 	glog.V(4).Infof("%s %s - sum of CPU requested: %d", namespace, selector, requestSum)
-	requestAvg := requestSum / int64(len(podList.Items))
+	requestAvg := requestSum / int64(len(podNames))
 	// Consumption is already averaged and in millis.
 	consumption, timestamp, err := h.getCpuUtilizationForPods(namespace, selector, podNames)
 	if err != nil {
@@ -67,6 +67,7 @@ type metricPoint struct {
 type testCase struct {
 	replicas        int
 	desiredValue    float64
+	desiredRequest  *float64
 	desiredError    error
 	targetResource  string
 	targetTimestamp int
@@ -94,7 +95,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
 		obj := &api.PodList{}
 		for i := 0; i < tc.replicas; i++ {
 			podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
-			pod := buildPod(namespace, podName, podLabels, api.PodRunning)
+			pod := buildPod(namespace, podName, podLabels, api.PodRunning, "1024")
 			obj.Items = append(obj.Items, pod)
 		}
 		return true, obj, nil
@@ -159,7 +160,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
 	return fakeClient
 }
 
-func buildPod(namespace, podName string, podLabels map[string]string, phase api.PodPhase) api.Pod {
+func buildPod(namespace, podName string, podLabels map[string]string, phase api.PodPhase, request string) api.Pod {
 	return api.Pod{
 		ObjectMeta: api.ObjectMeta{
 			Name: podName,
@@ -171,7 +172,7 @@ func buildPod(namespace, podName string, podLabels map[string]string, phase api.
 				{
 					Resources: api.ResourceRequirements{
 						Requests: api.ResourceList{
-							api.ResourceCPU: resource.MustParse("10"),
+							api.ResourceCPU: resource.MustParse(request),
 						},
 					},
 				},
@@ -183,7 +184,7 @@ func buildPod(namespace, podName string, podLabels map[string]string, phase api.
 	}
 }
 
-func (tc *testCase) verifyResults(t *testing.T, val *float64, timestamp time.Time, err error) {
+func (tc *testCase) verifyResults(t *testing.T, val *float64, req *float64, timestamp time.Time, err error) {
 	if tc.desiredError != nil {
 		assert.Error(t, err)
 		assert.Contains(t, fmt.Sprintf("%v", err), fmt.Sprintf("%v", tc.desiredError))
@@ -194,6 +195,11 @@ func (tc *testCase) verifyResults(t *testing.T, val *float64, timestamp time.Tim
 	assert.True(t, tc.desiredValue-0.001 < *val)
 	assert.True(t, tc.desiredValue+0.001 > *val)
 
+	if tc.desiredRequest != nil {
+		assert.True(t, *tc.desiredRequest-0.001 < *req)
+		assert.True(t, *tc.desiredRequest+0.001 > *req)
+	}
+
 	targetTimestamp := fixedTimestamp.Add(time.Duration(tc.targetTimestamp) * time.Minute)
 	assert.True(t, targetTimestamp.Equal(timestamp))
 }
@@ -202,12 +208,13 @@ func (tc *testCase) runTest(t *testing.T) {
 	testClient := tc.prepareTestClient(t)
 	metricsClient := NewHeapsterMetricsClient(testClient, DefaultHeapsterNamespace, DefaultHeapsterScheme, DefaultHeapsterService, DefaultHeapsterPort)
 	if tc.targetResource == "cpu-usage" {
-		val, _, timestamp, err := metricsClient.GetCpuConsumptionAndRequestInMillis(tc.namespace, tc.selector)
+		val, req, timestamp, err := metricsClient.GetCpuConsumptionAndRequestInMillis(tc.namespace, tc.selector)
 		fval := float64(val)
-		tc.verifyResults(t, &fval, timestamp, err)
+		freq := float64(req)
+		tc.verifyResults(t, &fval, &freq, timestamp, err)
 	} else {
 		val, timestamp, err := metricsClient.GetCustomMetric(tc.targetResource, tc.namespace, tc.selector)
-		tc.verifyResults(t, val, timestamp, err)
+		tc.verifyResults(t, val, nil, timestamp, err)
 	}
 }
 
@@ -224,9 +231,11 @@ func TestCPU(t *testing.T) {
 }
 
 func TestCPUPending(t *testing.T) {
+	desiredRequest := float64(2048 * 1000)
 	tc := testCase{
-		replicas:           4,
+		replicas:           5,
 		desiredValue:       5000,
+		desiredRequest:     &desiredRequest,
 		targetResource:     "cpu-usage",
 		targetTimestamp:    1,
 		reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
@@ -237,12 +246,14 @@ func TestCPUPending(t *testing.T) {
 	namespace := "test-namespace"
 	podNamePrefix := "test-pod"
 	podLabels := map[string]string{"name": podNamePrefix}
+	podRequest := []string{"1024", "2048", "3072", "200", "100"}
 	for i := 0; i < tc.replicas; i++ {
 		podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
-		pod := buildPod(namespace, podName, podLabels, api.PodRunning)
+		pod := buildPod(namespace, podName, podLabels, api.PodRunning, podRequest[i])
 		tc.podListOverride.Items = append(tc.podListOverride.Items, pod)
 	}
+	tc.podListOverride.Items[3].Status.Phase = api.PodPending
+	tc.podListOverride.Items[4].Status.Phase = api.PodFailed
 
 	tc.runTest(t)
 }
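As a sanity check on the numbers in TestCPUPending above (assuming, as the "InMillis" naming suggests, that the request strings parse as whole CPUs and the client reports millicores): pods 3 and 4 are Pending and Failed, so only the requests 1024, 2048, and 3072 are counted, and the divisor is 3 rather than 5.

package main

import "fmt"

func main() {
	runningRequests := []int64{1024, 2048, 3072} // pods 3 and 4 are skipped
	sum := int64(0)
	for _, r := range runningRequests {
		sum += r
	}
	// (1024+2048+3072)/3 = 2048 CPUs -> 2048000 millicores, matching desiredRequest.
	fmt.Println(sum / int64(len(runningRequests)) * 1000)
}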
@@ -263,7 +274,7 @@ func TestCPUAllPending(t *testing.T) {
 	podLabels := map[string]string{"name": podNamePrefix}
 	for i := 0; i < tc.replicas; i++ {
 		podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
-		pod := buildPod(namespace, podName, podLabels, api.PodPending)
+		pod := buildPod(namespace, podName, podLabels, api.PodPending, "2048")
 		tc.podListOverride.Items = append(tc.podListOverride.Items, pod)
 	}
 	tc.runTest(t)
@@ -295,7 +306,7 @@ func TestQPSPending(t *testing.T) {
 	podLabels := map[string]string{"name": podNamePrefix}
 	for i := 0; i < tc.replicas; i++ {
 		podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
-		pod := buildPod(namespace, podName, podLabels, api.PodRunning)
+		pod := buildPod(namespace, podName, podLabels, api.PodRunning, "256")
 		tc.podListOverride.Items = append(tc.podListOverride.Items, pod)
 	}
 	tc.podListOverride.Items[0].Status.Phase = api.PodPending
@@ -317,7 +328,7 @@ func TestQPSAllPending(t *testing.T) {
 	podLabels := map[string]string{"name": podNamePrefix}
 	for i := 0; i < tc.replicas; i++ {
 		podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
-		pod := buildPod(namespace, podName, podLabels, api.PodPending)
+		pod := buildPod(namespace, podName, podLabels, api.PodPending, "512")
 		tc.podListOverride.Items = append(tc.podListOverride.Items, pod)
 	}
 	tc.podListOverride.Items[0].Status.Phase = api.PodPending