Merge pull request #79657 from josephburnett/hpastuck

Ignore unschedulable pods
Kubernetes Prow Robot 2019-07-10 11:34:29 -07:00 committed by GitHub
commit 57eef32041
2 changed files with 56 additions and 10 deletions
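
The change teaches the HPA replica calculator to classify pods in the Pending phase, which includes pods the scheduler cannot place, as ignored rather than letting them fall through to the missing-metrics branch. Below is a minimal sketch of the classification order the first hunk establishes; it is not the upstream code, and only the k8s.io/api/core/v1 types are assumed:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// classify mirrors the order of checks in groupPods after this PR:
// deleted or failed pods are skipped outright, Pending pods (including
// unschedulable ones, which never leave Pending) are ignored, and only
// then are pods without a metric sample reported as missing.
func classify(pod *v1.Pod, hasMetric bool) string {
	switch {
	case pod.DeletionTimestamp != nil || pod.Status.Phase == v1.PodFailed:
		return "skipped"
	case pod.Status.Phase == v1.PodPending:
		return "ignored" // new branch added by this PR
	case !hasMetric:
		return "missing"
	}
	return "ready" // subject to further CPU readiness checks upstream
}

func main() {
	unscheduled := &v1.Pod{Status: v1.PodStatus{Phase: v1.PodPending}}
	fmt.Println(classify(unscheduled, false)) // "ignored", not "missing"
}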

@@ -365,11 +365,18 @@ func groupPods(pods []*v1.Pod, metrics metricsclient.PodMetricsInfo, resource v1
 		if pod.DeletionTimestamp != nil || pod.Status.Phase == v1.PodFailed {
 			continue
 		}
+		// Pending pods are ignored.
+		if pod.Status.Phase == v1.PodPending {
+			ignoredPods.Insert(pod.Name)
+			continue
+		}
+		// Pods missing metrics.
 		metric, found := metrics[pod.Name]
 		if !found {
 			missingPods.Insert(pod.Name)
 			continue
 		}
+		// Unready pods are ignored.
 		if resource == v1.ResourceCPU {
 			var ignorePod bool
 			_, condition := podutil.GetPodCondition(&pod.Status, v1.PodReady)
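
A pod that cannot be scheduled never leaves the Pending phase, so the single phase check above covers unschedulable pods without consulting the PodScheduled condition. Previously such pods, having no metrics, landed in missingPods, and on scale-down the calculator conservatively treats missing pods as consuming their full request, which could keep an HPA stuck at a high replica count (hence the branch name hpastuck); that downstream handling is paraphrased from the surrounding replica calculator, which this excerpt does not show. For illustration, a hypothetical unschedulable pod's status looks roughly like this:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Hypothetical status of an unschedulable pod: the phase stays Pending
	// and the scheduler records PodScheduled=False with reason Unschedulable.
	status := v1.PodStatus{
		Phase: v1.PodPending,
		Conditions: []v1.PodCondition{{
			Type:   v1.PodScheduled,
			Status: v1.ConditionFalse,
			Reason: v1.PodReasonUnschedulable,
		}},
	}
	fmt.Println(status.Phase == v1.PodPending) // true: the new check catches it
}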

@@ -862,6 +862,25 @@ func TestReplicaCalcScaleDownIncludeUnreadyPods(t *testing.T) {
 	tc.runTest(t)
 }
 
+func TestReplicaCalcScaleDownExcludeUnscheduledPods(t *testing.T) {
+	tc := replicaCalcTestCase{
+		currentReplicas:  5,
+		expectedReplicas: 1,
+		podReadiness:     []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse, v1.ConditionFalse, v1.ConditionFalse},
+		podPhase:         []v1.PodPhase{v1.PodRunning, v1.PodPending, v1.PodPending, v1.PodPending, v1.PodPending},
+		resource: &resourceInfo{
+			name:     v1.ResourceCPU,
+			requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
+			levels:   []int64{100},
+
+			targetUtilization:   50,
+			expectedUtilization: 10,
+			expectedValue:       numContainersPerPod * 100,
+		},
+	}
+	tc.runTest(t)
+}
+
 func TestReplicaCalcScaleDownIgnoreHotCpuPods(t *testing.T) {
 	tc := replicaCalcTestCase{
 		currentReplicas: 5,
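
The expected numbers in the new test work out as follows: four of the five pods are Pending and therefore ignored, so only the running pod's sample counts. A back-of-envelope check, with the scale-down formula paraphrased from the replica calculator rather than taken from this diff:

package main

import (
	"fmt"
	"math"
)

func main() {
	// One ready pod uses 100m of a 1000m (1.0 CPU) request.
	utilization := 100.0 / 1000.0 * 100 // = 10, the test's expectedUtilization
	usageRatio := utilization / 50.0    // against targetUtilization 50 -> 0.2
	readyPods := 1.0                    // the four Pending pods are ignored
	replicas := math.Ceil(usageRatio * readyPods)
	fmt.Println(replicas) // 1, the test's expectedReplicas
}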
@@ -1616,18 +1635,38 @@ func TestGroupPods(t *testing.T) {
 			sets.NewString("lucretius"),
 			sets.NewString("epicurus"),
 		},
+		{
+			name: "pending pods are ignored",
+			pods: []*v1.Pod{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "unscheduled",
+					},
+					Status: v1.PodStatus{
+						Phase: v1.PodPending,
+					},
+				},
+			},
+			metrics:             metricsclient.PodMetricsInfo{},
+			resource:            v1.ResourceCPU,
+			expectReadyPodCount: 0,
+			expectIgnoredPods:   sets.NewString("unscheduled"),
+			expectMissingPods:   sets.NewString(),
+		},
 	}
 	for _, tc := range tests {
-		readyPodCount, ignoredPods, missingPods := groupPods(tc.pods, tc.metrics, tc.resource, defaultTestingCpuInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
-		if readyPodCount != tc.expectReadyPodCount {
-			t.Errorf("%s got readyPodCount %d, expected %d", tc.name, readyPodCount, tc.expectReadyPodCount)
-		}
-		if !ignoredPods.Equal(tc.expectIgnoredPods) {
-			t.Errorf("%s got unreadyPods %v, expected %v", tc.name, ignoredPods, tc.expectIgnoredPods)
-		}
-		if !missingPods.Equal(tc.expectMissingPods) {
-			t.Errorf("%s got missingPods %v, expected %v", tc.name, missingPods, tc.expectMissingPods)
-		}
+		t.Run(tc.name, func(t *testing.T) {
+			readyPodCount, ignoredPods, missingPods := groupPods(tc.pods, tc.metrics, tc.resource, defaultTestingCpuInitializationPeriod, defaultTestingDelayOfInitialReadinessStatus)
+			if readyPodCount != tc.expectReadyPodCount {
+				t.Errorf("%s got readyPodCount %d, expected %d", tc.name, readyPodCount, tc.expectReadyPodCount)
+			}
+			if !ignoredPods.Equal(tc.expectIgnoredPods) {
+				t.Errorf("%s got unreadyPods %v, expected %v", tc.name, ignoredPods, tc.expectIgnoredPods)
+			}
+			if !missingPods.Equal(tc.expectMissingPods) {
+				t.Errorf("%s got missingPods %v, expected %v", tc.name, missingPods, tc.expectMissingPods)
+			}
+		})
 	}
 }
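
The loop body is also wrapped in t.Run, so each table case runs as a named subtest that can be selected on its own. Assuming the tests live in the pod autoscaler package (the file paths are not shown in this view), an invocation would look like:

go test ./pkg/controller/podautoscaler/ -run 'TestGroupPods/pending_pods_are_ignored'

Go substitutes underscores for spaces in subtest names when matching -run patterns.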