Honor disabled LocalStorageCapacityIsolation in scheduling

Author: Wei Huang
Date: 2020-11-01 21:56:26 -08:00
Parent: d1c296431e
Commit: fb782eee57
3 changed files with 23 additions and 3 deletions
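
The fix makes the scheduler's resource bookkeeping honor a disabled LocalStorageCapacityIsolation feature gate: with the gate off, a pod's ephemeral-storage requests must not count against a node's allocatable local storage. A minimal sketch of the gate check being introduced, assuming the kubernetes repo's packages are on the import path (the main() harness is illustrative only):

package main

import (
    "fmt"

    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/kubernetes/pkg/features"
)

func main() {
    // DefaultFeatureGate reflects the component's --feature-gates settings;
    // LocalStorageCapacityIsolation defaults to enabled, so the skip path
    // only triggers when an operator turns the gate off explicitly.
    if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
        fmt.Println("ephemeral-storage requests are counted during scheduling")
    } else {
        fmt.Println("ephemeral-storage requests are ignored during scheduling")
    }
}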

View File

@@ -67,6 +67,7 @@ go_test(
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/component-base/featuregate:go_default_library",
         "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
         "//vendor/github.com/stretchr/testify/assert:go_default_library",
     ],
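
The single new featuregate:go_default_library entry keeps the Bazel go_test deps in step with the new k8s.io/component-base/featuregate import in the test file below; at this point in the repo's history these BUILD files were regenerated with hack/update-bazel.sh rather than edited by hand.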

View File

@@ -25,7 +25,11 @@ import (
     v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
+    "k8s.io/apiserver/pkg/util/feature"
+    "k8s.io/component-base/featuregate"
+    featuregatetesting "k8s.io/component-base/featuregate/testing"
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
+    "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/apis/config"
     "k8s.io/kubernetes/pkg/scheduler/framework"
 )
@@ -63,7 +67,7 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePa
 }

 func newResourcePod(usage ...framework.Resource) *v1.Pod {
-    containers := []v1.Container{}
+    var containers []v1.Container
     for _, req := range usage {
         containers = append(containers, v1.Container{
             Resources: v1.ResourceRequirements{Requests: req.ResourceList()},
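
Aside from the gate itself, newResourcePod swaps its empty-slice literal for a nil slice declaration; append works identically on a nil slice, and var containers []v1.Container is the form common Go style guidance prefers.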
@@ -501,6 +505,7 @@ func TestStorageRequests(t *testing.T) {
         pod        *v1.Pod
         nodeInfo   *framework.NodeInfo
         name       string
+        features   map[featuregate.Feature]bool
         wantStatus *framework.Status
     }{
         {
@@ -523,6 +528,15 @@ func TestStorageRequests(t *testing.T) {
             name:       "storage ephemeral local storage request exceeds allocatable",
             wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceEphemeralStorage)),
         },
+        {
+            pod:      newResourceInitPod(newResourcePod(framework.Resource{EphemeralStorage: 25}), framework.Resource{EphemeralStorage: 25}),
+            nodeInfo: framework.NewNodeInfo(
+                newResourcePod(framework.Resource{MilliCPU: 2, Memory: 2})),
+            name: "ephemeral local storage request is ignored due to disabled feature gate",
+            features: map[featuregate.Feature]bool{
+                features.LocalStorageCapacityIsolation: false,
+            },
+        },
         {
             pod: newResourcePod(framework.Resource{EphemeralStorage: 10}),
             nodeInfo: framework.NewNodeInfo(
@@ -533,6 +547,9 @@ func TestStorageRequests(t *testing.T) {
     }

     for _, test := range storagePodsTests {
         t.Run(test.name, func(t *testing.T) {
+            for k, v := range test.features {
+                defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, k, v)()
+            }
             node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
             test.nodeInfo.SetNode(&node)
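
The new test case builds its pod with newResourceInitPod because init containers are exactly the path this commit fixes: the Fit plugin folds init containers into the pod's request through Resource.SetMaxResource (init containers run one at a time, so only the largest request matters), while regular containers are summed through Resource.Add, which by this fix's logic already honored the gate. A simplified sketch of that aggregation, not the exact upstream code:

package sketch

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/kubernetes/pkg/scheduler/framework"
)

func computePodResourceRequest(pod *v1.Pod) *framework.Resource {
    result := &framework.Resource{}
    // Regular containers run concurrently, so their requests are summed.
    for _, c := range pod.Spec.Containers {
        result.Add(c.Resources.Requests)
    }
    // Init containers run sequentially, so the pod can never need more
    // than its single largest init-container request at once.
    for _, c := range pod.Spec.InitContainers {
        result.SetMaxResource(c.Resources.Requests)
    }
    return result
}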

View File

@@ -390,8 +390,10 @@ func (r *Resource) SetMaxResource(rl v1.ResourceList) {
             r.MilliCPU = cpu
         }
     case v1.ResourceEphemeralStorage:
-        if ephemeralStorage := rQuantity.Value(); ephemeralStorage > r.EphemeralStorage {
-            r.EphemeralStorage = ephemeralStorage
+        if utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) {
+            if ephemeralStorage := rQuantity.Value(); ephemeralStorage > r.EphemeralStorage {
+                r.EphemeralStorage = ephemeralStorage
+            }
         }
     default:
         if v1helper.IsScalarResourceName(rName) {
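
With the gate disabled, the comparison above is skipped entirely, so r.EphemeralStorage stays at zero and an init container's storage request can no longer make a node look too small. A minimal sketch of the observable effect (hypothetical harness; the printed value depends on the process's --feature-gates configuration):

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    "k8s.io/kubernetes/pkg/scheduler/framework"
)

func main() {
    r := &framework.Resource{}
    r.SetMaxResource(v1.ResourceList{
        v1.ResourceCPU:              *resource.NewMilliQuantity(500, resource.DecimalSI),
        v1.ResourceEphemeralStorage: *resource.NewQuantity(25, resource.BinarySI),
    })
    // Prints "500 25" with LocalStorageCapacityIsolation enabled (the
    // default) and "500 0" when the gate is disabled.
    fmt.Println(r.MilliCPU, r.EphemeralStorage)
}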