Scheduler: remove pkg/features dependency from NodeResources plugins
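This commit removes the NodeResources plugins' runtime dependency on pkg/features and the global feature gate (utilfeature.DefaultFeatureGate): resolved gate values are now injected into each plugin constructor through the scheduler's plugins/feature package. A minimal sketch of what that package needs to carry, with the field set inferred from the usages in this diff rather than copied from upstream:

// Sketch of k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature.
// Field set inferred from this diff; the real file may carry more gates.
package feature

// Features holds resolved feature-gate values so plugins can read them as
// plain booleans instead of querying the process-global gate.
type Features struct {
	EnablePodOverhead                bool
	EnableBalanceAttachedNodeVolumes bool
}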
@@ -23,9 +23,8 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 )
 
 // BalancedAllocation is a score plugin that calculates the difference between the cpu and memory fraction
@@ -67,13 +66,15 @@ func (ba *BalancedAllocation) ScoreExtensions() framework.ScoreExtensions {
 }
 
 // NewBalancedAllocation initializes a new plugin and returns it.
-func NewBalancedAllocation(_ runtime.Object, h framework.Handle) (framework.Plugin, error) {
+func NewBalancedAllocation(_ runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) {
 	return &BalancedAllocation{
 		handle: h,
 		resourceAllocationScorer: resourceAllocationScorer{
-			BalancedAllocationName,
-			balancedResourceScorer,
-			defaultRequestedRatioResources,
+			Name:                             BalancedAllocationName,
+			scorer:                           balancedResourceScorer,
+			resourceToWeightMap:              defaultRequestedRatioResources,
+			enablePodOverhead:                fts.EnablePodOverhead,
+			enableBalanceAttachedNodeVolumes: fts.EnableBalanceAttachedNodeVolumes,
 		},
 	}, nil
 }
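The constructors now take three arguments, while the framework's plugin-factory signature takes two. A hedged sketch of how a registry can close over the resolved features at startup so the new constructors still fit the factory shape (the helper name is illustrative; upstream provides a similar adapter):

// adaptFactory closes over the resolved Features so a three-argument
// constructor satisfies the two-argument factory shape the registry expects.
// Illustrative helper, not part of this diff.
func adaptFactory(
	fts feature.Features,
	newFn func(runtime.Object, framework.Handle, feature.Features) (framework.Plugin, error),
) func(runtime.Object, framework.Handle) (framework.Plugin, error) {
	return func(plArgs runtime.Object, h framework.Handle) (framework.Plugin, error) {
		return newFn(plArgs, h, fts) // gate values resolved once, reused for every instantiation
	}
}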
@@ -88,7 +89,8 @@ func balancedResourceScorer(requested, allocable resourceToValueMap, includeVolu
 		return 0
 	}
 
-	if includeVolumes && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && allocatableVolumes > 0 {
+	// includeVolumes is only true when BalanceAttachedNodeVolumes feature gate is enabled (see resource_allocation.go#score())
+	if includeVolumes && allocatableVolumes > 0 {
 		volumeFraction := float64(requestedVolumes) / float64(allocatableVolumes)
 		if volumeFraction >= 1 {
 			// if requested >= capacity, the corresponding host should never be preferred.
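For context on what balancedResourceScorer balances: the plugin prefers nodes whose cpu and memory utilization fractions end up closest to each other, folding the volume fraction in when the gate is on. A standalone illustration of the two-resource case, assuming the documented difference-of-fractions behavior (not the upstream function itself):

// balanceScore illustrates the two-resource case: the closer the cpu and
// memory fractions, the higher the score on the framework's 0-100 scale.
// Standalone illustration under stated assumptions, not the upstream code.
func balanceScore(reqCPU, allocCPU, reqMem, allocMem int64) int64 {
	cpuFraction := float64(reqCPU) / float64(allocCPU)
	memFraction := float64(reqMem) / float64(allocMem)
	if cpuFraction >= 1 || memFraction >= 1 {
		return 0 // requested >= capacity: never prefer this node
	}
	diff := cpuFraction - memFraction
	if diff < 0 {
		diff = -diff
	}
	return int64((1 - diff) * 100)
}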
@@ -28,6 +28,7 @@ import (
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )
@@ -390,7 +391,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
 			}
 		}
 		fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(snapshot))
-		p, _ := NewBalancedAllocation(nil, fh)
+		p, _ := NewBalancedAllocation(nil, fh, feature.Features{EnablePodOverhead: true, EnableBalanceAttachedNodeVolumes: true})
 
 		for i := range test.nodes {
 			hostResult, err := p.(framework.ScorePlugin).Score(context.Background(), nil, test.pod, test.nodes[i].Name)
@@ -24,12 +24,11 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 )
 
 var _ framework.PreFilterPlugin = &Fit{}
@@ -49,6 +48,7 @@ const (
 type Fit struct {
 	ignoredResources      sets.String
 	ignoredResourceGroups sets.String
+	enablePodOverhead     bool
 }
 
 // preFilterState computed at PreFilter and used at Filter.
@@ -67,7 +67,7 @@ func (f *Fit) Name() string {
 }
 
 // NewFit initializes a new plugin and returns it.
-func NewFit(plArgs runtime.Object, _ framework.Handle) (framework.Plugin, error) {
+func NewFit(plArgs runtime.Object, _ framework.Handle, fts feature.Features) (framework.Plugin, error) {
 	args, ok := plArgs.(*config.NodeResourcesFitArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type NodeResourcesFitArgs, got %T", plArgs)
@@ -78,6 +78,7 @@ func NewFit(plArgs runtime.Object, _ framework.Handle) (framework.Plugin, error)
 	return &Fit{
 		ignoredResources:      sets.NewString(args.IgnoredResources...),
 		ignoredResourceGroups: sets.NewString(args.IgnoredResourceGroups...),
+		enablePodOverhead:     fts.EnablePodOverhead,
 	}, nil
 }
 
@@ -108,7 +109,7 @@ func NewFit(plArgs runtime.Object, _ framework.Handle) (framework.Plugin, error)
 // Memory: 1G
 //
 // Result: CPU: 3, Memory: 3G
-func computePodResourceRequest(pod *v1.Pod) *preFilterState {
+func computePodResourceRequest(pod *v1.Pod, enablePodOverhead bool) *preFilterState {
 	result := &preFilterState{}
 	for _, container := range pod.Spec.Containers {
 		result.Add(container.Resources.Requests)
@@ -120,7 +121,7 @@ func computePodResourceRequest(pod *v1.Pod) *preFilterState {
 	}
 
 	// If Overhead is being utilized, add to the total requests for the pod
-	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+	if pod.Spec.Overhead != nil && enablePodOverhead {
 		result.Add(pod.Spec.Overhead)
 	}
 
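The doc comment above spells out the rule these hunks thread the gate through: podResourceRequest = max(sum(containers), largest initContainer) + overhead when PodOverhead is enabled. A simplified, self-contained sketch of that rule for a single scalar resource (int64 stands in for the real v1.ResourceList handling):

// podRequest applies the documented rule for one scalar resource:
// max(sum of container requests, largest init-container request), plus pod
// overhead only when enablePodOverhead is true. Simplified illustration.
func podRequest(containers, initContainers []int64, overhead int64, enablePodOverhead bool) int64 {
	var sum, maxInit int64
	for _, c := range containers {
		sum += c
	}
	for _, ic := range initContainers {
		if ic > maxInit {
			maxInit = ic // init containers run sequentially, so only the largest counts
		}
	}
	req := sum
	if maxInit > req {
		req = maxInit
	}
	if enablePodOverhead {
		req += overhead
	}
	return req
}

For example, containers requesting 1000 and 2000 mCPU with a 2500 mCPU init container and 100 mCPU overhead yield max(3000, 2500) + 100 = 3100 mCPU.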
@@ -129,7 +130,7 @@ func computePodResourceRequest(pod *v1.Pod) *preFilterState {
 
 // PreFilter invoked at the prefilter extension point.
 func (f *Fit) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) *framework.Status {
-	cycleState.Write(preFilterStateKey, computePodResourceRequest(pod))
+	cycleState.Write(preFilterStateKey, computePodResourceRequest(pod, f.enablePodOverhead))
 	return nil
 }
 
@@ -198,8 +199,8 @@ type InsufficientResource struct {
 }
 
 // Fits checks if node have enough resources to host the pod.
-func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) []InsufficientResource {
-	return fitsRequest(computePodResourceRequest(pod), nodeInfo, nil, nil)
+func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo, enablePodOverhead bool) []InsufficientResource {
+	return fitsRequest(computePodResourceRequest(pod, enablePodOverhead), nodeInfo, nil, nil)
 }
 
 func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignoredExtendedResources, ignoredResourceGroups sets.String) []InsufficientResource {
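Fits is exported, so this hunk is a compile-visible signature change for out-of-tree callers, which now state the gate explicitly. A hedged sketch of the updated call shape (checkFit is a hypothetical wrapper, not part of this diff):

// checkFit shows the new call shape: enablePodOverhead is explicit at the
// call site instead of being read from the global gate inside Fits.
func checkFit(pod *v1.Pod, nodeInfo *framework.NodeInfo, enablePodOverhead bool) bool {
	return len(Fits(pod, nodeInfo, enablePodOverhead)) == 0
}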
@@ -27,9 +27,9 @@ import (
 	"k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/component-base/featuregate"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 )
 
 var (
@@ -414,7 +414,7 @@ func TestEnoughRequests(t *testing.T) {
 			node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
 			test.nodeInfo.SetNode(&node)
 
-			p, err := NewFit(&test.args, nil)
+			p, err := NewFit(&test.args, nil, plfeature.Features{EnablePodOverhead: true})
 			if err != nil {
 				t.Fatal(err)
 			}
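A side effect worth noting: gates the plugin consumes no longer have to be flipped globally in tests; the test injects plfeature.Features directly, as above. The old pattern looked roughly like the sketch below (a reconstruction for contrast; the utilfeature alias and gate name are taken from the imports this diff removes):

// setupPodOverhead reconstructs the old test pattern: mutate the
// process-global gate and restore it when the test ends. Passing
// plfeature.Features to the constructor makes this unnecessary.
func setupPodOverhead(t *testing.T) {
	t.Cleanup(featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true))
}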
@@ -429,7 +429,7 @@ func TestEnoughRequests(t *testing.T) {
 				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
 			}
 
-			gotInsufficientResources := fitsRequest(computePodResourceRequest(test.pod), test.nodeInfo, p.(*Fit).ignoredResources, p.(*Fit).ignoredResourceGroups)
+			gotInsufficientResources := fitsRequest(computePodResourceRequest(test.pod, true), test.nodeInfo, p.(*Fit).ignoredResources, p.(*Fit).ignoredResourceGroups)
 			if !reflect.DeepEqual(gotInsufficientResources, test.wantInsufficientResources) {
 				t.Errorf("insufficient resources do not match: %+v, want: %v", gotInsufficientResources, test.wantInsufficientResources)
 			}
@@ -442,7 +442,7 @@ func TestPreFilterDisabled(t *testing.T) {
 	nodeInfo := framework.NewNodeInfo()
 	node := v1.Node{}
 	nodeInfo.SetNode(&node)
-	p, err := NewFit(&config.NodeResourcesFitArgs{}, nil)
+	p, err := NewFit(&config.NodeResourcesFitArgs{}, nil, plfeature.Features{EnablePodOverhead: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -492,7 +492,7 @@ func TestNotEnoughRequests(t *testing.T) {
 			node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1, 0, 0, 0)}}
 			test.nodeInfo.SetNode(&node)
 
-			p, err := NewFit(&config.NodeResourcesFitArgs{}, nil)
+			p, err := NewFit(&config.NodeResourcesFitArgs{}, nil, plfeature.Features{EnablePodOverhead: true})
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -545,7 +545,7 @@ func TestStorageRequests(t *testing.T) {
 				newResourcePod(framework.Resource{MilliCPU: 2, Memory: 2})),
 			name: "ephemeral local storage request is ignored due to disabled feature gate",
 			features: map[featuregate.Feature]bool{
-				features.LocalStorageCapacityIsolation: false,
+				"LocalStorageCapacityIsolation": false,
 			},
 		},
 		{
@@ -564,7 +564,7 @@ func TestStorageRequests(t *testing.T) {
 			node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
 			test.nodeInfo.SetNode(&node)
 
-			p, err := NewFit(&config.NodeResourcesFitArgs{}, nil)
+			p, err := NewFit(&config.NodeResourcesFitArgs{}, nil, plfeature.Features{EnablePodOverhead: true})
 			if err != nil {
 				t.Fatal(err)
 			}
@@ -25,6 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 )
 
 // LeastAllocated is a score plugin that favors nodes with fewer allocation requested resources based on requested resources.
@@ -65,7 +66,7 @@ func (la *LeastAllocated) ScoreExtensions() framework.ScoreExtensions {
 }
 
 // NewLeastAllocated initializes a new plugin and returns it.
-func NewLeastAllocated(laArgs runtime.Object, h framework.Handle) (framework.Plugin, error) {
+func NewLeastAllocated(laArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) {
 	args, ok := laArgs.(*config.NodeResourcesLeastAllocatedArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type NodeResourcesLeastAllocatedArgs, got %T", laArgs)
@@ -85,6 +86,7 @@ func NewLeastAllocated(laArgs runtime.Object, h framework.Handle) (framework.Plu
 			Name:                LeastAllocatedName,
 			scorer:              leastResourceScorer(resToWeightMap),
 			resourceToWeightMap: resToWeightMap,
+			enablePodOverhead:   fts.EnablePodOverhead,
 		},
 	}, nil
 }

@@ -29,6 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )
@@ -313,7 +314,7 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			snapshot := cache.NewSnapshot(test.pods, test.nodes)
 			fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(snapshot))
-			p, err := NewLeastAllocated(&test.args, fh)
+			p, err := NewLeastAllocated(&test.args, fh, feature.Features{EnablePodOverhead: true})
 
 			if test.wantErr != nil {
 				if err != nil {

@@ -25,6 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 )
 
 // MostAllocated is a score plugin that favors nodes with high allocation based on requested resources.
@@ -63,7 +64,7 @@ func (ma *MostAllocated) ScoreExtensions() framework.ScoreExtensions {
 }
 
 // NewMostAllocated initializes a new plugin and returns it.
-func NewMostAllocated(maArgs runtime.Object, h framework.Handle) (framework.Plugin, error) {
+func NewMostAllocated(maArgs runtime.Object, h framework.Handle, fts feature.Features) (framework.Plugin, error) {
 	args, ok := maArgs.(*config.NodeResourcesMostAllocatedArgs)
 	if !ok {
 		return nil, fmt.Errorf("want args to be of type NodeResourcesMostAllocatedArgs, got %T", args)
@@ -83,6 +84,7 @@ func NewMostAllocated(maArgs runtime.Object, h framework.Handle) (framework.Plug
 			Name:                MostAllocatedName,
 			scorer:              mostResourceScorer(resToWeightMap),
 			resourceToWeightMap: resToWeightMap,
+			enablePodOverhead:   fts.EnablePodOverhead,
 		},
 	}, nil
 }

@@ -29,6 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )
@@ -273,7 +274,7 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			snapshot := cache.NewSnapshot(test.pods, test.nodes)
 			fh, _ := runtime.NewFramework(nil, nil, runtime.WithSnapshotSharedLister(snapshot))
-			p, err := NewMostAllocated(&test.args, fh)
+			p, err := NewMostAllocated(&test.args, fh, feature.Features{EnablePodOverhead: true})
 
 			if test.wantErr != nil {
 				if err != nil {
@@ -26,6 +26,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
 )
 
@@ -36,7 +37,7 @@ const (
 )
 
 // NewRequestedToCapacityRatio initializes a new plugin and returns it.
-func NewRequestedToCapacityRatio(plArgs runtime.Object, handle framework.Handle) (framework.Plugin, error) {
+func NewRequestedToCapacityRatio(plArgs runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) {
 	args, err := getRequestedToCapacityRatioArgs(plArgs)
 	if err != nil {
 		return nil, err
@@ -68,9 +69,10 @@ func NewRequestedToCapacityRatio(plArgs runtime.Object, handle framework.Handle)
 	return &RequestedToCapacityRatio{
 		handle: handle,
 		resourceAllocationScorer: resourceAllocationScorer{
-			RequestedToCapacityRatioName,
-			buildRequestedToCapacityRatioScorerFunction(shape, resourceToWeightMap),
-			resourceToWeightMap,
+			Name:                RequestedToCapacityRatioName,
+			scorer:              buildRequestedToCapacityRatioScorerFunction(shape, resourceToWeightMap),
+			resourceToWeightMap: resourceToWeightMap,
+			enablePodOverhead:   fts.EnablePodOverhead,
 		},
 	}, nil
 }

@@ -27,6 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
 	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
@@ -80,7 +81,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
 			{Name: "cpu", Weight: 1},
 		},
 	}
-	p, err := NewRequestedToCapacityRatio(&args, fh)
+	p, err := NewRequestedToCapacityRatio(&args, fh, feature.Features{EnablePodOverhead: true})
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -333,7 +334,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
 			{Name: "intel.com/foo", Weight: 1},
 		},
 	}
-	p, err := NewRequestedToCapacityRatio(&args, fh)
+	p, err := NewRequestedToCapacityRatio(&args, fh, feature.Features{EnablePodOverhead: true})
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -577,7 +578,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
 			{Name: "intel.com/bar", Weight: 5},
 		},
 	}
-	p, err := NewRequestedToCapacityRatio(&args, fh)
+	p, err := NewRequestedToCapacityRatio(&args, fh, feature.Features{EnablePodOverhead: true})
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -18,9 +18,7 @@ package noderesources
 
 import (
 	v1 "k8s.io/api/core/v1"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/klog/v2"
-	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/framework"
 	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )
@@ -36,6 +34,9 @@ type resourceAllocationScorer struct {
 	Name                string
 	scorer              func(requested, allocable resourceToValueMap, includeVolumes bool, requestedVolumes int, allocatableVolumes int) int64
 	resourceToWeightMap resourceToWeightMap
+
+	enablePodOverhead                bool
+	enableBalanceAttachedNodeVolumes bool
 }
 
 // resourceToValueMap contains resource name and score.
@@ -55,18 +56,18 @@ func (r *resourceAllocationScorer) score(
 	requested := make(resourceToValueMap, len(r.resourceToWeightMap))
 	allocatable := make(resourceToValueMap, len(r.resourceToWeightMap))
 	for resource := range r.resourceToWeightMap {
-		allocatable[resource], requested[resource] = calculateResourceAllocatableRequest(nodeInfo, pod, resource)
+		allocatable[resource], requested[resource] = calculateResourceAllocatableRequest(nodeInfo, pod, resource, r.enablePodOverhead)
 	}
 	var score int64
 
 	// Check if the pod has volumes and this could be added to scorer function for balanced resource allocation.
-	if len(pod.Spec.Volumes) > 0 && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && nodeInfo.TransientInfo != nil {
+	if len(pod.Spec.Volumes) > 0 && r.enableBalanceAttachedNodeVolumes && nodeInfo.TransientInfo != nil {
 		score = r.scorer(requested, allocatable, true, nodeInfo.TransientInfo.TransNodeInfo.RequestedVolumes, nodeInfo.TransientInfo.TransNodeInfo.AllocatableVolumesCount)
 	} else {
 		score = r.scorer(requested, allocatable, false, 0, 0)
 	}
 	if klog.V(10).Enabled() {
-		if len(pod.Spec.Volumes) > 0 && utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) && nodeInfo.TransientInfo != nil {
+		if len(pod.Spec.Volumes) > 0 && r.enableBalanceAttachedNodeVolumes && nodeInfo.TransientInfo != nil {
 			klog.Infof(
 				"%v -> %v: %v, map of allocatable resources %v, map of requested resources %v , allocatable volumes %d, requested volumes %d, score %d",
 				pod.Name, node.Name, r.Name,
@@ -88,8 +89,8 @@ func (r *resourceAllocationScorer) score(
 }
 
 // calculateResourceAllocatableRequest returns resources Allocatable and Requested values
-func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.Pod, resource v1.ResourceName) (int64, int64) {
-	podRequest := calculatePodResourceRequest(pod, resource)
+func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.Pod, resource v1.ResourceName, enablePodOverhead bool) (int64, int64) {
+	podRequest := calculatePodResourceRequest(pod, resource, enablePodOverhead)
 	switch resource {
 	case v1.ResourceCPU:
 		return nodeInfo.Allocatable.MilliCPU, (nodeInfo.NonZeroRequested.MilliCPU + podRequest)
@@ -114,7 +115,7 @@ func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.P
 // calculatePodResourceRequest returns the total non-zero requests. If Overhead is defined for the pod and the
 // PodOverhead feature is enabled, the Overhead is added to the result.
 // podResourceRequest = max(sum(podSpec.Containers), podSpec.InitContainers) + overHead
-func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
+func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName, enablePodOverhead bool) int64 {
 	var podRequest int64
 	for i := range pod.Spec.Containers {
 		container := &pod.Spec.Containers[i]
@@ -131,7 +132,7 @@ func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName) int64 {
 	}
 
 	// If Overhead is being utilized, add to the total requests for the pod
-	if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
+	if pod.Spec.Overhead != nil && enablePodOverhead {
 		if quantity, found := pod.Spec.Overhead[resource]; found {
 			podRequest += quantity.Value()
 		}
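The gate reads deleted above do not vanish; they move to a single resolution point outside the plugins. A hedged sketch of that wiring, built only from identifiers this diff removes (the exact upstream location differs):

// resolveFeatures reads the global gate exactly once, at scheduler setup,
// so the NodeResources plugins stay free of pkg/features. Sketch only.
func resolveFeatures() feature.Features {
	return feature.Features{
		EnablePodOverhead:                utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead),
		EnableBalanceAttachedNodeVolumes: utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes),
	}
}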