kubernetes#60525 introduced the Balanced attached node volumes feature gate to include the attached-volume count when prioritizing nodes. The gate was added because it was useful for the Red Hat OpenShift Online environment, which is no longer in use, so removing it improves the maintainability of the scheduler code base, as noted in kubernetes#101489 (comment).
127 lines
4.5 KiB
Go
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package noderesources

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)

// resourceToWeightMap contains resource name and weight.
type resourceToWeightMap map[v1.ResourceName]int64

// defaultRequestedRatioResources is used to set default requestToWeight map for CPU and memory
var defaultRequestedRatioResources = resourceToWeightMap{v1.ResourceMemory: 1, v1.ResourceCPU: 1}

// resourceAllocationScorer contains information to calculate resource allocation score.
type resourceAllocationScorer struct {
	// Name identifies the scorer in log output.
	Name string
	// scorer computes a node score from the requested and allocatable values.
	scorer func(requested, allocable resourceToValueMap) int64
	// resourceToWeightMap holds the weight applied to each scored resource.
	resourceToWeightMap resourceToWeightMap

	// enablePodOverhead reports whether the PodOverhead feature is enabled,
	// in which case pod Overhead is added to the resource requests.
	enablePodOverhead bool
}

// resourceToValueMap contains resource name and score.
type resourceToValueMap map[v1.ResourceName]int64

// score will use `scorer` function to calculate the score.
func (r *resourceAllocationScorer) score(
	pod *v1.Pod,
	nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
	node := nodeInfo.Node()
	if node == nil {
		return 0, framework.NewStatus(framework.Error, "node not found")
	}
	if r.resourceToWeightMap == nil {
		return 0, framework.NewStatus(framework.Error, "resources not found")
	}
	requested := make(resourceToValueMap, len(r.resourceToWeightMap))
	allocatable := make(resourceToValueMap, len(r.resourceToWeightMap))
	for resource := range r.resourceToWeightMap {
		allocatable[resource], requested[resource] = calculateResourceAllocatableRequest(nodeInfo, pod, resource, r.enablePodOverhead)
	}
	var score int64

	score = r.scorer(requested, allocatable)

	if klog.V(10).Enabled() {
		klog.Infof(
			"%v -> %v: %v, map of allocatable resources %v, map of requested resources %v, score %d",
			pod.Name, node.Name, r.Name,
			allocatable, requested, score,
		)
	}

	return score, nil
}

// calculateResourceAllocatableRequest returns resources Allocatable and Requested values
func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.Pod, resource v1.ResourceName, enablePodOverhead bool) (int64, int64) {
	podRequest := calculatePodResourceRequest(pod, resource, enablePodOverhead)
	// CPU and memory are compared against NonZeroRequested, which substitutes a
	// default request for containers that do not specify one; other resources use
	// the plain Requested totals.
	switch resource {
	case v1.ResourceCPU:
		return nodeInfo.Allocatable.MilliCPU, (nodeInfo.NonZeroRequested.MilliCPU + podRequest)
	case v1.ResourceMemory:
		return nodeInfo.Allocatable.Memory, (nodeInfo.NonZeroRequested.Memory + podRequest)
	case v1.ResourceEphemeralStorage:
		return nodeInfo.Allocatable.EphemeralStorage, (nodeInfo.Requested.EphemeralStorage + podRequest)
	default:
		if _, exists := nodeInfo.Allocatable.ScalarResources[resource]; exists {
			return nodeInfo.Allocatable.ScalarResources[resource], (nodeInfo.Requested.ScalarResources[resource] + podRequest)
		}
	}
	if klog.V(10).Enabled() {
		klog.Infof("requested resource %v not considered for node score calculation",
			resource,
		)
	}
	return 0, 0
}

// calculatePodResourceRequest returns the total non-zero requests. If Overhead is defined for the pod and the
// PodOverhead feature is enabled, the Overhead is added to the result.
// podResourceRequest = max(sum(podSpec.Containers), max(podSpec.InitContainers)) + overhead
func calculatePodResourceRequest(pod *v1.Pod, resource v1.ResourceName, enablePodOverhead bool) int64 {
	var podRequest int64
	for i := range pod.Spec.Containers {
		container := &pod.Spec.Containers[i]
		value := schedutil.GetNonzeroRequestForResource(resource, &container.Resources.Requests)
		podRequest += value
	}

	// Init containers run sequentially, so only the largest per-resource request counts.
	for i := range pod.Spec.InitContainers {
		initContainer := &pod.Spec.InitContainers[i]
		value := schedutil.GetNonzeroRequestForResource(resource, &initContainer.Resources.Requests)
		if podRequest < value {
			podRequest = value
		}
	}

	// If Overhead is being utilized, add to the total requests for the pod
	if pod.Spec.Overhead != nil && enablePodOverhead {
		if quantity, found := pod.Spec.Overhead[resource]; found {
			podRequest += quantity.Value()
		}
	}

	return podRequest
}