Scheduler: replace system modeler with scheduler cache
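The change below rewires the resource-based priority functions (LeastRequestedPriority and BalancedResourceAllocation) to read pre-aggregated request totals from the scheduler cache's NodeInfo instead of re-summing container requests from a pod list on every scoring pass. As orientation only, here is a minimal Go sketch of that caching idea using stand-in types; it is not the real schedulercache package, and names such as containerRequest are invented for the sketch.

// Minimal, hypothetical sketch: each node gets a cache entry that keeps a
// running total of pod requests, updated when pods are added or removed, so
// priority functions read one aggregate instead of re-walking containers.
package main

import "fmt"

// resource is the aggregated request a priority function reads.
type resource struct {
	MilliCPU int64
	Memory   int64
}

// containerRequest and pod are stand-ins for the api types used in the diff.
type containerRequest struct {
	MilliCPU int64
	Memory   int64
}

type pod struct {
	Name       string
	NodeName   string
	Containers []containerRequest
}

// nodeInfo is a stand-in for schedulercache.NodeInfo.
type nodeInfo struct {
	pods           []*pod
	nonZeroRequest resource
}

// AddPod pays the aggregation cost once, when the pod is added to the cache.
// (The real cache substitutes defaults for unspecified requests; see the
// note after the second hunk below.)
func (n *nodeInfo) AddPod(p *pod) {
	for _, c := range p.Containers {
		n.nonZeroRequest.MilliCPU += c.MilliCPU
		n.nonZeroRequest.Memory += c.Memory
	}
	n.pods = append(n.pods, p)
}

// RemovePod reverses AddPod when a pod is deleted or rescheduled.
func (n *nodeInfo) RemovePod(p *pod) {
	for _, c := range p.Containers {
		n.nonZeroRequest.MilliCPU -= c.MilliCPU
		n.nonZeroRequest.Memory -= c.Memory
	}
	for i, existing := range n.pods {
		if existing == p {
			n.pods = append(n.pods[:i], n.pods[i+1:]...)
			break
		}
	}
}

// NonZeroRequest is what the rewritten priority functions read directly.
func (n *nodeInfo) NonZeroRequest() resource { return n.nonZeroRequest }

func main() {
	ni := &nodeInfo{}
	ni.AddPod(&pod{Name: "web", NodeName: "node-1",
		Containers: []containerRequest{{MilliCPU: 250, Memory: 512 << 20}}})
	fmt.Printf("aggregate after AddPod: %+v\n", ni.NonZeroRequest())
}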
@@ -44,19 +44,12 @@ func calculateScore(requested int64, capacity int64, node string) int {
 
 // Calculate the resource occupancy on a node. 'node' has information about the resources on the node.
-// 'pods' is a list of pods currently scheduled on the node.
-func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) schedulerapi.HostPriority {
-	totalMilliCPU := int64(0)
-	totalMemory := int64(0)
+func calculateResourceOccupancy(pod *api.Pod, node api.Node, nodeInfo *schedulercache.NodeInfo) schedulerapi.HostPriority {
+	totalMilliCPU := nodeInfo.NonZeroRequest().MilliCPU
+	totalMemory := nodeInfo.NonZeroRequest().Memory
 	capacityMilliCPU := node.Status.Allocatable.Cpu().MilliValue()
 	capacityMemory := node.Status.Allocatable.Memory().Value()
 
-	for _, existingPod := range pods {
-		for _, container := range existingPod.Spec.Containers {
-			cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
-			totalMilliCPU += cpu
-			totalMemory += memory
-		}
-	}
 	// Add the resources requested by the current pod being scheduled.
 	// This also helps differentiate between differently sized, but empty, nodes.
 	for _, container := range pod.Spec.Containers {
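This first hunk is where the shape of the change is clearest: the deleted nested loop recomputed the node's totals from every container of every scheduled pod on each scoring pass, whereas nodeInfo.NonZeroRequest() is maintained incrementally by the cache, so scoring a node no longer depends on how many pods it already runs. The calculateScore step named in the hunk header is untouched by this commit; assuming the conventional "unused capacity scaled to 0-10" form, it would look roughly like this, continuing the sketch package above:

// leastRequestedScore assumes the conventional "unused capacity, scaled to
// 0-10" form for calculateScore; the real body sits outside this hunk.
func leastRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 {
		return 0
	}
	if requested > capacity {
		// A node that cannot hold the request scores 0.
		return 0
	}
	return ((capacity - requested) * 10) / capacity
}

With the cache in place, computing this per node is a handful of arithmetic operations instead of a walk over every container on the node.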
@@ -93,7 +86,7 @@ func LeastRequestedPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulerca
 
 	list := schedulerapi.HostPriorityList{}
 	for _, node := range nodes.Items {
-		list = append(list, calculateResourceOccupancy(pod, node, nodeNameToInfo[node.Name].Pods()))
+		list = append(list, calculateResourceOccupancy(pod, node, nodeNameToInfo[node.Name]))
 	}
 	return list, nil
 }
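The call-site change is mechanical: the whole NodeInfo is passed instead of the pod slice returned by .Pods(). The "NonZero" in NonZeroRequest refers to the defaulting done by priorityutil.GetNonzeroRequests in the deleted lines: containers that declare no requests are still charged a small default so they count against a node. A sketch of that convention, continuing the stand-in types above; the default values are assumptions, not read from this diff:

// Illustrative defaults charged to containers that declare no requests;
// the concrete values are assumptions, not taken from this diff.
const (
	defaultMilliCPU int64 = 100               // 0.1 core
	defaultMemory   int64 = 200 * 1024 * 1024 // 200 MiB
)

// nonZeroRequests plays the role of priorityutil.GetNonzeroRequests: return
// the declared requests, substituting defaults for zero values. The AddPod
// sketch above would add these values rather than the raw ones.
func nonZeroRequests(c containerRequest) (milliCPU, memory int64) {
	milliCPU, memory = c.MilliCPU, c.Memory
	if milliCPU == 0 {
		milliCPU = defaultMilliCPU
	}
	if memory == 0 {
		memory = defaultMemory
	}
	return milliCPU, memory
}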
@@ -227,22 +220,15 @@ func BalancedResourceAllocation(pod *api.Pod, nodeNameToInfo map[string]*schedul
 
 	list := schedulerapi.HostPriorityList{}
 	for _, node := range nodes.Items {
-		list = append(list, calculateBalancedResourceAllocation(pod, node, nodeNameToInfo[node.Name].Pods()))
+		list = append(list, calculateBalancedResourceAllocation(pod, node, nodeNameToInfo[node.Name]))
 	}
 	return list, nil
 }
 
-func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*api.Pod) schedulerapi.HostPriority {
-	totalMilliCPU := int64(0)
-	totalMemory := int64(0)
+func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, nodeInfo *schedulercache.NodeInfo) schedulerapi.HostPriority {
+	totalMilliCPU := nodeInfo.NonZeroRequest().MilliCPU
+	totalMemory := nodeInfo.NonZeroRequest().Memory
 	score := int(0)
-	for _, existingPod := range pods {
-		for _, container := range existingPod.Spec.Containers {
-			cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
-			totalMilliCPU += cpu
-			totalMemory += memory
-		}
-	}
 	// Add the resources requested by the current pod being scheduled.
 	// This also helps differentiate between differently sized, but empty, nodes.
 	for _, container := range pod.Spec.Containers {
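BalancedResourceAllocation consumes the same cached aggregate but scores a node by how evenly its CPU and memory would be utilized after placing the pod, rather than by how much headroom is left. Its formula lies outside this hunk, so the sketch below uses an assumed, typical "penalize the gap between the two utilization fractions" form, continuing the sketch package above (needs the standard "math" import):

// balancedResourceScore favors nodes where CPU and memory utilization end up
// closest to each other; the 0-10 scaling mirrors the sketch above. Treat
// this as an assumed, typical form, not the exact code in the real file.
func balancedResourceScore(cpuRequested, cpuCapacity, memRequested, memCapacity int64) int {
	cpuFraction := fractionOfCapacity(cpuRequested, cpuCapacity)
	memFraction := fractionOfCapacity(memRequested, memCapacity)
	if cpuFraction >= 1 || memFraction >= 1 {
		// A node that would be overcommitted on either resource scores 0.
		return 0
	}
	diff := math.Abs(cpuFraction - memFraction)
	return int(10 - diff*10)
}

func fractionOfCapacity(requested, capacity int64) float64 {
	if capacity == 0 {
		return 1
	}
	return float64(requested) / float64(capacity)
}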
@@ -140,7 +140,6 @@ func TestZeroRequest(t *testing.T) {
 	list, err := scheduler.PrioritizeNodes(
 		test.pod,
 		nodeNameToInfo,
-		algorithm.FakePodLister(test.pods),
 		// This should match the configuration in defaultPriorities() in
 		// plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go if you want
 		// to test what's actually in production.