Unify resources

Wojciech Tyczynski 2016-07-12 16:33:30 +02:00
parent 89be039352
commit 58c201834c


@@ -28,18 +28,13 @@ import (
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )
 
-type resources struct {
-	millicpu int64
-	memory   int64
-}
-
-func getNonZeroRequests(pod *api.Pod) *resources {
-	result := &resources{}
+func getNonZeroRequests(pod *api.Pod) *schedulercache.Resource {
+	result := &schedulercache.Resource{}
 	for i := range pod.Spec.Containers {
 		container := &pod.Spec.Containers[i]
 		cpu, memory := priorityutil.GetNonzeroRequests(&container.Resources.Requests)
-		result.millicpu += cpu
-		result.memory += memory
+		result.MilliCPU += cpu
+		result.Memory += memory
 	}
 	return result
 }
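
The hunk above drops the file-local `resources` struct in favor of the shared `schedulercache.Resource` type, so the priority functions accumulate requests into the same exported `MilliCPU`/`Memory` fields the scheduler cache uses. A minimal, self-contained sketch of that aggregation (the `Resource` type mirrors the diff; the default request values and the `container` type are illustrative assumptions, the real substitution lives in `priorityutil.GetNonzeroRequests`):

```go
package main

import "fmt"

// Resource mirrors the role of schedulercache.Resource in the diff:
// one shared accumulator type with exported fields.
type Resource struct {
	MilliCPU int64 // CPU request in millicores
	Memory   int64 // memory request in bytes
}

// Assumed defaults for containers that declare no requests; the real
// constants live in the priorityutil package.
const (
	defaultMilliCPU int64 = 100
	defaultMemory   int64 = 200 * 1024 * 1024
)

// container is a stand-in for a pod container's resource requests.
type container struct {
	milliCPU int64
	memory   int64
}

// getNonZeroRequests sums per-container requests, substituting defaults
// for zero values, as the diffed function does via priorityutil.
func getNonZeroRequests(containers []container) *Resource {
	result := &Resource{}
	for _, c := range containers {
		cpu, mem := c.milliCPU, c.memory
		if cpu == 0 {
			cpu = defaultMilliCPU
		}
		if mem == 0 {
			mem = defaultMemory
		}
		result.MilliCPU += cpu
		result.Memory += mem
	}
	return result
}

func main() {
	pod := []container{
		{milliCPU: 250},             // no memory request -> default applies
		{memory: 512 * 1024 * 1024}, // no CPU request -> default applies
	}
	r := getNonZeroRequests(pod)
	fmt.Printf("non-zero request: %d millicores, %d bytes\n", r.MilliCPU, r.Memory)
}
```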
@@ -61,16 +56,16 @@ func calculateScore(requested int64, capacity int64, node string) int64 {
 // Calculate the resource occupancy on a node. 'node' has information about the resources on the node.
 // 'pods' is a list of pods currently scheduled on the node.
 // TODO: Use Node() from nodeInfo instead of passing it.
-func calculateResourceOccupancy(pod *api.Pod, podRequests *resources, node *api.Node, nodeInfo *schedulercache.NodeInfo) schedulerapi.HostPriority {
+func calculateResourceOccupancy(pod *api.Pod, podRequests *schedulercache.Resource, node *api.Node, nodeInfo *schedulercache.NodeInfo) schedulerapi.HostPriority {
 	capacityMilliCPU := node.Status.Allocatable.Cpu().MilliValue()
 	capacityMemory := node.Status.Allocatable.Memory().Value()
 
 	totalResources := *podRequests
-	totalResources.millicpu += nodeInfo.NonZeroRequest().MilliCPU
-	totalResources.memory += nodeInfo.NonZeroRequest().Memory
+	totalResources.MilliCPU += nodeInfo.NonZeroRequest().MilliCPU
+	totalResources.Memory += nodeInfo.NonZeroRequest().Memory
 
-	cpuScore := calculateScore(totalResources.millicpu, capacityMilliCPU, node.Name)
-	memoryScore := calculateScore(totalResources.memory, capacityMemory, node.Name)
+	cpuScore := calculateScore(totalResources.MilliCPU, capacityMilliCPU, node.Name)
+	memoryScore := calculateScore(totalResources.Memory, capacityMemory, node.Name)
 	if glog.V(10) {
 		// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
 		// not logged. There is visible performance gain from it.
@@ -78,7 +73,7 @@ func calculateResourceOccupancy(pod *api.Pod, podRequests *resources, node *api.
 		"%v -> %v: Least Requested Priority, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d CPU %d memory",
 		pod.Name, node.Name,
 		capacityMilliCPU, capacityMemory,
-		totalResources.millicpu, totalResources.memory,
+		totalResources.MilliCPU, totalResources.Memory,
 		cpuScore, memoryScore,
 	)
 }
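
`calculateScore`, named in the hunk header above, is untouched by this commit: least-requested priority maps the unused fraction of a node's capacity onto a 0-10 score, favoring the emptiest nodes. A sketch of that formula (the shape follows the function's use above; the edge-case handling is an assumption, not copied from the source):

```go
package main

import "fmt"

// calculateScore sketches least-requested scoring: the unused capacity
// fraction scaled into 0..10. Zero-capacity and over-committed handling
// here is assumed.
func calculateScore(requested, capacity int64) int64 {
	if capacity == 0 {
		return 0
	}
	if requested > capacity {
		return 0 // over-committed nodes get the worst score
	}
	return ((capacity - requested) * 10) / capacity
}

func main() {
	// 4000 millicores capacity, 1000 requested -> (3000*10)/4000 = 7.
	fmt.Println(calculateScore(1000, 4000))
}
```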
@@ -243,16 +238,16 @@ func BalancedResourceAllocation(pod *api.Pod, nodeNameToInfo map[string]*schedul
 }
 
 // TODO: Use Node() from nodeInfo instead of passing it.
-func calculateBalancedResourceAllocation(pod *api.Pod, podRequests *resources, node *api.Node, nodeInfo *schedulercache.NodeInfo) schedulerapi.HostPriority {
+func calculateBalancedResourceAllocation(pod *api.Pod, podRequests *schedulercache.Resource, node *api.Node, nodeInfo *schedulercache.NodeInfo) schedulerapi.HostPriority {
 	capacityMilliCPU := node.Status.Allocatable.Cpu().MilliValue()
 	capacityMemory := node.Status.Allocatable.Memory().Value()
 
 	totalResources := *podRequests
-	totalResources.millicpu += nodeInfo.NonZeroRequest().MilliCPU
-	totalResources.memory += nodeInfo.NonZeroRequest().Memory
+	totalResources.MilliCPU += nodeInfo.NonZeroRequest().MilliCPU
+	totalResources.Memory += nodeInfo.NonZeroRequest().Memory
 
-	cpuFraction := fractionOfCapacity(totalResources.millicpu, capacityMilliCPU)
-	memoryFraction := fractionOfCapacity(totalResources.memory, capacityMemory)
+	cpuFraction := fractionOfCapacity(totalResources.MilliCPU, capacityMilliCPU)
+	memoryFraction := fractionOfCapacity(totalResources.Memory, capacityMemory)
 	score := int(0)
 	if cpuFraction >= 1 || memoryFraction >= 1 {
 		// if requested >= capacity, the corresponding host should never be preferrred.
@@ -272,7 +267,7 @@ func calculateBalancedResourceAllocation(pod *api.Pod, podRequests *resources, n
 		"%v -> %v: Balanced Resource Allocation, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d",
 		pod.Name, node.Name,
 		capacityMilliCPU, capacityMemory,
-		totalResources.millicpu, totalResources.memory,
+		totalResources.MilliCPU, totalResources.Memory,
 		score,
 	)
 }
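
In the function above, `fractionOfCapacity` divides the total non-zero request by the node's allocatable capacity, and the balanced-resource score rewards nodes where the CPU and memory fractions come out closest to each other. A sketch of that scoring (the formula shape follows the surrounding function; the rounding detail is an assumption):

```go
package main

import (
	"fmt"
	"math"
)

// balancedScore sketches balanced-allocation scoring: the closer the CPU
// and memory utilization fractions are, the higher the score (0..10).
func balancedScore(cpuFraction, memoryFraction float64) int {
	if cpuFraction >= 1 || memoryFraction >= 1 {
		// If requested >= capacity, the host should never be preferred.
		return 0
	}
	return int(10 - math.Abs(cpuFraction-memoryFraction)*10)
}

func main() {
	// CPU at 50% and memory at 30% of capacity -> 10 - 0.2*10 = 8.
	fmt.Println(balancedScore(0.5, 0.3))
}
```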