Cache Allocatable Resources

Wojciech Tyczynski
2016-07-12 16:30:26 +02:00
parent 58c201834c
commit c929d95884
5 changed files with 82 additions and 44 deletions
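
The gist of the change: the two priority functions below stop reading node.Status.Allocatable (and re-parsing resource.Quantity values) on every evaluation and instead use allocatable CPU and memory already held by the scheduler cache's NodeInfo. Roughly, the cache works like the sketch below; the field names and the exact shape of schedulercache.NodeInfo are illustrative only, not copied from the real package:

package schedulercache // illustrative sketch only, not the real package contents

import "k8s.io/kubernetes/pkg/api"

// Resource approximates a plain struct of int64s, so priority functions read
// simple fields instead of converting resource.Quantity values on every call.
type Resource struct {
	MilliCPU int64
	Memory   int64
}

type NodeInfo struct {
	node                *api.Node
	allocatableResource Resource
}

// SetNode stores the node and caches its allocatable CPU and memory once.
func (n *NodeInfo) SetNode(node *api.Node) {
	n.node = node
	n.allocatableResource = Resource{
		MilliCPU: node.Status.Allocatable.Cpu().MilliValue(),
		Memory:   node.Status.Allocatable.Memory().Value(),
	}
}

// AllocatableResource returns the cached allocatable resources.
func (n *NodeInfo) AllocatableResource() Resource {
	return n.allocatableResource
}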

View File

@@ -57,22 +57,20 @@ func calculateScore(requested int64, capacity int64, node string) int64 {
 // 'pods' is a list of pods currently scheduled on the node.
 // TODO: Use Node() from nodeInfo instead of passing it.
 func calculateResourceOccupancy(pod *api.Pod, podRequests *schedulercache.Resource, node *api.Node, nodeInfo *schedulercache.NodeInfo) schedulerapi.HostPriority {
-	capacityMilliCPU := node.Status.Allocatable.Cpu().MilliValue()
-	capacityMemory := node.Status.Allocatable.Memory().Value()
+	allocatableResources := nodeInfo.AllocatableResource()
 	totalResources := *podRequests
 	totalResources.MilliCPU += nodeInfo.NonZeroRequest().MilliCPU
 	totalResources.Memory += nodeInfo.NonZeroRequest().Memory
-	cpuScore := calculateScore(totalResources.MilliCPU, capacityMilliCPU, node.Name)
-	memoryScore := calculateScore(totalResources.Memory, capacityMemory, node.Name)
+	cpuScore := calculateScore(totalResources.MilliCPU, allocatableResources.MilliCPU, node.Name)
+	memoryScore := calculateScore(totalResources.Memory, allocatableResources.Memory, node.Name)
 	if glog.V(10) {
 		// We explicitly don't do glog.V(10).Infof() to avoid computing all the parameters if this is
 		// not logged. There is visible performance gain from it.
 		glog.V(10).Infof(
 			"%v -> %v: Least Requested Priority, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d CPU %d memory",
 			pod.Name, node.Name,
-			capacityMilliCPU, capacityMemory,
+			allocatableResources.MilliCPU, allocatableResources.Memory,
 			totalResources.MilliCPU, totalResources.Memory,
 			cpuScore, memoryScore,
 		)
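
For context, calculateScore (whose signature appears in the hunk header above but whose body is not part of this diff) turns "requested vs. capacity" into a 0-10 score. A minimal sketch, assuming it uses the usual least-requested formula where a completely idle node scores 10 and a fully requested one scores 0:

// Sketch of least-requested scoring, assuming the conventional formula;
// the real body is not shown in this diff.
func calculateScore(requested, capacity int64, node string) int64 {
	if capacity == 0 {
		return 0
	}
	if requested > capacity {
		// A request that exceeds allocatable capacity scores zero.
		return 0
	}
	// Unused capacity maps linearly onto 0..10.
	return ((capacity - requested) * 10) / capacity
}
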
@@ -239,15 +237,13 @@ func BalancedResourceAllocation(pod *api.Pod, nodeNameToInfo map[string]*schedul
 // TODO: Use Node() from nodeInfo instead of passing it.
 func calculateBalancedResourceAllocation(pod *api.Pod, podRequests *schedulercache.Resource, node *api.Node, nodeInfo *schedulercache.NodeInfo) schedulerapi.HostPriority {
-	capacityMilliCPU := node.Status.Allocatable.Cpu().MilliValue()
-	capacityMemory := node.Status.Allocatable.Memory().Value()
+	allocatableResources := nodeInfo.AllocatableResource()
 	totalResources := *podRequests
 	totalResources.MilliCPU += nodeInfo.NonZeroRequest().MilliCPU
 	totalResources.Memory += nodeInfo.NonZeroRequest().Memory
-	cpuFraction := fractionOfCapacity(totalResources.MilliCPU, capacityMilliCPU)
-	memoryFraction := fractionOfCapacity(totalResources.Memory, capacityMemory)
+	cpuFraction := fractionOfCapacity(totalResources.MilliCPU, allocatableResources.MilliCPU)
+	memoryFraction := fractionOfCapacity(totalResources.Memory, allocatableResources.Memory)
 	score := int(0)
 	if cpuFraction >= 1 || memoryFraction >= 1 {
 		// if requested >= capacity, the corresponding host should never be preferred.
@@ -266,7 +262,7 @@ func calculateBalancedResourceAllocation(pod *api.Pod, podRequests *schedulercac
 		glog.V(10).Infof(
 			"%v -> %v: Balanced Resource Allocation, capacity %d millicores %d memory bytes, total request %d millicores %d memory bytes, score %d",
 			pod.Name, node.Name,
-			capacityMilliCPU, capacityMemory,
+			allocatableResources.MilliCPU, allocatableResources.Memory,
 			totalResources.MilliCPU, totalResources.Memory,
 			score,
 		)
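
The second function touched above, calculateBalancedResourceAllocation, prefers nodes where CPU and memory would end up equally utilized. Neither fractionOfCapacity nor the final scoring step is shown in this diff; the sketch below assumes the usual balanced-allocation formula (score = 10 minus ten times the gap between the two utilization fractions), and balancedScore is a hypothetical helper name for what the real function computes inline:

import "math" // assumed by the sketch below

// fractionOfCapacity: share of the allocatable resource consumed by the total request.
func fractionOfCapacity(requested, capacity int64) float64 {
	if capacity == 0 {
		return 1
	}
	return float64(requested) / float64(capacity)
}

// balancedScore: the closer the CPU and memory fractions are to each other,
// the higher the score (0..10).
func balancedScore(cpuFraction, memoryFraction float64) int {
	if cpuFraction >= 1 || memoryFraction >= 1 {
		// If requested >= allocatable for either resource, never prefer this node.
		return 0
	}
	diff := math.Abs(cpuFraction - memoryFraction)
	return int(10 - diff*10)
}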

View File

@@ -138,6 +138,12 @@ func TestZeroRequest(t *testing.T) {
 	const expectedPriority int = 25
 	for _, test := range tests {
 		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
+		for _, node := range test.nodes {
+			if _, ok := nodeNameToInfo[node.Name]; !ok {
+				nodeNameToInfo[node.Name] = schedulercache.NewNodeInfo()
+			}
+			nodeNameToInfo[node.Name].SetNode(node)
+		}
 		list, err := scheduler.PrioritizeNodes(
 			test.pod,
 			nodeNameToInfo,
@@ -389,6 +395,12 @@ func TestLeastRequested(t *testing.T) {
 	for _, test := range tests {
 		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
+		for _, node := range test.nodes {
+			if _, ok := nodeNameToInfo[node.Name]; !ok {
+				nodeNameToInfo[node.Name] = schedulercache.NewNodeInfo()
+			}
+			nodeNameToInfo[node.Name].SetNode(node)
+		}
 		list, err := LeastRequestedPriority(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(test.nodes))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
@@ -722,6 +734,12 @@ func TestBalancedResourceAllocation(t *testing.T) {
 	for _, test := range tests {
 		nodeNameToInfo := schedulercache.CreateNodeNameToInfoMap(test.pods)
+		for _, node := range test.nodes {
+			if _, ok := nodeNameToInfo[node.Name]; !ok {
+				nodeNameToInfo[node.Name] = schedulercache.NewNodeInfo()
+			}
+			nodeNameToInfo[node.Name].SetNode(node)
+		}
 		list, err := BalancedResourceAllocation(test.pod, nodeNameToInfo, algorithm.FakeNodeLister(test.nodes))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
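
Because the priority functions now read allocatable values from the cached NodeInfo rather than from the *api.Node argument, each test has to attach the node to its NodeInfo first; otherwise AllocatableResource() would return zeros. The same loop is added to all three tests above. If one wanted to avoid the repetition, it could be pulled into a small helper along these lines (hypothetical name, assuming test.nodes is a []*api.Node as the SetNode(node) calls suggest):

// setNodesInInfoMap is a hypothetical helper mirroring the setup added above:
// it ensures every node has a NodeInfo entry and attaches the node object so
// that the cached allocatable resources are populated before scoring.
func setNodesInInfoMap(nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) {
	for _, node := range nodes {
		if _, ok := nodeNameToInfo[node.Name]; !ok {
			nodeNameToInfo[node.Name] = schedulercache.NewNodeInfo()
		}
		nodeNameToInfo[node.Name].SetNode(node)
	}
}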