Merge pull request #19083 from resouer/allocatable

Use Allocatable to replace Capacity
Alex Mohr
2016-01-21 16:05:05 -08:00
5 changed files with 142 additions and 13 deletions
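For context: NodeStatus carries both Capacity (everything the machine has) and Allocatable (what remains for pods after system reservations). This PR switches the scheduler's fit checks from the former to the latter. A minimal sketch of the distinction, assuming a populated api.Node; the helper name is illustrative and not part of this change:

```go
package main

import "k8s.io/kubernetes/pkg/api"

// schedulableMilliCPU is a hypothetical helper illustrating the point of
// this PR: pods should be fitted against Allocatable, because Capacity
// ignores resources reserved for system daemons and can overcommit a node.
func schedulableMilliCPU(node *api.Node) int64 {
	// Before this PR the predicates effectively used:
	//   node.Status.Capacity.Cpu().MilliValue()
	return node.Status.Allocatable.Cpu().MilliValue()
}
```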


@@ -254,9 +254,9 @@ func getResourceRequest(pod *api.Pod) resourceRequest {
 	return result
 }
-func CheckPodsExceedingFreeResources(pods []*api.Pod, capacity api.ResourceList) (fitting []*api.Pod, notFittingCPU, notFittingMemory []*api.Pod) {
-	totalMilliCPU := capacity.Cpu().MilliValue()
-	totalMemory := capacity.Memory().Value()
+func CheckPodsExceedingFreeResources(pods []*api.Pod, allocatable api.ResourceList) (fitting []*api.Pod, notFittingCPU, notFittingMemory []*api.Pod) {
+	totalMilliCPU := allocatable.Cpu().MilliValue()
+	totalMemory := allocatable.Memory().Value()
 	milliCPURequested := int64(0)
 	memoryRequested := int64(0)
 	for _, pod := range pods {
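With the rename, the contract is visible in the signature: callers pass the node's Allocatable list, and any pods that would push the running set past those totals come back in the notFitting slices. An illustrative call site, assuming a nodeStatus of type api.NodeStatus and a pods slice already in scope:

```go
// Sketch of a caller under the new signature; nodeStatus and pods are
// assumptions, only the function itself comes from this diff.
fitting, notFittingCPU, notFittingMemory :=
	CheckPodsExceedingFreeResources(pods, nodeStatus.Allocatable)
glog.V(4).Infof("%d fit, %d exceed CPU, %d exceed memory",
	len(fitting), len(notFittingCPU), len(notFittingMemory))
```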
@@ -292,8 +292,9 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
 		return false, err
 	}
-	if int64(len(existingPods))+1 > info.Status.Capacity.Pods().Value() {
-		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %+v is full, running %v out of %v Pods.", podName(pod), node, len(existingPods), info.Status.Capacity.Pods().Value())
+	allocatable := info.Status.Allocatable
+	if int64(len(existingPods))+1 > allocatable.Pods().Value() {
+		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %+v is full, running %v out of %v Pods.", podName(pod), node, len(existingPods), allocatable.Pods().Value())
 		return false, ErrExceededMaxPodNumber
 	}
@@ -303,7 +304,7 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
 	}
 	pods := append(existingPods, pod)
-	_, exceedingCPU, exceedingMemory := CheckPodsExceedingFreeResources(pods, info.Status.Capacity)
+	_, exceedingCPU, exceedingMemory := CheckPodsExceedingFreeResources(pods, allocatable)
 	if len(exceedingCPU) > 0 {
 		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %v does not have sufficient CPU", podName(pod), node)
 		return false, ErrInsufficientFreeCPU
@@ -312,7 +313,7 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
 		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %v does not have sufficient Memory", podName(pod), node)
 		return false, ErrInsufficientFreeMemory
 	}
-	glog.V(10).Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.", podName(pod), node, len(pods)-1, info.Status.Capacity.Pods().Value())
+	glog.V(10).Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.", podName(pod), node, len(pods)-1, allocatable.Pods().Value())
 	return true, nil
 }
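Taken together, the predicate's post-change flow condenses to the sketch below (not verbatim source; logging elided, names and error values exactly as in the hunks above):

```go
// Condensed from the hunks above: every limit now comes from Allocatable.
allocatable := info.Status.Allocatable
if int64(len(existingPods))+1 > allocatable.Pods().Value() {
	return false, ErrExceededMaxPodNumber
}
pods := append(existingPods, pod)
_, exceedingCPU, exceedingMemory := CheckPodsExceedingFreeResources(pods, allocatable)
if len(exceedingCPU) > 0 {
	return false, ErrInsufficientFreeCPU
}
if len(exceedingMemory) > 0 {
	return false, ErrInsufficientFreeMemory
}
return true, nil
```

The remaining hunks are in the predicate's unit tests.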


@@ -54,6 +54,14 @@ func makeResources(milliCPU int64, memory int64, pods int64) api.NodeResources {
 	}
 }
+func makeAllocatableResources(milliCPU int64, memory int64, pods int64) api.ResourceList {
+	return api.ResourceList{
+		api.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+		api.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
+		api.ResourcePods:   *resource.NewQuantity(pods, resource.DecimalSI),
+	}
+}
 func newResourcePod(usage ...resourceRequest) *api.Pod {
 	containers := []api.Container{}
 	for _, req := range usage {
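A note on the helper's units, grounded in the constructors above: CPU is built with NewMilliQuantity (thousandths of a core), memory with NewQuantity in BinarySI (plain bytes here), and pods as a decimal count. A small usage sketch:

```go
// Usage sketch of the new test helper; values match the tests below
// (10 milli-CPU, 20 bytes, 32 pods -- the deliberately tiny test scale).
alloc := makeAllocatableResources(10, 20, 32)
_ = alloc.Cpu().MilliValue() // 10 (milli-CPU, DecimalSI)
_ = alloc.Memory().Value()   // 20 (bytes, BinarySI)
_ = alloc.Pods().Value()     // 32 (pod count, DecimalSI)
```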
@@ -130,7 +138,7 @@ func TestPodFitsResources(t *testing.T) {
 	}
 	for _, test := range enoughPodsTests {
-		node := api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity}}
+		node := api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 32)}}
 		fit := ResourceFit{FakeNodeInfo(node)}
 		fits, err := fit.PodFitsResources(test.pod, test.existingPods, "machine")
@@ -178,7 +186,7 @@ func TestPodFitsResources(t *testing.T) {
 		},
 	}
 	for _, test := range notEnoughPodsTests {
-		node := api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 1).Capacity}}
+		node := api.Node{Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1)}}
 		fit := ResourceFit{FakeNodeInfo(node)}
 		fits, err := fit.PodFitsResources(test.pod, test.existingPods, "machine")
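The notEnoughPodsTests change is the sharper of the two: Capacity is deliberately left as an empty api.ResourceList{} while the limits live only in Allocatable, so any code path still reading Capacity sees zero quantities and the tests catch the regression. A minimal sketch of what the empty list yields, using the same accessors as the diff:

```go
// With Capacity empty, every lookup returns a zero quantity; the real
// limit (1 pod here) exists only on Allocatable.
empty := api.ResourceList{}
_ = empty.Pods().Value() // 0 -- a predicate still reading Capacity would reject every pod

alloc := makeAllocatableResources(10, 20, 1)
_ = alloc.Pods().Value() // 1 -- the limit these tests actually exercise
```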