Mirror of https://github.com/k3s-io/kubernetes.git
Use Allocatable to replace Capacity
Use Allocatable instead of Capacity in the priority functions as well.

parent ad827c6b62
commit e64fe82245
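The distinction driving this change: a node's Capacity is the machine's total resources, while Allocatable is the portion actually available for scheduling pods (Capacity minus resources reserved for system and Kubernetes daemons). The following is a minimal, self-contained sketch of why the predicates and priorities should budget against Allocatable; the types are simplified stand-ins for illustration, not the scheduler's real api.NodeStatus.

```go
// Hypothetical, simplified types (not the real api.NodeStatus) illustrating the
// idea behind this commit: Allocatable = Capacity minus system reservations, so
// scheduling decisions should budget against Allocatable.
package main

import "fmt"

type resources struct {
	MilliCPU int64
	Memory   int64
	Pods     int64
}

type nodeStatus struct {
	Capacity    resources // everything the machine has
	Allocatable resources // what is left for pods after reservations
}

// podFits mirrors the predicate change in the hunks below:
// every comparison uses Allocatable, never Capacity.
func podFits(reqMilliCPU, reqMemory, runningPods int64, status nodeStatus) bool {
	if runningPods+1 > status.Allocatable.Pods {
		return false
	}
	return reqMilliCPU <= status.Allocatable.MilliCPU &&
		reqMemory <= status.Allocatable.Memory
}

func main() {
	status := nodeStatus{
		Capacity:    resources{MilliCPU: 4000, Memory: 8 << 30, Pods: 110},
		Allocatable: resources{MilliCPU: 3600, Memory: 7 << 30, Pods: 110},
	}
	// 3800m CPU fits under Capacity (4000m) but not under Allocatable (3600m),
	// so the pod is correctly rejected.
	fmt.Println(podFits(3800, 1<<30, 10, status)) // false
}
```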
@@ -266,9 +266,9 @@ func getResourceRequest(pod *api.Pod) resourceRequest {
 	return result
 }
 
-func CheckPodsExceedingFreeResources(pods []*api.Pod, capacity api.ResourceList) (fitting []*api.Pod, notFittingCPU, notFittingMemory []*api.Pod) {
-	totalMilliCPU := capacity.Cpu().MilliValue()
-	totalMemory := capacity.Memory().Value()
+func CheckPodsExceedingFreeResources(pods []*api.Pod, allocatable api.ResourceList) (fitting []*api.Pod, notFittingCPU, notFittingMemory []*api.Pod) {
+	totalMilliCPU := allocatable.Cpu().MilliValue()
+	totalMemory := allocatable.Memory().Value()
 	milliCPURequested := int64(0)
 	memoryRequested := int64(0)
 	for _, pod := range pods {
@@ -304,8 +304,10 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
 		return false, err
 	}
 
-	if int64(len(existingPods))+1 > info.Status.Capacity.Pods().Value() {
-		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %+v is full, running %v out of %v Pods.", podName(pod), node, len(existingPods), info.Status.Capacity.Pods().Value())
+	allocatable := info.Status.Allocatable
+
+	if int64(len(existingPods))+1 > allocatable.Pods().Value() {
+		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %+v is full, running %v out of %v Pods.", podName(pod), node, len(existingPods), allocatable.Pods().Value())
 		return false, ErrExceededMaxPodNumber
 	}
 
@@ -315,7 +317,7 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
 	}
 
 	pods := append(existingPods, pod)
-	_, exceedingCPU, exceedingMemory := CheckPodsExceedingFreeResources(pods, info.Status.Capacity)
+	_, exceedingCPU, exceedingMemory := CheckPodsExceedingFreeResources(pods, allocatable)
 	if len(exceedingCPU) > 0 {
 		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %v does not have sufficient CPU", podName(pod), node)
 		return false, ErrInsufficientFreeCPU
@@ -324,7 +326,7 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, existingPods []*api.Pod, no
 		glog.V(10).Infof("Cannot schedule Pod %+v, because Node %v does not have sufficient Memory", podName(pod), node)
 		return false, ErrInsufficientFreeMemory
 	}
-	glog.V(10).Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.", podName(pod), node, len(pods)-1, info.Status.Capacity.Pods().Value())
+	glog.V(10).Infof("Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.", podName(pod), node, len(pods)-1, allocatable.Pods().Value())
 	return true, nil
 }
 
@@ -54,6 +54,14 @@ func makeResources(milliCPU int64, memory int64, pods int64) api.NodeResources {
 	}
 }
 
+func makeAllocatableResources(milliCPU int64, memory int64, pods int64) api.ResourceList {
+	return api.ResourceList{
+		api.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+		api.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
+		api.ResourcePods:   *resource.NewQuantity(pods, resource.DecimalSI),
+	}
+}
+
 func newResourcePod(usage ...resourceRequest) *api.Pod {
 	containers := []api.Container{}
 	for _, req := range usage {
@@ -130,7 +138,7 @@ func TestPodFitsResources(t *testing.T) {
 	}
 
 	for _, test := range enoughPodsTests {
-		node := api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity}}
+		node := api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 32).Capacity, Allocatable: makeAllocatableResources(10, 20, 32)}}
 
 		fit := ResourceFit{FakeNodeInfo(node)}
 		fits, err := fit.PodFitsResources(test.pod, test.existingPods, "machine")
@@ -178,7 +186,7 @@ func TestPodFitsResources(t *testing.T) {
 		},
 	}
 	for _, test := range notEnoughPodsTests {
-		node := api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 1).Capacity}}
+		node := api.Node{Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1)}}
 
 		fit := ResourceFit{FakeNodeInfo(node)}
 		fits, err := fit.PodFitsResources(test.pod, test.existingPods, "machine")
@@ -76,8 +76,8 @@ func getNonzeroRequests(requests *api.ResourceList) (int64, int64) {
 func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) schedulerapi.HostPriority {
 	totalMilliCPU := int64(0)
 	totalMemory := int64(0)
-	capacityMilliCPU := node.Status.Capacity.Cpu().MilliValue()
-	capacityMemory := node.Status.Capacity.Memory().Value()
+	capacityMilliCPU := node.Status.Allocatable.Cpu().MilliValue()
+	capacityMemory := node.Status.Allocatable.Memory().Value()
 
 	for _, existingPod := range pods {
 		for _, container := range existingPod.Spec.Containers {
@@ -208,8 +208,8 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
 		totalMemory += memory
 	}
 
-	capacityMilliCPU := node.Status.Capacity.Cpu().MilliValue()
-	capacityMemory := node.Status.Capacity.Memory().Value()
+	capacityMilliCPU := node.Status.Allocatable.Cpu().MilliValue()
+	capacityMemory := node.Status.Allocatable.Memory().Value()
 
 	cpuFraction := fractionOfCapacity(totalMilliCPU, capacityMilliCPU)
 	memoryFraction := fractionOfCapacity(totalMemory, capacityMemory)
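For context on the two priority hunks above: both calculateResourceOccupancy and calculateBalancedResourceAllocation turn requested resources into fractions of the node's budget, which is why switching the denominator from Capacity to Allocatable matters. The sketch below shows that fraction arithmetic with an assumed, illustrative balanced-allocation score (fractionOfBudget and balancedScore are hypothetical names, and the scoring rule is not a copy of the upstream formula).

```go
// Hedged sketch of the fraction arithmetic behind the priority hunks above.
// fractionOfBudget plays the role of fractionOfCapacity; balancedScore is an
// assumed scoring rule (closer CPU and memory fractions score higher).
package main

import (
	"fmt"
	"math"
)

func fractionOfBudget(requested, allocatable int64) float64 {
	if allocatable == 0 {
		return 1 // treat a zero budget as fully used
	}
	return float64(requested) / float64(allocatable)
}

func balancedScore(reqMilliCPU, reqMemory, allocMilliCPU, allocMemory int64) int {
	cpuFraction := fractionOfBudget(reqMilliCPU, allocMilliCPU)
	memoryFraction := fractionOfBudget(reqMemory, allocMemory)
	if cpuFraction >= 1 || memoryFraction >= 1 {
		return 0 // requests do not fit in the allocatable budget
	}
	// The smaller the gap between the two fractions, the better balanced the node.
	diff := math.Abs(cpuFraction - memoryFraction)
	return int((1 - diff) * 10)
}

func main() {
	// 2000m CPU and 4GiB memory on a node with 4000m / 8GiB allocatable:
	// both fractions are 0.5, so this node gets the top score of 10.
	fmt.Println(balancedScore(2000, 4<<30, 4000, 8<<30))
}
```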
@@ -38,6 +38,10 @@ func makeNode(node string, milliCPU, memory int64) api.Node {
 				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
 				"memory": *resource.NewQuantity(memory, resource.BinarySI),
 			},
+			Allocatable: api.ResourceList{
+				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				"memory": *resource.NewQuantity(memory, resource.BinarySI),
+			},
 		},
 	}
 }