Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-09-15 22:20:51 +00:00)
Adding ResourceRequirementSpec to v1beta1, v1beta2, and v1beta3 APIs. The old resource quantities 'CPU' and 'Memory' will be preserved until support for the v1beta1 and v1beta2 APIs is dropped. Improved resource validation in the process.
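The shape of the new field, reconstructed from how the hunks below use it — a sketch, not the full definition; the validation changes the message mentions are not shown here:

// Sketch reconstructed from usage in this diff; the field set may not be complete.
type ResourceRequirementSpec struct {
	// Limits is the maximum amount of each resource the container
	// may consume, keyed by resource name ("cpu", "memory").
	Limits ResourceList
}

type ResourceList map[ResourceName]resource.Quantity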
@@ -95,8 +95,9 @@ type resourceRequest struct {
 func getResourceRequest(pod *api.Pod) resourceRequest {
 	result := resourceRequest{}
 	for ix := range pod.Spec.Containers {
-		result.memory += pod.Spec.Containers[ix].Memory.Value()
-		result.milliCPU += pod.Spec.Containers[ix].CPU.MilliValue()
+		limits := pod.Spec.Containers[ix].Resources.Limits
+		result.memory += limits.Memory().Value()
+		result.milliCPU += limits.Cpu().MilliValue()
 	}
 	return result
 }
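The rewritten loop reads limits through accessor helpers on api.ResourceList instead of the dedicated CPU and Memory container fields. Those helpers are not part of this diff; a minimal sketch of the behavior the call sites rely on, assuming a missing key yields a zero-valued Quantity so sums and comparisons need no nil checks:

// Sketch only: the real helpers live alongside the api package.
// The names Cpu and Memory match the call sites in this diff.
func (rl ResourceList) Cpu() *resource.Quantity {
	if q, ok := rl[ResourceCPU]; ok {
		return &q
	}
	return &resource.Quantity{Format: resource.DecimalSI} // zero value contributes 0 to sums
}

func (rl ResourceList) Memory() *resource.Quantity {
	if q, ok := rl[ResourceMemory]; ok {
		return &q
	}
	return &resource.Quantity{Format: resource.BinarySI}
}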
@@ -120,8 +121,8 @@ func (r *ResourceFit) PodFitsResources(pod api.Pod, existingPods []api.Pod, node
 		memoryRequested += existingRequest.memory
 	}
 
-	totalMilliCPU := info.Spec.Capacity.Get(api.ResourceCPU).MilliValue()
-	totalMemory := info.Spec.Capacity.Get(api.ResourceMemory).Value()
+	totalMilliCPU := info.Spec.Capacity.Cpu().MilliValue()
+	totalMemory := info.Spec.Capacity.Memory().Value()
 
 	fitsCPU := totalMilliCPU == 0 || (totalMilliCPU-milliCPURequested) >= podRequest.milliCPU
 	fitsMemory := totalMemory == 0 || (totalMemory-memoryRequested) >= podRequest.memory
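Note that a zero capacity short-circuits the check, so a node that reports no capacity accepts any request. A worked example of the predicate with hypothetical numbers:

package main

import "fmt"

func main() {
	// Hypothetical numbers illustrating the fit check above.
	totalMilliCPU := int64(4000)     // node CPU capacity: 4 cores
	milliCPURequested := int64(3000) // already claimed by existing pods
	podRequest := int64(600)         // the new pod's CPU limit

	fitsCPU := totalMilliCPU == 0 || (totalMilliCPU-milliCPURequested) >= podRequest
	fmt.Println(fitsCPU) // true: 1000m free >= 600m requested; a 1500m request would not fit
}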
@@ -46,8 +46,8 @@ func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*api.Node, error) {
 func makeResources(milliCPU int64, memory int64) api.NodeResources {
 	return api.NodeResources{
 		Capacity: api.ResourceList{
-			api.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-			api.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
+			"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+			"memory": *resource.NewQuantity(memory, resource.BinarySI),
 		},
 	}
 }
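The map keys change from the api.ResourceCPU / api.ResourceMemory constants to the string literals "cpu" and "memory"; since ResourceName is a string type, the untyped constants convert implicitly and the entries are identical. For reference, a self-contained illustration of what the two Quantity constructors produce (the import path shown is the package's modern home; this tree carries it under pkg/api/resource):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	cpu := resource.NewMilliQuantity(250, resource.DecimalSI)    // a quarter core
	mem := resource.NewQuantity(64*1024*1024, resource.BinarySI) // 64 MiB
	fmt.Println(cpu.String(), mem.String()) // prints "250m 64Mi"
}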
@@ -56,8 +56,12 @@ func newResourcePod(usage ...resourceRequest) api.Pod {
 	containers := []api.Container{}
 	for _, req := range usage {
 		containers = append(containers, api.Container{
-			Memory: *resource.NewQuantity(req.memory, resource.BinarySI),
-			CPU:    *resource.NewMilliQuantity(req.milliCPU, resource.DecimalSI),
+			Resources: api.ResourceRequirementSpec{
+				Limits: api.ResourceList{
+					"cpu":    *resource.NewMilliQuantity(req.milliCPU, resource.DecimalSI),
+					"memory": *resource.NewQuantity(req.memory, resource.BinarySI),
+				},
+			},
 		})
 	}
 	return api.Pod{
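Under the updated helper, a call such as the following (hypothetical values, inside the same test file) yields containers whose only resource information lives under Resources.Limits:

// Hypothetical use of the test helper above:
pod := newResourcePod(resourceRequest{milliCPU: 500, memory: 1024})
limits := pod.Spec.Containers[0].Resources.Limits
// limits.Cpu().MilliValue() == 500, limits.Memory().Value() == 1024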
@@ -42,19 +42,19 @@ func calculateOccupancy(pod api.Pod, node api.Node, pods []api.Pod) HostPriority
 	totalMemory := int64(0)
 	for _, existingPod := range pods {
 		for _, container := range existingPod.Spec.Containers {
-			totalMilliCPU += container.CPU.MilliValue()
-			totalMemory += container.Memory.Value()
+			totalMilliCPU += container.Resources.Limits.Cpu().MilliValue()
+			totalMemory += container.Resources.Limits.Memory().Value()
 		}
 	}
 	// Add the resources requested by the current pod being scheduled.
 	// This also helps differentiate between differently sized, but empty, minions.
 	for _, container := range pod.Spec.Containers {
-		totalMilliCPU += container.CPU.MilliValue()
-		totalMemory += container.Memory.Value()
+		totalMilliCPU += container.Resources.Limits.Cpu().MilliValue()
+		totalMemory += container.Resources.Limits.Memory().Value()
 	}
 
-	capacityMilliCPU := node.Spec.Capacity.Get(api.ResourceCPU).MilliValue()
-	capacityMemory := node.Spec.Capacity.Get(api.ResourceMemory).Value()
+	capacityMilliCPU := node.Spec.Capacity.Cpu().MilliValue()
+	capacityMemory := node.Spec.Capacity.Memory().Value()
 
 	cpuScore := calculateScore(totalMilliCPU, capacityMilliCPU, node.Name)
 	memoryScore := calculateScore(totalMemory, capacityMemory, node.Name)
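calculateScore itself is outside this hunk. Assuming it scored each resource as the unused fraction of capacity scaled to 0-10, which is how the least-requested priority of this era is commonly described, a sketch with one worked number:

// Assumed formula, not taken from this diff:
// score = ((capacity - requested) * 10) / capacity, and 0 when capacity is 0.
func score(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return ((capacity - requested) * 10) / capacity
}

// score(3000, 4000) == 2: 1000m of 4000m free is 25%, and the
// integer division 10000/4000 truncates 2.5 down to 2.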
@@ -30,8 +30,8 @@ func makeMinion(node string, milliCPU, memory int64) api.Node {
 		ObjectMeta: api.ObjectMeta{Name: node},
 		Spec: api.NodeSpec{
 			Capacity: api.ResourceList{
-				api.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
-				api.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
+				"cpu":    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				"memory": *resource.NewQuantity(memory, resource.BinarySI),
 			},
 		},
 	}
@@ -57,14 +57,40 @@ func TestLeastRequested(t *testing.T) {
 	}
 	cpuOnly := api.PodSpec{
 		Containers: []api.Container{
-			{CPU: resource.MustParse("1000m")},
-			{CPU: resource.MustParse("2000m")},
+			{
+				Resources: api.ResourceRequirementSpec{
+					Limits: api.ResourceList{
+						"cpu": resource.MustParse("1000m"),
+					},
+				},
+			},
+			{
+				Resources: api.ResourceRequirementSpec{
+					Limits: api.ResourceList{
+						"cpu": resource.MustParse("2000m"),
+					},
+				},
+			},
 		},
 	}
 	cpuAndMemory := api.PodSpec{
 		Containers: []api.Container{
-			{CPU: resource.MustParse("1000m"), Memory: resource.MustParse("2000")},
-			{CPU: resource.MustParse("2000m"), Memory: resource.MustParse("3000")},
+			{
+				Resources: api.ResourceRequirementSpec{
+					Limits: api.ResourceList{
+						"cpu":    resource.MustParse("1000m"),
+						"memory": resource.MustParse("2000"),
+					},
+				},
+			},
+			{
+				Resources: api.ResourceRequirementSpec{
+					Limits: api.ResourceList{
+						"cpu":    resource.MustParse("2000m"),
+						"memory": resource.MustParse("3000"),
+					},
+				},
+			},
 		},
 	}
 	tests := []struct {
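In these fixtures "1000m" parses to 1000 milliCPU and the suffix-less "2000" to 2000 base units (bytes) of memory, so cpuOnly requests 3000m CPU in total and cpuAndMemory requests 3000m CPU plus 5000 bytes:

// Quantity parsing used by the fixtures above:
resource.MustParse("1000m").MilliValue() // == 1000
resource.MustParse("2000").Value()       // == 2000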