Mirror of https://github.com/k3s-io/kubernetes.git
Put quantity into packages
kubelet, GCE, validation, client
@@ -22,7 +22,6 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/resources"
 	"github.com/golang/glog"
 )
@@ -89,15 +88,15 @@ type ResourceFit struct {
 }
 
 type resourceRequest struct {
-	milliCPU int
-	memory   int
+	milliCPU int64
+	memory   int64
 }
 
 func getResourceRequest(pod *api.Pod) resourceRequest {
 	result := resourceRequest{}
 	for ix := range pod.Spec.Containers {
-		result.memory += pod.Spec.Containers[ix].Memory
-		result.milliCPU += pod.Spec.Containers[ix].CPU
+		result.memory += pod.Spec.Containers[ix].Memory.Value()
+		result.milliCPU += pod.Spec.Containers[ix].CPU.MilliValue()
 	}
 	return result
 }
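The switch from plain ints to resource.Quantity is the heart of this hunk. A minimal sketch of the accessor semantics, written against today's k8s.io/apimachinery/pkg/api/resource (the modern home of the pkg/api/resource package introduced here); the values are purely illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	cpu := resource.MustParse("250m") // a quarter of a core
	mem := resource.MustParse("64Mi")

	// MilliValue reports thousandths of a unit, so fractional CPU stays exact.
	fmt.Println(cpu.MilliValue()) // 250
	// Value rounds fractions up to whole units: "250m" becomes 1.
	fmt.Println(cpu.Value()) // 1
	// For memory, the whole-unit value is the byte count.
	fmt.Println(mem.Value()) // 67108864
}

This is why the diff sums CPU with MilliValue() but memory with Value(): milli-units preserve fractional cores, while whole bytes lose nothing for memory.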
@@ -113,17 +112,16 @@ func (r *ResourceFit) PodFitsResources(pod api.Pod, existingPods []api.Pod, node
 	if err != nil {
 		return false, err
 	}
-	milliCPURequested := 0
-	memoryRequested := 0
+	milliCPURequested := int64(0)
+	memoryRequested := int64(0)
 	for ix := range existingPods {
 		existingRequest := getResourceRequest(&existingPods[ix])
 		milliCPURequested += existingRequest.milliCPU
 		memoryRequested += existingRequest.memory
 	}
 
 	// TODO: convert to general purpose resource matching, when pods ask for resources
-	totalMilliCPU := int(resources.GetFloatResource(info.Spec.Capacity, resources.CPU, 0) * 1000)
-	totalMemory := resources.GetIntegerResource(info.Spec.Capacity, resources.Memory, 0)
+	totalMilliCPU := info.Spec.Capacity.Get(api.ResourceCPU).MilliValue()
+	totalMemory := info.Spec.Capacity.Get(api.ResourceMemory).Value()
 
 	fitsCPU := totalMilliCPU == 0 || (totalMilliCPU-milliCPURequested) >= podRequest.milliCPU
 	fitsMemory := totalMemory == 0 || (totalMemory-memoryRequested) >= podRequest.memory
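For concreteness, a standalone sketch of the fit arithmetic above with hypothetical numbers (the variable names mirror the diff; nothing here comes from the real scheduler):

package main

import "fmt"

func main() {
	totalMilliCPU := int64(2000)     // a 2-core node
	milliCPURequested := int64(1500) // already claimed by existing pods
	podMilliCPU := int64(600)        // the incoming pod's request

	// Zero capacity means "unknown", which the predicate treats as fitting.
	fitsCPU := totalMilliCPU == 0 || (totalMilliCPU-milliCPURequested) >= podMilliCPU
	fmt.Println(fitsCPU) // false: only 500 milliCPU remain free
}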
@@ -21,8 +21,7 @@ import (
 	"testing"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/resources"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
 )
 
 type FakeNodeInfo api.Node
@@ -32,17 +31,11 @@ func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*api.Node, error) {
 	return &node, nil
 }
 
-func makeResources(milliCPU int, memory int) api.NodeResources {
+func makeResources(milliCPU int64, memory int64) api.NodeResources {
 	return api.NodeResources{
 		Capacity: api.ResourceList{
-			resources.CPU: util.IntOrString{
-				IntVal: milliCPU,
-				Kind:   util.IntstrInt,
-			},
-			resources.Memory: util.IntOrString{
-				IntVal: memory,
-				Kind:   util.IntstrInt,
-			},
+			api.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+			api.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
 		},
 	}
 }
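The two constructors fix the unit at creation time: NewMilliQuantity stores a count of milli-units, NewQuantity a count of whole units, and the format argument (DecimalSI vs. BinarySI) only controls how the value is printed later. A small sketch against today's apimachinery package, with made-up amounts:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	cpu := resource.NewMilliQuantity(4000, resource.DecimalSI) // 4 cores
	mem := resource.NewQuantity(2048, resource.BinarySI)       // 2048 bytes

	// DecimalSI canonicalizes 4000m to "4"; BinarySI picks a power-of-two suffix.
	fmt.Println(cpu.String(), cpu.MilliValue()) // "4" 4000
	fmt.Println(mem.String(), mem.Value())      // "2Ki" 2048
}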
@@ -51,8 +44,8 @@ func newResourcePod(usage ...resourceRequest) api.Pod {
 	containers := []api.Container{}
 	for _, req := range usage {
 		containers = append(containers, api.Container{
-			Memory: req.memory,
-			CPU:    req.milliCPU,
+			Memory: *resource.NewQuantity(req.memory, resource.BinarySI),
+			CPU:    *resource.NewMilliQuantity(req.milliCPU, resource.DecimalSI),
 		})
 	}
 	return api.Pod{
@@ -18,13 +18,12 @@ package scheduler
 
 import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/resources"
 	"github.com/golang/glog"
 )
 
 // the unused capacity is calculated on a scale of 0-10
 // 0 being the lowest priority and 10 being the highest
-func calculateScore(requested, capacity int, node string) int {
+func calculateScore(requested, capacity int64, node string) int {
 	if capacity == 0 {
 		return 0
 	}
@@ -32,30 +31,39 @@ func calculateScore(requested, capacity int, node string) int {
 		glog.Errorf("Combined requested resources from existing pods exceeds capacity on minion: %s", node)
 		return 0
 	}
-	return ((capacity - requested) * 10) / capacity
+	return int(((capacity - requested) * 10) / capacity)
 }
 
 // Calculate the occupancy on a node. 'node' has information about the resources on the node.
 // 'pods' is a list of pods currently scheduled on the node.
 func calculateOccupancy(pod api.Pod, node api.Node, pods []api.Pod) HostPriority {
-	totalCPU := 0
-	totalMemory := 0
+	totalMilliCPU := int64(0)
+	totalMemory := int64(0)
 	for _, existingPod := range pods {
 		for _, container := range existingPod.Spec.Containers {
-			totalCPU += container.CPU
-			totalMemory += container.Memory
+			totalMilliCPU += container.CPU.MilliValue()
+			totalMemory += container.Memory.Value()
 		}
 	}
 	// Add the resources requested by the current pod being scheduled.
 	// This also helps differentiate between differently sized, but empty, minions.
 	for _, container := range pod.Spec.Containers {
-		totalCPU += container.CPU
-		totalMemory += container.Memory
+		totalMilliCPU += container.CPU.MilliValue()
+		totalMemory += container.Memory.Value()
 	}
 
-	cpuScore := calculateScore(totalCPU, resources.GetIntegerResource(node.Spec.Capacity, resources.CPU, 0), node.Name)
-	memoryScore := calculateScore(totalMemory, resources.GetIntegerResource(node.Spec.Capacity, resources.Memory, 0), node.Name)
-	glog.V(4).Infof("Least Requested Priority, AbsoluteRequested: (%d, %d) Score:(%d, %d)", totalCPU, totalMemory, cpuScore, memoryScore)
+	capacityMilliCPU := node.Spec.Capacity.Get(api.ResourceCPU).MilliValue()
+	capacityMemory := node.Spec.Capacity.Get(api.ResourceMemory).Value()
+
+	cpuScore := calculateScore(totalMilliCPU, capacityMilliCPU, node.Name)
+	memoryScore := calculateScore(totalMemory, capacityMemory, node.Name)
+	glog.V(4).Infof(
+		"%v -> %v: Least Requested Priority, AbsoluteRequested: (%d, %d) / (%d, %d) Score: (%d, %d)",
+		pod.Name, node.Name,
+		totalMilliCPU, totalMemory,
+		capacityMilliCPU, capacityMemory,
+		cpuScore, memoryScore,
+	)
 
 	return HostPriority{
 		host: node.Name,
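A worked example of the score, with hypothetical numbers: a node with 4000 milliCPU of capacity and 1000 milliCPU requested scores ((4000 - 1000) * 10) / 4000 = 7 (integer division floors the exact 7.5), so emptier nodes rank higher on the 0-10 scale. Doing the arithmetic in int64 keeps the *10 intermediate safe for any realistic capacity; only the finished 0-10 result is narrowed back to int.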
@@ -21,17 +21,16 @@ import (
 	"testing"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/resources"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
 )
 
-func makeMinion(node string, cpu, memory int) api.Node {
+func makeMinion(node string, milliCPU, memory int64) api.Node {
 	return api.Node{
 		ObjectMeta: api.ObjectMeta{Name: node},
 		Spec: api.NodeSpec{
 			Capacity: api.ResourceList{
-				resources.CPU:    util.NewIntOrStringFromInt(cpu),
-				resources.Memory: util.NewIntOrStringFromInt(memory),
+				api.ResourceCPU:    *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+				api.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI),
 			},
 		},
 	}
@@ -57,14 +56,14 @@ func TestLeastRequested(t *testing.T) {
 	}
 	cpuOnly := api.PodSpec{
 		Containers: []api.Container{
-			{CPU: 1000},
-			{CPU: 2000},
+			{CPU: *resource.Q("1000m")},
+			{CPU: *resource.Q("2000m")},
 		},
 	}
 	cpuAndMemory := api.PodSpec{
 		Containers: []api.Container{
-			{CPU: 1000, Memory: 2000},
-			{CPU: 2000, Memory: 3000},
+			{CPU: *resource.Q("1000m"), Memory: *resource.Q("2000")},
+			{CPU: *resource.Q("2000m"), Memory: *resource.Q("3000")},
 		},
 	}
 	tests := []struct {
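The resource.Q helper in these tests evidently parses a quantity string into a Quantity; in today's apimachinery the equivalent is resource.MustParse. Note the two spellings: "1000m" is 1000 milli-units (one full CPU), while a bare "2000" is 2000 whole units (here, bytes of memory). A quick sketch of the parsing, with illustrative values:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	cpu := resource.MustParse("1000m") // milli-suffix: 1000 milli-units
	mem := resource.MustParse("2000")  // bare number: 2000 whole units

	fmt.Println(cpu.MilliValue()) // 1000
	fmt.Println(mem.Value())      // 2000
}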