Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-09-13 13:14:05 +00:00
Add resource fit predicates.
@@ -19,8 +19,62 @@ package scheduler

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/resources"
	"github.com/golang/glog"
)

type NodeInfo interface {
	GetNodeInfo(nodeName string) (api.Minion, error)
}
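
// Keeping node lookup behind this narrow interface lets tests substitute a
// fake implementation (see FakeNodeInfo in the test file below).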

type ResourceFit struct {
	info NodeInfo
}

type resourceRequest struct {
	milliCPU int
	memory   int
}

func getResourceRequest(pod *api.Pod) resourceRequest {
	result := resourceRequest{}
	for ix := range pod.DesiredState.Manifest.Containers {
		result.memory += pod.DesiredState.Manifest.Containers[ix].Memory
		result.milliCPU += pod.DesiredState.Manifest.Containers[ix].CPU
	}
	return result
}
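
// Example (illustrative numbers, not taken from the commit): a pod with two
// containers requesting CPU 100 / Memory 5 and CPU 250 / Memory 10 sums to
// resourceRequest{milliCPU: 350, memory: 15}.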

// PodFitsResources calculates fit based on requested, rather than used, resources.
func (r *ResourceFit) PodFitsResources(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
	podRequest := getResourceRequest(&pod)
	if podRequest.milliCPU == 0 && podRequest.memory == 0 {
		// no resources requested always fits.
		return true, nil
	}
	info, err := r.info.GetNodeInfo(node)
	if err != nil {
		return false, err
	}
	milliCPURequested := 0
	memoryRequested := 0
	for ix := range existingPods {
		existingRequest := getResourceRequest(&existingPods[ix])
		milliCPURequested += existingRequest.milliCPU
		memoryRequested += existingRequest.memory
	}

	// TODO: convert to general purpose resource matching, when pods ask for resources
	totalMilliCPU := int(resources.GetFloatResource(info.NodeResources.Capacity, resources.CPU, 0) * 1000)
	totalMemory := resources.GetIntegerResource(info.NodeResources.Capacity, resources.Memory, 0)

	// A total of 0 means the node did not report capacity for that resource;
	// such nodes are treated as always fitting.
	fitsCPU := totalMilliCPU == 0 || (totalMilliCPU-milliCPURequested) >= podRequest.milliCPU
	fitsMemory := totalMemory == 0 || (totalMemory-memoryRequested) >= podRequest.memory
	glog.V(3).Infof("Calculated fit: cpu: %v, memory: %v", fitsCPU, fitsMemory)

	return fitsCPU && fitsMemory, nil
}
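
// Worked example, using the figures from the tests below: with node capacity
// makeResources(10, 20) and one existing pod requesting milliCPU 5 / memory 19,
// a new pod asking for memory 1 fits (20-19 >= 1), while one asking for
// memory 2 does not; memory is the binding constraint in that case.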

func PodFitsPorts(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
	for _, scheduledPod := range existingPods {
		for _, container := range pod.DesiredState.Manifest.Containers {
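
The rendered diff breaks off mid-function here. Purely as a sketch, assuming each api.Container carries a Ports slice whose entries expose an integer HostPort field (neither name is confirmed by the visible hunk), a host-port conflict check along these lines would complete the predicate:

	// podFitsPortsSketch is a hedged reconstruction, not the committed code.
	// Ports and HostPort are assumed field names.
	func podFitsPortsSketch(pod api.Pod, existingPods []api.Pod) bool {
		// Collect every host port already claimed on the node.
		used := map[int]bool{}
		for _, scheduled := range existingPods {
			for _, container := range scheduled.DesiredState.Manifest.Containers {
				for _, port := range container.Ports {
					if port.HostPort != 0 {
						used[port.HostPort] = true
					}
				}
			}
		}
		// Reject the pod if any of its requested host ports collides.
		for _, container := range pod.DesiredState.Manifest.Containers {
			for _, port := range container.Ports {
				if port.HostPort != 0 && used[port.HostPort] {
					return false
				}
			}
		}
		return true
	}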
@@ -20,8 +20,110 @@ import (
	"testing"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/resources"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)

type FakeNodeInfo api.Minion

func (n FakeNodeInfo) GetNodeInfo(nodeName string) (api.Minion, error) {
	return api.Minion(n), nil
}

func makeResources(milliCPU int, memory int) api.NodeResources {
	return api.NodeResources{
		Capacity: api.ResourceList{
			resources.CPU: util.IntOrString{
				IntVal: milliCPU,
				Kind:   util.IntstrInt,
			},
			resources.Memory: util.IntOrString{
				IntVal: memory,
				Kind:   util.IntstrInt,
			},
		},
	}
}
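
// As used in the tests below, makeResources(10, 20) therefore advertises a
// node capacity of CPU 10 and Memory 20.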

func newResourcePod(usage ...resourceRequest) api.Pod {
	containers := []api.Container{}
	for _, req := range usage {
		containers = append(containers, api.Container{
			Memory: req.memory,
			CPU:    req.milliCPU,
		})
	}
	return api.Pod{
		DesiredState: api.PodState{
			Manifest: api.ContainerManifest{
				Containers: containers,
			},
		},
	}
}

func TestPodFitsResources(t *testing.T) {
	tests := []struct {
		pod          api.Pod
		existingPods []api.Pod
		fits         bool
		test         string
	}{
		{
			pod: api.Pod{},
			existingPods: []api.Pod{
				newResourcePod(resourceRequest{milliCPU: 10, memory: 20}),
			},
			fits: true,
			test: "no resources requested always fits",
		},
		{
			pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
			existingPods: []api.Pod{
				newResourcePod(resourceRequest{milliCPU: 10, memory: 20}),
			},
			fits: false,
			test: "too many resources fails",
		},
		{
			pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 1}),
			existingPods: []api.Pod{
				newResourcePod(resourceRequest{milliCPU: 5, memory: 5}),
			},
			fits: true,
			test: "both resources fit",
		},
		{
			pod: newResourcePod(resourceRequest{milliCPU: 1, memory: 2}),
			existingPods: []api.Pod{
				newResourcePod(resourceRequest{milliCPU: 5, memory: 19}),
			},
			fits: false,
			test: "one resource fits",
		},
		{
			pod: newResourcePod(resourceRequest{milliCPU: 5, memory: 1}),
			existingPods: []api.Pod{
				newResourcePod(resourceRequest{milliCPU: 5, memory: 19}),
			},
			fits: true,
			test: "equal edge case",
		},
	}
	for _, test := range tests {
		node := api.Minion{NodeResources: makeResources(10, 20)}

		fit := ResourceFit{FakeNodeInfo(node)}
		fits, err := fit.PodFitsResources(test.pod, test.existingPods, "machine")
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if fits != test.fits {
			t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
		}
	}
}

func TestPodFitsPorts(t *testing.T) {
	tests := []struct {
		pod api.Pod
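
The test hunk is likewise truncated here. Both predicates share the signature func(api.Pod, []api.Pod, string) (bool, error), visible in the hunks above. As a hedged sketch of how a scheduler might consume them (checkAllPredicates and nodeInfoSource below are illustrative names, not part of this commit):

	// checkAllPredicates reports whether the pod passes every fit predicate
	// for the given node, stopping at the first failure or error.
	func checkAllPredicates(pod api.Pod, existingPods []api.Pod, node string,
		predicates []func(api.Pod, []api.Pod, string) (bool, error)) (bool, error) {
		for _, predicate := range predicates {
			fits, err := predicate(pod, existingPods, node)
			if err != nil || !fits {
				return false, err
			}
		}
		return true, nil
	}

A caller would build the slice as []func(api.Pod, []api.Pod, string) (bool, error){fit.PodFitsResources, PodFitsPorts}, with fit := &ResourceFit{info: nodeInfoSource} for any NodeInfo implementation.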