Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-24 04:06:03 +00:00)

Merge pull request #1604 from brendandburns/resource

Add a least-requested priority function

commit 1a1b0699bc
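
This PR adds LeastRequestedPriority (pkg/scheduler/priorities.go, with tests) and, to support it, changes the scheduler's minion plumbing from bare []string machine names to api.MinionList. For orientation, every priority function touched in the diff shares one shape; the named type below is a sketch for illustration only (the PR itself declares no such type):

    // Sketch, not part of the PR: the common shape of EqualPriority,
    // CalculateSpreadPriority, and the new LeastRequestedPriority.
    type PriorityFunction func(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error)

    // Illustrative compile-time check that the new function fits that shape.
    var _ PriorityFunction = LeastRequestedPriority
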
@@ -63,18 +63,18 @@ func (g *genericScheduler) selectHost(priorityList HostPriorityList) (string, er
 	return hosts[ix], nil
 }
 
-func findNodesThatFit(pod api.Pod, podLister PodLister, predicates []FitPredicate, nodes []string) ([]string, error) {
-	filtered := []string{}
+func findNodesThatFit(pod api.Pod, podLister PodLister, predicates []FitPredicate, nodes api.MinionList) (api.MinionList, error) {
+	filtered := []api.Minion{}
 	machineToPods, err := MapPodsToMachines(podLister)
 	if err != nil {
-		return nil, err
+		return api.MinionList{}, err
 	}
-	for _, node := range nodes {
+	for _, node := range nodes.Items {
 		fits := true
 		for _, predicate := range predicates {
-			fit, err := predicate(pod, machineToPods[node], node)
+			fit, err := predicate(pod, machineToPods[node.ID], node.ID)
 			if err != nil {
-				return nil, err
+				return api.MinionList{}, err
 			}
 			if !fit {
 				fits = false
@@ -85,7 +85,7 @@ func findNodesThatFit(pod api.Pod, podLister PodLister, predicates []FitPredicat
 			filtered = append(filtered, node)
 		}
 	}
-	return filtered, nil
+	return api.MinionList{Items: filtered}, nil
 }
 
 func getMinHosts(list HostPriorityList) []string {
@@ -109,9 +109,9 @@ func EqualPriority(pod api.Pod, podLister PodLister, minionLister MinionLister)
 		fmt.Errorf("failed to list nodes: %v", err)
 		return []HostPriority{}, err
 	}
-	for _, minion := range nodes {
+	for _, minion := range nodes.Items {
 		result = append(result, HostPriority{
-			host:  minion,
+			host:  minion.ID,
 			score: 1,
 		})
 	}
@@ -45,19 +45,29 @@ func numericPriority(pod api.Pod, podLister PodLister, minionLister MinionLister
 		fmt.Errorf("failed to list nodes: %v", err)
 		return nil, err
 	}
-	for _, minion := range nodes {
-		score, err := strconv.Atoi(minion)
+	for _, minion := range nodes.Items {
+		score, err := strconv.Atoi(minion.ID)
 		if err != nil {
 			return nil, err
 		}
 		result = append(result, HostPriority{
-			host:  minion,
+			host:  minion.ID,
 			score: score,
 		})
 	}
 	return result, nil
 }
 
+func makeMinionList(nodeNames []string) api.MinionList {
+	result := api.MinionList{
+		Items: make([]api.Minion, len(nodeNames)),
+	}
+	for ix := range nodeNames {
+		result.Items[ix].ID = nodeNames[ix]
+	}
+	return result
+}
+
 func TestGenericScheduler(t *testing.T) {
 	tests := []struct {
 		predicates []FitPredicate
@@ -112,7 +122,7 @@ func TestGenericScheduler(t *testing.T) {
 	for _, test := range tests {
 		random := rand.New(rand.NewSource(0))
 		scheduler := NewGenericScheduler(test.predicates, test.prioritizer, FakePodLister([]api.Pod{}), random)
-		machine, err := scheduler.Schedule(test.pod, FakeMinionLister(test.nodes))
+		machine, err := scheduler.Schedule(test.pod, FakeMinionLister(makeMinionList(test.nodes)))
 		if test.expectsErr {
 			if err == nil {
 				t.Error("Unexpected non-error")
@@ -23,15 +23,15 @@ import (
 
 // MinionLister interface represents anything that can list minions for a scheduler.
 type MinionLister interface {
-	List() (machines []string, err error)
+	List() (list api.MinionList, err error)
 }
 
 // FakeMinionLister implements MinionLister on a []string for test purposes.
-type FakeMinionLister []string
+type FakeMinionLister api.MinionList
 
 // List returns minions as a []string.
-func (f FakeMinionLister) List() ([]string, error) {
-	return []string(f), nil
+func (f FakeMinionLister) List() (api.MinionList, error) {
+	return api.MinionList(f), nil
 }
 
 // PodLister interface represents anything that can list pods for a scheduler.
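
The lister change above ripples through every caller in the diff: List() now returns an api.MinionList whose Items are full api.Minion objects rather than bare machine-name strings, so callers read node.ID instead of the string itself. A minimal sketch of the adaptation pattern (the helper name listMinionIDs is hypothetical, not part of the PR):

    // Sketch: recover the old []string view from the new MinionList contract.
    func listMinionIDs(lister MinionLister) ([]string, error) {
        minions, err := lister.List()
        if err != nil {
            return nil, err
        }
        ids := make([]string, 0, len(minions.Items))
        for _, m := range minions.Items {
            ids = append(ids, m.ID)
        }
        return ids, nil
    }

The same pattern appears in the TestStoreToMinionLister change further down.
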
pkg/scheduler/priorities.go (new file, 63 lines)
@@ -0,0 +1,63 @@
|
||||
/*
|
||||
Copyright 2014 Google Inc. All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package scheduler
|
||||
|
||||
import (
|
||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
|
||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/resources"
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// Calculate the occupancy on a node. 'node' has information about the resources on the node.
|
||||
// 'pods' is a list of pods currently scheduled on the node.
|
||||
func calculateOccupancy(node api.Minion, pods []api.Pod) HostPriority {
|
||||
totalCPU := 0
|
||||
totalMemory := 0
|
||||
for ix := range pods {
|
||||
for cIx := range pods[ix].DesiredState.Manifest.Containers {
|
||||
container := &(pods[ix].DesiredState.Manifest.Containers[cIx])
|
||||
totalCPU += container.CPU
|
||||
totalMemory += container.Memory
|
||||
}
|
||||
}
|
||||
percentageCPU := (totalCPU * 100) / resources.GetIntegerResource(node.NodeResources.Capacity, resources.CPU, 0)
|
||||
percentageMemory := (totalMemory * 100) / resources.GetIntegerResource(node.NodeResources.Capacity, resources.Memory, 0)
|
||||
+	glog.V(4).Infof("Least Requested Priority, AbsoluteRequested: (%d, %d) Percentage: (%d%%, %d%%)", totalCPU, totalMemory, percentageCPU, percentageMemory)
+
+	return HostPriority{
+		host:  node.ID,
+		score: int((percentageCPU + percentageMemory) / 2),
+	}
+}
+
+// LeastRequestedPriority is a priority function that favors nodes with fewer requested resources.
+// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
+// based on the minimum of the average of the fraction of requested to capacity.
+// Details: (Sum(requested cpu) / Capacity + Sum(requested memory) / Capacity) * 50
+func LeastRequestedPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+	nodes, err := minionLister.List()
+	if err != nil {
+		return HostPriorityList{}, err
+	}
+	podsToMachines, err := MapPodsToMachines(podLister)
+
+	list := HostPriorityList{}
+	for _, node := range nodes.Items {
+		list = append(list, calculateOccupancy(node, podsToMachines[node.ID]))
+	}
+	return list, nil
+}
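
To make the scoring formula concrete, here is the arithmetic for the two machines exercised in the new test below (both with capacity 4000 CPU and 10000 memory; machine1 runs the cpuOnly pod, machine2 the cpuAndMemory pod):

    machine1: requested 3000 CPU, 0 memory
              percentageCPU = 3000*100/4000 = 75, percentageMemory = 0
              score = (75 + 0) / 2 = 37
    machine2: requested 3000 CPU, 5000 memory
              percentageCPU = 3000*100/4000 = 75, percentageMemory = 5000*100/10000 = 50
              score = (75 + 50) / 2 = 62

Since the generic scheduler's selectHost draws candidates from getMinHosts, lower scores appear to win in this version, so the emptier machine1 would be preferred.
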
pkg/scheduler/priorities_test.go (new file, 114 lines)
@@ -0,0 +1,114 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package scheduler
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/resources"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+)
+
+func makeMinion(node string, cpu, memory int) api.Minion {
+	return api.Minion{
+		TypeMeta: api.TypeMeta{ID: node},
+		NodeResources: api.NodeResources{
+			Capacity: api.ResourceList{
+				resources.CPU:    util.NewIntOrStringFromInt(cpu),
+				resources.Memory: util.NewIntOrStringFromInt(memory),
+			},
+		},
+	}
+}
+
+func TestLeastRequested(t *testing.T) {
+	labels1 := map[string]string{
+		"foo": "bar",
+		"baz": "blah",
+	}
+	labels2 := map[string]string{
+		"bar": "foo",
+		"baz": "blah",
+	}
+	machine1State := api.PodState{
+		Host: "machine1",
+	}
+	machine2State := api.PodState{
+		Host: "machine2",
+	}
+	cpuOnly := api.PodState{
+		Manifest: api.ContainerManifest{
+			Containers: []api.Container{
+				{CPU: 1000},
+				{CPU: 2000},
+			},
+		},
+	}
+	cpuAndMemory := api.PodState{
+		Manifest: api.ContainerManifest{
+			Containers: []api.Container{
+				{CPU: 1000, Memory: 2000},
+				{CPU: 2000, Memory: 3000},
+			},
+		},
+	}
+	tests := []struct {
+		pod          api.Pod
+		pods         []api.Pod
+		nodes        []api.Minion
+		expectedList HostPriorityList
+		test         string
+	}{
+		{
+			nodes: []api.Minion{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+			expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
+			test: "nothing scheduled",
+		},
+		{
+			nodes: []api.Minion{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+			expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
+			test: "no resources requested",
+			pods: []api.Pod{
+				{CurrentState: machine1State, Labels: labels2},
+				{CurrentState: machine1State, Labels: labels1},
+				{CurrentState: machine2State, Labels: labels1},
+				{CurrentState: machine2State, Labels: labels1},
+			},
+		},
+		{
+			nodes: []api.Minion{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+			expectedList: []HostPriority{{"machine1", 37 /* (75 + 0) / 2 */}, {"machine2", 62 /* (75 + 50) / 2 */}},
+			test: "no resources requested",
+			pods: []api.Pod{
+				{DesiredState: cpuOnly, CurrentState: machine1State},
+				{DesiredState: cpuAndMemory, CurrentState: machine2State},
+			},
+		},
+	}
+
+	for _, test := range tests {
+		list, err := LeastRequestedPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(api.MinionList{Items: test.nodes}))
+		if err != nil {
+			t.Errorf("unexpected error: %v", err)
+		}
+		if !reflect.DeepEqual(test.expectedList, list) {
+			t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
+		}
+	}
+}
@@ -43,8 +43,8 @@ func CalculateSpreadPriority(pod api.Pod, podLister PodLister, minionLister Mini
 	}
 
 	result := []HostPriority{}
-	for _, minion := range minions {
-		result = append(result, HostPriority{host: minion, score: counts[minion]})
+	for _, minion := range minions.Items {
+		result = append(result, HostPriority{host: minion.ID, score: counts[minion.ID]})
 	}
 	return result, nil
 }
@@ -100,7 +100,7 @@ func TestSpreadPriority(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		list, err := CalculateSpreadPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(test.nodes))
+		list, err := CalculateSpreadPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(makeMinionList(test.nodes)))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -202,9 +202,9 @@ type storeToMinionLister struct {
 	cache.Store
 }
 
-func (s *storeToMinionLister) List() (machines []string, err error) {
+func (s *storeToMinionLister) List() (machines api.MinionList, err error) {
 	for _, m := range s.Store.List() {
-		machines = append(machines, m.(*api.Minion).ID)
+		machines.Items = append(machines.Items, *(m.(*api.Minion)))
 	}
 	return machines, nil
 }
@@ -223,10 +223,14 @@ func TestStoreToMinionLister(t *testing.T) {
 	}
 	sml := storeToMinionLister{store}
 
-	got, err := sml.List()
+	gotNodes, err := sml.List()
 	if err != nil {
 		t.Fatalf("Unexpected error: %v", err)
 	}
+	got := make([]string, len(gotNodes.Items))
+	for ix := range gotNodes.Items {
+		got[ix] = gotNodes.Items[ix].ID
+	}
 	if !ids.HasAll(got...) || len(got) != len(ids) {
 		t.Errorf("Expected %v, got %v", ids, got)
 	}
@@ -81,8 +81,10 @@ func TestScheduler(t *testing.T) {
 		var gotPod *api.Pod
 		var gotBinding *api.Binding
 		c := &Config{
-			MinionLister: scheduler.FakeMinionLister{"machine1"},
-			Algorithm:    item.algo,
+			MinionLister: scheduler.FakeMinionLister(
+				api.MinionList{Items: []api.Minion{{TypeMeta: api.TypeMeta{ID: "machine1"}}}},
+			),
+			Algorithm: item.algo,
 			Binder: fakeBinder{func(b *api.Binding) error {
 				gotBinding = b
 				return item.injectBindError