Implementing PR feedback and adding test cases

commit d17cebcd2a (parent 5fa1dbc07b)
@@ -66,7 +66,7 @@ func (g *genericScheduler) selectHost(priorityList HostPriorityList) (string, er
 	}
 	sort.Sort(sort.Reverse(priorityList))
 
-	hosts := getMinHosts(priorityList)
+	hosts := getBestHosts(priorityList)
 	g.randomLock.Lock()
 	defer g.randomLock.Unlock()
 
@@ -107,16 +107,16 @@ func findNodesThatFit(pod api.Pod, podLister PodLister, predicates []FitPredicat
 // Each priority function can also have its own weight
 // The minion scores returned by the priority function are multiplied by the weights to get weighted scores
 // All scores are finally combined (added) to get the total weighted scores of all minions
-func prioritizeNodes(pod api.Pod, podLister PodLister, priorities []PriorityConfig, minionLister MinionLister) (HostPriorityList, error) {
+func prioritizeNodes(pod api.Pod, podLister PodLister, priorityConfigs []PriorityConfig, minionLister MinionLister) (HostPriorityList, error) {
 	result := HostPriorityList{}
 	combinedScores := map[string]int{}
-	for _, priority := range priorities {
-		weight := priority.Weight
+	for _, priorityConfig := range priorityConfigs {
+		weight := priorityConfig.Weight
 		// skip the priority function if the weight is specified as 0
 		if weight == 0 {
 			continue
 		}
-		priorityFunc := priority.Function
+		priorityFunc := priorityConfig.Function
 		prioritizedList, err := priorityFunc(pod, podLister, minionLister)
 		if err != nil {
 			return HostPriorityList{}, err
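The summation the comments above describe happens below the cut of this hunk. As a rough, self-contained sketch of that weighted-combination scheme (the type shape, field names, and weights here are illustrative stand-ins, not the committed code):

package main

import "fmt"

// Stand-in for the scheduler's HostPriority; field names assumed from the
// literals used elsewhere in this diff.
type HostPriority struct {
	host  string
	score int
}

func main() {
	// Scores returned by two priority functions for the same minions.
	leastRequested := []HostPriority{{"machine1", 10}, {"machine2", 5}}
	spreading := []HostPriority{{"machine1", 2}, {"machine2", 8}}

	// Multiply each minion score by the function's weight, then add them up,
	// as the comments above prioritizeNodes describe.
	combinedScores := map[string]int{}
	for _, entry := range leastRequested {
		combinedScores[entry.host] += entry.score * 1 // weight 1 (illustrative)
	}
	for _, entry := range spreading {
		combinedScores[entry.host] += entry.score * 2 // weight 2 (illustrative)
	}
	fmt.Println(combinedScores) // map[machine1:14 machine2:21]
}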
@@ -131,7 +131,7 @@ func prioritizeNodes(pod api.Pod, podLister PodLister, priorities []PriorityConf
 	return result, nil
 }
 
-func getMinHosts(list HostPriorityList) []string {
+func getBestHosts(list HostPriorityList) []string {
 	result := []string{}
 	for _, hostEntry := range list {
 		if hostEntry.score == list[0].score {
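Read together with the selectHost hunk above: after the descending sort, the renamed getBestHosts keeps every host tied for the top score, and selectHost then picks one of them at random under randomLock. A standalone sketch of that selection path, with minimal stand-in types (the host/score field names are assumed from the literals used elsewhere in this diff):

package main

import (
	"fmt"
	"math/rand"
	"sort"
)

// Minimal stand-ins for the scheduler's HostPriority / HostPriorityList types.
type HostPriority struct {
	host  string
	score int
}

type HostPriorityList []HostPriority

func (h HostPriorityList) Len() int           { return len(h) }
func (h HostPriorityList) Less(i, j int) bool { return h[i].score < h[j].score }
func (h HostPriorityList) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

// getBestHosts mirrors the renamed helper: keep every host whose score matches
// the first (highest, after the reverse sort) entry.
func getBestHosts(list HostPriorityList) []string {
	result := []string{}
	for _, hostEntry := range list {
		if hostEntry.score == list[0].score {
			result = append(result, hostEntry.host)
		}
	}
	return result
}

func main() {
	list := HostPriorityList{{"machine1", 5}, {"machine2", 8}, {"machine3", 8}}
	sort.Sort(sort.Reverse(list))
	hosts := getBestHosts(list)               // the two hosts tied at score 8
	fmt.Println(hosts[rand.Intn(len(hosts))]) // random pick among the tied best hosts
}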
@@ -24,27 +24,37 @@ import (
 
 // the unused capacity is calculated on a scale of 0-10
 // 0 being the lowest priority and 10 being the highest
-func calculateScore(requested, capacity int) int {
+func calculateScore(requested, capacity int, node string) int {
 	if capacity == 0 {
 		return 0
 	}
+	if requested > capacity {
+		glog.Errorf("Combined requested resources from existing pods exceeds capacity on minion: %s", node)
+		return 0
+	}
 	return ((capacity - requested) * 10) / capacity
 }
 
 // Calculate the occupancy on a node. 'node' has information about the resources on the node.
 // 'pods' is a list of pods currently scheduled on the node.
-func calculateOccupancy(node api.Minion, pods []api.Pod) HostPriority {
+func calculateOccupancy(pod api.Pod, node api.Minion, pods []api.Pod) HostPriority {
 	totalCPU := 0
 	totalMemory := 0
-	for _, pod := range pods {
-		for _, container := range pod.Spec.Containers {
+	for _, existingPod := range pods {
+		for _, container := range existingPod.Spec.Containers {
 			totalCPU += container.CPU
 			totalMemory += container.Memory
 		}
 	}
+	// Add the resources requested by the current pod being scheduled.
+	// This also helps differentiate between differently sized, but empty, minions.
+	for _, container := range pod.Spec.Containers {
+		totalCPU += container.CPU
+		totalMemory += container.Memory
+	}
 
-	cpuScore := calculateScore(totalCPU, resources.GetIntegerResource(node.Spec.Capacity, resources.CPU, 0))
-	memoryScore := calculateScore(totalMemory, resources.GetIntegerResource(node.Spec.Capacity, resources.Memory, 0))
+	cpuScore := calculateScore(totalCPU, resources.GetIntegerResource(node.Spec.Capacity, resources.CPU, 0), node.Name)
+	memoryScore := calculateScore(totalMemory, resources.GetIntegerResource(node.Spec.Capacity, resources.Memory, 0), node.Name)
 	glog.V(4).Infof("Least Requested Priority, AbsoluteRequested: (%d, %d) Score:(%d, %d)", totalCPU, totalMemory, cpuScore, memoryScore)
 
 	return HostPriority{
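To make the 0-10 scale and the new overcommit guard concrete, here is a self-contained version of the formula above (the glog call is swapped for a plain print so the snippet runs on its own):

package main

import "fmt"

// Same formula as calculateScore above; logging replaced with fmt so this runs standalone.
func calculateScore(requested, capacity int, node string) int {
	if capacity == 0 {
		return 0
	}
	if requested > capacity {
		fmt.Printf("Combined requested resources from existing pods exceeds capacity on minion: %s\n", node)
		return 0
	}
	return ((capacity - requested) * 10) / capacity
}

func main() {
	fmt.Println(calculateScore(0, 4000, "machine1"))    // 10: nothing requested
	fmt.Println(calculateScore(3000, 4000, "machine1")) // 2:  ((4000-3000)*10)/4000, integer division
	fmt.Println(calculateScore(6000, 4000, "machine1")) // 0:  overcommitted, guard kicks in
}

The truncation toward zero is why fractional values like 2.5 in the test comments below land on integer expected scores.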
@@ -66,7 +76,7 @@ func LeastRequestedPriority(pod api.Pod, podLister PodLister, minionLister Minio
 
 	list := HostPriorityList{}
 	for _, node := range nodes.Items {
-		list = append(list, calculateOccupancy(node, podsToMachines[node.Name]))
+		list = append(list, calculateOccupancy(pod, node, podsToMachines[node.Name]))
 	}
 	return list, nil
 }
@@ -52,12 +52,14 @@ func TestLeastRequested(t *testing.T) {
 	machine2Status := api.PodStatus{
 		Host: "machine2",
 	}
+	noResources := api.PodSpec{
+		Containers: []api.Container{},
+	}
 	cpuOnly := api.PodSpec{
 		Containers: []api.Container{
 			{CPU: 1000},
 			{CPU: 2000},
 		},
-		// Host: "machine1",
 	}
 	cpuAndMemory := api.PodSpec{
 		Containers: []api.Container{
@@ -73,14 +75,55 @@ func TestLeastRequested(t *testing.T) {
 		test string
 	}{
 		{
+			/*
+				Minion1 scores (remaining resources) on 0-10 scale
+				CPU Score: (4000 - 0) / 4000 = 10
+				Memory Score: (10000 - 0) / 10000 = 10
+				Minion1 Score: (10 + 10) / 2 = 10
+
+				Minion2 scores (remaining resources) on 0-10 scale
+				CPU Score: (4000 - 0) / 4000 = 10
+				Memory Score: (10000 - 0) / 10000 = 10
+				Minion2 Score: (10 + 10) / 2 = 10
+			*/
+			pod: api.Pod{Spec: noResources},
 			nodes: []api.Minion{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
 			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
-			test: "nothing scheduled",
+			test: "nothing scheduled, nothing requested",
 		},
 		{
+			/*
+				Minion1 scores on 0-10 scale
+				CPU Score: (4000 - 3000) / 4000 = 2.5
+				Memory Score: (10000 - 5000) / 10000 = 5
+				Minion1 Score: (2.5 + 5) / 2 = 3
+
+				Minion2 scores on 0-10 scale
+				CPU Score: (6000 - 3000) / 6000 = 5
+				Memory Score: (10000 - 5000) / 10000 = 5
+				Minion2 Score: (5 + 5) / 2 = 5
+			*/
+			pod: api.Pod{Spec: cpuAndMemory},
+			nodes: []api.Minion{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)},
+			expectedList: []HostPriority{{"machine1", 3}, {"machine2", 5}},
+			test: "nothing scheduled, resources requested, differently sized machines",
+		},
+		{
+			/*
+				Minion1 scores on 0-10 scale
+				CPU Score: (4000 - 0) / 4000 = 10
+				Memory Score: (10000 - 0) / 10000 = 10
+				Minion1 Score: (10 + 10) / 2 = 10
+
+				Minion2 scores on 0-10 scale
+				CPU Score: (4000 - 0) / 4000 = 10
+				Memory Score: (10000 - 0) / 10000 = 10
+				Minion2 Score: (10 + 10) / 2 = 10
+			*/
+			pod: api.Pod{Spec: noResources},
 			nodes: []api.Minion{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
 			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
-			test: "no resources requested",
+			test: "no resources requested, pods scheduled",
 			pods: []api.Pod{
 				{Status: machine1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}},
 				{Status: machine1Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
@@ -89,18 +132,96 @@ func TestLeastRequested(t *testing.T) {
 			},
 		},
 		{
-			nodes: []api.Minion{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
-			expectedList: []HostPriority{{"machine1", 6 /* int(200%-75% / 2) */}, {"machine2", 3 /* int( 200%-125% / 2) */}},
-			test: "resources requested",
+			/*
+				Minion1 scores on 0-10 scale
+				CPU Score: (10000 - 6000) / 10000 = 4
+				Memory Score: (20000 - 0) / 20000 = 10
+				Minion1 Score: (4 + 10) / 2 = 7
+
+				Minion2 scores on 0-10 scale
+				CPU Score: (10000 - 6000) / 10000 = 4
+				Memory Score: (20000 - 5000) / 20000 = 7.5
+				Minion2 Score: (4 + 7.5) / 2 = 5
+			*/
+			pod: api.Pod{Spec: noResources},
+			nodes: []api.Minion{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
+			expectedList: []HostPriority{{"machine1", 7}, {"machine2", 5}},
+			test: "no resources requested, pods scheduled with resources",
 			pods: []api.Pod{
-				{Spec: cpuOnly, Status: api.PodStatus{Host: "machine1"}},
-				{Spec: cpuAndMemory, Status: api.PodStatus{Host: "machine2"}},
+				{Spec: cpuOnly, Status: machine1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}},
+				{Spec: cpuOnly, Status: machine1Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+				{Spec: cpuOnly, Status: machine2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+				{Spec: cpuAndMemory, Status: machine2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
 			},
 		},
 		{
+			/*
+				Minion1 scores on 0-10 scale
+				CPU Score: (10000 - 6000) / 10000 = 4
+				Memory Score: (20000 - 5000) / 20000 = 7.5
+				Minion1 Score: (4 + 7.5) / 2 = 5
+
+				Minion2 scores on 0-10 scale
+				CPU Score: (10000 - 6000) / 10000 = 4
+				Memory Score: (20000 - 10000) / 20000 = 5
+				Minion2 Score: (4 + 5) / 2 = 4
+			*/
+			pod: api.Pod{Spec: cpuAndMemory},
+			nodes: []api.Minion{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
+			expectedList: []HostPriority{{"machine1", 5}, {"machine2", 4}},
+			test: "resources requested, pods scheduled with resources",
+			pods: []api.Pod{
+				{Spec: cpuOnly, Status: machine1Status},
+				{Spec: cpuAndMemory, Status: machine2Status},
+			},
+		},
+		{
+			/*
+				Minion1 scores on 0-10 scale
+				CPU Score: (10000 - 6000) / 10000 = 4
+				Memory Score: (20000 - 5000) / 20000 = 7.5
+				Minion1 Score: (4 + 7.5) / 2 = 5
+
+				Minion2 scores on 0-10 scale
+				CPU Score: (10000 - 6000) / 10000 = 4
+				Memory Score: (50000 - 10000) / 50000 = 8
+				Minion2 Score: (4 + 8) / 2 = 6
+			*/
+			pod: api.Pod{Spec: cpuAndMemory},
+			nodes: []api.Minion{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
+			expectedList: []HostPriority{{"machine1", 5}, {"machine2", 6}},
+			test: "resources requested, pods scheduled with resources, differently sized machines",
+			pods: []api.Pod{
+				{Spec: cpuOnly, Status: machine1Status},
+				{Spec: cpuAndMemory, Status: machine2Status},
+			},
+		},
+		{
+			/*
+				Minion1 scores on 0-10 scale
+				CPU Score: (4000 - 6000) / 4000 = 0
+				Memory Score: (10000 - 0) / 10000 = 10
+				Minion1 Score: (0 + 10) / 2 = 5
+
+				Minion2 scores on 0-10 scale
+				CPU Score: (4000 - 6000) / 4000 = 0
+				Memory Score: (10000 - 5000) / 10000 = 5
+				Minion2 Score: (0 + 5) / 2 = 2
+			*/
+			pod: api.Pod{Spec: cpuOnly},
+			nodes: []api.Minion{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+			expectedList: []HostPriority{{"machine1", 5}, {"machine2", 2}},
+			test: "requested resources exceed minion capacity",
+			pods: []api.Pod{
+				{Spec: cpuOnly, Status: machine1Status},
+				{Spec: cpuAndMemory, Status: machine2Status},
+			},
+		},
+		{
+			pod: api.Pod{Spec: noResources},
 			nodes: []api.Minion{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
 			expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
-			test: "zero minion resources",
+			test: "zero minion resources, pods scheduled with resources",
 			pods: []api.Pod{
 				{Spec: cpuOnly},
 				{Spec: cpuAndMemory},
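The expected scores above follow from integer division in calculateScore plus averaging the CPU and memory scores. As a worked check of machine1 in the "resources requested, pods scheduled with resources" case, assuming the cpuAndMemory spec totals 3000 CPU and 5000 memory (its container list is cut off in this diff, so those totals are inferred from the score comments):

package main

import "fmt"

// Same formula as the committed calculateScore, with the node argument and
// logging dropped for brevity.
func calculateScore(requested, capacity int) int {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return ((capacity - requested) * 10) / capacity
}

func main() {
	// machine1: existing cpuOnly pod (3000 CPU, 0 memory) plus the pod being
	// scheduled, cpuAndMemory (inferred totals: 3000 CPU, 5000 memory).
	totalCPU, totalMemory := 3000+3000, 0+5000

	cpuScore := calculateScore(totalCPU, 10000)       // ((10000-6000)*10)/10000 = 4
	memoryScore := calculateScore(totalMemory, 20000) // ((20000-5000)*10)/20000 = 7 (7.5 truncated)
	fmt.Println((cpuScore + memoryScore) / 2)         // 5, matching expectedList for machine1
}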
@@ -18,7 +18,6 @@ package scheduler
 
 import (
 	"math/rand"
-	"sort"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
@@ -44,18 +43,11 @@ func CalculateSpreadPriority(pod api.Pod, podLister PodLister, minionLister Mini
 	if len(pods) > 0 {
 		for _, pod := range pods {
 			counts[pod.Status.Host]++
+			// Compute the maximum number of pods hosted on any minion
+			if counts[pod.Status.Host] > maxCount {
+				maxCount = counts[pod.Status.Host]
+			}
 		}
-
-		// doing this separately since the pod count can be much higher
-		// than the filtered minion count
-		values := make([]int, len(counts))
-		idx := 0
-		for _, count := range counts {
-			values[idx] = count
-			idx++
-		}
-		sort.Sort(sort.IntSlice(values))
-		maxCount = values[len(values)-1]
 	}
 
 	result := []HostPriority{}
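The replacement tracks the maximum pod count inline while counting, instead of copying the counts into a slice and sorting it, which is why the sort import above is dropped. A standalone illustration of the running-max pattern (the host data is made up):

package main

import "fmt"

func main() {
	// Pods already scheduled, keyed by the minion they landed on (example data).
	hosts := []string{"machine1", "machine2", "machine1", "machine1", "machine2"}

	counts := map[string]int{}
	maxCount := 0
	for _, host := range hosts {
		counts[host]++
		// Compute the maximum number of pods hosted on any minion, as in the new code.
		if counts[host] > maxCount {
			maxCount = counts[host]
		}
	}
	fmt.Println(counts, maxCount) // map[machine1:3 machine2:2] 3
}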
@@ -57,7 +57,8 @@ type ConfigFactory struct {
 // NewConfigFactory initializes the factory.
 func NewConfigFactory(client *client.Client) *ConfigFactory {
 	// initialize the factory struct
-	factory := &ConfigFactory{Client: client,
+	factory := &ConfigFactory{
+		Client: client,
 		PodQueue: cache.NewFIFO(),
 		PodLister: &storeToPodLister{cache.NewStore()},
 		MinionLister: &storeToMinionLister{cache.NewStore()},

@@ -83,7 +84,7 @@ func (factory *ConfigFactory) Create(predicateKeys, priorityKeys []string) (*sch
 	}
 
 	if priorityKeys == nil {
-		glog.V(2).Infof("Custom priority list not provided, using default priorities")
+		glog.V(2).Infof("Custom priority list not provided, using default priority: LeastRequestedPriority")
 		priorityKeys = []string{"LeastRequestedPriority"}
 	}
 	priorityConfigs, err := factory.getPriorityConfigs(priorityKeys)
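With the log message change, passing nil priority keys to Create still falls back to LeastRequestedPriority alone. A minimal sketch of that nil-check fallback in isolation (the helper name and the alternate key below are hypothetical, not part of this codebase):

package main

import "fmt"

// Hypothetical helper isolating the nil-check fallback used in ConfigFactory.Create.
func resolvePriorityKeys(priorityKeys []string) []string {
	if priorityKeys == nil {
		fmt.Println("Custom priority list not provided, using default priority: LeastRequestedPriority")
		priorityKeys = []string{"LeastRequestedPriority"}
	}
	return priorityKeys
}

func main() {
	fmt.Println(resolvePriorityKeys(nil))
	// "SomeCustomPriority" is a made-up key, shown only to exercise the non-nil path.
	fmt.Println(resolvePriorityKeys([]string{"SomeCustomPriority"}))
}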