Add a test for DumbSpreadingPriority.

commit 53518e37a6 (parent cb9d515004)
@@ -62,7 +62,8 @@ func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) al
     cpuScore := calculateScore(totalMilliCPU, capacityMilliCPU, node.Name)
     memoryScore := calculateScore(totalMemory, capacityMemory, node.Name)
-    glog.V(10).Infof(
+    // glog.V(10).Infof(
+    glog.Infof(
         "%v -> %v: Least Requested Priority, Absolute/Requested: (%d, %d) / (%d, %d) Score: (%d, %d)",
         pod.Name, node.Name,
         totalMilliCPU, totalMemory,
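calculateScore itself is not part of this diff. A minimal sketch of what the logged values and the test arithmetic later in this commit imply (remaining capacity scaled to 0..10); this is an assumption for illustration, not the upstream implementation:

// Sketch only (assumption): score by the fraction of capacity left free after
// the requested amount, scaled to the 0..10 range these priorities use.
// The node name is presumably used only for logging in the real code.
func calculateScore(requested, capacity int64, node string) int {
    if capacity == 0 || requested > capacity {
        return 0
    }
    return int(((capacity - requested) * 10) / capacity)
}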
@@ -121,9 +122,15 @@ func DumbSpreadingPriority(pod *api.Pod, podLister algorithm.PodLister, minionLi
     list := algorithm.HostPriorityList{}
     for _, node := range nodes.Items {
         npods := int64(len(podsToMachines[node.Name]))
+        score := calculateScore(min(npods+1, dumbSpreadingDenominator), dumbSpreadingDenominator, node.Name)
+        // glog.V(10).Infof(
+        glog.Infof(
+            "%v -> %v: DumbSpreadPriority, Old # pods (%d) Score: (%d)",
+            pod.Name, node.Name, npods, score,
+        )
         list = append(list, algorithm.HostPriority{
             Host:  node.Name,
-            Score: calculateScore(min(npods+1, dumbSpreadingDenominator), dumbSpreadingDenominator, node.Name),
+            Score: score,
         })
     }
     return list, nil
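Taken together with the test added below, the call above implies dumbSpreadingDenominator is 10: a node's score drops as its existing pod count grows and bottoms out at zero once it holds dumbSpreadingDenominator or more pods. A hypothetical standalone helper (dumbSpreadScore is not a name from this commit) showing the arithmetic under the calculateScore sketch above:

// Illustrative only: DumbSpreadingPriority's per-node score, assuming
// dumbSpreadingDenominator = 10 (the value the test arithmetic implies).
func dumbSpreadScore(npods int64) int {
    const dumbSpreadingDenominator = 10
    n := npods + 1
    if n > dumbSpreadingDenominator {
        n = dumbSpreadingDenominator
    }
    return int(((dumbSpreadingDenominator - n) * 10) / dumbSpreadingDenominator)
}

// dumbSpreadScore(1) == 8 and dumbSpreadScore(3) == 6, matching the
// DumbSpreadingPriority scores in the TestDumbSpreading comment below.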
@@ -225,7 +232,8 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
         diff := math.Abs(cpuFraction - memoryFraction)
         score = int(10 - diff*10)
     }
-    glog.V(10).Infof(
+    // glog.V(10).Infof(
+    glog.Infof(
         "%v -> %v: Balanced Resource Allocation, Absolute/Requested: (%d, %d) / (%d, %d) Score: (%d)",
         pod.Name, node.Name,
         totalMilliCPU, totalMemory,
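The two lines of arithmetic above reward nodes whose CPU and memory utilization fractions stay close together. A small illustrative helper that mirrors them (balancedScore is a hypothetical name, not part of the commit; assumes the math package is imported, as it is in the file above):

// Equal fractions score 10; each 10% of imbalance between CPU and memory
// utilization costs one point.
func balancedScore(cpuFraction, memoryFraction float64) int {
    diff := math.Abs(cpuFraction - memoryFraction)
    return int(10 - diff*10)
}

// balancedScore(0.5, 0.5) == 10, balancedScore(0.3, 0.6) == 7.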
@@ -23,6 +23,7 @@ import (

     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
+    "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler"
     "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
 )

@@ -38,6 +39,88 @@ func makeMinion(node string, milliCPU, memory int64) api.Node {
     }
 }

+func TestDumbSpreading(t *testing.T) {
+    noResources := api.PodSpec{
+        Containers: []api.Container{},
+    }
+    small := api.PodSpec{
+        NodeName: "machine1",
+        Containers: []api.Container{
+            {
+                Resources: api.ResourceRequirements{
+                    Limits: api.ResourceList{
+                        "cpu":    resource.MustParse("100m"),
+                        "memory": resource.MustParse("1000"),
+                    },
+                },
+            },
+        },
+    }
+    large := api.PodSpec{
+        NodeName: "machine2",
+        Containers: []api.Container{
+            {
+                Resources: api.ResourceRequirements{
+                    Limits: api.ResourceList{
+                        "cpu":    resource.MustParse("600m"),
+                        "memory": resource.MustParse("6000"),
+                    },
+                },
+            },
+        },
+    }
+    tests := []struct {
+        pod          *api.Pod
+        pods         []*api.Pod
+        nodes        []api.Node
+        expectedList algorithm.HostPriorityList
+        test         string
+    }{
+        {
+            /* Minion1 CPU capacity 1000m, free 700m/7000, 3 pods
+               LeastRequestedPriority score 7
+               BalancedResourceAllocation score 10
+               ServiceSpreadingPriority score 10
+               DumbSpreadingPriority score 6
+               Total: 7 + 10 + 10 + 2*6 = 39
+
+               Minion2 CPU capacity 1000m, free 400m/4000, 1 pod
+               LeastRequestedPriority score 4
+               BalancedResourceAllocation score 10
+               ServiceSpreadingPriority score 10
+               DumbSpreadingPriority score 8
+               Total: 4 + 10 + 10 + 2*8 = 40
+
+               Moral of the story: We prefer the machine that is more heavily loaded,
+               because it has fewer pods.
+            */
+            pod:          &api.Pod{Spec: noResources},
+            nodes:        []api.Node{makeMinion("machine1", 1000, 10000), makeMinion("machine2", 1000, 10000)},
+            expectedList: []algorithm.HostPriority{{"machine1", 39}, {"machine2", 40}},
+            test:         "nothing scheduled, nothing requested",
+            pods: []*api.Pod{
+                {Spec: small}, {Spec: small}, {Spec: small},
+                {Spec: large},
+            },
+        },
+    }
+
+    for _, test := range tests {
+        list, err := scheduler.PrioritizeNodes(
+            test.pod,
+            algorithm.FakePodLister(test.pods),
+            []algorithm.PriorityConfig{{Function: LeastRequestedPriority, Weight: 1}, {Function: BalancedResourceAllocation, Weight: 1}, {Function: DumbSpreadingPriority, Weight: 2}, {Function: NewServiceSpreadPriority(algorithm.FakeServiceLister([]api.Service{})), Weight: 1}},
+            algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
+        if err != nil {
+            t.Errorf("unexpected error: %v", err)
+        }
+        if !reflect.DeepEqual(test.expectedList, list) {
+            t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
+        }
+    }
+}
+
 func TestLeastRequested(t *testing.T) {
     labels1 := map[string]string{
         "foo": "bar",
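As a sanity check on the expected totals in the comment above, the per-priority scores can be combined with the weights from the test's PriorityConfig slice (LeastRequested 1, BalancedResourceAllocation 1, DumbSpreading 2, ServiceSpread 1). The following standalone program is illustrative only and not part of the commit:

package main

import "fmt"

func main() {
    weights := []int{1, 1, 2, 1}   // LeastRequested, BalancedAllocation, DumbSpreading, ServiceSpread
    machine1 := []int{7, 10, 6, 10} // per-priority scores from the test comment
    machine2 := []int{4, 10, 8, 10}
    total := func(scores []int) int {
        sum := 0
        for i, s := range scores {
            sum += weights[i] * s
        }
        return sum
    }
    fmt.Println(total(machine1), total(machine2)) // 39 40
}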
@@ -20,6 +20,7 @@ import (
     "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
     "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
     "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithm"
+    "github.com/golang/glog"
 )

 type ServiceSpread struct {
@@ -82,6 +83,10 @@ func (s *ServiceSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorith
             fScore = 10 * (float32(maxCount-counts[minion.Name]) / float32(maxCount))
         }
         result = append(result, algorithm.HostPriority{Host: minion.Name, Score: int(fScore)})
+        // glog.V(10).Infof(
+        glog.Infof(
+            "%v -> %v: ServiceSpreadPriority, Score: (%d)", pod.Name, minion.Name, int(fScore),
+        )
     }
     return result, nil
 }
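For reference, the fScore assignment above means a minion already running many pods of the same service scores low, while one running none of them keeps the default of 10; with no services at all (as in TestDumbSpreading) every minion scores 10. A hypothetical standalone version of that arithmetic (spreadScore is not a name from the commit):

// maxCount is the highest per-minion count of matching pods; countOnMinion is
// this minion's count. Assumes the surrounding code defaults fScore to 10 when
// there are no matching pods, which the test arithmetic implies.
func spreadScore(maxCount, countOnMinion int) int {
    fScore := float32(10)
    if maxCount > 0 {
        fScore = 10 * (float32(maxCount-countOnMinion) / float32(maxCount))
    }
    return int(fScore)
}

// e.g. spreadScore(5, 2) == 6, spreadScore(5, 0) == 10.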
@@ -71,7 +71,7 @@ func (g *genericScheduler) Schedule(pod *api.Pod, minionLister algorithm.MinionL
         return "", err
     }

-    priorityList, err := prioritizeNodes(pod, g.pods, g.prioritizers, algorithm.FakeMinionLister(filteredNodes))
+    priorityList, err := PrioritizeNodes(pod, g.pods, g.prioritizers, algorithm.FakeMinionLister(filteredNodes))
     if err != nil {
         return "", err
     }
@@ -139,7 +139,7 @@ func findNodesThatFit(pod *api.Pod, podLister algorithm.PodLister, predicateFunc
 // Each priority function can also have its own weight
 // The minion scores returned by the priority function are multiplied by the weights to get weighted scores
 // All scores are finally combined (added) to get the total weighted scores of all minions
-func prioritizeNodes(pod *api.Pod, podLister algorithm.PodLister, priorityConfigs []algorithm.PriorityConfig, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
+func PrioritizeNodes(pod *api.Pod, podLister algorithm.PodLister, priorityConfigs []algorithm.PriorityConfig, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
     result := algorithm.HostPriorityList{}

     // If no priority configs are provided, then the EqualPriority function is applied
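The three comments above describe the weighting scheme that the newly exported PrioritizeNodes applies. A minimal sketch of that combination step, using only identifiers visible in this diff (PriorityConfig.Function, PriorityConfig.Weight, combinedScores); combineScores is a hypothetical helper name and the priority-function signature is assumed from the hunk headers above, not the upstream body:

func combineScores(pod *api.Pod, podLister algorithm.PodLister, priorityConfigs []algorithm.PriorityConfig, minionLister algorithm.MinionLister) (map[string]int, error) {
    combinedScores := map[string]int{}
    for _, config := range priorityConfigs {
        // Run each priority function, then weight and accumulate its per-host scores.
        prioritizedList, err := config.Function(pod, podLister, minionLister)
        if err != nil {
            return nil, err
        }
        for _, hostEntry := range prioritizedList {
            combinedScores[hostEntry.Host] += config.Weight * hostEntry.Score
        }
    }
    return combinedScores, nil
}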
@@ -165,6 +165,7 @@ func prioritizeNodes(pod *api.Pod, podLister algorithm.PodLister, priorityConfig
         }
     }
     for host, score := range combinedScores {
+        glog.V(10).Infof("Host %s Score %d", host, score)
         result = append(result, algorithm.HostPriority{Host: host, Score: score})
     }
     return result, nil