Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 11:50:44 +00:00)
Merge pull request #84606 from alculquicondor/test/priorities
Add benchmark test to compare EvenPodsSpreadPriority and SelectorSpreadingPriority
This commit is contained in: commit 3c4ae1c89a
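
For reference (not part of the commit): the new benchmarks added below can be compared with an invocation along these lines. The package path comes from the diff; the exact flags are only a suggested sketch.

    go test ./pkg/scheduler/algorithm/priorities/ -run=^$ -bench='BenchmarkTestDefaultEvenPodsSpreadPriority|BenchmarkTestSelectorSpreadPriority' -benchmem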
@@ -1657,7 +1657,7 @@ func BenchmarkTestGetTPMapMatchingSpreadConstraints(b *testing.B) {
 		{
 			name: "1000nodes/single-constraint-zone",
 			pod: st.MakePod().Name("p").Label("foo", "").
-				SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
+				SpreadConstraint(1, v1.LabelZoneFailureDomain, hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
 				Obj(),
 			existingPodsNum: 10000,
 			allNodesNum:     1000,
@@ -1666,7 +1666,7 @@ func BenchmarkTestGetTPMapMatchingSpreadConstraints(b *testing.B) {
 		{
 			name: "1000nodes/single-constraint-node",
 			pod: st.MakePod().Name("p").Label("foo", "").
-				SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
+				SpreadConstraint(1, v1.LabelHostname, hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
 				Obj(),
 			existingPodsNum: 10000,
 			allNodesNum:     1000,
@@ -1675,8 +1675,8 @@ func BenchmarkTestGetTPMapMatchingSpreadConstraints(b *testing.B) {
 		{
 			name: "1000nodes/two-constraints-zone-node",
 			pod: st.MakePod().Name("p").Label("foo", "").Label("bar", "").
-				SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
-				SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("bar").Obj()).
+				SpreadConstraint(1, v1.LabelZoneFailureDomain, hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
+				SpreadConstraint(1, v1.LabelHostname, hardSpread, st.MakeLabelSelector().Exists("bar").Obj()).
 				Obj(),
 			existingPodsNum: 10000,
 			allNodesNum:     1000,
@@ -1685,7 +1685,7 @@ func BenchmarkTestGetTPMapMatchingSpreadConstraints(b *testing.B) {
 	}
 	for _, tt := range tests {
 		b.Run(tt.name, func(b *testing.B) {
-			existingPods, allNodes, _ := st.MakeNodesAndPodsForEvenPodsSpread(tt.pod, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum)
+			existingPods, allNodes, _ := st.MakeNodesAndPodsForEvenPodsSpread(tt.pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum)
 			s := nodeinfosnapshot.NewSnapshot(existingPods, allNodes)
 			l, _ := s.NodeInfos().List()
 			b.ResetTimer()
@@ -69,6 +69,7 @@ go_test(
         "requested_to_capacity_ratio_test.go",
         "resource_limits_test.go",
         "selector_spreading_test.go",
+        "spreading_perf_test.go",
         "taint_toleration_test.go",
         "types_test.go",
     ],
@@ -468,7 +468,7 @@ func BenchmarkTestCalculateEvenPodsSpreadPriority(b *testing.B) {
 		{
 			name: "1000nodes/single-constraint-zone",
 			pod: st.MakePod().Name("p").Label("foo", "").
-				SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()).
+				SpreadConstraint(1, v1.LabelZoneFailureDomain, softSpread, st.MakeLabelSelector().Exists("foo").Obj()).
 				Obj(),
 			existingPodsNum: 10000,
 			allNodesNum:     1000,
@@ -477,7 +477,7 @@ func BenchmarkTestCalculateEvenPodsSpreadPriority(b *testing.B) {
 		{
 			name: "1000nodes/single-constraint-node",
 			pod: st.MakePod().Name("p").Label("foo", "").
-				SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("foo").Obj()).
+				SpreadConstraint(1, v1.LabelHostname, softSpread, st.MakeLabelSelector().Exists("foo").Obj()).
 				Obj(),
 			existingPodsNum: 10000,
 			allNodesNum:     1000,
@@ -486,8 +486,8 @@ func BenchmarkTestCalculateEvenPodsSpreadPriority(b *testing.B) {
 		{
 			name: "1000nodes/two-constraints-zone-node",
 			pod: st.MakePod().Name("p").Label("foo", "").Label("bar", "").
-				SpreadConstraint(1, "zone", softSpread, st.MakeLabelSelector().Exists("foo").Obj()).
-				SpreadConstraint(1, "node", softSpread, st.MakeLabelSelector().Exists("bar").Obj()).
+				SpreadConstraint(1, v1.LabelZoneFailureDomain, softSpread, st.MakeLabelSelector().Exists("foo").Obj()).
+				SpreadConstraint(1, v1.LabelHostname, softSpread, st.MakeLabelSelector().Exists("bar").Obj()).
 				Obj(),
 			existingPodsNum: 10000,
 			allNodesNum:     1000,
@@ -496,7 +496,7 @@ func BenchmarkTestCalculateEvenPodsSpreadPriority(b *testing.B) {
 	}
 	for _, tt := range tests {
 		b.Run(tt.name, func(b *testing.B) {
-			existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(tt.pod, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum)
+			existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(tt.pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum)
 			snapshot := nodeinfosnapshot.NewSnapshot(existingPods, allNodes)
 			meta := &priorityMetadata{
 				podTopologySpreadMap: buildPodTopologySpreadMap(tt.pod, filteredNodes, snapshot.NodeInfoList),
pkg/scheduler/algorithm/priorities/spreading_perf_test.go (new file, 109 lines)
@@ -0,0 +1,109 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package priorities
+
+import (
+	"testing"
+
+	v1 "k8s.io/api/core/v1"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+	"k8s.io/kubernetes/pkg/scheduler/listers/fake"
+	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+	st "k8s.io/kubernetes/pkg/scheduler/testing"
+)
+
+// The tests in this file compare the performance of SelectorSpreadPriority
+// against EvenPodsSpreadPriority with a similar rule.
+
+var (
+	tests = []struct {
+		name            string
+		existingPodsNum int
+		allNodesNum     int
+	}{
+		{
+			name:            "100nodes",
+			existingPodsNum: 1000,
+			allNodesNum:     100,
+		},
+		{
+			name:            "1000nodes",
+			existingPodsNum: 10000,
+			allNodesNum:     1000,
+		},
+	}
+)
+
+func BenchmarkTestDefaultEvenPodsSpreadPriority(b *testing.B) {
+	for _, tt := range tests {
+		b.Run(tt.name, func(b *testing.B) {
+			pod := st.MakePod().Name("p").Label("foo", "").
+				SpreadConstraint(1, v1.LabelHostname, softSpread, st.MakeLabelSelector().Exists("foo").Obj()).
+				SpreadConstraint(1, v1.LabelZoneFailureDomain, softSpread, st.MakeLabelSelector().Exists("foo").Obj()).Obj()
+			existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.allNodesNum)
+			snapshot := nodeinfosnapshot.NewSnapshot(existingPods, allNodes)
+			b.ResetTimer()
+
+			for i := 0; i < b.N; i++ {
+				meta := &priorityMetadata{
+					podTopologySpreadMap: buildPodTopologySpreadMap(pod, filteredNodes, snapshot.NodeInfoList),
+				}
+				var gotList framework.NodeScoreList
+				for _, n := range filteredNodes {
+					score, err := CalculateEvenPodsSpreadPriorityMap(pod, meta, snapshot.NodeInfoMap[n.Name])
+					if err != nil {
+						b.Fatal(err)
+					}
+					gotList = append(gotList, score)
+				}
+				err := CalculateEvenPodsSpreadPriorityReduce(pod, meta, snapshot, gotList)
+				if err != nil {
+					b.Fatal(err)
+				}
+			}
+		})
+	}
+}
+
+func BenchmarkTestSelectorSpreadPriority(b *testing.B) {
+	for _, tt := range tests {
+		b.Run(tt.name, func(b *testing.B) {
+			pod := st.MakePod().Name("p").Label("foo", "").Obj()
+			existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.allNodesNum)
+			snapshot := nodeinfosnapshot.NewSnapshot(existingPods, allNodes)
+			services := []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": ""}}}}
+			ss := SelectorSpread{
+				serviceLister:     fake.ServiceLister(services),
+				controllerLister:  fake.ControllerLister(nil),
+				replicaSetLister:  fake.ReplicaSetLister(nil),
+				statefulSetLister: fake.StatefulSetLister(nil),
+			}
+			b.ResetTimer()
+
+			for i := 0; i < b.N; i++ {
+				meta := &priorityMetadata{
+					podSelectors: getSelectors(pod, ss.serviceLister, ss.controllerLister, ss.replicaSetLister, ss.statefulSetLister),
+				}
+				ttp := priorityFunction(ss.CalculateSpreadPriorityMap, ss.CalculateSpreadPriorityReduce, meta)
+				_, err := ttp(pod, snapshot, filteredNodes)
+				if err != nil {
+					b.Fatal(err)
+				}
+			}
+		})
+	}
+}
@@ -22,44 +22,39 @@ import (
 	"k8s.io/api/core/v1"
 )
 
+type keyVal struct {
+	k string
+	v string
+}
+
 // MakeNodesAndPodsForEvenPodsSpread serves as a testing helper for EvenPodsSpread feature.
 // It builds a fake cluster containing running Pods and Nodes.
 // The size of Pods and Nodes are determined by input arguments.
 // The specs of Pods and Nodes are generated with the following rules:
-// - If `pod` has "node" as a topologyKey, each generated node is applied with a unique label: "node: node<i>".
-// - If `pod` has "zone" as a topologyKey, each generated node is applied with a rotating label: "zone: zone[0-9]".
-// - Depending on "labelSelector.MatchExpressions[0].Key" the `pod` has in each topologySpreadConstraint,
-//   each generated pod will be applied with label "key1", "key1,key2", ..., "key1,key2,...,keyN" in a rotating manner.
-func MakeNodesAndPodsForEvenPodsSpread(pod *v1.Pod, existingPodsNum, allNodesNum, filteredNodesNum int) (existingPods []*v1.Pod, allNodes []*v1.Node, filteredNodes []*v1.Node) {
-	var topologyKeys []string
-	var labels []string
-	zones := 10
-	for _, c := range pod.Spec.TopologySpreadConstraints {
-		topologyKeys = append(topologyKeys, c.TopologyKey)
-		labels = append(labels, c.LabelSelector.MatchExpressions[0].Key)
+// - Each generated node is applied with a unique label: "node: node<i>".
+// - Each generated node is applied with a rotating label: "zone: zone[0-9]".
+// - Depending on the input labels, each generated pod will be applied with
+//   label "key1", "key1,key2", ..., "key1,key2,...,keyN" in a rotating manner.
+func MakeNodesAndPodsForEvenPodsSpread(labels map[string]string, existingPodsNum, allNodesNum, filteredNodesNum int) (existingPods []*v1.Pod, allNodes []*v1.Node, filteredNodes []*v1.Node) {
+	var labelPairs []keyVal
+	for k, v := range labels {
+		labelPairs = append(labelPairs, keyVal{k: k, v: v})
 	}
+	zones := 10
 	// build nodes
 	for i := 0; i < allNodesNum; i++ {
-		nodeWrapper := MakeNode().Name(fmt.Sprintf("node%d", i))
-		for _, tpKey := range topologyKeys {
-			if tpKey == "zone" {
-				nodeWrapper = nodeWrapper.Label("zone", fmt.Sprintf("zone%d", i%zones))
-			} else if tpKey == "node" {
-				nodeWrapper = nodeWrapper.Label("node", fmt.Sprintf("node%d", i))
-			}
-		}
-		node := nodeWrapper.Obj()
+		node := MakeNode().Name(fmt.Sprintf("node%d", i)).
+			Label(v1.LabelZoneFailureDomain, fmt.Sprintf("zone%d", i%zones)).
+			Label(v1.LabelHostname, fmt.Sprintf("node%d", i)).Obj()
 		allNodes = append(allNodes, node)
-		if len(filteredNodes) < filteredNodesNum {
-			filteredNodes = append(filteredNodes, node)
-		}
 	}
+	filteredNodes = allNodes[:filteredNodesNum]
 	// build pods
 	for i := 0; i < existingPodsNum; i++ {
 		podWrapper := MakePod().Name(fmt.Sprintf("pod%d", i)).Node(fmt.Sprintf("node%d", i%allNodesNum))
 		// apply labels[0], labels[0,1], ..., labels[all] to each pod in turn
-		for _, label := range labels[:i%len(labels)+1] {
-			podWrapper = podWrapper.Label(label, "")
+		for _, p := range labelPairs[:i%len(labelPairs)+1] {
+			podWrapper = podWrapper.Label(p.k, p.v)
 		}
 		existingPods = append(existingPods, podWrapper.Obj())
 	}
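
Side note (not part of the commit): a minimal, runnable sketch of the label-rotation rule documented in the rewritten MakeNodesAndPodsForEvenPodsSpread above. The keys "foo" and "bar" are simply the ones the benchmarks use, and an ordered slice stands in for the unordered map the helper actually receives, so real key order may differ.

package main

import "fmt"

func main() {
	// Pod i receives the first (i % len(labels)) + 1 label keys, mirroring
	// labelPairs[:i%len(labelPairs)+1] in the helper.
	labels := []string{"foo", "bar"} // assumed inputs
	for i := 0; i < 4; i++ {
		fmt.Printf("pod%d gets labels %v\n", i, labels[:i%len(labels)+1])
	}
	// Prints: pod0 [foo], pod1 [foo bar], pod2 [foo], pod3 [foo bar]
}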