Add Benchmark test for EvenPodsSpread predicate
- go test k8s.io/kubernetes/pkg/scheduler/algorithm/predicates -benchmem -run=^$ -bench .
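The new benchmarks can also be run individually by filtering on their names with -bench, for example (the priorities package path below is inferred from the "package priorities" hunk further down and may differ):
- go test k8s.io/kubernetes/pkg/scheduler/algorithm/predicates -benchmem -run=^$ -bench BenchmarkTestGetTPMapMatchingSpreadConstraints
- go test k8s.io/kubernetes/pkg/scheduler/algorithm/priorities -benchmem -run=^$ -bench BenchmarkTestCalculateEvenPodsSpreadPriority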
parent 46d65d0a46
commit 7f1a3965fd
@@ -1784,6 +1784,55 @@ func TestPodSpreadMap_removePod(t *testing.T) {
    }
}

func BenchmarkTestGetTPMapMatchingSpreadConstraints(b *testing.B) {
    tests := []struct {
        name             string
        pod              *v1.Pod
        existingPodsNum  int
        allNodesNum      int
        filteredNodesNum int
    }{
        {
            name: "1000nodes/single-constraint-zone",
            pod: st.MakePod().Name("p").Label("foo", "").
                SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
                Obj(),
            existingPodsNum:  10000,
            allNodesNum:      1000,
            filteredNodesNum: 500,
        },
        {
            name: "1000nodes/single-constraint-node",
            pod: st.MakePod().Name("p").Label("foo", "").
                SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
                Obj(),
            existingPodsNum:  10000,
            allNodesNum:      1000,
            filteredNodesNum: 500,
        },
        {
            name: "1000nodes/two-constraints-zone-node",
            pod: st.MakePod().Name("p").Label("foo", "").Label("bar", "").
                SpreadConstraint(1, "zone", hardSpread, st.MakeLabelSelector().Exists("foo").Obj()).
                SpreadConstraint(1, "node", hardSpread, st.MakeLabelSelector().Exists("bar").Obj()).
                Obj(),
            existingPodsNum:  10000,
            allNodesNum:      1000,
            filteredNodesNum: 500,
        },
    }
    for _, tt := range tests {
        b.Run(tt.name, func(b *testing.B) {
            existingPods, allNodes, _ := st.MakeNodesAndPods(tt.pod, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum)
            nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(existingPods, allNodes)
            b.ResetTimer()
            for i := 0; i < b.N; i++ {
                getTPMapMatchingSpreadConstraints(tt.pod, nodeNameToInfo)
            }
        })
    }
}

var (
    hardSpread = v1.DoNotSchedule
    softSpread = v1.ScheduleAnyway
@@ -17,7 +17,6 @@ limitations under the License.
package priorities

import (
    "fmt"
    "reflect"
    "testing"

@@ -445,43 +444,6 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
    }
}

func makeNodesAndPods(pod *v1.Pod, existingPodsNum, allNodesNum, filteredNodesNum int) (existingPods []*v1.Pod, allNodes []*v1.Node, filteredNodes []*v1.Node) {
    var topologyKeys []string
    var labels []string
    // regions := 3
    zones := 10
    for _, c := range pod.Spec.TopologySpreadConstraints {
        topologyKeys = append(topologyKeys, c.TopologyKey)
        labels = append(labels, c.LabelSelector.MatchExpressions[0].Key)
    }
    // build nodes
    for i := 0; i < allNodesNum; i++ {
        nodeWrapper := st.MakeNode().Name(fmt.Sprintf("node%d", i))
        for _, tpKey := range topologyKeys {
            if tpKey == "zone" {
                nodeWrapper = nodeWrapper.Label("zone", fmt.Sprintf("zone%d", i%zones))
            } else if tpKey == "node" {
                nodeWrapper = nodeWrapper.Label("node", fmt.Sprintf("node%d", i))
            }
        }
        node := nodeWrapper.Obj()
        allNodes = append(allNodes, node)
        if len(filteredNodes) < filteredNodesNum {
            filteredNodes = append(filteredNodes, node)
        }
    }
    // build pods
    for i := 0; i < existingPodsNum; i++ {
        podWrapper := st.MakePod().Name(fmt.Sprintf("pod%d", i)).Node(fmt.Sprintf("node%d", i%allNodesNum))
        // apply labels[0], labels[0,1], ..., labels[all] to each pod in turn
        for _, label := range labels[:i%len(labels)+1] {
            podWrapper = podWrapper.Label(label, "")
        }
        existingPods = append(existingPods, podWrapper.Obj())
    }
    return
}

func BenchmarkTestCalculateEvenPodsSpreadPriority(b *testing.B) {
    tests := []struct {
        name             string
@@ -521,7 +483,7 @@ func BenchmarkTestCalculateEvenPodsSpreadPriority(b *testing.B) {
    }
    for _, tt := range tests {
        b.Run(tt.name, func(b *testing.B) {
            existingPods, allNodes, filteredNodes := makeNodesAndPods(tt.pod, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum)
            existingPods, allNodes, filteredNodes := st.MakeNodesAndPods(tt.pod, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum)
            nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(existingPods, allNodes)
            b.ResetTimer()
            for i := 0; i < b.N; i++ {
@@ -6,6 +6,7 @@ go_library(
    name = "go_default_library",
    srcs = [
        "fake_lister.go",
        "workload_prep.go",
        "wrappers.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/scheduler/testing",
67 pkg/scheduler/testing/workload_prep.go (Normal file)
@@ -0,0 +1,67 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package testing

import (
    "fmt"

    "k8s.io/api/core/v1"
)

// MakeNodesAndPods serves as a testing helper for the EvenPodsSpread feature.
// It builds a fake cluster containing running Pods and Nodes.
// The numbers of Pods and Nodes are determined by the input arguments.
// The specs of Pods and Nodes are generated with the following rules:
// - If `pod` has "node" as a topologyKey, each generated node gets a unique label: "node: node<i>".
// - If `pod` has "zone" as a topologyKey, each generated node gets a rotating label: "zone: zone[0-9]".
// - Depending on the "labelSelector.MatchExpressions[0].Key" that `pod` carries in each topologySpreadConstraint,
//   each generated pod is labeled with "key1", "key1,key2", ..., "key1,key2,...,keyN" in a rotating manner.
func MakeNodesAndPods(pod *v1.Pod, existingPodsNum, allNodesNum, filteredNodesNum int) (existingPods []*v1.Pod, allNodes []*v1.Node, filteredNodes []*v1.Node) {
    var topologyKeys []string
    var labels []string
    zones := 10
    for _, c := range pod.Spec.TopologySpreadConstraints {
        topologyKeys = append(topologyKeys, c.TopologyKey)
        labels = append(labels, c.LabelSelector.MatchExpressions[0].Key)
    }
    // build nodes
    for i := 0; i < allNodesNum; i++ {
        nodeWrapper := MakeNode().Name(fmt.Sprintf("node%d", i))
        for _, tpKey := range topologyKeys {
            if tpKey == "zone" {
                nodeWrapper = nodeWrapper.Label("zone", fmt.Sprintf("zone%d", i%zones))
            } else if tpKey == "node" {
                nodeWrapper = nodeWrapper.Label("node", fmt.Sprintf("node%d", i))
            }
        }
        node := nodeWrapper.Obj()
        allNodes = append(allNodes, node)
        if len(filteredNodes) < filteredNodesNum {
            filteredNodes = append(filteredNodes, node)
        }
    }
    // build pods
    for i := 0; i < existingPodsNum; i++ {
        podWrapper := MakePod().Name(fmt.Sprintf("pod%d", i)).Node(fmt.Sprintf("node%d", i%allNodesNum))
        // apply labels[0], labels[0,1], ..., labels[all] to each pod in turn
        for _, label := range labels[:i%len(labels)+1] {
            podWrapper = podWrapper.Label(label, "")
        }
        existingPods = append(existingPods, podWrapper.Obj())
    }
    return
}
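For illustration only, here is a minimal sketch of how the new helper can be consumed from another package (the package name and function below are hypothetical; the st alias, the wrappers, and v1.DoNotSchedule are taken from the diff above):

package predicates // hypothetical call site, mirroring the benchmarks above

import (
    "k8s.io/api/core/v1"

    st "k8s.io/kubernetes/pkg/scheduler/testing"
)

// buildSpreadWorkload builds a small fake cluster for a pod carrying one hard
// "zone" constraint selecting "foo" and one hard "node" constraint selecting
// "bar", i.e. the two-constraints benchmark case at a much smaller scale.
func buildSpreadWorkload() ([]*v1.Pod, []*v1.Node, []*v1.Node) {
    pod := st.MakePod().Name("p").Label("foo", "").Label("bar", "").
        SpreadConstraint(1, "zone", v1.DoNotSchedule, st.MakeLabelSelector().Exists("foo").Obj()).
        SpreadConstraint(1, "node", v1.DoNotSchedule, st.MakeLabelSelector().Exists("bar").Obj()).
        Obj()
    // 100 existing pods over 20 nodes; the first 10 nodes are returned as "filtered".
    // Per the rules above, nodes get "zone: zone<i%10>" and "node: node<i>" labels,
    // and pods alternate between the label sets {"foo"} and {"foo", "bar"}.
    return st.MakeNodesAndPods(pod, 100, 20, 10)
}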