Merge pull request #89272 from alculquicondor/perf-mixed-pods

Add multiple init pods to scheduler perf test cases
Kubernetes Prow Robot 2020-03-25 00:29:02 -07:00 committed by GitHub
commit 7e7c4d1021
6 changed files with 192 additions and 125 deletions

View File

@@ -1,15 +1,15 @@
- template:
desc: SchedulingBasic
initPods:
podTemplatePath: config/pod-default.yaml
- podTemplatePath: config/pod-default.yaml
podsToSchedule:
podTemplatePath: config/pod-default.yaml
params:
- numNodes: 500
numInitPods: 500
numInitPods: [500]
numPodsToSchedule: 1000
- numNodes: 5000
numInitPods: 5000
numInitPods: [5000]
numPodsToSchedule: 1000
- template:
desc: SchedulingPodAntiAffinity
@@ -17,43 +17,43 @@
uniqueNodeLabelStrategy:
labelKey: kubernetes.io/hostname
initPods:
podTemplatePath: config/pod-with-pod-anti-affinity.yaml
- podTemplatePath: config/pod-with-pod-anti-affinity.yaml
podsToSchedule:
podTemplatePath: config/pod-with-pod-anti-affinity.yaml
params:
- numNodes: 500
numInitPods: 100
numInitPods: [100]
numPodsToSchedule: 400
- numNodes: 5000
numInitPods: 1000
numInitPods: [1000]
numPodsToSchedule: 1000
- template:
desc: SchedulingSecrets
initPods:
podTemplatePath: config/pod-with-secret-volume.yaml
- podTemplatePath: config/pod-with-secret-volume.yaml
podsToSchedule:
podTemplatePath: config/pod-with-secret-volume.yaml
params:
- numNodes: 500
numInitPods: 500
numInitPods: [500]
numPodsToSchedule: 1000
- numNodes: 5000
numInitPods: 5000
numInitPods: [5000]
numPodsToSchedule: 1000
- template:
desc: SchedulingInTreePVs
initPods:
persistentVolumeTemplatePath: config/pv-aws.yaml
- persistentVolumeTemplatePath: config/pv-aws.yaml
persistentVolumeClaimTemplatePath: config/pvc.yaml
podsToSchedule:
persistentVolumeTemplatePath: config/pv-aws.yaml
persistentVolumeClaimTemplatePath: config/pvc.yaml
params:
- numNodes: 500
numInitPods: 500
numInitPods: [500]
numPodsToSchedule: 1000
- numNodes: 5000
numInitPods: 5000
numInitPods: [5000]
numPodsToSchedule: 1000
- template:
desc: SchedulingMigratedInTreePVs
@@ -68,7 +68,7 @@
migratedPlugins:
- "kubernetes.io/aws-ebs"
initPods:
persistentVolumeTemplatePath: config/pv-aws.yaml
- persistentVolumeTemplatePath: config/pv-aws.yaml
persistentVolumeClaimTemplatePath: config/pvc.yaml
podsToSchedule:
persistentVolumeTemplatePath: config/pv-aws.yaml
@@ -78,10 +78,10 @@
CSIMigrationAWS: true
params:
- numNodes: 500
numInitPods: 500
numInitPods: [500]
numPodsToSchedule: 1000
- numNodes: 5000
numInitPods: 5000
numInitPods: [5000]
numPodsToSchedule: 1000
- template:
desc: SchedulingCSIPVs
@@ -94,17 +94,17 @@
ebs.csi.aws.com:
count: 39
initPods:
persistentVolumeTemplatePath: config/pv-csi.yaml
- persistentVolumeTemplatePath: config/pv-csi.yaml
persistentVolumeClaimTemplatePath: config/pvc.yaml
podsToSchedule:
persistentVolumeTemplatePath: config/pv-csi.yaml
persistentVolumeClaimTemplatePath: config/pvc.yaml
params:
- numNodes: 500
numInitPods: 500
numInitPods: [500]
numPodsToSchedule: 1000
- numNodes: 5000
numInitPods: 5000
numInitPods: [5000]
numPodsToSchedule: 1000
- template:
desc: SchedulingPodAffinity
@@ -114,15 +114,15 @@
labelKey: "failure-domain.beta.kubernetes.io/zone"
labelValue: "zone1"
initPods:
podTemplatePath: config/pod-with-pod-affinity.yaml
- podTemplatePath: config/pod-with-pod-affinity.yaml
podsToSchedule:
podTemplatePath: config/pod-with-pod-affinity.yaml
params:
- numNodes: 500
numInitPods: 500
numInitPods: [500]
numPodsToSchedule: 1000
- numNodes: 5000
numInitPods: 5000
numInitPods: [5000]
numPodsToSchedule: 1000
- template:
desc: SchedulingPreferredPodAffinity
@@ -130,15 +130,15 @@
uniqueNodeLabelStrategy:
labelKey: kubernetes.io/hostname
initPods:
podTemplatePath: config/pod-with-preferred-pod-affinity.yaml
- podTemplatePath: config/pod-with-preferred-pod-affinity.yaml
podsToSchedule:
podTemplatePath: config/pod-with-preferred-pod-affinity.yaml
params:
- numNodes: 500
numInitPods: 500
numInitPods: [500]
numPodsToSchedule: 1000
- numNodes: 5000
numInitPods: 5000
numInitPods: [5000]
numPodsToSchedule: 1000
- template:
desc: SchedulingPreferredPodAntiAffinity
@@ -146,15 +146,15 @@
uniqueNodeLabelStrategy:
labelKey: kubernetes.io/hostname
initPods:
podTemplatePath: config/pod-with-preferred-pod-anti-affinity.yaml
- podTemplatePath: config/pod-with-preferred-pod-anti-affinity.yaml
podsToSchedule:
podTemplatePath: config/pod-with-preferred-pod-anti-affinity.yaml
params:
- numNodes: 500
numInitPods: 500
numInitPods: [500]
numPodsToSchedule: 1000
- numNodes: 5000
numInitPods: 5000
numInitPods: [5000]
numPodsToSchedule: 1000
- template:
desc: SchedulingNodeAffinity
@@ -164,13 +164,30 @@
labelKey: "failure-domain.beta.kubernetes.io/zone"
labelValue: "zone1"
initPods:
podTemplatePath: config/pod-with-node-affinity.yaml
- podTemplatePath: config/pod-with-node-affinity.yaml
podsToSchedule:
podTemplatePath: config/pod-with-node-affinity.yaml
params:
- numNodes: 500
numInitPods: 500
numInitPods: [500]
numPodsToSchedule: 1000
- numNodes: 5000
numInitPods: 5000
numInitPods: [5000]
numPodsToSchedule: 1000
- template:
desc: MixedSchedulingBasePod
initPods:
- podTemplatePath: config/pod-default.yaml
- podTemplatePath: config/pod-with-pod-affinity.yaml
- podTemplatePath: config/pod-with-pod-anti-affinity.yaml
- podTemplatePath: config/pod-with-preferred-pod-affinity.yaml
- podTemplatePath: config/pod-with-preferred-pod-anti-affinity.yaml
podsToSchedule:
podTemplatePath: config/pod-default.yaml
params:
- numNodes: 500
numInitPods: [200, 200, 200, 200, 200]
numPodsToSchedule: 1000
- numNodes: 5000
numInitPods: [2000, 2000, 2000, 2000, 2000]
numPodsToSchedule: 1000
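For context, a minimal, self-contained Go sketch of how the list-valued numInitPods pairs positionally with the initPods templates: entry i sets the count for template i, and the per-template counts are summed to get the total number of init pods. The podCase and testParams names and fields come from this diff; the main() wrapper, the two-template slice, and the printed output are purely illustrative, not the real scheduler_perf harness.

package main

import "fmt"

// podCase and testParams are simplified stand-ins for the structs in this diff.
type podCase struct {
	PodTemplatePath string
	Num             int
}

type testParams struct {
	NumNodes          int
	NumInitPods       []int
	NumPodsToSchedule int
}

func main() {
	// Templates as in the new MixedSchedulingBasePod case (truncated to two here).
	initPods := []podCase{
		{PodTemplatePath: "config/pod-default.yaml"},
		{PodTemplatePath: "config/pod-with-pod-affinity.yaml"},
	}
	params := testParams{NumNodes: 500, NumInitPods: []int{200, 200}, NumPodsToSchedule: 1000}

	// Copy the template slice before setting counts, as parseTestCases does,
	// so each params entry gets its own counts.
	cases := append([]podCase(nil), initPods...)
	for i, n := range params.NumInitPods {
		cases[i].Num = n
	}

	// Sum the per-template counts to get the total used in the benchmark name
	// and in waitNumPodsScheduled.
	total := 0
	for _, c := range cases {
		total += c.Num
	}
	fmt.Printf("%d init pods across %d templates, %d pods to schedule\n",
		total, len(cases), params.NumPodsToSchedule)
}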

View File

@@ -3,14 +3,14 @@ kind: Pod
metadata:
generateName: affinity-pod-
labels:
foo: ""
color: blue
spec:
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
foo: ""
color: blue
topologyKey: failure-domain.beta.kubernetes.io/zone
namespaces: ["sched-test", "sched-setup"]
containers:

View File

@@ -4,7 +4,6 @@ metadata:
generateName: anti-affinity-pod-
labels:
color: green
name: test
spec:
affinity:
podAntiAffinity:

View File

@@ -3,7 +3,7 @@ kind: Pod
metadata:
generateName: preferred-affinity-pod-
labels:
foo: ""
color: red
spec:
affinity:
podAffinity:
@@ -11,7 +11,7 @@ spec:
- podAffinityTerm:
labelSelector:
matchLabels:
foo: ""
color: red
topologyKey: kubernetes.io/hostname
namespaces: ["sched-test", "sched-setup"]
weight: 1

View File

@@ -3,7 +3,7 @@ kind: Pod
metadata:
generateName: preferred-anti-affinity-pod-
labels:
foo: ""
color: yellow
spec:
affinity:
podAntiAffinity:
@@ -11,7 +11,7 @@ spec:
- podAffinityTerm:
labelSelector:
matchLabels:
foo: ""
color: yellow
topologyKey: kubernetes.io/hostname
namespaces: ["sched-test", "sched-setup"]
weight: 1

View File

@@ -65,7 +65,7 @@ type testCase struct {
// configures nodes in the cluster
Nodes nodeCase
// configures pods in the cluster before running the tests
InitPods podCase
InitPods []podCase
// pods to be scheduled during the test.
PodsToSchedule podCase
// optional, feature gates to set before running the test
@@ -100,7 +100,7 @@ type simpleTestCases struct {
type testParams struct {
NumNodes int
NumInitPods int
NumInitPods []int
NumPodsToSchedule int
}
@@ -111,10 +111,17 @@
func BenchmarkPerfScheduling(b *testing.B) {
dataItems := DataItems{Version: "v1"}
tests := getSimpleTestCases(configFile)
tests, err := parseTestCases(configFile)
if err != nil {
b.Fatal(err)
}
for _, test := range tests {
name := fmt.Sprintf("%v/%vNodes/%vInitPods/%vPodsToSchedule", test.Desc, test.Nodes.Num, test.InitPods.Num, test.PodsToSchedule.Num)
initPods := 0
for _, p := range test.InitPods {
initPods += p.Num
}
name := fmt.Sprintf("%v/%vNodes/%vInitPods/%vPodsToSchedule", test.Desc, test.Nodes.Num, initPods, test.PodsToSchedule.Num)
b.Run(name, func(b *testing.B) {
for feature, flag := range test.FeatureGates {
defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, feature, flag)()
@@ -131,14 +138,25 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
finalFunc, podInformer, clientset := mustSetupScheduler()
defer finalFunc()
nodePreparer := getNodePreparer(test.Nodes, clientset)
nodePreparer, err := getNodePreparer(test.Nodes, clientset)
if err != nil {
b.Fatal(err)
}
if err := nodePreparer.PrepareNodes(); err != nil {
klog.Fatalf("%v", err)
b.Fatal(err)
}
defer nodePreparer.CleanupNodes()
createPods(setupNamespace, test.InitPods, clientset)
waitNumPodsScheduled(test.InitPods.Num, podInformer)
total := 0
for _, p := range test.InitPods {
if err := createPods(setupNamespace, p, clientset); err != nil {
b.Fatal(err)
}
total += p.Num
}
if err := waitNumPodsScheduled(b, total, podInformer); err != nil {
b.Fatal(err)
}
// start benchmark
b.ResetTimer()
@@ -151,8 +169,12 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
}
// Schedule the main workload
createPods(testNamespace, test.PodsToSchedule, clientset)
waitNumPodsScheduled(test.InitPods.Num+test.PodsToSchedule.Num, podInformer)
if err := createPods(testNamespace, test.PodsToSchedule, clientset); err != nil {
b.Fatal(err)
}
if err := waitNumPodsScheduled(b, total+test.PodsToSchedule.Num, podInformer); err != nil {
b.Fatal(err)
}
close(stopCh)
// Note: without this line we're taking the overhead of defer() into account.
@@ -165,18 +187,19 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
return dataItems
}
func waitNumPodsScheduled(num int, podInformer coreinformers.PodInformer) {
func waitNumPodsScheduled(b *testing.B, num int, podInformer coreinformers.PodInformer) error {
for {
scheduled, err := getScheduledPods(podInformer)
if err != nil {
klog.Fatalf("%v", err)
return err
}
if len(scheduled) >= num {
break
}
klog.Infof("got %d existing pods, required: %d", len(scheduled), num)
klog.Infof("%s: got %d existing pods, required: %d", b.Name(), len(scheduled), num)
time.Sleep(1 * time.Second)
}
return nil
}
func getTestDataCollectors(tc testCase, podInformer coreinformers.PodInformer, b *testing.B) []testDataCollector {
@@ -189,7 +212,7 @@ func getTestDataCollectors(tc testCase, podInformer coreinformers.PodInformer, b
return collectors
}
func getNodePreparer(nc nodeCase, clientset clientset.Interface) testutils.TestNodePreparer {
func getNodePreparer(nc nodeCase, clientset clientset.Interface) (testutils.TestNodePreparer, error) {
var nodeStrategy testutils.PrepareNodeStrategy = &testutils.TrivialNodePrepareStrategy{}
if nc.NodeAllocatableStrategy != nil {
nodeStrategy = nc.NodeAllocatableStrategy
@@ -200,91 +223,119 @@ func getNodePreparer(nc nodeCase, clientset clientset.Interface) testutils.TestN
}
if nc.NodeTemplatePath != nil {
node, err := getNodeSpecFromFile(nc.NodeTemplatePath)
if err != nil {
return nil, err
}
return framework.NewIntegrationTestNodePreparerWithNodeSpec(
clientset,
[]testutils.CountToStrategy{{Count: nc.Num, Strategy: nodeStrategy}},
getNodeSpecFromFile(nc.NodeTemplatePath),
)
node,
), nil
}
return framework.NewIntegrationTestNodePreparer(
clientset,
[]testutils.CountToStrategy{{Count: nc.Num, Strategy: nodeStrategy}},
"scheduler-perf-",
)
), nil
}
func createPods(ns string, pc podCase, clientset clientset.Interface) {
strategy := getPodStrategy(pc)
func createPods(ns string, pc podCase, clientset clientset.Interface) error {
strategy, err := getPodStrategy(pc)
if err != nil {
return err
}
config := testutils.NewTestPodCreatorConfig()
config.AddStrategy(ns, pc.Num, strategy)
podCreator := testutils.NewTestPodCreator(clientset, config)
podCreator.CreatePods()
return podCreator.CreatePods()
}
func getPodStrategy(pc podCase) testutils.TestPodCreateStrategy {
func getPodStrategy(pc podCase) (testutils.TestPodCreateStrategy, error) {
basePod := makeBasePod()
if pc.PodTemplatePath != nil {
basePod = getPodSpecFromFile(pc.PodTemplatePath)
var err error
basePod, err = getPodSpecFromFile(pc.PodTemplatePath)
if err != nil {
return nil, err
}
}
if pc.PersistentVolumeClaimTemplatePath == nil {
return testutils.NewCustomCreatePodStrategy(basePod)
return testutils.NewCustomCreatePodStrategy(basePod), nil
}
pvTemplate := getPersistentVolumeSpecFromFile(pc.PersistentVolumeTemplatePath)
pvcTemplate := getPersistentVolumeClaimSpecFromFile(pc.PersistentVolumeClaimTemplatePath)
return testutils.NewCreatePodWithPersistentVolumeStrategy(pvcTemplate, getCustomVolumeFactory(pvTemplate), basePod)
pvTemplate, err := getPersistentVolumeSpecFromFile(pc.PersistentVolumeTemplatePath)
if err != nil {
return nil, err
}
pvcTemplate, err := getPersistentVolumeClaimSpecFromFile(pc.PersistentVolumeClaimTemplatePath)
if err != nil {
return nil, err
}
return testutils.NewCreatePodWithPersistentVolumeStrategy(pvcTemplate, getCustomVolumeFactory(pvTemplate), basePod), nil
}
func getSimpleTestCases(path string) []testCase {
func parseTestCases(path string) ([]testCase, error) {
var simpleTests []simpleTestCases
getSpecFromFile(&path, &simpleTests)
if err := getSpecFromFile(&path, &simpleTests); err != nil {
return nil, fmt.Errorf("parsing test cases: %v", err)
}
testCases := make([]testCase, 0)
for _, s := range simpleTests {
testCase := s.Template
for _, p := range s.Params {
testCase.Nodes.Num = p.NumNodes
testCase.InitPods.Num = p.NumInitPods
testCase.InitPods = append([]podCase(nil), testCase.InitPods...)
for i, v := range p.NumInitPods {
testCase.InitPods[i].Num = v
}
testCase.PodsToSchedule.Num = p.NumPodsToSchedule
testCases = append(testCases, testCase)
}
}
return testCases
return testCases, nil
}
func getNodeSpecFromFile(path *string) *v1.Node {
func getNodeSpecFromFile(path *string) (*v1.Node, error) {
nodeSpec := &v1.Node{}
getSpecFromFile(path, nodeSpec)
return nodeSpec
if err := getSpecFromFile(path, nodeSpec); err != nil {
return nil, fmt.Errorf("parsing Node: %v", err)
}
return nodeSpec, nil
}
func getPodSpecFromFile(path *string) *v1.Pod {
func getPodSpecFromFile(path *string) (*v1.Pod, error) {
podSpec := &v1.Pod{}
getSpecFromFile(path, podSpec)
return podSpec
if err := getSpecFromFile(path, podSpec); err != nil {
return nil, fmt.Errorf("parsing Pod: %v", err)
}
return podSpec, nil
}
func getPersistentVolumeSpecFromFile(path *string) *v1.PersistentVolume {
func getPersistentVolumeSpecFromFile(path *string) (*v1.PersistentVolume, error) {
persistentVolumeSpec := &v1.PersistentVolume{}
getSpecFromFile(path, persistentVolumeSpec)
return persistentVolumeSpec
if err := getSpecFromFile(path, persistentVolumeSpec); err != nil {
return nil, fmt.Errorf("parsing PersistentVolume: %v", err)
}
return persistentVolumeSpec, nil
}
func getPersistentVolumeClaimSpecFromFile(path *string) *v1.PersistentVolumeClaim {
func getPersistentVolumeClaimSpecFromFile(path *string) (*v1.PersistentVolumeClaim, error) {
persistentVolumeClaimSpec := &v1.PersistentVolumeClaim{}
getSpecFromFile(path, persistentVolumeClaimSpec)
return persistentVolumeClaimSpec
if err := getSpecFromFile(path, persistentVolumeClaimSpec); err != nil {
return nil, fmt.Errorf("parsing PersistentVolumeClaim: %v", err)
}
return persistentVolumeClaimSpec, nil
}
func getSpecFromFile(path *string, spec interface{}) {
func getSpecFromFile(path *string, spec interface{}) error {
bytes, err := ioutil.ReadFile(*path)
if err != nil {
klog.Fatalf("%v", err)
}
if err := yaml.Unmarshal(bytes, spec); err != nil {
klog.Fatalf("%v", err)
return err
}
return yaml.Unmarshal(bytes, spec)
}
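Below is a minimal sketch of the error-propagation pattern this diff introduces: spec loaders return an error instead of calling klog.Fatalf, and the benchmark decides how to fail via b.Fatal. The names getSpecFromFile and getPodSpecFromFile mirror the diff; the package name, the podSpec stand-in, the BenchmarkLoadTemplate wrapper, the file path, and the sigs.k8s.io/yaml import are assumptions for illustration and may not match the real scheduler_perf file.

package perf

import (
	"fmt"
	"io/ioutil"
	"testing"

	"sigs.k8s.io/yaml" // assumed YAML library; the real file may import a different one
)

// podSpec is an illustrative stand-in for *v1.Pod.
type podSpec struct {
	Kind string `json:"kind"`
}

// getSpecFromFile returns an error instead of killing the whole process on bad input.
func getSpecFromFile(path string, spec interface{}) error {
	bytes, err := ioutil.ReadFile(path)
	if err != nil {
		return err
	}
	return yaml.Unmarshal(bytes, spec)
}

func getPodSpecFromFile(path string) (*podSpec, error) {
	pod := &podSpec{}
	if err := getSpecFromFile(path, pod); err != nil {
		return nil, fmt.Errorf("parsing Pod: %v", err)
	}
	return pod, nil
}

func BenchmarkLoadTemplate(b *testing.B) {
	// Fail only this benchmark on a parse error; other cases keep running.
	pod, err := getPodSpecFromFile("config/pod-default.yaml")
	if err != nil {
		b.Fatal(err)
	}
	_ = pod
}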
func getCustomVolumeFactory(pvTemplate *v1.PersistentVolume) func(id int) *v1.PersistentVolume {