Merge pull request #89272 from alculquicondor/perf-mixed-pods

Add multiple init pods to scheduler perf test cases

Commit: 7e7c4d1021
@@ -1,60 +1,60 @@
 - template:
     desc: SchedulingBasic
     initPods:
-      podTemplatePath: config/pod-default.yaml
+    - podTemplatePath: config/pod-default.yaml
     podsToSchedule:
       podTemplatePath: config/pod-default.yaml
   params:
   - numNodes: 500
-    numInitPods: 500
+    numInitPods: [500]
     numPodsToSchedule: 1000
   - numNodes: 5000
-    numInitPods: 5000
+    numInitPods: [5000]
     numPodsToSchedule: 1000
 - template:
     desc: SchedulingPodAntiAffinity
     nodes:
       uniqueNodeLabelStrategy:
         labelKey: kubernetes.io/hostname
     initPods:
-      podTemplatePath: config/pod-with-pod-anti-affinity.yaml
+    - podTemplatePath: config/pod-with-pod-anti-affinity.yaml
     podsToSchedule:
       podTemplatePath: config/pod-with-pod-anti-affinity.yaml
   params:
   - numNodes: 500
-    numInitPods: 100
+    numInitPods: [100]
     numPodsToSchedule: 400
   - numNodes: 5000
-    numInitPods: 1000
+    numInitPods: [1000]
     numPodsToSchedule: 1000
 - template:
     desc: SchedulingSecrets
     initPods:
-      podTemplatePath: config/pod-with-secret-volume.yaml
+    - podTemplatePath: config/pod-with-secret-volume.yaml
     podsToSchedule:
       podTemplatePath: config/pod-with-secret-volume.yaml
   params:
   - numNodes: 500
-    numInitPods: 500
+    numInitPods: [500]
     numPodsToSchedule: 1000
   - numNodes: 5000
-    numInitPods: 5000
+    numInitPods: [5000]
     numPodsToSchedule: 1000
 - template:
     desc: SchedulingInTreePVs
     initPods:
-      persistentVolumeTemplatePath: config/pv-aws.yaml
+    - persistentVolumeTemplatePath: config/pv-aws.yaml
       persistentVolumeClaimTemplatePath: config/pvc.yaml
     podsToSchedule:
       persistentVolumeTemplatePath: config/pv-aws.yaml
       persistentVolumeClaimTemplatePath: config/pvc.yaml
   params:
   - numNodes: 500
-    numInitPods: 500
+    numInitPods: [500]
     numPodsToSchedule: 1000
   - numNodes: 5000
-    numInitPods: 5000
+    numInitPods: [5000]
     numPodsToSchedule: 1000
 - template:
     desc: SchedulingMigratedInTreePVs
     nodes:
@@ -66,9 +66,9 @@
           ebs.csi.aws.com:
             count: 39
         migratedPlugins:
         - "kubernetes.io/aws-ebs"
     initPods:
-      persistentVolumeTemplatePath: config/pv-aws.yaml
+    - persistentVolumeTemplatePath: config/pv-aws.yaml
       persistentVolumeClaimTemplatePath: config/pvc.yaml
     podsToSchedule:
       persistentVolumeTemplatePath: config/pv-aws.yaml
@@ -77,12 +77,12 @@
       CSIMigration: true
       CSIMigrationAWS: true
   params:
   - numNodes: 500
-    numInitPods: 500
+    numInitPods: [500]
     numPodsToSchedule: 1000
   - numNodes: 5000
-    numInitPods: 5000
+    numInitPods: [5000]
     numPodsToSchedule: 1000
 - template:
     desc: SchedulingCSIPVs
     nodes:
@@ -94,18 +94,18 @@
           ebs.csi.aws.com:
             count: 39
     initPods:
-      persistentVolumeTemplatePath: config/pv-csi.yaml
+    - persistentVolumeTemplatePath: config/pv-csi.yaml
       persistentVolumeClaimTemplatePath: config/pvc.yaml
     podsToSchedule:
       persistentVolumeTemplatePath: config/pv-csi.yaml
       persistentVolumeClaimTemplatePath: config/pvc.yaml
   params:
   - numNodes: 500
-    numInitPods: 500
+    numInitPods: [500]
     numPodsToSchedule: 1000
   - numNodes: 5000
-    numInitPods: 5000
+    numInitPods: [5000]
     numPodsToSchedule: 1000
 - template:
     desc: SchedulingPodAffinity
     nodes:
@@ -114,48 +114,48 @@
         labelKey: "failure-domain.beta.kubernetes.io/zone"
         labelValue: "zone1"
     initPods:
-      podTemplatePath: config/pod-with-pod-affinity.yaml
+    - podTemplatePath: config/pod-with-pod-affinity.yaml
     podsToSchedule:
       podTemplatePath: config/pod-with-pod-affinity.yaml
   params:
   - numNodes: 500
-    numInitPods: 500
+    numInitPods: [500]
     numPodsToSchedule: 1000
   - numNodes: 5000
-    numInitPods: 5000
+    numInitPods: [5000]
     numPodsToSchedule: 1000
 - template:
     desc: SchedulingPreferredPodAffinity
     nodes:
       uniqueNodeLabelStrategy:
         labelKey: kubernetes.io/hostname
     initPods:
-      podTemplatePath: config/pod-with-preferred-pod-affinity.yaml
+    - podTemplatePath: config/pod-with-preferred-pod-affinity.yaml
     podsToSchedule:
       podTemplatePath: config/pod-with-preferred-pod-affinity.yaml
   params:
   - numNodes: 500
-    numInitPods: 500
+    numInitPods: [500]
     numPodsToSchedule: 1000
   - numNodes: 5000
-    numInitPods: 5000
+    numInitPods: [5000]
     numPodsToSchedule: 1000
 - template:
     desc: SchedulingPreferredPodAntiAffinity
     nodes:
       uniqueNodeLabelStrategy:
         labelKey: kubernetes.io/hostname
     initPods:
-      podTemplatePath: config/pod-with-preferred-pod-anti-affinity.yaml
+    - podTemplatePath: config/pod-with-preferred-pod-anti-affinity.yaml
     podsToSchedule:
       podTemplatePath: config/pod-with-preferred-pod-anti-affinity.yaml
   params:
   - numNodes: 500
-    numInitPods: 500
+    numInitPods: [500]
     numPodsToSchedule: 1000
   - numNodes: 5000
-    numInitPods: 5000
+    numInitPods: [5000]
     numPodsToSchedule: 1000
 - template:
     desc: SchedulingNodeAffinity
     nodes:
@@ -164,13 +164,30 @@
         labelKey: "failure-domain.beta.kubernetes.io/zone"
         labelValue: "zone1"
     initPods:
-      podTemplatePath: config/pod-with-node-affinity.yaml
+    - podTemplatePath: config/pod-with-node-affinity.yaml
     podsToSchedule:
       podTemplatePath: config/pod-with-node-affinity.yaml
   params:
   - numNodes: 500
-    numInitPods: 500
+    numInitPods: [500]
     numPodsToSchedule: 1000
   - numNodes: 5000
-    numInitPods: 5000
+    numInitPods: [5000]
     numPodsToSchedule: 1000
+- template:
+    desc: MixedSchedulingBasePod
+    initPods:
+    - podTemplatePath: config/pod-default.yaml
+    - podTemplatePath: config/pod-with-pod-affinity.yaml
+    - podTemplatePath: config/pod-with-pod-anti-affinity.yaml
+    - podTemplatePath: config/pod-with-preferred-pod-affinity.yaml
+    - podTemplatePath: config/pod-with-preferred-pod-anti-affinity.yaml
+    podsToSchedule:
+      podTemplatePath: config/pod-default.yaml
+  params:
+  - numNodes: 500
+    numInitPods: [200, 200, 200, 200, 200]
+    numPodsToSchedule: 1000
+  - numNodes: 5000
+    numInitPods: [2000, 2000, 2000, 2000, 2000]
+    numPodsToSchedule: 1000
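The list form pairs element-wise with the initPods templates: the i-th count in numInitPods sets the number of pods created from the i-th template, which is how MixedSchedulingBasePod seeds five pod shapes at once. A minimal sketch of that pairing with cut-down stand-in types (not the real harness structs; sigs.k8s.io/yaml is an assumption about the YAML library):

package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

// podCase and params are simplified stand-ins for the harness types.
type podCase struct {
	PodTemplatePath string `json:"podTemplatePath"`
	Num             int
}

type params struct {
	NumNodes          int   `json:"numNodes"`
	NumInitPods       []int `json:"numInitPods"`
	NumPodsToSchedule int   `json:"numPodsToSchedule"`
}

func main() {
	var initPods []podCase
	data := []byte(`
- podTemplatePath: config/pod-default.yaml
- podTemplatePath: config/pod-with-pod-affinity.yaml
`)
	if err := yaml.Unmarshal(data, &initPods); err != nil {
		panic(err)
	}
	p := params{NumNodes: 500, NumInitPods: []int{200, 200}, NumPodsToSchedule: 1000}
	// The i-th count applies to the i-th init-pod template.
	for i, v := range p.NumInitPods {
		initPods[i].Num = v
	}
	for _, pc := range initPods {
		fmt.Printf("%s -> %d pods\n", pc.PodTemplatePath, pc.Num)
	}
}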
@@ -3,14 +3,14 @@ kind: Pod
 metadata:
   generateName: affinity-pod-
   labels:
-    foo: ""
+    color: blue
 spec:
   affinity:
     podAffinity:
       requiredDuringSchedulingIgnoredDuringExecution:
       - labelSelector:
           matchLabels:
-            foo: ""
+            color: blue
         topologyKey: failure-domain.beta.kubernetes.io/zone
         namespaces: ["sched-test", "sched-setup"]
   containers:
@@ -4,7 +4,6 @@ metadata:
   generateName: anti-affinity-pod-
   labels:
     color: green
-    name: test
 spec:
   affinity:
     podAntiAffinity:
@@ -3,7 +3,7 @@ kind: Pod
 metadata:
   generateName: preferred-affinity-pod-
   labels:
-    foo: ""
+    color: red
 spec:
   affinity:
     podAffinity:
@@ -11,7 +11,7 @@ spec:
       - podAffinityTerm:
           labelSelector:
             matchLabels:
-              foo: ""
+              color: red
           topologyKey: kubernetes.io/hostname
           namespaces: ["sched-test", "sched-setup"]
         weight: 1
@@ -3,7 +3,7 @@ kind: Pod
 metadata:
   generateName: preferred-anti-affinity-pod-
   labels:
-    foo: ""
+    color: yellow
 spec:
   affinity:
     podAntiAffinity:
@@ -11,7 +11,7 @@ spec:
       - podAffinityTerm:
           labelSelector:
             matchLabels:
-              foo: ""
+              color: yellow
           topologyKey: kubernetes.io/hostname
           namespaces: ["sched-test", "sched-setup"]
         weight: 1
@@ -65,7 +65,7 @@ type testCase struct {
 	// configures nodes in the cluster
 	Nodes nodeCase
 	// configures pods in the cluster before running the tests
-	InitPods podCase
+	InitPods []podCase
 	// pods to be scheduled during the test.
 	PodsToSchedule podCase
 	// optional, feature gates to set before running the test
@@ -100,7 +100,7 @@ type simpleTestCases struct {
 
 type testParams struct {
 	NumNodes          int
-	NumInitPods       int
+	NumInitPods       []int
 	NumPodsToSchedule int
 }
 
@@ -111,10 +111,17 @@ type testDataCollector interface {
 
 func BenchmarkPerfScheduling(b *testing.B) {
 	dataItems := DataItems{Version: "v1"}
-	tests := getSimpleTestCases(configFile)
+	tests, err := parseTestCases(configFile)
+	if err != nil {
+		b.Fatal(err)
+	}
 
 	for _, test := range tests {
-		name := fmt.Sprintf("%v/%vNodes/%vInitPods/%vPodsToSchedule", test.Desc, test.Nodes.Num, test.InitPods.Num, test.PodsToSchedule.Num)
+		initPods := 0
+		for _, p := range test.InitPods {
+			initPods += p.Num
+		}
+		name := fmt.Sprintf("%v/%vNodes/%vInitPods/%vPodsToSchedule", test.Desc, test.Nodes.Num, initPods, test.PodsToSchedule.Num)
 		b.Run(name, func(b *testing.B) {
 			for feature, flag := range test.FeatureGates {
 				defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, feature, flag)()
@@ -131,14 +138,25 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
 	finalFunc, podInformer, clientset := mustSetupScheduler()
 	defer finalFunc()
 
-	nodePreparer := getNodePreparer(test.Nodes, clientset)
+	nodePreparer, err := getNodePreparer(test.Nodes, clientset)
+	if err != nil {
+		b.Fatal(err)
+	}
 	if err := nodePreparer.PrepareNodes(); err != nil {
-		klog.Fatalf("%v", err)
+		b.Fatal(err)
 	}
 	defer nodePreparer.CleanupNodes()
 
-	createPods(setupNamespace, test.InitPods, clientset)
-	waitNumPodsScheduled(test.InitPods.Num, podInformer)
+	total := 0
+	for _, p := range test.InitPods {
+		if err := createPods(setupNamespace, p, clientset); err != nil {
+			b.Fatal(err)
+		}
+		total += p.Num
+	}
+	if err := waitNumPodsScheduled(b, total, podInformer); err != nil {
+		b.Fatal(err)
+	}
 
 	// start benchmark
 	b.ResetTimer()
@@ -151,8 +169,12 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
 	}
 
 	// Schedule the main workload
-	createPods(testNamespace, test.PodsToSchedule, clientset)
-	waitNumPodsScheduled(test.InitPods.Num+test.PodsToSchedule.Num, podInformer)
+	if err := createPods(testNamespace, test.PodsToSchedule, clientset); err != nil {
+		b.Fatal(err)
+	}
+	if err := waitNumPodsScheduled(b, total+test.PodsToSchedule.Num, podInformer); err != nil {
+		b.Fatal(err)
+	}
 
 	close(stopCh)
 	// Note: without this line we're taking the overhead of defer() into account.
@@ -165,18 +187,19 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
 	return dataItems
 }
 
-func waitNumPodsScheduled(num int, podInformer coreinformers.PodInformer) {
+func waitNumPodsScheduled(b *testing.B, num int, podInformer coreinformers.PodInformer) error {
 	for {
 		scheduled, err := getScheduledPods(podInformer)
 		if err != nil {
-			klog.Fatalf("%v", err)
+			return err
 		}
 		if len(scheduled) >= num {
 			break
 		}
-		klog.Infof("got %d existing pods, required: %d", len(scheduled), num)
+		klog.Infof("%s: got %d existing pods, required: %d", b.Name(), len(scheduled), num)
 		time.Sleep(1 * time.Second)
 	}
+	return nil
 }
 
 func getTestDataCollectors(tc testCase, podInformer coreinformers.PodInformer, b *testing.B) []testDataCollector {
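waitNumPodsScheduled now surfaces failures to the caller instead of killing the whole process, though the loop itself still polls without an upper bound. A hedged sketch of the same wait with an explicit deadline, using wait.PollImmediate from k8s.io/apimachinery/pkg/util/wait (an alternative shape, not what this patch does; getScheduledPods, the informer, and the ten-minute timeout are assumptions):

// waitNumPodsScheduledBounded behaves like waitNumPodsScheduled but gives up
// after a deadline instead of polling forever.
func waitNumPodsScheduledBounded(b *testing.B, num int, podInformer coreinformers.PodInformer) error {
	return wait.PollImmediate(1*time.Second, 10*time.Minute, func() (bool, error) {
		scheduled, err := getScheduledPods(podInformer)
		if err != nil {
			return false, err
		}
		if len(scheduled) >= num {
			return true, nil
		}
		klog.Infof("%s: got %d existing pods, required: %d", b.Name(), len(scheduled), num)
		return false, nil
	})
}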
@@ -189,7 +212,7 @@ func getTestDataCollectors(tc testCase, podInformer coreinformers.PodInformer, b
 	return collectors
 }
 
-func getNodePreparer(nc nodeCase, clientset clientset.Interface) testutils.TestNodePreparer {
+func getNodePreparer(nc nodeCase, clientset clientset.Interface) (testutils.TestNodePreparer, error) {
 	var nodeStrategy testutils.PrepareNodeStrategy = &testutils.TrivialNodePrepareStrategy{}
 	if nc.NodeAllocatableStrategy != nil {
 		nodeStrategy = nc.NodeAllocatableStrategy
@@ -200,91 +223,119 @@ func getNodePreparer(nc nodeCase, clientset clientset.Interface) testutils.TestN
 	}
 
 	if nc.NodeTemplatePath != nil {
+		node, err := getNodeSpecFromFile(nc.NodeTemplatePath)
+		if err != nil {
+			return nil, err
+		}
 		return framework.NewIntegrationTestNodePreparerWithNodeSpec(
 			clientset,
 			[]testutils.CountToStrategy{{Count: nc.Num, Strategy: nodeStrategy}},
-			getNodeSpecFromFile(nc.NodeTemplatePath),
-		)
+			node,
+		), nil
 	}
 	return framework.NewIntegrationTestNodePreparer(
 		clientset,
 		[]testutils.CountToStrategy{{Count: nc.Num, Strategy: nodeStrategy}},
 		"scheduler-perf-",
-	)
+	), nil
 }
 
-func createPods(ns string, pc podCase, clientset clientset.Interface) {
-	strategy := getPodStrategy(pc)
+func createPods(ns string, pc podCase, clientset clientset.Interface) error {
+	strategy, err := getPodStrategy(pc)
+	if err != nil {
+		return err
+	}
 	config := testutils.NewTestPodCreatorConfig()
 	config.AddStrategy(ns, pc.Num, strategy)
 	podCreator := testutils.NewTestPodCreator(clientset, config)
-	podCreator.CreatePods()
+	return podCreator.CreatePods()
 }
 
-func getPodStrategy(pc podCase) testutils.TestPodCreateStrategy {
+func getPodStrategy(pc podCase) (testutils.TestPodCreateStrategy, error) {
 	basePod := makeBasePod()
 	if pc.PodTemplatePath != nil {
-		basePod = getPodSpecFromFile(pc.PodTemplatePath)
+		var err error
+		basePod, err = getPodSpecFromFile(pc.PodTemplatePath)
+		if err != nil {
+			return nil, err
+		}
 	}
 	if pc.PersistentVolumeClaimTemplatePath == nil {
-		return testutils.NewCustomCreatePodStrategy(basePod)
+		return testutils.NewCustomCreatePodStrategy(basePod), nil
 	}
 
-	pvTemplate := getPersistentVolumeSpecFromFile(pc.PersistentVolumeTemplatePath)
-	pvcTemplate := getPersistentVolumeClaimSpecFromFile(pc.PersistentVolumeClaimTemplatePath)
-	return testutils.NewCreatePodWithPersistentVolumeStrategy(pvcTemplate, getCustomVolumeFactory(pvTemplate), basePod)
+	pvTemplate, err := getPersistentVolumeSpecFromFile(pc.PersistentVolumeTemplatePath)
+	if err != nil {
+		return nil, err
+	}
+	pvcTemplate, err := getPersistentVolumeClaimSpecFromFile(pc.PersistentVolumeClaimTemplatePath)
+	if err != nil {
+		return nil, err
+	}
+	return testutils.NewCreatePodWithPersistentVolumeStrategy(pvcTemplate, getCustomVolumeFactory(pvTemplate), basePod), nil
 }
 
-func getSimpleTestCases(path string) []testCase {
+func parseTestCases(path string) ([]testCase, error) {
 	var simpleTests []simpleTestCases
-	getSpecFromFile(&path, &simpleTests)
+	if err := getSpecFromFile(&path, &simpleTests); err != nil {
+		return nil, fmt.Errorf("parsing test cases: %v", err)
+	}
 
 	testCases := make([]testCase, 0)
 	for _, s := range simpleTests {
 		testCase := s.Template
 		for _, p := range s.Params {
 			testCase.Nodes.Num = p.NumNodes
-			testCase.InitPods.Num = p.NumInitPods
+			testCase.InitPods = append([]podCase(nil), testCase.InitPods...)
+			for i, v := range p.NumInitPods {
+				testCase.InitPods[i].Num = v
+			}
 			testCase.PodsToSchedule.Num = p.NumPodsToSchedule
 			testCases = append(testCases, testCase)
 		}
 	}
 
-	return testCases
+	return testCases, nil
 }
 
-func getNodeSpecFromFile(path *string) *v1.Node {
+func getNodeSpecFromFile(path *string) (*v1.Node, error) {
 	nodeSpec := &v1.Node{}
-	getSpecFromFile(path, nodeSpec)
-	return nodeSpec
+	if err := getSpecFromFile(path, nodeSpec); err != nil {
+		return nil, fmt.Errorf("parsing Node: %v", err)
+	}
+	return nodeSpec, nil
 }
 
-func getPodSpecFromFile(path *string) *v1.Pod {
+func getPodSpecFromFile(path *string) (*v1.Pod, error) {
 	podSpec := &v1.Pod{}
-	getSpecFromFile(path, podSpec)
-	return podSpec
+	if err := getSpecFromFile(path, podSpec); err != nil {
+		return nil, fmt.Errorf("parsing Pod: %v", err)
+	}
+	return podSpec, nil
 }
 
-func getPersistentVolumeSpecFromFile(path *string) *v1.PersistentVolume {
+func getPersistentVolumeSpecFromFile(path *string) (*v1.PersistentVolume, error) {
 	persistentVolumeSpec := &v1.PersistentVolume{}
-	getSpecFromFile(path, persistentVolumeSpec)
-	return persistentVolumeSpec
+	if err := getSpecFromFile(path, persistentVolumeSpec); err != nil {
+		return nil, fmt.Errorf("parsing PersistentVolume: %v", err)
+	}
+	return persistentVolumeSpec, nil
 }
 
-func getPersistentVolumeClaimSpecFromFile(path *string) *v1.PersistentVolumeClaim {
+func getPersistentVolumeClaimSpecFromFile(path *string) (*v1.PersistentVolumeClaim, error) {
 	persistentVolumeClaimSpec := &v1.PersistentVolumeClaim{}
-	getSpecFromFile(path, persistentVolumeClaimSpec)
-	return persistentVolumeClaimSpec
+	if err := getSpecFromFile(path, persistentVolumeClaimSpec); err != nil {
+		return nil, fmt.Errorf("parsing PersistentVolumeClaim: %v", err)
+	}
+	return persistentVolumeClaimSpec, nil
 }
 
-func getSpecFromFile(path *string, spec interface{}) {
+func getSpecFromFile(path *string, spec interface{}) error {
 	bytes, err := ioutil.ReadFile(*path)
 	if err != nil {
-		klog.Fatalf("%v", err)
-	}
-	if err := yaml.Unmarshal(bytes, spec); err != nil {
-		klog.Fatalf("%v", err)
+		return err
 	}
+	return yaml.Unmarshal(bytes, spec)
 }
 
 func getCustomVolumeFactory(pvTemplate *v1.PersistentVolume) func(id int) *v1.PersistentVolume {
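The new parse helpers format the underlying error with %v, which flattens it to text. A small sketch of one helper using %w instead, so callers could still inspect the cause with errors.Is or errors.As (a hypothetical refinement requiring Go 1.13+, not part of this diff):

// getPodSpecFromFileWrapped is a hypothetical variant that wraps rather than
// formats the parse error, preserving the error chain for callers.
func getPodSpecFromFileWrapped(path *string) (*v1.Pod, error) {
	podSpec := &v1.Pod{}
	if err := getSpecFromFile(path, podSpec); err != nil {
		return nil, fmt.Errorf("parsing Pod: %w", err)
	}
	return podSpec, nil
}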