Define workload specs by YAML
commit 6511b603c9
parent 30a5db136f
test/integration/framework/perf_utils.go
@@ -36,6 +36,7 @@ type IntegrationTestNodePreparer struct {
 	client          clientset.Interface
 	countToStrategy []testutils.CountToStrategy
 	nodeNamePrefix  string
+	nodeSpec        *v1.Node
 }
 
 // NewIntegrationTestNodePreparer creates an IntegrationTestNodePreparer configured with defaults.
@@ -47,6 +48,15 @@ func NewIntegrationTestNodePreparer(client clientset.Interface, countToStrategy
 	}
 }
 
+// NewIntegrationTestNodePreparerWithNodeSpec creates an IntegrationTestNodePreparer configured with a node spec.
+func NewIntegrationTestNodePreparerWithNodeSpec(client clientset.Interface, countToStrategy []testutils.CountToStrategy, nodeSpec *v1.Node) testutils.TestNodePreparer {
+	return &IntegrationTestNodePreparer{
+		client:          client,
+		countToStrategy: countToStrategy,
+		nodeSpec:        nodeSpec,
+	}
+}
+
 // PrepareNodes prepares countToStrategy test nodes.
 func (p *IntegrationTestNodePreparer) PrepareNodes() error {
 	numNodes := 0
@@ -71,6 +81,11 @@ func (p *IntegrationTestNodePreparer) PrepareNodes() error {
 			},
 		},
 	}
+
+	if p.nodeSpec != nil {
+		baseNode = p.nodeSpec
+	}
+
 	for i := 0; i < numNodes; i++ {
 		var err error
 		for retry := 0; retry < retries; retry++ {
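The new nodeSpec field lets callers supply a complete *v1.Node template instead of the built-in base node; when set, PrepareNodes uses it as the base for every node it creates. A minimal usage sketch (the clientset, count, and template path here are placeholders for illustration, not part of this commit):

	templatePath := "config/node-default.yaml"
	nodeSpec := getNodeSpecFromFile(&templatePath) // helper added in scheduler_perf_test.go below
	nodePreparer := framework.NewIntegrationTestNodePreparerWithNodeSpec(
		clientset,
		[]testutils.CountToStrategy{{Count: 10, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
		nodeSpec,
	)
	if err := nodePreparer.PrepareNodes(); err != nil {
		klog.Fatalf("%v", err)
	}
	defer nodePreparer.CleanupNodes()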
test/integration/scheduler_perf/BUILD
@@ -30,6 +30,7 @@ go_test(
     srcs = [
         "main_test.go",
         "scheduler_bench_test.go",
+        "scheduler_perf_test.go",
         "scheduler_test.go",
     ],
     embed = [":go_default_library"],
@@ -45,11 +46,13 @@ go_test(
         "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
+        "//staging/src/k8s.io/component-base/featuregate:go_default_library",
         "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
         "//staging/src/k8s.io/csi-translation-lib/plugins:go_default_library",
         "//test/integration/framework:go_default_library",
         "//test/utils:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
+        "//vendor/sigs.k8s.io/yaml:go_default_library",
     ],
 )
 
test/integration/scheduler_perf/config/node-default.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
apiVersion: v1
kind: Node
metadata:
  generateName: scheduler-perf-
spec: {}
status:
  capacity:
    pods: "110"
    cpu: "4"
    memory: 32Gi
  conditions:
  - status: "True"
    type: Ready
  phase: Running
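Node templates such as this are decoded into typed API objects with sigs.k8s.io/yaml. A self-contained sketch of that round trip, mirroring the getSpecFromFile helper in scheduler_perf_test.go below (the hard-coded path is just for illustration):

	package main

	import (
		"fmt"
		"io/ioutil"

		v1 "k8s.io/api/core/v1"
		"sigs.k8s.io/yaml"
	)

	func main() {
		// yaml.Unmarshal converts YAML to JSON before decoding, so the
		// template keys (generateName, capacity, ...) match v1.Node's JSON tags.
		data, err := ioutil.ReadFile("config/node-default.yaml")
		if err != nil {
			panic(err)
		}
		node := &v1.Node{}
		if err := yaml.Unmarshal(data, node); err != nil {
			panic(err)
		}
		fmt.Println(node.GenerateName, node.Status.Capacity)
	}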
test/integration/scheduler_perf/config/performance-config.yaml (new file, 176 lines)
@@ -0,0 +1,176 @@
- template:
    desc: SchedulingBasic
    initPods:
      podTemplatePath: config/pod-default.yaml
    podsToSchedule:
      podTemplatePath: config/pod-default.yaml
  params:
  - numNodes: 500
    numInitPods: 500
    numPodsToSchedule: 1000
  - numNodes: 5000
    numInitPods: 5000
    numPodsToSchedule: 1000
- template:
    desc: SchedulingPodAntiAffinity
    nodes:
      uniqueNodeLabelStrategy:
        labelKey: kubernetes.io/hostname
    initPods:
      podTemplatePath: config/pod-with-pod-anti-affinity.yaml
    podsToSchedule:
      podTemplatePath: config/pod-with-pod-anti-affinity.yaml
  params:
  - numNodes: 500
    numInitPods: 100
    numPodsToSchedule: 400
  - numNodes: 5000
    numInitPods: 1000
    numPodsToSchedule: 1000
- template:
    desc: SchedulingSecrets
    initPods:
      podTemplatePath: config/pod-with-secret-volume.yaml
    podsToSchedule:
      podTemplatePath: config/pod-with-secret-volume.yaml
  params:
  - numNodes: 500
    numInitPods: 500
    numPodsToSchedule: 1000
  - numNodes: 5000
    numInitPods: 5000
    numPodsToSchedule: 1000
- template:
    desc: SchedulingInTreePVs
    initPods:
      persistentVolumeTemplatePath: config/pv-aws.yaml
      persistentVolumeClaimTemplatePath: config/pvc.yaml
    podsToSchedule:
      persistentVolumeTemplatePath: config/pv-aws.yaml
      persistentVolumeClaimTemplatePath: config/pvc.yaml
  params:
  - numNodes: 500
    numInitPods: 500
    numPodsToSchedule: 1000
  - numNodes: 5000
    numInitPods: 5000
    numPodsToSchedule: 1000
- template:
    desc: SchedulingMigratedInTreePVs
    nodes:
      nodeTemplatePath: config/node-default.yaml
      nodeAllocatableStrategy:
        nodeAllocatable:
          attachable-volumes-csi-ebs.csi.aws.com: 39
        csiNodeAllocatable:
          ebs.csi.aws.com:
            count: 39
        migratedPlugins:
        - "kubernetes.io/aws-ebs"
    initPods:
      persistentVolumeTemplatePath: config/pv-aws.yaml
      persistentVolumeClaimTemplatePath: config/pvc.yaml
    podsToSchedule:
      persistentVolumeTemplatePath: config/pv-aws.yaml
      persistentVolumeClaimTemplatePath: config/pvc.yaml
    featureGates:
      CSIMigration: true
      CSIMigrationAWS: true
  params:
  - numNodes: 500
    numInitPods: 500
    numPodsToSchedule: 1000
  - numNodes: 5000
    numInitPods: 5000
    numPodsToSchedule: 1000
- template:
    desc: SchedulingCSIPVs
    nodes:
      nodeTemplatePath: config/node-default.yaml
      nodeAllocatableStrategy:
        nodeAllocatable:
          attachable-volumes-csi-ebs.csi.aws.com: 39
        csiNodeAllocatable:
          ebs.csi.aws.com:
            count: 39
    initPods:
      persistentVolumeTemplatePath: config/pv-csi.yaml
      persistentVolumeClaimTemplatePath: config/pvc.yaml
    podsToSchedule:
      persistentVolumeTemplatePath: config/pv-csi.yaml
      persistentVolumeClaimTemplatePath: config/pvc.yaml
  params:
  - numNodes: 500
    numInitPods: 500
    numPodsToSchedule: 1000
  - numNodes: 5000
    numInitPods: 5000
    numPodsToSchedule: 1000
- template:
    desc: SchedulingPodAffinity
    nodes:
      nodeTemplatePath: config/node-default.yaml
      labelNodePrepareStrategy:
        labelKey: "failure-domain.beta.kubernetes.io/zone"
        labelValue: "zone1"
    initPods:
      podTemplatePath: config/pod-with-pod-affinity.yaml
    podsToSchedule:
      podTemplatePath: config/pod-with-pod-affinity.yaml
  params:
  - numNodes: 500
    numInitPods: 500
    numPodsToSchedule: 1000
  - numNodes: 5000
    numInitPods: 5000
    numPodsToSchedule: 1000
- template:
    desc: SchedulingPreferredPodAffinity
    nodes:
      uniqueNodeLabelStrategy:
        labelKey: kubernetes.io/hostname
    initPods:
      podTemplatePath: config/pod-with-preferred-pod-affinity.yaml
    podsToSchedule:
      podTemplatePath: config/pod-with-preferred-pod-affinity.yaml
  params:
  - numNodes: 500
    numInitPods: 500
    numPodsToSchedule: 1000
  - numNodes: 5000
    numInitPods: 5000
    numPodsToSchedule: 1000
- template:
    desc: SchedulingPreferredPodAntiAffinity
    nodes:
      uniqueNodeLabelStrategy:
        labelKey: kubernetes.io/hostname
    initPods:
      podTemplatePath: config/pod-with-preferred-pod-anti-affinity.yaml
    podsToSchedule:
      podTemplatePath: config/pod-with-preferred-pod-anti-affinity.yaml
  params:
  - numNodes: 500
    numInitPods: 500
    numPodsToSchedule: 1000
  - numNodes: 5000
    numInitPods: 5000
    numPodsToSchedule: 1000
- template:
    desc: SchedulingNodeAffinity
    nodes:
      nodeTemplatePath: config/node-default.yaml
      labelNodePrepareStrategy:
        labelKey: "failure-domain.beta.kubernetes.io/zone"
        labelValue: "zone1"
    initPods:
      podTemplatePath: config/pod-with-node-affinity.yaml
    podsToSchedule:
      podTemplatePath: config/pod-with-node-affinity.yaml
  params:
  - numNodes: 500
    numInitPods: 500
    numPodsToSchedule: 1000
  - numNodes: 5000
    numInitPods: 5000
    numPodsToSchedule: 1000
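Each params entry stamps out one concrete test case from its template (see getSimpleTestCases below), and each case becomes a sub-benchmark whose name is built from the expanded values. For the first SchedulingBasic entry, for example:

	package main

	import "fmt"

	func main() {
		// Values taken from the first SchedulingBasic entry above.
		name := fmt.Sprintf("%v/%vNodes/%vInitPods/%vPodsToSchedule",
			"SchedulingBasic", 500, 500, 1000)
		fmt.Println(name) // SchedulingBasic/500Nodes/500InitPods/1000PodsToSchedule
	}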
test/integration/scheduler_perf/config/pod-default.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
apiVersion: v1
kind: Pod
metadata:
  generateName: pod-
spec:
  containers:
  - image: k8s.gcr.io/pause:3.1
    name: pause
    ports:
    - containerPort: 80
    resources:
      limits:
        cpu: 100m
        memory: 500Mi
      requests:
        cpu: 100m
        memory: 500Mi
test/integration/scheduler_perf/config/pod-with-node-affinity.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
apiVersion: v1
kind: Pod
metadata:
  generateName: node-affinity-
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: failure-domain.beta.kubernetes.io/zone
            operator: In
            values:
            - zone1
            - zone2
  containers:
  - image: k8s.gcr.io/pause:3.1
    name: pause
    ports:
    - containerPort: 80
    resources:
      limits:
        cpu: 100m
        memory: 500Mi
      requests:
        cpu: 100m
        memory: 500Mi
test/integration/scheduler_perf/config/pod-with-pod-affinity.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
apiVersion: v1
kind: Pod
metadata:
  generateName: affinity-pod-
  labels:
    foo: ""
spec:
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            foo: ""
        topologyKey: failure-domain.beta.kubernetes.io/zone
        namespaces: ["sched-test", "sched-setup"]
  containers:
  - image: k8s.gcr.io/pause:3.1
    name: pause
    ports:
    - containerPort: 80
    resources:
      limits:
        cpu: 100m
        memory: 500Mi
      requests:
        cpu: 100m
        memory: 500Mi
test/integration/scheduler_perf/config/pod-with-pod-anti-affinity.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
apiVersion: v1
kind: Pod
metadata:
  generateName: anti-affinity-pod-
  labels:
    color: green
    name: test
spec:
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            color: green
        topologyKey: kubernetes.io/hostname
        namespaces: ["sched-test", "sched-setup"]
  containers:
  - image: k8s.gcr.io/pause:3.1
    name: pause
    ports:
    - containerPort: 80
    resources:
      limits:
        cpu: 100m
        memory: 500Mi
      requests:
        cpu: 100m
        memory: 500Mi
test/integration/scheduler_perf/config/pod-with-preferred-pod-affinity.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
  generateName: preferred-affinity-pod-
  labels:
    foo: ""
spec:
  affinity:
    podAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - podAffinityTerm:
          labelSelector:
            matchLabels:
              foo: ""
          topologyKey: kubernetes.io/hostname
          namespaces: ["sched-test", "sched-setup"]
        weight: 1
  containers:
  - image: k8s.gcr.io/pause:3.1
    name: pause
    ports:
    - containerPort: 80
    resources:
      limits:
        cpu: 100m
        memory: 500Mi
      requests:
        cpu: 100m
        memory: 500Mi
test/integration/scheduler_perf/config/pod-with-preferred-pod-anti-affinity.yaml (new file, 29 lines)
@@ -0,0 +1,29 @@
apiVersion: v1
kind: Pod
metadata:
  generateName: preferred-anti-affinity-pod-
  labels:
    foo: ""
spec:
  affinity:
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - podAffinityTerm:
          labelSelector:
            matchLabels:
              foo: ""
          topologyKey: kubernetes.io/hostname
          namespaces: ["sched-test", "sched-setup"]
        weight: 1
  containers:
  - image: k8s.gcr.io/pause:3.1
    name: pause
    ports:
    - containerPort: 80
    resources:
      limits:
        cpu: 100m
        memory: 500Mi
      requests:
        cpu: 100m
        memory: 500Mi
test/integration/scheduler_perf/config/pod-with-secret-volume.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: v1
kind: Pod
metadata:
  generateName: secret-volume-
spec:
  containers:
  - image: k8s.gcr.io/pause:3.1
    name: pause
    ports:
    - containerPort: 80
    resources:
      limits:
        cpu: 100m
        memory: 500Mi
      requests:
        cpu: 100m
        memory: 500Mi
  volumes:
  - name: secret
    secret:
      secretName: secret
test/integration/scheduler_perf/config/pv-aws.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
apiVersion: v1
kind: PersistentVolume
spec:
  accessModes:
  - ReadOnlyMany
  awsElasticBlockStore:
    volumeID: <volume-id>
  capacity:
    storage: 1Gi
  persistentVolumeReclaimPolicy: Retain
test/integration/scheduler_perf/config/pv-csi.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
apiVersion: v1
kind: PersistentVolume
spec:
  accessModes:
  - ReadOnlyMany
  capacity:
    storage: 1Gi
  csi:
    driver: ebs.csi.aws.com
  persistentVolumeReclaimPolicy: Retain
test/integration/scheduler_perf/config/pvc.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    pv.kubernetes.io/bind-completed: "true"
spec:
  accessModes:
  - ReadOnlyMany
  resources:
    requests:
      storage: 1Gi
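The PV templates above deliberately leave the volume identity generic (the <volume-id> placeholder in pv-aws.yaml; no volumeHandle in pv-csi.yaml): each generated volume is assigned a unique ID at creation time. A sketch of that substitution, mirroring getCustomVolumeFactory in the test file below (pvTemplate stands for a *v1.PersistentVolume decoded from one of these templates):

	factory := func(id int) *v1.PersistentVolume {
		pv := pvTemplate.DeepCopy()
		volumeID := fmt.Sprintf("vol-%d", id)
		pv.ObjectMeta.Name = volumeID
		// Patch whichever volume source the template defines.
		if pv.Spec.CSI != nil {
			pv.Spec.CSI.VolumeHandle = volumeID
		} else if pv.Spec.AWSElasticBlockStore != nil {
			pv.Spec.AWSElasticBlockStore.VolumeID = volumeID
		}
		return pv
	}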
test/integration/scheduler_perf/scheduler_perf_test.go (new file, 273 lines)
@ -0,0 +1,273 @@
|
||||
/*
|
||||
Copyright 2019 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package benchmark
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"sync/atomic"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/component-base/featuregate"
|
||||
featuregatetesting "k8s.io/component-base/featuregate/testing"
|
||||
"k8s.io/klog"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
const (
|
||||
configFile = "config/performance-config.yaml"
|
||||
)
|
||||
|
||||
// testCase configures a test case to run the scheduler performance test. Users should be able to
|
||||
// provide this via a YAML file.
|
||||
//
|
||||
// It specifies nodes and pods in the cluster before running the test. It also specifies the pods to
|
||||
// schedule during the test. The config can be as simple as just specify number of nodes/pods, where
|
||||
// default spec will be applied. It also allows the user to specify a pod spec template for more compicated
|
||||
// test cases.
|
||||
//
|
||||
// It also specifies the metrics to be collected after the test. If nothing is specified, default metrics
|
||||
// such as scheduling throughput and latencies will be collected.
|
||||
type testCase struct {
|
||||
// description of the test case
|
||||
Desc string
|
||||
// configures nodes in the cluster
|
||||
Nodes nodeCase
|
||||
// configures pods in the cluster before running the tests
|
||||
InitPods podCase
|
||||
// pods to be scheduled during the test.
|
||||
PodsToSchedule podCase
|
||||
// optional, feature gates to set before running the test
|
||||
FeatureGates map[featuregate.Feature]bool
|
||||
}
|
||||
|
||||
type nodeCase struct {
|
||||
Num int
|
||||
NodeTemplatePath *string
|
||||
// At most one of the following strategies can be defined. If not specified, default to TrivialNodePrepareStrategy.
|
||||
NodeAllocatableStrategy *testutils.NodeAllocatableStrategy
|
||||
LabelNodePrepareStrategy *testutils.LabelNodePrepareStrategy
|
||||
UniqueNodeLabelStrategy *testutils.UniqueNodeLabelStrategy
|
||||
}
|
||||
|
||||
type podCase struct {
|
||||
Num int
|
||||
PodTemplatePath *string
|
||||
PersistentVolumeTemplatePath *string
|
||||
PersistentVolumeClaimTemplatePath *string
|
||||
}
|
||||
|
||||
// simpleTestCases defines a set of test cases that share the same template (node spec, pod spec, etc)
|
||||
// with testParams(e.g., NumNodes) being overridden. This provides a convenient way to define multiple tests
|
||||
// with various sizes.
|
||||
type simpleTestCases struct {
|
||||
Template testCase
|
||||
Params []testParams
|
||||
}
|
||||
|
||||
type testParams struct {
|
||||
NumNodes int
|
||||
NumInitPods int
|
||||
NumPodsToSchedule int
|
||||
}
|
||||
|
||||
func BenchmarkPerfScheduling(b *testing.B) {
|
||||
tests := getSimpleTestCases(configFile)
|
||||
|
||||
for _, test := range tests {
|
||||
name := fmt.Sprintf("%v/%vNodes/%vInitPods/%vPodsToSchedule", test.Desc, test.Nodes.Num, test.InitPods.Num, test.PodsToSchedule.Num)
|
||||
b.Run(name, func(b *testing.B) {
|
||||
for feature, flag := range test.FeatureGates {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, feature, flag)()
|
||||
}
|
||||
perfScheduling(test, b)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func perfScheduling(test testCase, b *testing.B) {
|
||||
var nodeStrategy testutils.PrepareNodeStrategy = &testutils.TrivialNodePrepareStrategy{}
|
||||
if test.Nodes.NodeAllocatableStrategy != nil {
|
||||
nodeStrategy = test.Nodes.NodeAllocatableStrategy
|
||||
} else if test.Nodes.LabelNodePrepareStrategy != nil {
|
||||
nodeStrategy = test.Nodes.LabelNodePrepareStrategy
|
||||
} else if test.Nodes.UniqueNodeLabelStrategy != nil {
|
||||
nodeStrategy = test.Nodes.UniqueNodeLabelStrategy
|
||||
}
|
||||
|
||||
setupPodStrategy := getPodStrategy(test.InitPods)
|
||||
testPodStrategy := getPodStrategy(test.PodsToSchedule)
|
||||
|
||||
var nodeSpec *v1.Node
|
||||
if test.Nodes.NodeTemplatePath != nil {
|
||||
nodeSpec = getNodeSpecFromFile(test.Nodes.NodeTemplatePath)
|
||||
}
|
||||
|
||||
finalFunc, podInformer, clientset := mustSetupScheduler()
|
||||
defer finalFunc()
|
||||
|
||||
var nodePreparer testutils.TestNodePreparer
|
||||
if nodeSpec != nil {
|
||||
nodePreparer = framework.NewIntegrationTestNodePreparerWithNodeSpec(
|
||||
clientset,
|
||||
[]testutils.CountToStrategy{{Count: test.Nodes.Num, Strategy: nodeStrategy}},
|
||||
nodeSpec,
|
||||
)
|
||||
} else {
|
||||
nodePreparer = framework.NewIntegrationTestNodePreparer(
|
||||
clientset,
|
||||
[]testutils.CountToStrategy{{Count: test.Nodes.Num, Strategy: nodeStrategy}},
|
||||
"scheduler-perf-",
|
||||
)
|
||||
}
|
||||
|
||||
if err := nodePreparer.PrepareNodes(); err != nil {
|
||||
klog.Fatalf("%v", err)
|
||||
}
|
||||
defer nodePreparer.CleanupNodes()
|
||||
|
||||
config := testutils.NewTestPodCreatorConfig()
|
||||
config.AddStrategy(setupNamespace, test.InitPods.Num, setupPodStrategy)
|
||||
podCreator := testutils.NewTestPodCreator(clientset, config)
|
||||
podCreator.CreatePods()
|
||||
|
||||
for {
|
||||
scheduled, err := getScheduledPods(podInformer)
|
||||
if err != nil {
|
||||
klog.Fatalf("%v", err)
|
||||
}
|
||||
if len(scheduled) >= test.InitPods.Num {
|
||||
break
|
||||
}
|
||||
klog.Infof("got %d existing pods, required: %d", len(scheduled), test.InitPods.Num)
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
scheduled := int32(0)
|
||||
completedCh := make(chan struct{})
|
||||
podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: func(old, cur interface{}) {
|
||||
curPod := cur.(*v1.Pod)
|
||||
oldPod := old.(*v1.Pod)
|
||||
|
||||
if len(oldPod.Spec.NodeName) == 0 && len(curPod.Spec.NodeName) > 0 {
|
||||
if atomic.AddInt32(&scheduled, 1) >= int32(test.PodsToSchedule.Num) {
|
||||
completedCh <- struct{}{}
|
||||
}
|
||||
}
|
||||
},
|
||||
})
|
||||
|
||||
// start benchmark
|
||||
b.ResetTimer()
|
||||
config = testutils.NewTestPodCreatorConfig()
|
||||
config.AddStrategy(testNamespace, test.PodsToSchedule.Num, testPodStrategy)
|
||||
podCreator = testutils.NewTestPodCreator(clientset, config)
|
||||
podCreator.CreatePods()
|
||||
|
||||
<-completedCh
|
||||
|
||||
// Note: without this line we're taking the overhead of defer() into account.
|
||||
b.StopTimer()
|
||||
}
|
||||
|
||||
func getPodStrategy(pc podCase) testutils.TestPodCreateStrategy {
|
||||
basePod := makeBasePod()
|
||||
if pc.PodTemplatePath != nil {
|
||||
basePod = getPodSpecFromFile(pc.PodTemplatePath)
|
||||
}
|
||||
if pc.PersistentVolumeClaimTemplatePath == nil {
|
||||
return testutils.NewCustomCreatePodStrategy(basePod)
|
||||
}
|
||||
|
||||
pvTemplate := getPersistentVolumeSpecFromFile(pc.PersistentVolumeTemplatePath)
|
||||
pvcTemplate := getPersistentVolumeClaimSpecFromFile(pc.PersistentVolumeClaimTemplatePath)
|
||||
return testutils.NewCreatePodWithPersistentVolumeStrategy(pvcTemplate, getCustomVolumeFactory(pvTemplate), basePod)
|
||||
}
|
||||
|
||||
func getSimpleTestCases(path string) []testCase {
|
||||
var simpleTests []simpleTestCases
|
||||
getSpecFromFile(&path, &simpleTests)
|
||||
|
||||
testCases := make([]testCase, 0)
|
||||
for _, s := range simpleTests {
|
||||
testCase := s.Template
|
||||
for _, p := range s.Params {
|
||||
testCase.Nodes.Num = p.NumNodes
|
||||
testCase.InitPods.Num = p.NumInitPods
|
||||
testCase.PodsToSchedule.Num = p.NumPodsToSchedule
|
||||
testCases = append(testCases, testCase)
|
||||
}
|
||||
}
|
||||
|
||||
return testCases
|
||||
}
|
||||
|
||||
func getNodeSpecFromFile(path *string) *v1.Node {
|
||||
nodeSpec := &v1.Node{}
|
||||
getSpecFromFile(path, nodeSpec)
|
||||
return nodeSpec
|
||||
}
|
||||
|
||||
func getPodSpecFromFile(path *string) *v1.Pod {
|
||||
podSpec := &v1.Pod{}
|
||||
getSpecFromFile(path, podSpec)
|
||||
return podSpec
|
||||
}
|
||||
|
||||
func getPersistentVolumeSpecFromFile(path *string) *v1.PersistentVolume {
|
||||
persistentVolumeSpec := &v1.PersistentVolume{}
|
||||
getSpecFromFile(path, persistentVolumeSpec)
|
||||
return persistentVolumeSpec
|
||||
}
|
||||
|
||||
func getPersistentVolumeClaimSpecFromFile(path *string) *v1.PersistentVolumeClaim {
|
||||
persistentVolumeClaimSpec := &v1.PersistentVolumeClaim{}
|
||||
getSpecFromFile(path, persistentVolumeClaimSpec)
|
||||
return persistentVolumeClaimSpec
|
||||
}
|
||||
|
||||
func getSpecFromFile(path *string, spec interface{}) {
|
||||
bytes, err := ioutil.ReadFile(*path)
|
||||
if err != nil {
|
||||
klog.Fatalf("%v", err)
|
||||
}
|
||||
if err := yaml.Unmarshal(bytes, spec); err != nil {
|
||||
klog.Fatalf("%v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func getCustomVolumeFactory(pvTemplate *v1.PersistentVolume) func(id int) *v1.PersistentVolume {
|
||||
return func(id int) *v1.PersistentVolume {
|
||||
pv := pvTemplate.DeepCopy()
|
||||
volumeID := fmt.Sprintf("vol-%d", id)
|
||||
pv.ObjectMeta.Name = volumeID
|
||||
pvs := pv.Spec.PersistentVolumeSource
|
||||
if pvs.CSI != nil {
|
||||
pvs.CSI.VolumeHandle = volumeID
|
||||
} else if pvs.AWSElasticBlockStore != nil {
|
||||
pvs.AWSElasticBlockStore.VolumeID = volumeID
|
||||
}
|
||||
return pv
|
||||
}
|
||||
}
|
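Assuming the usual integration-test environment (e.g. a local etcd), these cases should be runnable with standard Go benchmark tooling, for example `go test -bench=BenchmarkPerfScheduling` from test/integration/scheduler_perf; the exact invocation is not specified by this commit.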
test/utils/runners.go
@@ -979,29 +979,29 @@ func (*TrivialNodePrepareStrategy) CleanupDependentObjects(nodeName string, clie
 }
 
 type LabelNodePrepareStrategy struct {
-	labelKey   string
-	labelValue string
+	LabelKey   string
+	LabelValue string
 }
 
 var _ PrepareNodeStrategy = &LabelNodePrepareStrategy{}
 
 func NewLabelNodePrepareStrategy(labelKey string, labelValue string) *LabelNodePrepareStrategy {
 	return &LabelNodePrepareStrategy{
-		labelKey:   labelKey,
-		labelValue: labelValue,
+		LabelKey:   labelKey,
+		LabelValue: labelValue,
 	}
 }
 
 func (s *LabelNodePrepareStrategy) PreparePatch(*v1.Node) []byte {
-	labelString := fmt.Sprintf("{\"%v\":\"%v\"}", s.labelKey, s.labelValue)
+	labelString := fmt.Sprintf("{\"%v\":\"%v\"}", s.LabelKey, s.LabelValue)
 	patch := fmt.Sprintf(`{"metadata":{"labels":%v}}`, labelString)
 	return []byte(patch)
 }
 
 func (s *LabelNodePrepareStrategy) CleanupNode(node *v1.Node) *v1.Node {
 	nodeCopy := node.DeepCopy()
-	if node.Labels != nil && len(node.Labels[s.labelKey]) != 0 {
-		delete(nodeCopy.Labels, s.labelKey)
+	if node.Labels != nil && len(node.Labels[s.LabelKey]) != 0 {
+		delete(nodeCopy.Labels, s.LabelKey)
 	}
 	return nodeCopy
 }
@@ -1019,22 +1019,26 @@ func (*LabelNodePrepareStrategy) CleanupDependentObjects(nodeName string, client
 // set to nil.
 type NodeAllocatableStrategy struct {
 	// Node.status.allocatable to fill to all nodes.
-	nodeAllocatable map[v1.ResourceName]string
+	NodeAllocatable map[v1.ResourceName]string
 	// Map <driver_name> -> VolumeNodeResources to fill into csiNode.spec.drivers[<driver_name>].
-	csiNodeAllocatable map[string]*storagev1beta1.VolumeNodeResources
+	CsiNodeAllocatable map[string]*storagev1beta1.VolumeNodeResources
 	// List of in-tree volume plugins migrated to CSI.
-	migratedPlugins []string
+	MigratedPlugins []string
 }
 
 var _ PrepareNodeStrategy = &NodeAllocatableStrategy{}
 
 func NewNodeAllocatableStrategy(nodeAllocatable map[v1.ResourceName]string, csiNodeAllocatable map[string]*storagev1beta1.VolumeNodeResources, migratedPlugins []string) *NodeAllocatableStrategy {
-	return &NodeAllocatableStrategy{nodeAllocatable, csiNodeAllocatable, migratedPlugins}
+	return &NodeAllocatableStrategy{
+		NodeAllocatable:    nodeAllocatable,
+		CsiNodeAllocatable: csiNodeAllocatable,
+		MigratedPlugins:    migratedPlugins,
+	}
 }
 
 func (s *NodeAllocatableStrategy) PreparePatch(node *v1.Node) []byte {
 	newNode := node.DeepCopy()
-	for name, value := range s.nodeAllocatable {
+	for name, value := range s.NodeAllocatable {
 		newNode.Status.Allocatable[name] = resource.MustParse(value)
 	}
 
@@ -1056,7 +1060,7 @@ func (s *NodeAllocatableStrategy) PreparePatch(node *v1.Node) []byte {
 
 func (s *NodeAllocatableStrategy) CleanupNode(node *v1.Node) *v1.Node {
 	nodeCopy := node.DeepCopy()
-	for name := range s.nodeAllocatable {
+	for name := range s.NodeAllocatable {
 		delete(nodeCopy.Status.Allocatable, name)
 	}
 	return nodeCopy
@@ -1067,7 +1071,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse
 		ObjectMeta: metav1.ObjectMeta{
 			Name: nodeName,
 			Annotations: map[string]string{
-				v1.MigratedPluginsAnnotationKey: strings.Join(s.migratedPlugins, ","),
+				v1.MigratedPluginsAnnotationKey: strings.Join(s.MigratedPlugins, ","),
 			},
 		},
 		Spec: storagev1beta1.CSINodeSpec{
@@ -1075,7 +1079,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse
 		},
 	}
 
-	for driver, allocatable := range s.csiNodeAllocatable {
+	for driver, allocatable := range s.CsiNodeAllocatable {
 		d := storagev1beta1.CSINodeDriver{
 			Name:        driver,
 			Allocatable: allocatable,
@@ -1094,7 +1098,7 @@ func (s *NodeAllocatableStrategy) createCSINode(nodeName string, client clientse
 }
 
 func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1beta1.CSINode, client clientset.Interface) error {
-	for driverName, allocatable := range s.csiNodeAllocatable {
+	for driverName, allocatable := range s.CsiNodeAllocatable {
 		found := false
 		for i, driver := range csiNode.Spec.Drivers {
 			if driver.Name == driverName {
@@ -1112,7 +1116,7 @@ func (s *NodeAllocatableStrategy) updateCSINode(csiNode *storagev1beta1.CSINode,
 			csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, d)
 		}
 	}
-	csiNode.Annotations[v1.MigratedPluginsAnnotationKey] = strings.Join(s.migratedPlugins, ",")
+	csiNode.Annotations[v1.MigratedPluginsAnnotationKey] = strings.Join(s.MigratedPlugins, ",")
 
 	_, err := client.StorageV1beta1().CSINodes().Update(csiNode)
 	return err
@@ -1138,7 +1142,7 @@ func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, clien
 		return err
 	}
 
-	for driverName := range s.csiNodeAllocatable {
+	for driverName := range s.CsiNodeAllocatable {
 		for i, driver := range csiNode.Spec.Drivers {
 			if driver.Name == driverName {
 				csiNode.Spec.Drivers[i].Allocatable = nil
@@ -1148,6 +1152,41 @@ func (s *NodeAllocatableStrategy) CleanupDependentObjects(nodeName string, clien
 	return s.updateCSINode(csiNode, client)
 }
 
+// UniqueNodeLabelStrategy sets a unique label for each node.
+type UniqueNodeLabelStrategy struct {
+	LabelKey string
+}
+
+var _ PrepareNodeStrategy = &UniqueNodeLabelStrategy{}
+
+func NewUniqueNodeLabelStrategy(labelKey string) *UniqueNodeLabelStrategy {
+	return &UniqueNodeLabelStrategy{
+		LabelKey: labelKey,
+	}
+}
+
+func (s *UniqueNodeLabelStrategy) PreparePatch(*v1.Node) []byte {
+	labelString := fmt.Sprintf("{\"%v\":\"%v\"}", s.LabelKey, string(uuid.NewUUID()))
+	patch := fmt.Sprintf(`{"metadata":{"labels":%v}}`, labelString)
+	return []byte(patch)
+}
+
+func (s *UniqueNodeLabelStrategy) CleanupNode(node *v1.Node) *v1.Node {
+	nodeCopy := node.DeepCopy()
+	if node.Labels != nil && len(node.Labels[s.LabelKey]) != 0 {
+		delete(nodeCopy.Labels, s.LabelKey)
+	}
+	return nodeCopy
+}
+
+func (*UniqueNodeLabelStrategy) PrepareDependentObjects(node *v1.Node, client clientset.Interface) error {
+	return nil
+}
+
+func (*UniqueNodeLabelStrategy) CleanupDependentObjects(nodeName string, client clientset.Interface) error {
+	return nil
+}
+
 func DoPrepareNode(client clientset.Interface, node *v1.Node, strategy PrepareNodeStrategy) error {
 	var err error
 	patch := strategy.PreparePatch(node)