mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-24 20:24:09 +00:00
Merge pull request #89380 from alculquicondor/perf-spreading
Add perf test case for Topology Spreading
This commit is contained in:
commit
d73907fc25
@ -110,9 +110,9 @@
|
|||||||
desc: SchedulingPodAffinity
|
desc: SchedulingPodAffinity
|
||||||
nodes:
|
nodes:
|
||||||
nodeTemplatePath: config/node-default.yaml
|
nodeTemplatePath: config/node-default.yaml
|
||||||
labelNodeStrategy:
|
labelNodePrepareStrategy:
|
||||||
labelKey: "failure-domain.beta.kubernetes.io/zone"
|
labelKey: "failure-domain.beta.kubernetes.io/zone"
|
||||||
labelValue: "zone1"
|
labelValues: ["zone1"]
|
||||||
initPods:
|
initPods:
|
||||||
- podTemplatePath: config/pod-with-pod-affinity.yaml
|
- podTemplatePath: config/pod-with-pod-affinity.yaml
|
||||||
podsToSchedule:
|
podsToSchedule:
|
||||||
@ -162,7 +162,7 @@
|
|||||||
nodeTemplatePath: config/node-default.yaml
|
nodeTemplatePath: config/node-default.yaml
|
||||||
labelNodePrepareStrategy:
|
labelNodePrepareStrategy:
|
||||||
labelKey: "failure-domain.beta.kubernetes.io/zone"
|
labelKey: "failure-domain.beta.kubernetes.io/zone"
|
||||||
labelValue: "zone1"
|
labelValues: ["zone1"]
|
||||||
initPods:
|
initPods:
|
||||||
- podTemplatePath: config/pod-with-node-affinity.yaml
|
- podTemplatePath: config/pod-with-node-affinity.yaml
|
||||||
podsToSchedule:
|
podsToSchedule:
|
||||||
@ -174,6 +174,42 @@
|
|||||||
- numNodes: 5000
|
- numNodes: 5000
|
||||||
numInitPods: [5000]
|
numInitPods: [5000]
|
||||||
numPodsToSchedule: 1000
|
numPodsToSchedule: 1000
|
||||||
|
- template:
|
||||||
|
desc: TopologySpreading
|
||||||
|
nodes:
|
||||||
|
nodeTemplatePath: config/node-default.yaml
|
||||||
|
labelNodePrepareStrategy:
|
||||||
|
labelKey: "topology.kubernetes.io/zone"
|
||||||
|
labelValues: ["moon-1", "moon-2", "moon-3"]
|
||||||
|
initPods:
|
||||||
|
- podTemplatePath: config/pod-default.yaml
|
||||||
|
podsToSchedule:
|
||||||
|
podTemplatePath: config/pod-with-topology-spreading.yaml
|
||||||
|
params:
|
||||||
|
- numNodes: 500
|
||||||
|
numInitPods: [1000]
|
||||||
|
numPodsToSchedule: 1000
|
||||||
|
- numNodes: 5000
|
||||||
|
numInitPods: [5000]
|
||||||
|
numPodsToSchedule: 2000
|
||||||
|
- template:
|
||||||
|
desc: PreferredTopologySpreading
|
||||||
|
nodes:
|
||||||
|
nodeTemplatePath: config/node-default.yaml
|
||||||
|
labelNodePrepareStrategy:
|
||||||
|
labelKey: "topology.kubernetes.io/zone"
|
||||||
|
labelValues: ["moon-1", "moon-2", "moon-3"]
|
||||||
|
initPods:
|
||||||
|
- podTemplatePath: config/pod-default.yaml
|
||||||
|
podsToSchedule:
|
||||||
|
podTemplatePath: config/pod-with-preferred-topology-spreading.yaml
|
||||||
|
params:
|
||||||
|
- numNodes: 500
|
||||||
|
numInitPods: [1000]
|
||||||
|
numPodsToSchedule: 1000
|
||||||
|
- numNodes: 5000
|
||||||
|
numInitPods: [5000]
|
||||||
|
numPodsToSchedule: 2000
|
||||||
- template:
|
- template:
|
||||||
desc: MixedSchedulingBasePod
|
desc: MixedSchedulingBasePod
|
||||||
initPods:
|
initPods:
|
||||||
|
@ -0,0 +1,21 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: Pod
|
||||||
|
metadata:
|
||||||
|
generateName: spreading-pod-
|
||||||
|
spec:
|
||||||
|
topologySpreadConstraints:
|
||||||
|
- maxSkew: 5
|
||||||
|
topologyKey: topology.kubernetes.io/zone
|
||||||
|
whenUnsatisfiable: ScheduleAnyway
|
||||||
|
containers:
|
||||||
|
- image: k8s.gcr.io/pause:3.2
|
||||||
|
name: pause
|
||||||
|
ports:
|
||||||
|
- containerPort: 80
|
||||||
|
resources:
|
||||||
|
limits:
|
||||||
|
cpu: 100m
|
||||||
|
memory: 500Mi
|
||||||
|
requests:
|
||||||
|
cpu: 100m
|
||||||
|
memory: 500Mi
|
@ -0,0 +1,21 @@
|
|||||||
|
apiVersion: v1
|
||||||
|
kind: Pod
|
||||||
|
metadata:
|
||||||
|
generateName: spreading-pod-
|
||||||
|
spec:
|
||||||
|
topologySpreadConstraints:
|
||||||
|
- maxSkew: 5
|
||||||
|
topologyKey: topology.kubernetes.io/zone
|
||||||
|
whenUnsatisfiable: DoNotSchedule
|
||||||
|
containers:
|
||||||
|
- image: k8s.gcr.io/pause:3.2
|
||||||
|
name: pause
|
||||||
|
ports:
|
||||||
|
- containerPort: 80
|
||||||
|
resources:
|
||||||
|
limits:
|
||||||
|
cpu: 100m
|
||||||
|
memory: 500Mi
|
||||||
|
requests:
|
||||||
|
cpu: 100m
|
||||||
|
memory: 500Mi
|
@ -980,22 +980,27 @@ func (*TrivialNodePrepareStrategy) CleanupDependentObjects(nodeName string, clie
|
|||||||
}
|
}
|
||||||
|
|
||||||
// LabelNodePrepareStrategy prepares nodes by applying a single label,
// rotating through the configured values in round-robin order across the
// nodes it patches.
type LabelNodePrepareStrategy struct {
	// LabelKey is the label key applied to every prepared node.
	LabelKey string
	// LabelValues holds the values handed out round-robin across nodes.
	LabelValues []string
	// roundRobinIdx is the index of the next value to assign; it wraps
	// back to zero after reaching len(LabelValues).
	roundRobinIdx int
}
|
||||||
|
|
||||||
var _ PrepareNodeStrategy = &LabelNodePrepareStrategy{}
|
var _ PrepareNodeStrategy = &LabelNodePrepareStrategy{}
|
||||||
|
|
||||||
func NewLabelNodePrepareStrategy(labelKey string, labelValue string) *LabelNodePrepareStrategy {
|
func NewLabelNodePrepareStrategy(labelKey string, labelValues ...string) *LabelNodePrepareStrategy {
|
||||||
return &LabelNodePrepareStrategy{
|
return &LabelNodePrepareStrategy{
|
||||||
LabelKey: labelKey,
|
LabelKey: labelKey,
|
||||||
LabelValue: labelValue,
|
LabelValues: labelValues,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *LabelNodePrepareStrategy) PreparePatch(*v1.Node) []byte {
|
func (s *LabelNodePrepareStrategy) PreparePatch(*v1.Node) []byte {
|
||||||
labelString := fmt.Sprintf("{\"%v\":\"%v\"}", s.LabelKey, s.LabelValue)
|
labelString := fmt.Sprintf("{\"%v\":\"%v\"}", s.LabelKey, s.LabelValues[s.roundRobinIdx])
|
||||||
patch := fmt.Sprintf(`{"metadata":{"labels":%v}}`, labelString)
|
patch := fmt.Sprintf(`{"metadata":{"labels":%v}}`, labelString)
|
||||||
|
s.roundRobinIdx++
|
||||||
|
if s.roundRobinIdx == len(s.LabelValues) {
|
||||||
|
s.roundRobinIdx = 0
|
||||||
|
}
|
||||||
return []byte(patch)
|
return []byte(patch)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user