Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-22 11:21:47 +00:00)
Merge pull request #46117 from ravisantoshgudimetla/scheduler_perf_arch

Automatic merge from submit-queue (batch tested with PRs 49316, 46117, 49064, 48073, 49323)

**Modular extensions for kube scheduler perf testing framework**

**What this PR does / why we need it**: Makes the scheduler perf testing framework modular, so predicates and priorities (starting with node affinity) can be configured per test run instead of being hard-coded into individual tests.

**Which issue this PR fixes**: fixes #45973

**Special notes for your reviewer**: This is not the same as the existing node-affinity test: the previous one used a single node-affinity key with multiple values, while this one uses multiple keys, each with its own value.

**Release note**:
```
NONE
```

This commit is contained in: fb5fbc944a
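The reviewer note above is the crux of the change: the old `TestSchedule100Node3KNodeAffinityPods` (removed below) labeled each node group with a single affinity key and a per-group value, while the new `mutateNodeTemplate`/`mutatePodTemplate` pair derives several key/value pairs from `nodeAffinityKey` plus `LabelCount`. A minimal standalone sketch of the two label shapes (illustrative only, not code from this PR):

```go
// Contrast of the old and new node-label schemes described in the PR note.
package main

import (
	"fmt"
	"strconv"
)

func main() {
	const key = "kubernetes.io/sched-perf-node-affinity"
	const numGroups = 10 // old scheme: one key, one value per node group

	// Old scheme: group i gets {key: "i"}; a pod in group i matches key In ["i"].
	oldGroupLabels := make([]map[string]string, 0, numGroups)
	for i := 0; i < numGroups; i++ {
		oldGroupLabels = append(oldGroupLabels, map[string]string{key: strconv.Itoa(i)})
	}
	fmt.Println("old, group 3:", oldGroupLabels[3])

	// New scheme: every seeded node carries LabelCount distinct keys
	// ("<key>-0"="0", "<key>-1"="1", ...), mirroring mutateNodeTemplate below.
	const labelCount = 10
	newLabels := make(map[string]string)
	for i := 0; i < labelCount; i++ {
		v := strconv.Itoa(i)
		newLabels[key+"-"+v] = v
	}
	fmt.Println("new, every mutated node:", newLabels)
}
```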
```diff
@@ -53,6 +53,7 @@ go_test(
         "//test/utils:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
     ],
```
```diff
@@ -18,11 +18,14 @@ package benchmark
 
 // High Level Configuration for all predicates and priorities.
 type schedulerPerfConfig struct {
+	NodeCount    int // The number of nodes which will be seeded with metadata to match predicates and have non-trivial priority rankings.
+	PodCount     int // The number of pods which will be seeded with metadata to match predicates and have non-trivial priority rankings.
 	NodeAffinity *nodeAffinity
+	// TODO: Other predicates and priorities to be added here.
 }
 
 // nodeAffinity priority configuration details.
 type nodeAffinity struct {
-	numGroups       int    // number of Node-Pod sets with Pods NodeAffinity matching given Nodes.
 	nodeAffinityKey string // Node Selection Key.
+	LabelCount      int    // number of labels to be added to each node or pod.
 }
```
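For illustration, here is how the new knobs compose; the values mirror `writePodAndNodeTopologyToConfig` further down in this diff, with the struct definitions copied from the hunk above so the sketch compiles on its own:

```go
// Standalone sketch: filling the new schedulerPerfConfig knobs.
package main

import "fmt"

type nodeAffinity struct {
	nodeAffinityKey string // Node Selection Key.
	LabelCount      int    // number of labels to be added to each node or pod.
}

type schedulerPerfConfig struct {
	NodeCount    int
	PodCount     int
	NodeAffinity *nodeAffinity
}

func main() {
	// Mirrors writePodAndNodeTopologyToConfig later in this diff: 100 nodes
	// and 3000 pods get node-affinity metadata; the remainder
	// (numNodes-NodeCount, numPods-PodCount) come from the base templates.
	input := schedulerPerfConfig{
		NodeCount: 100,
		PodCount:  3000,
		NodeAffinity: &nodeAffinity{
			nodeAffinityKey: "kubernetes.io/sched-perf-node-affinity-",
			LabelCount:      10,
		},
	}
	fmt.Printf("%+v %+v\n", input, *input.NodeAffinity)
}
```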
```diff
@@ -20,12 +20,13 @@ import (
 	"fmt"
 	"github.com/golang/glog"
 	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/kubernetes/plugin/pkg/scheduler"
-	"k8s.io/kubernetes/test/integration/framework"
 	testutils "k8s.io/kubernetes/test/utils"
 	"math"
+	"strconv"
 	"testing"
 	"time"
 )
```
```diff
@@ -37,13 +38,47 @@ const (
 	threshold60K = 30
 )
 
+var (
+	basePodTemplate = &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: "sched-perf-pod-",
+		},
+		// TODO: this needs to be configurable.
+		Spec: testutils.MakePodSpec(),
+	}
+	baseNodeTemplate = &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: "sample-node-",
+		},
+		Spec: v1.NodeSpec{
+			// TODO: investigate why this is needed.
+			ExternalID: "foo",
+		},
+		Status: v1.NodeStatus{
+			Capacity: v1.ResourceList{
+				v1.ResourcePods:   *resource.NewQuantity(110, resource.DecimalSI),
+				v1.ResourceCPU:    resource.MustParse("4"),
+				v1.ResourceMemory: resource.MustParse("32Gi"),
+			},
+			Phase: v1.NodeRunning,
+			Conditions: []v1.NodeCondition{
+				{Type: v1.NodeReady, Status: v1.ConditionTrue},
+			},
+		},
+	}
+)
+
 // TestSchedule100Node3KPods schedules 3k pods on 100 nodes.
 func TestSchedule100Node3KPods(t *testing.T) {
 	if testing.Short() {
 		t.Skip("Skipping because we want to run short tests")
 	}
 
 	config := getBaseConfig(100, 3000)
-	writePodAndNodeTopologyToConfig(config)
+	err := writePodAndNodeTopologyToConfig(config)
+	if err != nil {
+		t.Errorf("Misconfiguration happened for nodes/pods chosen to have predicates and priorities")
+	}
 	min := schedulePods(config)
 	if min < threshold3K {
 		t.Errorf("Failing: Scheduling rate was too low for an interval, we saw rate of %v, which is the allowed minimum of %v ! ", min, threshold3K)
```
```diff
@@ -54,82 +89,6 @@ func TestSchedule100Node3KPods(t *testing.T) {
 	}
 }
 
-// TestSchedule100Node3KNodeAffinityPods schedules 3k pods using Node affinity on 100 nodes.
-func TestSchedule100Node3KNodeAffinityPods(t *testing.T) {
-	if testing.Short() {
-		t.Skip("Skipping because we want to run short tests")
-	}
-
-	config := getBaseConfig(100, 3000)
-	// number of Node-Pod sets with Pods NodeAffinity matching given Nodes.
-	numGroups := 10
-	nodeAffinityKey := "kubernetes.io/sched-perf-node-affinity"
-	nodeStrategies := make([]testutils.CountToStrategy, 0, numGroups)
-	for i := 0; i < numGroups; i++ {
-		nodeStrategies = append(nodeStrategies, testutils.CountToStrategy{
-			Count:    config.numNodes / numGroups,
-			Strategy: testutils.NewLabelNodePrepareStrategy(nodeAffinityKey, fmt.Sprintf("%v", i)),
-		})
-	}
-	config.nodePreparer = framework.NewIntegrationTestNodePreparer(
-		config.schedulerSupportFunctions.GetClient(),
-		nodeStrategies,
-		"scheduler-perf-",
-	)
-
-	podCreatorConfig := testutils.NewTestPodCreatorConfig()
-	for i := 0; i < numGroups; i++ {
-		pod := &v1.Pod{
-			ObjectMeta: metav1.ObjectMeta{
-				GenerateName: "sched-perf-node-affinity-pod-",
-			},
-			Spec: testutils.MakePodSpec(),
-		}
-		pod.Spec.Affinity = &v1.Affinity{
-			NodeAffinity: &v1.NodeAffinity{
-				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
-					NodeSelectorTerms: []v1.NodeSelectorTerm{
-						{
-							MatchExpressions: []v1.NodeSelectorRequirement{
-								{
-									Key:      nodeAffinityKey,
-									Operator: v1.NodeSelectorOpIn,
-									Values:   []string{fmt.Sprintf("%v", i)},
-								},
-							},
-						},
-					},
-				},
-			},
-		}
-
-		podCreatorConfig.AddStrategy("sched-perf-node-affinity", config.numPods/numGroups,
-			testutils.NewCustomCreatePodStrategy(pod),
-		)
-	}
-	config.podCreator = testutils.NewTestPodCreator(config.schedulerSupportFunctions.GetClient(), podCreatorConfig)
-
-	if min := schedulePods(config); min < threshold30K {
-		t.Errorf("Too small pod scheduling throughput for 30k pods. Expected %v got %v", threshold30K, min)
-	} else {
-		fmt.Printf("Minimal observed throughput for 30k pod test: %v\n", min)
-	}
-}
-
-// TestSchedule1000Node30KPods schedules 30k pods on 1000 nodes.
-func TestSchedule1000Node30KPods(t *testing.T) {
-	if testing.Short() {
-		t.Skip("Skipping because we want to run short tests")
-	}
-	config := getBaseConfig(1000, 30000)
-	writePodAndNodeTopologyToConfig(config)
-	if min := schedulePods(config); min < threshold30K {
-		t.Errorf("To small pod scheduling throughput for 30k pods. Expected %v got %v", threshold30K, min)
-	} else {
-		fmt.Printf("Minimal observed throughput for 30k pod test: %v\n", min)
-	}
-}
-
 // TestSchedule2000Node60KPods schedules 60k pods on 2000 nodes.
 // This test won't fit in normal 10 minutes time window.
 // func TestSchedule2000Node60KPods(t *testing.T) {
```
```diff
@@ -146,34 +105,25 @@ func TestSchedule1000Node30KPods(t *testing.T) {
 
 // testConfig contains the some input parameters needed for running test-suite
 type testConfig struct {
-	// Note: We don't need numPods, numNodes anymore in this struct but keeping them for backward compatibility
 	numPods                   int
 	numNodes                  int
-	nodePreparer              testutils.TestNodePreparer
-	podCreator                *testutils.TestPodCreator
+	mutatedNodeTemplate       *v1.Node
+	mutatedPodTemplate        *v1.Pod
 	schedulerSupportFunctions scheduler.Configurator
 	destroyFunc               func()
 }
 
-// baseConfig returns a minimal testConfig to be customized for different tests.
-func baseConfig() *testConfig {
+// getBaseConfig returns baseConfig after initializing number of nodes and pods.
+func getBaseConfig(nodes int, pods int) *testConfig {
 	schedulerConfigFactory, destroyFunc := mustSetupScheduler()
 	return &testConfig{
 		schedulerSupportFunctions: schedulerConfigFactory,
 		destroyFunc:               destroyFunc,
+		numNodes:                  nodes,
+		numPods:                   pods,
 	}
 }
 
-// getBaseConfig returns baseConfig after initializing number of nodes and pods.
-// We have to function for backward compatibility. We can combine this into baseConfig.
-// TODO: Remove this function once the backward compatibility is not needed.
-func getBaseConfig(nodes int, pods int) *testConfig {
-	config := baseConfig()
-	config.numNodes = nodes
-	config.numPods = pods
-	return config
-}
-
 // schedulePods schedules specific number of pods on specific number of nodes.
 // This is used to learn the scheduling throughput on various
 // sizes of cluster and changes as more and more pods are scheduled.
```
```diff
@@ -181,18 +131,11 @@ func getBaseConfig(nodes int, pods int) *testConfig {
 // It returns the minimum of throughput over whole run.
 func schedulePods(config *testConfig) int32 {
 	defer config.destroyFunc()
-	if err := config.nodePreparer.PrepareNodes(); err != nil {
-		glog.Fatalf("%v", err)
-	}
-	defer config.nodePreparer.CleanupNodes()
-	config.podCreator.CreatePods()
-
 	prev := 0
 	// On startup there may be a latent period where NO scheduling occurs (qps = 0).
 	// We are interested in low scheduling rates (i.e. qps=2),
 	minQps := int32(math.MaxInt32)
 	start := time.Now()
-
 	// Bake in time for the first pod scheduling event.
 	for {
 		time.Sleep(50 * time.Millisecond)
```
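The diff elides the body of the sampling loop, so the following is a hedged sketch of the bookkeeping it performs: poll on a fixed interval, turn the delta of scheduled pods into a rate, and keep the minimum over the run. `countScheduled` is a hypothetical stand-in for however the real test counts scheduled pods (it lists pods through the test framework):

```go
// Hedged sketch of minimum-throughput measurement, not the PR's exact loop.
package main

import (
	"fmt"
	"math"
	"time"
)

func measureMinQps(countScheduled func() int, total int, interval time.Duration) int32 {
	prev := 0
	minQps := int32(math.MaxInt32)
	for {
		time.Sleep(interval)
		scheduled := countScheduled()
		// Rate over this interval, normalized to per-second.
		qps := int32(float64(scheduled-prev) / interval.Seconds())
		if scheduled > 0 && qps < minQps { // skip the latent startup period (qps = 0)
			minQps = qps
		}
		if scheduled >= total {
			return minQps
		}
		prev = scheduled
	}
}

func main() {
	n := 0
	fake := func() int { n += 7; return n } // fake scheduler: 7 pods per tick
	fmt.Println("min qps:", measureMinQps(fake, 100, 50*time.Millisecond))
}
```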
```diff
@@ -240,107 +183,95 @@ func schedulePods(config *testConfig) int32 {
 	}
 }
 
-// mutateNodeSpec returns the strategy needed for creation of nodes.
-// TODO: It should take the nodespec and return the modified version of it. As of now, returning the strategies for backward compatibilty.
-func (na nodeAffinity) mutateNodeSpec(numNodes int) []testutils.CountToStrategy {
-	numGroups := na.numGroups
-	nodeAffinityKey := na.nodeAffinityKey
-	nodeStrategies := make([]testutils.CountToStrategy, 0, numGroups)
-	for i := 0; i < numGroups; i++ {
-		nodeStrategies = append(nodeStrategies, testutils.CountToStrategy{
-			Count:    numNodes / numGroups,
-			Strategy: testutils.NewLabelNodePrepareStrategy(nodeAffinityKey, fmt.Sprintf("%v", i)),
-		})
-	}
-	return nodeStrategies
-}
+// mutateNodeTemplate returns the modified node needed for creation of nodes.
+func (na nodeAffinity) mutateNodeTemplate(node *v1.Node) {
+	labels := make(map[string]string)
+	for i := 0; i < na.LabelCount; i++ {
+		value := strconv.Itoa(i)
+		key := na.nodeAffinityKey + value
+		labels[key] = value
+	}
+	node.ObjectMeta.Labels = labels
+	return
+}
 
-// mutatePodSpec returns the list of pods after mutating the pod spec based on predicates and priorities.
-// TODO: It should take the podspec and return the modified version of it. As of now, returning the podlist for backward compatibilty.
-func (na nodeAffinity) mutatePodSpec(numPods int, pod *v1.Pod) []*v1.Pod {
-	numGroups := na.numGroups
-	nodeAffinityKey := na.nodeAffinityKey
-	podList := make([]*v1.Pod, 0, numGroups)
-	for i := 0; i < numGroups; i++ {
-		pod = &v1.Pod{
-			ObjectMeta: metav1.ObjectMeta{
-				GenerateName: "sched-perf-node-affinity-pod-",
-			},
-			Spec: testutils.MakePodSpec(),
-		}
-		pod.Spec.Affinity = &v1.Affinity{
-			NodeAffinity: &v1.NodeAffinity{
-				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
-					NodeSelectorTerms: []v1.NodeSelectorTerm{
-						{
-							MatchExpressions: []v1.NodeSelectorRequirement{
-								{
-									Key:      nodeAffinityKey,
-									Operator: v1.NodeSelectorOpIn,
-									Values:   []string{fmt.Sprintf("%v", i)},
-								},
-							},
-						},
-					},
-				},
-			},
-		}
-		podList = append(podList, pod)
-	}
-	return podList
+// mutatePodTemplate returns the modified pod template after applying mutations.
+func (na nodeAffinity) mutatePodTemplate(pod *v1.Pod) {
+	var nodeSelectorRequirements []v1.NodeSelectorRequirement
+	for i := 0; i < na.LabelCount; i++ {
+		value := strconv.Itoa(i)
+		key := na.nodeAffinityKey + value
+		nodeSelector := v1.NodeSelectorRequirement{Key: key, Values: []string{value}, Operator: v1.NodeSelectorOpIn}
+		nodeSelectorRequirements = append(nodeSelectorRequirements, nodeSelector)
+	}
+	pod.Spec.Affinity = &v1.Affinity{
+		NodeAffinity: &v1.NodeAffinity{
+			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
+				NodeSelectorTerms: []v1.NodeSelectorTerm{
+					{
+						MatchExpressions: nodeSelectorRequirements,
+					},
+				},
+			},
+		},
+	}
+}
+
+// generateNodes generates nodes to be used for scheduling.
+func (inputConfig *schedulerPerfConfig) generateNodes(config *testConfig) {
+	for i := 0; i < inputConfig.NodeCount; i++ {
+		config.schedulerSupportFunctions.GetClient().Core().Nodes().Create(config.mutatedNodeTemplate)
+
+	}
+	for i := 0; i < config.numNodes-inputConfig.NodeCount; i++ {
+		config.schedulerSupportFunctions.GetClient().Core().Nodes().Create(baseNodeTemplate)
+
+	}
+}
+
+// generatePods generates pods to be used for scheduling.
+func (inputConfig *schedulerPerfConfig) generatePods(config *testConfig) {
+	testutils.CreatePod(config.schedulerSupportFunctions.GetClient(), "sample", inputConfig.PodCount, config.mutatedPodTemplate)
+	testutils.CreatePod(config.schedulerSupportFunctions.GetClient(), "sample", config.numPods-inputConfig.PodCount, basePodTemplate)
 }
 
 // generatePodAndNodeTopology is the wrapper function for modifying both pods and node objects.
-func (inputConfig *schedulerPerfConfig) generatePodAndNodeTopology(config *testConfig) {
-	nodeAffinity := inputConfig.NodeAffinity
-	podCreatorConfig := testutils.NewTestPodCreatorConfig()
-	var nodeStrategies []testutils.CountToStrategy
-	var pod *v1.Pod
-	var podList []*v1.Pod
-	if nodeAffinity != nil {
-		// Mutate Node
-		nodeStrategies = nodeAffinity.mutateNodeSpec(config.numNodes)
-		// Mutate Pod TODO: Make this to return to podSpec.
-		podList = nodeAffinity.mutatePodSpec(config.numPods, pod)
-		numGroups := nodeAffinity.numGroups
-		for _, pod := range podList {
-			podCreatorConfig.AddStrategy("sched-perf-node-affinity", config.numPods/numGroups,
-				testutils.NewCustomCreatePodStrategy(pod),
-			)
-		}
-		config.nodePreparer = framework.NewIntegrationTestNodePreparer(
-			config.schedulerSupportFunctions.GetClient(),
-			nodeStrategies, "scheduler-perf-")
-		config.podCreator = testutils.NewTestPodCreator(config.schedulerSupportFunctions.GetClient(), podCreatorConfig)
-		// TODO: other predicates/priorities will be processed in subsequent if statements.
-	} else {
-		// Default configuration.
-		nodePreparer := framework.NewIntegrationTestNodePreparer(
-			config.schedulerSupportFunctions.GetClient(),
-			[]testutils.CountToStrategy{{Count: config.numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
-			"scheduler-perf-",
-		)
-
-		podConfig := testutils.NewTestPodCreatorConfig()
-		podConfig.AddStrategy("sched-test", config.numPods, testutils.NewSimpleWithControllerCreatePodStrategy("rc1"))
-		podCreator := testutils.NewTestPodCreator(config.schedulerSupportFunctions.GetClient(), podConfig)
-		config.nodePreparer = nodePreparer
-		config.podCreator = podCreator
-	}
-	return
+func (inputConfig *schedulerPerfConfig) generatePodAndNodeTopology(config *testConfig) error {
+	if config.numNodes < inputConfig.NodeCount || config.numPods < inputConfig.PodCount {
+		return fmt.Errorf("NodeCount cannot be greater than numNodes")
+	}
+	nodeAffinity := inputConfig.NodeAffinity
+	// Node template that needs to be mutated.
+	mutatedNodeTemplate := baseNodeTemplate
+	// Pod template that needs to be mutated.
+	mutatedPodTemplate := basePodTemplate
+	if nodeAffinity != nil {
+		nodeAffinity.mutateNodeTemplate(mutatedNodeTemplate)
+		nodeAffinity.mutatePodTemplate(mutatedPodTemplate)
+	} // TODO: other predicates/priorities will be processed in subsequent if statements or a switch:).
+	config.mutatedPodTemplate = mutatedPodTemplate
+	config.mutatedNodeTemplate = mutatedNodeTemplate
+	inputConfig.generateNodes(config)
+	inputConfig.generatePods(config)
+	return nil
 }
 
 // writePodAndNodeTopologyToConfig reads a configuration and then applies it to a test configuration.
 //TODO: As of now, this function is not doing anything expect for reading input values to priority structs.
-func writePodAndNodeTopologyToConfig(config *testConfig) {
+func writePodAndNodeTopologyToConfig(config *testConfig) error {
 	// High Level structure that should be filled for every predicate or priority.
 	inputConfig := &schedulerPerfConfig{
+		NodeCount: 100,
+		PodCount:  3000,
 		NodeAffinity: &nodeAffinity{
-			//number of Node-Pod sets with Pods NodeAffinity matching given Nodes.
-			numGroups:       10,
-			nodeAffinityKey: "kubernetes.io/sched-perf-node-affinity",
+			nodeAffinityKey: "kubernetes.io/sched-perf-node-affinity-",
+			LabelCount:      10,
 		},
 	}
-	inputConfig.generatePodAndNodeTopology(config)
-	return
+	err := inputConfig.generatePodAndNodeTopology(config)
+	if err != nil {
+		return err
+	}
+	return nil
 }
```
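A detail worth calling out in `mutatePodTemplate`: all generated requirements land in a single `NodeSelectorTerm`, and expressions within one term AND together, so a mutated pod only fits nodes that carry every generated label. A standalone sketch of the requirement list (with a stand-in `Requirement` type instead of `v1.NodeSelectorRequirement`, and a `LabelCount` of 3 for brevity):

```go
// Sketch of the AND-ed requirement list mutatePodTemplate builds.
package main

import (
	"fmt"
	"strconv"
)

// Requirement mimics the shape of v1.NodeSelectorRequirement.
type Requirement struct {
	Key      string
	Operator string
	Values   []string
}

func main() {
	const prefix = "kubernetes.io/sched-perf-node-affinity-"
	const labelCount = 3
	var reqs []Requirement
	for i := 0; i < labelCount; i++ {
		v := strconv.Itoa(i)
		reqs = append(reqs, Requirement{Key: prefix + v, Operator: "In", Values: []string{v}})
	}
	// All of these go into ONE NodeSelectorTerm, so a node must match all three.
	for _, r := range reqs {
		fmt.Printf("%s %s %v\n", r.Key, r.Operator, r.Values)
	}
}
```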
```diff
@@ -1003,7 +1003,7 @@ func makeCreatePod(client clientset.Interface, namespace string, podTemplate *v1
 		return fmt.Errorf("Terminal error while creating pod, won't retry: %v", err)
 	}
 
-func createPod(client clientset.Interface, namespace string, podCount int, podTemplate *v1.Pod) error {
+func CreatePod(client clientset.Interface, namespace string, podCount int, podTemplate *v1.Pod) error {
 	var createError error
 	lock := sync.Mutex{}
 	createPodFunc := func(i int) {
@@ -1050,7 +1050,7 @@ func createController(client clientset.Interface, controllerName, namespace stri
 
 func NewCustomCreatePodStrategy(podTemplate *v1.Pod) TestPodCreateStrategy {
 	return func(client clientset.Interface, namespace string, podCount int) error {
-		return createPod(client, namespace, podCount, podTemplate)
+		return CreatePod(client, namespace, podCount, podTemplate)
 	}
 }
 
@@ -1076,7 +1076,7 @@ func NewSimpleWithControllerCreatePodStrategy(controllerName string) TestPodCrea
 		if err := createController(client, controllerName, namespace, podCount, basePod); err != nil {
 			return err
 		}
-		return createPod(client, namespace, podCount, basePod)
+		return CreatePod(client, namespace, podCount, basePod)
 	}
 }
```
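Exporting `createPod` as `CreatePod` is what lets the scheduler_perf package seed pods straight from a template in `generatePods` above. A hedged usage sketch, assuming the `k8s.io/kubernetes` tree (including `test/utils`) is importable and that the client-go fake clientset satisfies the `clientset.Interface` parameter in place of a real cluster connection:

```go
// Hedged usage sketch for the newly exported testutils.CreatePod helper.
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	testutils "k8s.io/kubernetes/test/utils"
)

func main() {
	// Fake clientset stands in for a real cluster (assumed compatible here).
	client := fake.NewSimpleClientset()
	template := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "sample-pod-"},
	}
	// CreatePod (formerly the unexported createPod) stamps out podCount
	// copies of the template in the given namespace.
	if err := testutils.CreatePod(client, "default", 5, template); err != nil {
		fmt.Println("create failed:", err)
	}
}
```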