From d650b571411bc8c81cc678554ca3ec25fe700e69 Mon Sep 17 00:00:00 2001
From: Abdullah Gharaibeh
Date: Wed, 27 May 2020 16:29:26 -0400
Subject: [PATCH] Added Preemption benchmark

---
 test/integration/scheduler_perf/BUILD         |  1 +
 .../config/performance-config.yaml            | 13 +++++++++++++
 .../config/pod-high-priority.yaml             | 18 ++++++++++++++++++
 .../config/pod-low-priority.yaml              | 18 ++++++++++++++++++
 .../scheduler_perf/scheduler_perf_test.go     | 12 +++++++-----
 test/integration/scheduler_perf/util.go       | 16 +++++++++++-----
 6 files changed, 68 insertions(+), 10 deletions(-)
 create mode 100644 test/integration/scheduler_perf/config/pod-high-priority.yaml
 create mode 100644 test/integration/scheduler_perf/config/pod-low-priority.yaml

diff --git a/test/integration/scheduler_perf/BUILD b/test/integration/scheduler_perf/BUILD
index fc54041f722..bc6b8fb0ec0 100644
--- a/test/integration/scheduler_perf/BUILD
+++ b/test/integration/scheduler_perf/BUILD
@@ -18,6 +18,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
diff --git a/test/integration/scheduler_perf/config/performance-config.yaml b/test/integration/scheduler_perf/config/performance-config.yaml
index 3be7e3b77af..841ce0cb89a 100644
--- a/test/integration/scheduler_perf/config/performance-config.yaml
+++ b/test/integration/scheduler_perf/config/performance-config.yaml
@@ -227,3 +227,16 @@
   - numNodes: 5000
     numInitPods: [2000, 2000, 2000, 2000, 2000]
     numPodsToSchedule: 1000
+- template:
+    desc: Preemption
+    initPods:
+    - podTemplatePath: config/pod-low-priority.yaml
+    podsToSchedule:
+      podTemplatePath: config/pod-high-priority.yaml
+  params:
+  - numNodes: 500
+    numInitPods: [2000]
+    numPodsToSchedule: 500
+  - numNodes: 5000
+    numInitPods: [20000]
+    numPodsToSchedule: 5000
diff --git a/test/integration/scheduler_perf/config/pod-high-priority.yaml b/test/integration/scheduler_perf/config/pod-high-priority.yaml
new file mode 100644
index 00000000000..2f15785af64
--- /dev/null
+++ b/test/integration/scheduler_perf/config/pod-high-priority.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-
+spec:
+  priority: 10
+  containers:
+  - image: k8s.gcr.io/pause:3.2
+    name: pause
+    ports:
+    - containerPort: 80
+    resources:
+      limits:
+        cpu: 3000m
+        memory: 500Mi
+      requests:
+        cpu: 3000m
+        memory: 500Mi
diff --git a/test/integration/scheduler_perf/config/pod-low-priority.yaml b/test/integration/scheduler_perf/config/pod-low-priority.yaml
new file mode 100644
index 00000000000..709d0e780ce
--- /dev/null
+++ b/test/integration/scheduler_perf/config/pod-low-priority.yaml
@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-
+spec:
+  terminationGracePeriodSeconds: 0
+  containers:
+  - image: k8s.gcr.io/pause:3.2
+    name: pause
+    ports:
+    - containerPort: 80
+    resources:
+      limits:
+        cpu: 900m
+        memory: 500Mi
+      requests:
+        cpu: 900m
+        memory: 500Mi
diff --git a/test/integration/scheduler_perf/scheduler_perf_test.go b/test/integration/scheduler_perf/scheduler_perf_test.go
index 8dc786a986b..461dca55dd1 100644
--- a/test/integration/scheduler_perf/scheduler_perf_test.go
+++ b/test/integration/scheduler_perf/scheduler_perf_test.go
@@ -45,6 +45,8 @@ var (
 			"scheduler_scheduling_algorithm_priority_evaluation_seconds",
 			"scheduler_binding_duration_seconds",
 			"scheduler_e2e_scheduling_duration_seconds",
+			"scheduler_scheduling_algorithm_preemption_evaluation_seconds",
+			"scheduler_pod_scheduling_duration_seconds",
 		},
 	}
 )
@@ -154,7 +156,7 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
 		}
 		total += p.Num
 	}
-	if err := waitNumPodsScheduled(b, total, podInformer); err != nil {
+	if err := waitNumPodsScheduled(b, total, podInformer, setupNamespace); err != nil {
 		b.Fatal(err)
 	}
 
@@ -172,7 +174,7 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
 	if err := createPods(testNamespace, test.PodsToSchedule, clientset); err != nil {
 		b.Fatal(err)
 	}
-	if err := waitNumPodsScheduled(b, total+test.PodsToSchedule.Num, podInformer); err != nil {
+	if err := waitNumPodsScheduled(b, test.PodsToSchedule.Num, podInformer, testNamespace); err != nil {
 		b.Fatal(err)
 	}
 
@@ -187,9 +189,9 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
 	return dataItems
 }
 
-func waitNumPodsScheduled(b *testing.B, num int, podInformer coreinformers.PodInformer) error {
+func waitNumPodsScheduled(b *testing.B, num int, podInformer coreinformers.PodInformer, namespace string) error {
 	for {
-		scheduled, err := getScheduledPods(podInformer)
+		scheduled, err := getScheduledPods(podInformer, namespace)
 		if err != nil {
 			return err
 		}
@@ -203,7 +205,7 @@ func waitNumPodsScheduled(b *testing.B, num int, podInformer coreinformers.PodIn
 }
 
 func getTestDataCollectors(tc testCase, podInformer coreinformers.PodInformer, b *testing.B) []testDataCollector {
-	collectors := []testDataCollector{newThroughputCollector(podInformer, map[string]string{"Name": b.Name()})}
+	collectors := []testDataCollector{newThroughputCollector(podInformer, map[string]string{"Name": b.Name()}, []string{testNamespace})}
 	metricsCollectorConfig := defaultMetricsCollectorConfig
 	if tc.MetricsCollectorConfig != nil {
 		metricsCollectorConfig = *tc.MetricsCollectorConfig
diff --git a/test/integration/scheduler_perf/util.go b/test/integration/scheduler_perf/util.go
index 0f7cab06540..f0cd294da8a 100644
--- a/test/integration/scheduler_perf/util.go
+++ b/test/integration/scheduler_perf/util.go
@@ -30,6 +30,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/sets"
 	coreinformers "k8s.io/client-go/informers/core/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
@@ -76,16 +77,19 @@ func mustSetupScheduler() (util.ShutdownFunc, coreinformers.PodInformer, clients
 	return shutdownFunc, podInformer, clientSet
 }
 
-func getScheduledPods(podInformer coreinformers.PodInformer) ([]*v1.Pod, error) {
+// Returns the list of scheduled pods in the specified namespaces.
+// Note that specifying no namespaces matches all namespaces.
+func getScheduledPods(podInformer coreinformers.PodInformer, namespaces ...string) ([]*v1.Pod, error) {
 	pods, err := podInformer.Lister().List(labels.Everything())
 	if err != nil {
 		return nil, err
 	}
+	s := sets.NewString(namespaces...)
 
 	scheduled := make([]*v1.Pod, 0, len(pods))
 	for i := range pods {
 		pod := pods[i]
-		if len(pod.Spec.NodeName) > 0 {
+		if len(pod.Spec.NodeName) > 0 && (len(s) == 0 || s.Has(pod.Namespace)) {
 			scheduled = append(scheduled, pod)
 		}
 	}
@@ -213,17 +217,19 @@ type throughputCollector struct {
 	podInformer           coreinformers.PodInformer
 	schedulingThroughputs []float64
 	labels                map[string]string
+	namespaces            []string
 }
 
-func newThroughputCollector(podInformer coreinformers.PodInformer, labels map[string]string) *throughputCollector {
+func newThroughputCollector(podInformer coreinformers.PodInformer, labels map[string]string, namespaces []string) *throughputCollector {
 	return &throughputCollector{
 		podInformer: podInformer,
 		labels:      labels,
+		namespaces:  namespaces,
 	}
 }
 
 func (tc *throughputCollector) run(stopCh chan struct{}) {
-	podsScheduled, err := getScheduledPods(tc.podInformer)
+	podsScheduled, err := getScheduledPods(tc.podInformer, tc.namespaces...)
 	if err != nil {
 		klog.Fatalf("%v", err)
 	}
@@ -233,7 +239,7 @@ func (tc *throughputCollector) run(stopCh chan struct{}) {
 		case <-stopCh:
 			return
 		case <-time.After(throughputSampleFrequency):
-			podsScheduled, err := getScheduledPods(tc.podInformer)
+			podsScheduled, err := getScheduledPods(tc.podInformer, tc.namespaces...)
 			if err != nil {
 				klog.Fatalf("%v", err)
 			}