Merge pull request #91510 from ahg-g/ahg-preempt

Add Preemption benchmark

Commit 0891f69f5e, authored by Kubernetes Prow Robot on 2020-05-28 16:38:12 -07:00, committed via GitHub.
6 changed files with 68 additions and 10 deletions

@@ -18,6 +18,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",

@@ -227,3 +227,16 @@
   - numNodes: 5000
     numInitPods: [2000, 2000, 2000, 2000, 2000]
     numPodsToSchedule: 1000
+- template:
+    desc: Preemption
+    initPods:
+    - podTemplatePath: config/pod-low-priority.yaml
+    podsToSchedule:
+      podTemplatePath: config/pod-high-priority.yaml
+  params:
+  - numNodes: 500
+    numInitPods: [2000]
+    numPodsToSchedule: 500
+  - numNodes: 5000
+    numInitPods: [20000]
+    numPodsToSchedule: 5000
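
The entry above defines the Preemption benchmark: the cluster is first filled with low-priority pods (2000 on 500 nodes, or 20000 on 5000 nodes), and the measured phase then schedules high-priority pods whose large CPU request is meant to force preemption of those init pods. As a rough sketch of how such an entry is consumed, the YAML keys map onto Go structs along these lines (type names below are illustrative, not from this PR; only the yaml tags come from the config shown above):

// Illustrative mapping of one performance-config.yaml entry onto Go structs.
// Only the yaml tags are grounded in the config above; the type names are invented.
package config

type testEntry struct {
    Template testTemplate `yaml:"template"`
    Params   []testParams `yaml:"params"`
}

type testTemplate struct {
    Desc           string    `yaml:"desc"`
    InitPods       []podCase `yaml:"initPods"`
    PodsToSchedule podCase   `yaml:"podsToSchedule"`
}

type podCase struct {
    PodTemplatePath string `yaml:"podTemplatePath"`
}

type testParams struct {
    NumNodes          int   `yaml:"numNodes"`
    NumInitPods       []int `yaml:"numInitPods"`
    NumPodsToSchedule int   `yaml:"numPodsToSchedule"`
}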

@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-
+spec:
+  priority: 10
+  containers:
+  - image: k8s.gcr.io/pause:3.2
+    name: pause
+    ports:
+    - containerPort: 80
+    resources:
+      limits:
+        cpu: 3000m
+        memory: 500Mi
+      requests:
+        cpu: 3000m
+        memory: 500Mi
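
This template is used for the pods scheduled during the measured phase. priority: 10 makes them outrank the init pods (whose template below leaves priority unset, i.e. 0), and the 3000m CPU request is sized so they generally cannot be placed next to several 900m init pods without evicting some of them. The rule the benchmark exercises boils down to a strict priority comparison; the helper below is a hypothetical sketch, not code from this PR:

// Hypothetical sketch of the comparison preemption hinges on: a pending pod
// may only evict pods whose priority is strictly lower than its own.
package preemptsketch

import v1 "k8s.io/api/core/v1"

// podPriority returns the pod's priority, treating an unset value as 0,
// which is how the low-priority template (no priority field) is interpreted.
func podPriority(p *v1.Pod) int32 {
    if p.Spec.Priority != nil {
        return *p.Spec.Priority
    }
    return 0
}

// canPreempt reports whether pending may consider victim as a preemption candidate.
func canPreempt(pending, victim *v1.Pod) bool {
    return podPriority(pending) > podPriority(victim)
}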

@@ -0,0 +1,18 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: pod-
+spec:
+  terminationGracePeriodSeconds: 0
+  containers:
+  - image: k8s.gcr.io/pause:3.2
+    name: pause
+    ports:
+    - containerPort: 80
+    resources:
+      limits:
+        cpu: 900m
+        memory: 500Mi
+      requests:
+        cpu: 900m
+        memory: 500Mi
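
This is the victim template: no priority field (defaults to 0), terminationGracePeriodSeconds: 0 so preempted pods are deleted immediately instead of waiting out a grace period, and a 900m request so several of them pack onto each node. A back-of-the-envelope check of why the high-priority pods cannot fit without evictions follows; the node size here is an assumption, not part of the PR:

// Rough arithmetic only; assumes 4-CPU benchmark nodes, which is NOT stated in
// this PR. Under that assumption four 900m init pods fit per node (3600m),
// leaving 400m free, so a 3000m high-priority pod needs three victims evicted.
package main

import "fmt"

func main() {
    const nodeMilliCPU = 4000 // assumed node allocatable CPU
    const victimCPU, preemptorCPU = 900, 3000

    victims := nodeMilliCPU / victimCPU // init pods that fit on one node
    free := nodeMilliCPU - victims*victimCPU
    evicted := 0
    for free < preemptorCPU {
        free += victimCPU
        evicted++
    }
    fmt.Printf("%d init pods per node; %d must be preempted to fit one high-priority pod\n", victims, evicted)
}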

@@ -45,6 +45,8 @@ var (
             "scheduler_scheduling_algorithm_priority_evaluation_seconds",
             "scheduler_binding_duration_seconds",
             "scheduler_e2e_scheduling_duration_seconds",
+            "scheduler_scheduling_algorithm_preemption_evaluation_seconds",
+            "scheduler_pod_scheduling_duration_seconds",
         },
     }
 )
@@ -154,7 +156,7 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
         }
         total += p.Num
     }
-    if err := waitNumPodsScheduled(b, total, podInformer); err != nil {
+    if err := waitNumPodsScheduled(b, total, podInformer, setupNamespace); err != nil {
         b.Fatal(err)
     }
@ -172,7 +174,7 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
if err := createPods(testNamespace, test.PodsToSchedule, clientset); err != nil {
b.Fatal(err)
}
if err := waitNumPodsScheduled(b, total+test.PodsToSchedule.Num, podInformer); err != nil {
if err := waitNumPodsScheduled(b, test.PodsToSchedule.Num, podInformer, testNamespace); err != nil {
b.Fatal(err)
}
@@ -187,9 +189,9 @@ func perfScheduling(test testCase, b *testing.B) []DataItem {
     return dataItems
 }
 
-func waitNumPodsScheduled(b *testing.B, num int, podInformer coreinformers.PodInformer) error {
+func waitNumPodsScheduled(b *testing.B, num int, podInformer coreinformers.PodInformer, namespace string) error {
     for {
-        scheduled, err := getScheduledPods(podInformer)
+        scheduled, err := getScheduledPods(podInformer, namespace)
         if err != nil {
             return err
         }
@@ -203,7 +205,7 @@ func waitNumPodsScheduled(b *testing.B, num int, podInformer coreinformers.PodInformer) error {
 }
 
 func getTestDataCollectors(tc testCase, podInformer coreinformers.PodInformer, b *testing.B) []testDataCollector {
-    collectors := []testDataCollector{newThroughputCollector(podInformer, map[string]string{"Name": b.Name()})}
+    collectors := []testDataCollector{newThroughputCollector(podInformer, map[string]string{"Name": b.Name()}, []string{testNamespace})}
     metricsCollectorConfig := defaultMetricsCollectorConfig
     if tc.MetricsCollectorConfig != nil {
         metricsCollectorConfig = *tc.MetricsCollectorConfig
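
The scheduling waits are now namespace-scoped: init pods are created and waited on in setupNamespace, while the measured pods are created, waited on, and counted in testNamespace. With preemption in play this matters, because evicted init pods shrink a global "scheduled" count, so the old target of total+test.PodsToSchedule.Num would never be reached. The first hunk above also adds the preemption-evaluation and pod-scheduling-duration metrics to the collected set. A minimal sketch of a namespace-scoped wait, assuming only the informer-based counting shown in the diff (the helper name, interval, and timeout are illustrative):

// Not the PR's exact code: wait until num pods are scheduled in one namespace,
// counting a pod as scheduled once it is bound to a node.
package benchsketch

import (
    "time"

    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/wait"
    coreinformers "k8s.io/client-go/informers/core/v1"
)

func waitScheduledInNamespace(podInformer coreinformers.PodInformer, namespace string, num int) error {
    return wait.Poll(1*time.Second, 10*time.Minute, func() (bool, error) {
        pods, err := podInformer.Lister().Pods(namespace).List(labels.Everything())
        if err != nil {
            return false, err
        }
        scheduled := 0
        for _, pod := range pods {
            if len(pod.Spec.NodeName) > 0 { // bound to a node, i.e. scheduled
                scheduled++
            }
        }
        return scheduled >= num, nil
    })
}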

@@ -30,6 +30,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/apimachinery/pkg/util/sets"
     coreinformers "k8s.io/client-go/informers/core/v1"
     clientset "k8s.io/client-go/kubernetes"
     restclient "k8s.io/client-go/rest"
@@ -76,16 +77,19 @@ func mustSetupScheduler() (util.ShutdownFunc, coreinformers.PodInformer, clientset.Interface) {
     return shutdownFunc, podInformer, clientSet
 }
 
-func getScheduledPods(podInformer coreinformers.PodInformer) ([]*v1.Pod, error) {
+// Returns the list of scheduled pods in the specified namespaces.
+// Note that no namespaces specified matches all namespaces.
+func getScheduledPods(podInformer coreinformers.PodInformer, namespaces ...string) ([]*v1.Pod, error) {
     pods, err := podInformer.Lister().List(labels.Everything())
     if err != nil {
         return nil, err
     }
+    s := sets.NewString(namespaces...)
     scheduled := make([]*v1.Pod, 0, len(pods))
     for i := range pods {
         pod := pods[i]
-        if len(pod.Spec.NodeName) > 0 {
+        if len(pod.Spec.NodeName) > 0 && (len(s) == 0 || s.Has(pod.Namespace)) {
             scheduled = append(scheduled, pod)
         }
     }
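
Because namespaces is variadic, existing call sites that pass nothing keep the old "all namespaces" behaviour, and the sets.String lookup keeps the per-pod check cheap. For illustration only (not code from the PR), the filtering rule reduces to:

// Standalone sketch of the rule documented above: an empty namespace set
// matches every namespace, otherwise only the listed ones match.
package nsfilter

import "k8s.io/apimachinery/pkg/util/sets"

func namespaceMatches(s sets.String, namespace string) bool {
    return len(s) == 0 || s.Has(namespace)
}

// For example:
//   namespaceMatches(sets.NewString(), "any-namespace")        == true
//   namespaceMatches(sets.NewString("test"), "setup")          == false
//   namespaceMatches(sets.NewString("setup", "test"), "setup") == true
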
@@ -213,17 +217,19 @@ type throughputCollector struct {
     podInformer           coreinformers.PodInformer
     schedulingThroughputs []float64
     labels                map[string]string
+    namespaces            []string
 }
 
-func newThroughputCollector(podInformer coreinformers.PodInformer, labels map[string]string) *throughputCollector {
+func newThroughputCollector(podInformer coreinformers.PodInformer, labels map[string]string, namespaces []string) *throughputCollector {
     return &throughputCollector{
         podInformer: podInformer,
         labels:      labels,
+        namespaces:  namespaces,
     }
 }
 
 func (tc *throughputCollector) run(stopCh chan struct{}) {
-    podsScheduled, err := getScheduledPods(tc.podInformer)
+    podsScheduled, err := getScheduledPods(tc.podInformer, tc.namespaces...)
     if err != nil {
         klog.Fatalf("%v", err)
     }
@@ -233,7 +239,7 @@ func (tc *throughputCollector) run(stopCh chan struct{}) {
         case <-stopCh:
             return
         case <-time.After(throughputSampleFrequency):
-            podsScheduled, err := getScheduledPods(tc.podInformer)
+            podsScheduled, err := getScheduledPods(tc.podInformer, tc.namespaces...)
             if err != nil {
                 klog.Fatalf("%v", err)
             }
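
With the namespaces plumbed through, the throughput collector samples only the namespaces it is given (the test namespace, per the collector construction above), so init pods that disappear as preemption victims do not drag a global scheduled-pod count down and distort the pods-per-second numbers. The sampling loop amounts to the sketch below (simplified; the real code keys off throughputSampleFrequency and the namespace-scoped getScheduledPods):

// Simplified sketch, not the PR's code: each tick, convert the number of newly
// scheduled pods into a pods-per-second sample. countScheduled stands in for
// the namespace-scoped getScheduledPods call shown above.
package throughputsketch

import "time"

const sampleInterval = time.Second // stand-in for throughputSampleFrequency

func collect(stopCh <-chan struct{}, countScheduled func() int) []float64 {
    var samples []float64
    last := countScheduled()
    for {
        select {
        case <-stopCh:
            return samples
        case <-time.After(sampleInterval):
            now := countScheduled()
            samples = append(samples, float64(now-last)/sampleInterval.Seconds())
            last = now
        }
    }
}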