From 6988653457f15a7482cb6d33bfb1a7c6831d7066 Mon Sep 17 00:00:00 2001
From: Abdullah Gharaibeh
Date: Wed, 21 Apr 2021 12:20:44 -0400
Subject: [PATCH] Added benchmarks for pod affinity namespaceSelector

---
 .../config/namespace-with-labels.yaml         |   5 +
 .../config/performance-config.yaml            | 146 +++++++++++++
 .../config/pod-affinity-ns-selector.yaml      |  29 +++
 .../config/pod-anti-affinity-ns-selector.yaml |  29 +++
 .../pod-preferred-affinity-ns-selector.yaml   |  31 +++
 ...d-preferred-anti-affinity-ns-selector.yaml |  31 +++
 .../scheduler_perf/scheduler_perf_test.go     | 200 +++++++++++++++++-
 7 files changed, 461 insertions(+), 10 deletions(-)
 create mode 100644 test/integration/scheduler_perf/config/namespace-with-labels.yaml
 create mode 100644 test/integration/scheduler_perf/config/pod-affinity-ns-selector.yaml
 create mode 100644 test/integration/scheduler_perf/config/pod-anti-affinity-ns-selector.yaml
 create mode 100644 test/integration/scheduler_perf/config/pod-preferred-affinity-ns-selector.yaml
 create mode 100644 test/integration/scheduler_perf/config/pod-preferred-anti-affinity-ns-selector.yaml

diff --git a/test/integration/scheduler_perf/config/namespace-with-labels.yaml b/test/integration/scheduler_perf/config/namespace-with-labels.yaml
new file mode 100644
index 00000000000..c6215ced09c
--- /dev/null
+++ b/test/integration/scheduler_perf/config/namespace-with-labels.yaml
@@ -0,0 +1,5 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    team: devops
diff --git a/test/integration/scheduler_perf/config/performance-config.yaml b/test/integration/scheduler_perf/config/performance-config.yaml
index 7578e40d848..0939008e1ef 100644
--- a/test/integration/scheduler_perf/config/performance-config.yaml
+++ b/test/integration/scheduler_perf/config/performance-config.yaml
@@ -475,3 +475,149 @@
     params:
       initNodes: 5000
       measurePods: 2000
+
+- name: SchedulingRequiredPodAntiAffinityWithNSSelector
+  featureGates:
+    PodAffinityNamespaceSelector: true
+  workloadTemplate:
+  - opcode: createNodes
+    countParam: $initNodes
+    uniqueNodeLabelStrategy:
+      labelKey: kubernetes.io/hostname
+  - opcode: createNamespaces
+    prefix: init-ns
+    countParam: $initNamespaces
+    namespaceTemplatePath: config/namespace-with-labels.yaml
+  - opcode: createNamespaces
+    prefix: measure-ns
+    count: 1
+    namespaceTemplatePath: config/namespace-with-labels.yaml
+  - opcode: createPodSets
+    countParam: $initNamespaces
+    namespacePrefix: init-ns
+    createPodsOp:
+      opcode: createPods
+      countParam: $initPodsPerNamespace
+      podTemplatePath: config/pod-anti-affinity-ns-selector.yaml
+  - opcode: createPods
+    countParam: $measurePods
+    podTemplatePath: config/pod-anti-affinity-ns-selector.yaml
+    collectMetrics: true
+    namespace: measure-ns-0
+  workloads:
+  - name: 5000Nodes
+    params:
+      initNodes: 5000
+      initPodsPerNamespace: 40
+      initNamespaces: 100
+      measurePods: 1000
+
+- name: SchedulingPreferredAntiAffinityWithNSSelector
+  featureGates:
+    PodAffinityNamespaceSelector: true
+  workloadTemplate:
+  - opcode: createNodes
+    countParam: $initNodes
+    uniqueNodeLabelStrategy:
+      labelKey: kubernetes.io/hostname
+  - opcode: createNamespaces
+    prefix: init-ns
+    countParam: $initNamespaces
+    namespaceTemplatePath: config/namespace-with-labels.yaml
+  - opcode: createNamespaces
+    prefix: measure-ns
+    count: 1
+    namespaceTemplatePath: config/namespace-with-labels.yaml
+  - opcode: createPodSets
+    countParam: $initNamespaces
+    namespacePrefix: init-ns
+    createPodsOp:
+      opcode: createPods
+      countParam: $initPodsPerNamespace
+      podTemplatePath: config/pod-preferred-anti-affinity-ns-selector.yaml
+  - opcode: createPods
+    countParam: $measurePods
+    podTemplatePath: config/pod-preferred-anti-affinity-ns-selector.yaml
+    collectMetrics: true
+    namespace: measure-ns-0
+  workloads:
+  - name: 5000Nodes
+    params:
+      initNodes: 5000
+      initPodsPerNamespace: 40
+      initNamespaces: 100
+      measurePods: 1000
+
+- name: SchedulingRequiredPodAffinityWithNSSelector
+  featureGates:
+    PodAffinityNamespaceSelector: true
+  workloadTemplate:
+  - opcode: createNodes
+    countParam: $initNodes
+    labelNodePrepareStrategy:
+      labelKey: "topology.kubernetes.io/zone"
+      labelValues: ["zone1"]
+  - opcode: createNamespaces
+    prefix: init-ns
+    countParam: $initNamespaces
+    namespaceTemplatePath: config/namespace-with-labels.yaml
+  - opcode: createNamespaces
+    prefix: measure-ns
+    count: 1
+    namespaceTemplatePath: config/namespace-with-labels.yaml
+  - opcode: createPodSets
+    countParam: $initNamespaces
+    namespacePrefix: init-ns
+    createPodsOp:
+      opcode: createPods
+      countParam: $initPodsPerNamespace
+      podTemplatePath: config/pod-affinity-ns-selector.yaml
+  - opcode: createPods
+    countParam: $measurePods
+    podTemplatePath: config/pod-affinity-ns-selector.yaml
+    collectMetrics: true
+    namespace: measure-ns-0
+  workloads:
+  - name: 5000Nodes
+    params:
+      initNodes: 5000
+      initPodsPerNamespace: 50
+      initNamespaces: 100
+      measurePods: 1000
+
+- name: SchedulingPreferredAffinityWithNSSelector
+  featureGates:
+    PodAffinityNamespaceSelector: true
+  workloadTemplate:
+  - opcode: createNodes
+    countParam: $initNodes
+    uniqueNodeLabelStrategy:
+      labelKey: kubernetes.io/hostname
+  - opcode: createNamespaces
+    prefix: init-ns
+    countParam: $initNamespaces
+    namespaceTemplatePath: config/namespace-with-labels.yaml
+  - opcode: createNamespaces
+    prefix: measure-ns
+    count: 1
+    namespaceTemplatePath: config/namespace-with-labels.yaml
+  - opcode: createPodSets
+    countParam: $initNamespaces
+    namespacePrefix: init-ns
+    createPodsOp:
+      opcode: createPods
+      countParam: $initPodsPerNamespace
+      podTemplatePath: config/pod-preferred-affinity-ns-selector.yaml
+  - opcode: createPods
+    countParam: $measurePods
+    podTemplatePath: config/pod-preferred-affinity-ns-selector.yaml
+    collectMetrics: true
+    namespace: measure-ns-0
+  workloads:
+  - name: 5000Nodes
+    params:
+      initNodes: 5000
+      initPodsPerNamespace: 50
+      initNamespaces: 100
+      measurePods: 1000
+
diff --git a/test/integration/scheduler_perf/config/pod-affinity-ns-selector.yaml b/test/integration/scheduler_perf/config/pod-affinity-ns-selector.yaml
new file mode 100644
index 00000000000..e47560f6df6
--- /dev/null
+++ b/test/integration/scheduler_perf/config/pod-affinity-ns-selector.yaml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: affinity-pod-
+  labels:
+    color: blue
+spec:
+  affinity:
+    podAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+      - labelSelector:
+          matchLabels:
+            color: blue
+        topologyKey: topology.kubernetes.io/zone
+        namespaceSelector:
+          matchLabels:
+            team: devops
+  containers:
+  - image: k8s.gcr.io/pause:3.4.1
+    name: pause
+    ports:
+    - containerPort: 80
+    resources:
+      limits:
+        cpu: 100m
+        memory: 500Mi
+      requests:
+        cpu: 100m
+        memory: 500Mi
diff --git a/test/integration/scheduler_perf/config/pod-anti-affinity-ns-selector.yaml b/test/integration/scheduler_perf/config/pod-anti-affinity-ns-selector.yaml
new file mode 100644
index 00000000000..40f8dd3a062
--- /dev/null
+++ b/test/integration/scheduler_perf/config/pod-anti-affinity-ns-selector.yaml
@@ -0,0 +1,29 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: anti-affinity-pod-
+  labels:
+    color: green
+spec:
+  affinity:
+    podAntiAffinity:
+      requiredDuringSchedulingIgnoredDuringExecution:
+      - labelSelector:
+          matchLabels:
+            color: green
+        topologyKey: kubernetes.io/hostname
+        namespaceSelector:
+          matchLabels:
+            team: devops
+  containers:
+  - image: k8s.gcr.io/pause:3.4.1
+    name: pause
+    ports:
+    - containerPort: 80
+    resources:
+      limits:
+        cpu: 100m
+        memory: 500Mi
+      requests:
+        cpu: 100m
+        memory: 500Mi
diff --git a/test/integration/scheduler_perf/config/pod-preferred-affinity-ns-selector.yaml b/test/integration/scheduler_perf/config/pod-preferred-affinity-ns-selector.yaml
new file mode 100644
index 00000000000..9ff97485a6a
--- /dev/null
+++ b/test/integration/scheduler_perf/config/pod-preferred-affinity-ns-selector.yaml
@@ -0,0 +1,31 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: preferred-affinity-pod-
+  labels:
+    color: red
+spec:
+  affinity:
+    podAffinity:
+      preferredDuringSchedulingIgnoredDuringExecution:
+      - podAffinityTerm:
+          labelSelector:
+            matchLabels:
+              color: red
+          topologyKey: kubernetes.io/hostname
+          namespaceSelector:
+            matchLabels:
+              team: devops
+        weight: 1
+  containers:
+  - image: k8s.gcr.io/pause:3.4.1
+    name: pause
+    ports:
+    - containerPort: 80
+    resources:
+      limits:
+        cpu: 100m
+        memory: 500Mi
+      requests:
+        cpu: 100m
+        memory: 500Mi
diff --git a/test/integration/scheduler_perf/config/pod-preferred-anti-affinity-ns-selector.yaml b/test/integration/scheduler_perf/config/pod-preferred-anti-affinity-ns-selector.yaml
new file mode 100644
index 00000000000..b53017fafb8
--- /dev/null
+++ b/test/integration/scheduler_perf/config/pod-preferred-anti-affinity-ns-selector.yaml
@@ -0,0 +1,31 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  generateName: anti-affinity-pod-
+  labels:
+    color: green
+spec:
+  affinity:
+    podAntiAffinity:
+      preferredDuringSchedulingIgnoredDuringExecution:
+      - podAffinityTerm:
+          labelSelector:
+            matchLabels:
+              color: green
+          topologyKey: kubernetes.io/hostname
+          namespaceSelector:
+            matchLabels:
+              team: devops
+        weight: 1
+  containers:
+  - image: k8s.gcr.io/pause:3.4.1
+    name: pause
+    ports:
+    - containerPort: 80
+    resources:
+      limits:
+        cpu: 100m
+        memory: 500Mi
+      requests:
+        cpu: 100m
+        memory: 500Mi
diff --git a/test/integration/scheduler_perf/scheduler_perf_test.go b/test/integration/scheduler_perf/scheduler_perf_test.go
index 1f683e75752..085fa693e3f 100644
--- a/test/integration/scheduler_perf/scheduler_perf_test.go
+++ b/test/integration/scheduler_perf/scheduler_perf_test.go
@@ -53,11 +53,13 @@ import (
 )
 
 const (
-	configFile        = "config/performance-config.yaml"
-	createNodesOpcode = "createNodes"
-	createPodsOpcode  = "createPods"
-	churnOpcode       = "churn"
-	barrierOpcode     = "barrier"
+	configFile             = "config/performance-config.yaml"
+	createNodesOpcode      = "createNodes"
+	createNamespacesOpcode = "createNamespaces"
+	createPodsOpcode       = "createPods"
+	createPodSetsOpcode    = "createPodSets"
+	churnOpcode            = "churn"
+	barrierOpcode          = "barrier"
 
 	// Two modes supported in "churn" operator.
 
@@ -142,7 +144,9 @@ type op struct {
 func (op *op) UnmarshalJSON(b []byte) error {
 	possibleOps := []realOp{
 		&createNodesOp{},
+		&createNamespacesOp{},
 		&createPodsOp{},
+		&createPodSetsOp{},
 		&churnOp{},
 		&barrierOp{},
 		// TODO(#93793): add a sleep timer op to simulate waiting?
@@ -204,7 +208,7 @@ type createNodesOp struct {
 
 func (cno *createNodesOp) isValid(allowParameterization bool) error {
 	if cno.Opcode != createNodesOpcode {
-		return fmt.Errorf("invalid opcode")
+		return fmt.Errorf("invalid opcode %q", cno.Opcode)
 	}
 	ok := cno.Count > 0 ||
 		(cno.CountParam != "" && allowParameterization && isValidParameterizable(cno.CountParam))
@@ -228,6 +232,47 @@ func (cno createNodesOp) patchParams(w *workload) (realOp, error) {
 	return &cno, (&cno).isValid(false)
 }
 
+// createNamespacesOp defines an op for creating namespaces.
+type createNamespacesOp struct {
+	// Must be "createNamespaces".
+	Opcode string
+	// Name prefix of the Namespace. The format is "<prefix>-<number>", where
+	// number is between 0 and count-1.
+	Prefix string
+	// Number of namespaces to create. Parameterizable through CountParam.
+	Count int
+	// Template parameter for Count. Takes precedence over Count if both are set.
+	CountParam string
+	// Path to spec file describing the Namespaces to create. Optional.
+	NamespaceTemplatePath *string
+}
+
+func (cmo *createNamespacesOp) isValid(allowParameterization bool) error {
+	if cmo.Opcode != createNamespacesOpcode {
+		return fmt.Errorf("invalid opcode %q", cmo.Opcode)
+	}
+	ok := cmo.Count > 0 ||
+		(cmo.CountParam != "" && allowParameterization && isValidParameterizable(cmo.CountParam))
+	if !ok {
+		return fmt.Errorf("invalid Count=%d / CountParam=%q", cmo.Count, cmo.CountParam)
+	}
+	return nil
+}
+
+func (*createNamespacesOp) collectsMetrics() bool {
+	return false
+}
+
+func (cmo createNamespacesOp) patchParams(w *workload) (realOp, error) {
+	if cmo.CountParam != "" {
+		var ok bool
+		if cmo.Count, ok = w.Params[cmo.CountParam[1:]]; !ok {
+			return nil, fmt.Errorf("parameter %s is undefined", cmo.CountParam)
+		}
+	}
+	return &cmo, (&cmo).isValid(false)
+}
+
 // createPodsOp defines an op where pods are scheduled as a part of a workload.
 // The test can block on the completion of this op before moving forward or
 // continue asynchronously.
@@ -257,7 +302,7 @@ type createPodsOp struct {
 
 func (cpo *createPodsOp) isValid(allowParameterization bool) error {
 	if cpo.Opcode != createPodsOpcode {
-		return fmt.Errorf("invalid opcode")
+		return fmt.Errorf("invalid opcode %q; expected %q", cpo.Opcode, createPodsOpcode)
 	}
 	ok := cpo.Count > 0 ||
 		(cpo.CountParam != "" && allowParameterization && isValidParameterizable(cpo.CountParam))
@@ -287,6 +332,47 @@ func (cpo createPodsOp) patchParams(w *workload) (realOp, error) {
 	return &cpo, (&cpo).isValid(false)
 }
 
+// createPodSetsOp defines an op where a set of createPodsOps is created, each
+// in a unique namespace.
+type createPodSetsOp struct {
+	// Must be "createPodSets".
+	Opcode string
+	// Number of sets to create.
+	Count int
+	// Template parameter for Count.
+	CountParam string
+	// Each set of pods will be created in a namespace of the form
+	// namespacePrefix-<number>, where number is from 0 to count-1.
+	NamespacePrefix string
+	// The template of a createPodsOp.
+	CreatePodsOp createPodsOp
+}
+
+func (cpso *createPodSetsOp) isValid(allowParameterization bool) error {
+	if cpso.Opcode != createPodSetsOpcode {
+		return fmt.Errorf("invalid opcode %q; expected %q", cpso.Opcode, createPodSetsOpcode)
+	}
+	ok := cpso.Count > 0 ||
+		(cpso.CountParam != "" && allowParameterization && isValidParameterizable(cpso.CountParam))
+	if !ok {
+		return fmt.Errorf("invalid Count=%d / CountParam=%q", cpso.Count, cpso.CountParam)
+	}
+	return cpso.CreatePodsOp.isValid(allowParameterization)
+}
+
+func (cpso *createPodSetsOp) collectsMetrics() bool {
+	return cpso.CreatePodsOp.CollectMetrics
+}
+
+func (cpso createPodSetsOp) patchParams(w *workload) (realOp, error) {
+	if cpso.CountParam != "" {
+		var ok bool
+		if cpso.Count, ok = w.Params[cpso.CountParam[1:]]; !ok {
+			return nil, fmt.Errorf("parameter %s is undefined", cpso.CountParam)
+		}
+	}
+	return &cpso, (&cpso).isValid(true)
+}
+
 // churnOp defines an op where services are created as a part of a workload.
 type churnOp struct {
 	// Must be "churnOp".
@@ -311,7 +397,7 @@ type churnOp struct {
 
 func (co *churnOp) isValid(_ bool) error {
 	if co.Opcode != churnOpcode {
-		return fmt.Errorf("invalid opcode")
+		return fmt.Errorf("invalid opcode %q", co.Opcode)
 	}
 	if co.Mode != Recreate && co.Mode != Create {
 		return fmt.Errorf("invalid mode: %v. must be one of %v", co.Mode, []string{Recreate, Create})
@@ -349,7 +435,7 @@ type barrierOp struct {
 
 func (bo *barrierOp) isValid(allowParameterization bool) error {
 	if bo.Opcode != barrierOpcode {
-		return fmt.Errorf("invalid opcode")
+		return fmt.Errorf("invalid opcode %q", bo.Opcode)
 	}
 	return nil
 }
@@ -408,6 +494,29 @@ func loadSchedulerConfig(file string) (*config.KubeSchedulerConfiguration, error
 	return nil, fmt.Errorf("couldn't decode as KubeSchedulerConfiguration, got %s: ", gvk)
 }
 
+func unrollWorkloadTemplate(b *testing.B, wt []op, w *workload) []op {
+	var unrolled []op
+	for opIndex, o := range wt {
+		realOp, err := o.realOp.patchParams(w)
+		if err != nil {
+			b.Fatalf("op %d: %v", opIndex, err)
+		}
+		switch concreteOp := realOp.(type) {
+		case *createPodSetsOp:
+			klog.Infof("Creating %d pod sets %s", concreteOp.Count, concreteOp.CountParam)
+			for i := 0; i < concreteOp.Count; i++ {
+				copy := concreteOp.CreatePodsOp
+				ns := fmt.Sprintf("%s-%d", concreteOp.NamespacePrefix, i)
+				copy.Namespace = &ns
+				unrolled = append(unrolled, op{realOp: &copy})
+			}
+		default:
+			unrolled = append(unrolled, o)
+		}
+	}
+	return unrolled
+}
+
 func runWorkload(b *testing.B, tc *testCase, w *workload) []DataItem {
 	// 30 minutes should be plenty enough even for the 5000-node tests.
 	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute)
@@ -431,7 +540,7 @@ func runWorkload(b *testing.B, tc *testCase, w *workload) []DataItem {
 	numPodsScheduledPerNamespace := make(map[string]int)
 	nextNodeIndex := 0
 
-	for opIndex, op := range tc.WorkloadTemplate {
+	for opIndex, op := range unrollWorkloadTemplate(b, tc.WorkloadTemplate, w) {
 		realOp, err := op.realOp.patchParams(w)
 		if err != nil {
 			b.Fatalf("op %d: %v", opIndex, err)
@@ -455,6 +564,19 @@ func runWorkload(b *testing.B, tc *testCase, w *workload) []DataItem {
 			})
 			nextNodeIndex += concreteOp.Count
 
+		case *createNamespacesOp:
+			nsPreparer, err := newNamespacePreparer(concreteOp, client)
+			if err != nil {
+				b.Fatalf("op %d: %v", opIndex, err)
+			}
+			if err := nsPreparer.prepare(); err != nil {
+				nsPreparer.cleanup()
+				b.Fatalf("op %d: %v", opIndex, err)
+			}
+			b.Cleanup(func() {
+				nsPreparer.cleanup()
+			})
+
 		case *createPodsOp:
 			var namespace string
 			if concreteOp.Namespace != nil {
@@ -681,6 +803,7 @@ func createPods(namespace string, cpo *createPodsOp, clientset clientset.Interfa
 	if err != nil {
 		return err
 	}
+	klog.Infof("Creating %d pods in namespace %q", cpo.Count, namespace)
 	config := testutils.NewTestPodCreatorConfig()
 	config.AddStrategy(namespace, cpo.Count, strategy)
 	podCreator := testutils.NewTestPodCreator(clientset, config)
@@ -875,3 +998,60 @@ func getCustomVolumeFactory(pvTemplate *v1.PersistentVolume) func(id int) *v1.Pe
 		return pv
 	}
 }
+
+// namespacePreparer holds configuration information for the test namespace preparer.
+type namespacePreparer struct {
+	client clientset.Interface
+	count  int
+	prefix string
+	spec   *v1.Namespace
+}
+
+func newNamespacePreparer(cno *createNamespacesOp, clientset clientset.Interface) (*namespacePreparer, error) {
+	ns := &v1.Namespace{}
+	if cno.NamespaceTemplatePath != nil {
+		if err := getSpecFromFile(cno.NamespaceTemplatePath, ns); err != nil {
+			return nil, fmt.Errorf("parsing NamespaceTemplate: %w", err)
+		}
+	}
+
+	return &namespacePreparer{
+		client: clientset,
+		count:  cno.Count,
+		prefix: cno.Prefix,
+		spec:   ns,
+	}, nil
+}
+
+// prepare creates the namespaces.
+func (p *namespacePreparer) prepare() error {
+	base := &v1.Namespace{}
+	if p.spec != nil {
+		base = p.spec
+	}
+	klog.Infof("Making %d namespaces with prefix %q and template %v", p.count, p.prefix, *base)
+	for i := 0; i < p.count; i++ {
+		n := base.DeepCopy()
+		n.Name = fmt.Sprintf("%s-%d", p.prefix, i)
+		if err := testutils.RetryWithExponentialBackOff(func() (bool, error) {
+			_, err := p.client.CoreV1().Namespaces().Create(context.TODO(), n, metav1.CreateOptions{})
+			return err == nil || apierrors.IsAlreadyExists(err), nil
+		}); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// cleanup deletes existing test namespaces.
+func (p *namespacePreparer) cleanup() error {
+	var errRet error
+	for i := 0; i < p.count; i++ {
+		n := fmt.Sprintf("%s-%d", p.prefix, i)
+		if err := p.client.CoreV1().Namespaces().Delete(context.TODO(), n, metav1.DeleteOptions{}); err != nil {
+			klog.Errorf("Deleting Namespace: %v", err)
+			errRet = err
+		}
+	}
+	return errRet
+}
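
Aside (editor's note, not part of the patch): the three patchParams implementations above all follow the same convention — a CountParam such as "$initNodes" takes precedence over a literal Count and is resolved by stripping the leading "$" (the CountParam[1:] expression) and looking the remainder up in the workload's Params map. A minimal, self-contained sketch of that convention; resolveCount is a hypothetical helper for illustration, not a function in this patch:

package main

import "fmt"

// resolveCount mirrors the lookup done by the patchParams methods above:
// an empty CountParam means the literal count is used as-is, otherwise the
// "$name" parameter is resolved against the workload's Params map.
func resolveCount(count int, countParam string, params map[string]int) (int, error) {
	if countParam == "" {
		return count, nil
	}
	v, ok := params[countParam[1:]] // drop the leading "$"
	if !ok {
		return 0, fmt.Errorf("parameter %s is undefined", countParam)
	}
	return v, nil
}

func main() {
	// Params as declared by the 5000Nodes workloads in performance-config.yaml.
	params := map[string]int{"initNodes": 5000, "initNamespaces": 100, "measurePods": 1000}

	n, err := resolveCount(0, "$initNodes", params)
	fmt.Println(n, err) // 5000 <nil>

	_, err = resolveCount(0, "$initPodsPerNamespace", params)
	fmt.Println(err) // parameter $initPodsPerNamespace is undefined
}

This is why each workload only lists params: the ops in the workloadTemplate reference them indirectly as $initNodes, $initPodsPerNamespace, $initNamespaces, and $measurePods.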