diff --git a/test/integration/scheduler_perf/README.md b/test/integration/scheduler_perf/README.md
index 39f73759bf8..536ce951354 100644
--- a/test/integration/scheduler_perf/README.md
+++ b/test/integration/scheduler_perf/README.md
@@ -68,9 +68,9 @@ To produce a cpu profile:
 make test-integration WHAT=./test/integration/scheduler_perf KUBE_TIMEOUT="-timeout=3600s" KUBE_TEST_VMODULE="''" KUBE_TEST_ARGS="-alsologtostderr=false -logtostderr=false -run=^$$ -benchtime=1ns -bench=BenchmarkPerfScheduling -cpuprofile ~/cpu-profile.out"
 ```
 
-### How to configure bechmark tests
+### How to configure benchmark tests
 
-Configuration file located under config/performance-config.yaml contains a list of templates.
+Configuration file located under `config/performance-config.yaml` contains a list of templates.
 Each template allows to set:
 - node manifest
 - manifests for initial and testing pod
@@ -78,19 +78,13 @@ Each template allows to set:
 - templates for PVs and PVCs
 - feature gates
 
-See `simpleTestCases` data type implementation for available configuration for each template.
+See `op` data type implementation in [scheduler_perf_test.go](scheduler_perf_test.go)
+for available operations to build `WorkloadTemplate`.
 
 Initial pods create a state of a cluster before the scheduler performance measurement can begin.
 Testing pods are then subject to performance measurement.
 
-The configuration file under config/performance-config.yaml contains a default list of templates to cover
+The configuration file under `config/performance-config.yaml` contains a default list of templates to cover
 various scenarios. In case you want to add your own, you can extend the list with new templates.
-It's also possible to extend `simpleTestCases` data type, respectively its underlying data types
+It's also possible to extend `op` data type, respectively its underlying data types
 to extend configuration of possible test cases.
-
-
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/test/component/scheduler/perf/README.md?pixel)]()
-
-
-
-[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/test/integration/scheduler_perf/README.md?pixel)]()
diff --git a/test/integration/scheduler_perf/scheduler_perf_test.go b/test/integration/scheduler_perf/scheduler_perf_test.go
index 0a0511151a7..fd50bf1eb41 100644
--- a/test/integration/scheduler_perf/scheduler_perf_test.go
+++ b/test/integration/scheduler_perf/scheduler_perf_test.go
@@ -173,8 +173,8 @@ func (cno *createNodesOp) isValid(allowParameterization bool) error {
 	if cno.Opcode != createNodesOpcode {
 		return fmt.Errorf("invalid opcode")
 	}
-	ok := (cno.Count > 0 ||
-		(cno.CountParam != "" && allowParameterization && isValidParameterizable(cno.CountParam)))
+	ok := cno.Count > 0 ||
+		(cno.CountParam != "" && allowParameterization && isValidParameterizable(cno.CountParam))
 	if !ok {
 		return fmt.Errorf("invalid Count=%d / CountParam=%q", cno.Count, cno.CountParam)
 	}
@@ -226,8 +226,8 @@ func (cpo *createPodsOp) isValid(allowParameterization bool) error {
 	if cpo.Opcode != createPodsOpcode {
 		return fmt.Errorf("invalid opcode")
 	}
-	ok := (cpo.Count > 0 ||
-		(cpo.CountParam != "" && allowParameterization && isValidParameterizable(cpo.CountParam)))
+	ok := cpo.Count > 0 ||
+		(cpo.CountParam != "" && allowParameterization && isValidParameterizable(cpo.CountParam))
 	if !ok {
 		return fmt.Errorf("invalid Count=%d / CountParam=%q", cpo.Count, cpo.CountParam)
 	}
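
For context on the `isValid` cleanup above, here is a minimal, self-contained sketch of the Count/CountParam rule that both `createNodesOp` and `createPodsOp` apply: an operation is acceptable when it has a literal `Count > 0`, or when parameterization is allowed and its `CountParam` is a valid parameter reference. The sketch is illustrative only; `countOrParamValid` is a made-up helper name, and the assumption that `isValidParameterizable` means "starts with `$`" is for demonstration, not taken from this patch.

```go
package main

import (
	"fmt"
	"strings"
)

// isValidParameterizable stands in for the real helper; here we assume a
// parameter reference is written as "$name" in the workload template.
func isValidParameterizable(param string) bool {
	return strings.HasPrefix(param, "$")
}

// countOrParamValid mirrors the expression simplified in the diff:
// either a concrete count is given, or (when allowed) a parameter is.
func countOrParamValid(count int, countParam string, allowParameterization bool) bool {
	return count > 0 ||
		(countParam != "" && allowParameterization && isValidParameterizable(countParam))
}

func main() {
	fmt.Println(countOrParamValid(100, "", false))        // true: literal count
	fmt.Println(countOrParamValid(0, "$initPods", true))  // true: parameterized count
	fmt.Println(countOrParamValid(0, "$initPods", false)) // false: parameters not allowed here
	fmt.Println(countOrParamValid(0, "initPods", true))   // false: not a valid parameter reference
}
```

The outer parentheses removed by the patch were redundant because the `||` expression already binds as a whole; the change is purely stylistic and does not alter this logic.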