diff --git a/pkg/scheduler/BUILD b/pkg/scheduler/BUILD
index f9904c2931a..97d05f6ebf3 100644
--- a/pkg/scheduler/BUILD
+++ b/pkg/scheduler/BUILD
@@ -123,6 +123,7 @@ filegroup(
         "//pkg/scheduler/framework:all-srcs",
         "//pkg/scheduler/internal/cache:all-srcs",
         "//pkg/scheduler/internal/heap:all-srcs",
+        "//pkg/scheduler/internal/parallelize:all-srcs",
         "//pkg/scheduler/internal/queue:all-srcs",
         "//pkg/scheduler/listers:all-srcs",
         "//pkg/scheduler/metrics:all-srcs",
diff --git a/pkg/scheduler/core/BUILD b/pkg/scheduler/core/BUILD
index baa45505b3f..1e93a4144b3 100644
--- a/pkg/scheduler/core/BUILD
+++ b/pkg/scheduler/core/BUILD
@@ -13,6 +13,7 @@ go_library(
         "//pkg/scheduler/apis/config:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/scheduler/internal/cache:go_default_library",
+        "//pkg/scheduler/internal/parallelize:go_default_library",
         "//pkg/scheduler/internal/queue:go_default_library",
         "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/metrics:go_default_library",
@@ -28,7 +29,6 @@ go_library(
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/listers/policy/v1beta1:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
-        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
         "//staging/src/k8s.io/kube-scheduler/extender/v1:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
         "//vendor/k8s.io/utils/trace:go_default_library",
diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go
index 9dcd7abe6e7..141eacddec1 100644
--- a/pkg/scheduler/core/generic_scheduler.go
+++ b/pkg/scheduler/core/generic_scheduler.go
@@ -28,6 +28,7 @@ import (
     "time"
 
     "k8s.io/klog"
+    "k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
 
     v1 "k8s.io/api/core/v1"
     policy "k8s.io/api/policy/v1beta1"
@@ -35,7 +36,6 @@ import (
     "k8s.io/apimachinery/pkg/labels"
     corelisters "k8s.io/client-go/listers/core/v1"
     policylisters "k8s.io/client-go/listers/policy/v1beta1"
-    "k8s.io/client-go/util/workqueue"
     extenderv1 "k8s.io/kube-scheduler/extender/v1"
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
@@ -479,7 +479,7 @@ func (g *genericScheduler) findNodesThatPassFilters(ctx context.Context, prof *p
     // Stops searching for more nodes once the configured number of feasible nodes
     // are found.
-    workqueue.ParallelizeUntil(ctx, 16, len(allNodes), checkNode)
+    parallelize.Until(ctx, len(allNodes), checkNode)
     processedNodes := int(filteredLen) + len(statuses)
     g.nextStartNodeIndex = (g.nextStartNodeIndex + processedNodes) % len(allNodes)
@@ -870,7 +870,7 @@ func (g *genericScheduler) selectNodesForPreemption(
             resultLock.Unlock()
         }
     }
-    workqueue.ParallelizeUntil(context.TODO(), 16, len(potentialNodes), checkNode)
+    parallelize.Until(ctx, len(potentialNodes), checkNode)
     return nodeToVictims, nil
 }
diff --git a/pkg/scheduler/framework/plugins/interpodaffinity/BUILD b/pkg/scheduler/framework/plugins/interpodaffinity/BUILD
index e9339eced84..b9c8a500af7 100644
--- a/pkg/scheduler/framework/plugins/interpodaffinity/BUILD
+++ b/pkg/scheduler/framework/plugins/interpodaffinity/BUILD
@@ -11,6 +11,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/internal/parallelize:go_default_library",
         "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/scheduler/util:go_default_library",
@@ -20,7 +21,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/validation/field:go_default_library",
-        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
         "//vendor/k8s.io/klog:go_default_library",
         "//vendor/k8s.io/utils/pointer:go_default_library",
     ],
diff --git a/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go b/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go
index af035260623..81a891cf4e9 100644
--- a/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go
+++ b/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go
@@ -25,9 +25,9 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/util/sets"
-    "k8s.io/client-go/util/workqueue"
     "k8s.io/klog"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    "k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
     "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )
@@ -240,7 +240,7 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, allNodes []*nodeinfo.Node
             }
         }
     }
-    workqueue.ParallelizeUntil(ctx, 16, len(allNodes), processNode)
+    parallelize.Until(ctx, len(allNodes), processNode)
 
     if err := errCh.ReceiveError(); err != nil {
         return nil, err
@@ -304,7 +304,7 @@ func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, allNodes []*nodei
             appendResult(node.Name, nodeTopologyPairsAffinityPodsMap, nodeTopologyPairsAntiAffinityPodsMap)
         }
     }
-    workqueue.ParallelizeUntil(context.Background(), 16, len(allNodes), processNode)
+    parallelize.Until(context.Background(), len(allNodes), processNode)
 
     return topologyPairsAffinityPodsMap, topologyToMatchedExistingAntiAffinityTerms, nil
 }
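Note (editor's addition, not part of the patch): the plugin call sites above pair the parallel loop with the scheduler's ErrorChannel (the errCh.ReceiveError() calls in the hunks) so that the first worker error wins and the rest are dropped. The sketch below reproduces that pattern with a plain buffered channel against the public workqueue API; the failing piece number is hypothetical.

package main

import (
	"context"
	"errors"
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Buffer of one: the first failing worker stores its error without
	// blocking; later failures are simply dropped.
	errCh := make(chan error, 1)

	workqueue.ParallelizeUntil(ctx, 16, 1000, func(i int) {
		if i == 42 { // hypothetical failure
			select {
			case errCh <- errors.New("piece 42 failed"):
				cancel() // stop the remaining workers early
			default:
			}
		}
	})

	select {
	case err := <-errCh:
		fmt.Println("first error:", err)
	default:
		fmt.Println("all pieces succeeded")
	}
}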
"k8s.io/kubernetes/pkg/scheduler/internal/parallelize" "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) @@ -259,7 +259,7 @@ func (pl *InterPodAffinity) PreScore( pl.Unlock() } } - workqueue.ParallelizeUntil(ctx, 16, len(allNodes), processNode) + parallelize.Until(ctx, len(allNodes), processNode) if err := errCh.ReceiveError(); err != nil { return framework.NewStatus(framework.Error, err.Error()) } diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/BUILD b/pkg/scheduler/framework/plugins/podtopologyspread/BUILD index 2829a8ad5d4..78a644051bc 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/BUILD +++ b/pkg/scheduler/framework/plugins/podtopologyspread/BUILD @@ -13,6 +13,7 @@ go_library( deps = [ "//pkg/scheduler/framework/plugins/helper:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/internal/parallelize:go_default_library", "//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -25,7 +26,6 @@ go_library( "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", - "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go b/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go index 448d47f309b..16cb2ca120b 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go @@ -24,10 +24,10 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/util/workqueue" "k8s.io/klog" pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/internal/parallelize" "k8s.io/kubernetes/pkg/scheduler/nodeinfo" ) @@ -267,7 +267,7 @@ func (pl *PodTopologySpread) calPreFilterState(pod *v1.Pod) (*preFilterState, er addTopologyPairMatchNum(pair, matchTotal) } } - workqueue.ParallelizeUntil(context.Background(), 16, len(allNodes), processNode) + parallelize.Until(context.Background(), len(allNodes), processNode) // calculate min match for each topology pair for i := 0; i < len(constraints); i++ { diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go b/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go index ccdc048c4dc..6686361f85a 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go @@ -25,10 +25,10 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/client-go/util/workqueue" "k8s.io/klog" pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/internal/parallelize" ) const preScoreStateKey = "PreScore" + Name @@ -153,7 +153,7 @@ func (pl *PodTopologySpread) PreScore( atomic.AddInt64(state.TopologyPairToPodCounts[pair], matchSum) } } - workqueue.ParallelizeUntil(ctx, 16, len(allNodes), processAllNode) + parallelize.Until(ctx, len(allNodes), processAllNode) cycleState.Write(preScoreStateKey, state) return nil diff 
--git a/pkg/scheduler/framework/v1alpha1/BUILD b/pkg/scheduler/framework/v1alpha1/BUILD index 4a03bc53cca..24a76b74107 100644 --- a/pkg/scheduler/framework/v1alpha1/BUILD +++ b/pkg/scheduler/framework/v1alpha1/BUILD @@ -15,6 +15,7 @@ go_library( deps = [ "//pkg/controller/volume/scheduling:go_default_library", "//pkg/scheduler/apis/config:go_default_library", + "//pkg/scheduler/internal/parallelize:go_default_library", "//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", @@ -26,7 +27,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", - "//staging/src/k8s.io/client-go/util/workqueue:go_default_library", "//staging/src/k8s.io/component-base/metrics:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", diff --git a/pkg/scheduler/framework/v1alpha1/framework.go b/pkg/scheduler/framework/v1alpha1/framework.go index 4924a51db2a..aae44cdf405 100644 --- a/pkg/scheduler/framework/v1alpha1/framework.go +++ b/pkg/scheduler/framework/v1alpha1/framework.go @@ -28,10 +28,10 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/informers" clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/util/workqueue" "k8s.io/klog" "k8s.io/kubernetes/pkg/controller/volume/scheduling" "k8s.io/kubernetes/pkg/scheduler/apis/config" + "k8s.io/kubernetes/pkg/scheduler/internal/parallelize" schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" "k8s.io/kubernetes/pkg/scheduler/metrics" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" @@ -513,7 +513,7 @@ func (f *framework) RunScorePlugins(ctx context.Context, state *CycleState, pod errCh := schedutil.NewErrorChannel() // Run Score method for each node in parallel. - workqueue.ParallelizeUntil(ctx, 16, len(nodes), func(index int) { + parallelize.Until(ctx, len(nodes), func(index int) { for _, pl := range f.scorePlugins { nodeName := nodes[index].Name s, status := f.runScorePlugin(ctx, pl, state, pod, nodeName) @@ -534,7 +534,7 @@ func (f *framework) RunScorePlugins(ctx context.Context, state *CycleState, pod } // Run NormalizeScore method for each ScorePlugin in parallel. - workqueue.ParallelizeUntil(ctx, 16, len(f.scorePlugins), func(index int) { + parallelize.Until(ctx, len(f.scorePlugins), func(index int) { pl := f.scorePlugins[index] nodeScoreList := pluginToNodeScores[pl.Name()] if pl.ScoreExtensions() == nil { @@ -554,7 +554,7 @@ func (f *framework) RunScorePlugins(ctx context.Context, state *CycleState, pod } // Apply score defaultWeights for each ScorePlugin in parallel. - workqueue.ParallelizeUntil(ctx, 16, len(f.scorePlugins), func(index int) { + parallelize.Until(ctx, len(f.scorePlugins), func(index int) { pl := f.scorePlugins[index] // Score plugins' weight has been checked when they are initialized. 
weight := f.pluginNameToWeightMap[pl.Name()] diff --git a/pkg/scheduler/internal/parallelize/BUILD b/pkg/scheduler/internal/parallelize/BUILD new file mode 100644 index 00000000000..dcc3d6473fe --- /dev/null +++ b/pkg/scheduler/internal/parallelize/BUILD @@ -0,0 +1,23 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["parallelism.go"], + importpath = "k8s.io/kubernetes/pkg/scheduler/internal/parallelize", + visibility = ["//pkg/scheduler:__subpackages__"], + deps = ["//staging/src/k8s.io/client-go/util/workqueue:go_default_library"], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], + visibility = ["//visibility:public"], +) diff --git a/pkg/scheduler/internal/parallelize/parallelism.go b/pkg/scheduler/internal/parallelize/parallelism.go new file mode 100644 index 00000000000..ab337deeca1 --- /dev/null +++ b/pkg/scheduler/internal/parallelize/parallelism.go @@ -0,0 +1,43 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package parallelize + +import ( + "context" + "math" + + "k8s.io/client-go/util/workqueue" +) + +const parallelism = 16 + +// chunkSizeFor returns a chunk size for the given number of items to use for +// parallel work. The size aims to produce good CPU utilization. +func chunkSizeFor(n int) workqueue.Options { + s := int(math.Sqrt(float64(n))) + if r := n/parallelism + 1; s > r { + s = r + } else if s < 1 { + s = 1 + } + return workqueue.WithChunkSize(s) +} + +// Until is a wrapper around workqueue.ParallelizeUntil to use in scheduling algorithms. 
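Note (editor's addition, not part of the patch): chunkSizeFor picks, in effect, min(sqrt(n), n/16+1), clamped to at least 1. The sketch below mirrors that arithmetic so the chosen sizes are visible for concrete inputs; the helper name chunkSize and the sample values are ours, only the formula comes from the patch.

package main

import (
	"fmt"
	"math"
)

const parallelism = 16

// chunkSize mirrors chunkSizeFor from parallelism.go above, but returns the
// plain int instead of wrapping it in workqueue.WithChunkSize.
func chunkSize(n int) int {
	s := int(math.Sqrt(float64(n)))
	if r := n/parallelism + 1; s > r {
		s = r
	} else if s < 1 {
		s = 1
	}
	return s
}

func main() {
	// Prints: 0->1 10->1 100->7 500->22 5000->70
	for _, n := range []int{0, 10, 100, 500, 5000} {
		fmt.Printf("%d->%d ", n, chunkSize(n))
	}
	fmt.Println()
}

The n/parallelism+1 bound keeps the number of chunks near the worker count so no worker starves, while the sqrt(n) term stops chunks on large clusters from growing so big that one slow chunk dominates the tail latency.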
diff --git a/staging/src/k8s.io/client-go/go.mod b/staging/src/k8s.io/client-go/go.mod
index 5f2761c0723..0b65250e2b0 100644
--- a/staging/src/k8s.io/client-go/go.mod
+++ b/staging/src/k8s.io/client-go/go.mod
@@ -14,6 +14,7 @@ require (
     github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903
     github.com/golang/protobuf v1.3.2
     github.com/google/btree v1.0.0 // indirect
+    github.com/google/go-cmp v0.3.1
     github.com/google/gofuzz v1.1.0
     github.com/google/uuid v1.1.1
     github.com/googleapis/gnostic v0.1.0
diff --git a/staging/src/k8s.io/client-go/util/workqueue/BUILD b/staging/src/k8s.io/client-go/util/workqueue/BUILD
index 538ad31cde9..302d16a930c 100644
--- a/staging/src/k8s.io/client-go/util/workqueue/BUILD
+++ b/staging/src/k8s.io/client-go/util/workqueue/BUILD
@@ -13,6 +13,7 @@ go_test(
         "delaying_queue_test.go",
         "main_test.go",
         "metrics_test.go",
+        "parallelizer_test.go",
         "queue_test.go",
         "rate_limiting_queue_test.go",
     ],
@@ -20,6 +21,7 @@ go_test(
     deps = [
         "//staging/src/k8s.io/apimachinery/pkg/util/clock:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//vendor/github.com/google/go-cmp/cmp:go_default_library",
     ],
 )
diff --git a/staging/src/k8s.io/client-go/util/workqueue/parallelizer.go b/staging/src/k8s.io/client-go/util/workqueue/parallelizer.go
index 5928a0c5b7d..366bf20a312 100644
--- a/staging/src/k8s.io/client-go/util/workqueue/parallelizer.go
+++ b/staging/src/k8s.io/client-go/util/workqueue/parallelizer.go
@@ -25,39 +25,77 @@ import (
 
 type DoWorkPieceFunc func(piece int)
 
+type options struct {
+    chunkSize int
+}
+
+type Options func(*options)
+
+// WithChunkSize allows setting chunks of work items for the workers, rather
+// than processing them one by one.
+// It is recommended to use this option if the number of pieces is
+// significantly higher than the number of workers and the work done for each
+// item is small.
+func WithChunkSize(c int) func(*options) {
+    return func(o *options) {
+        o.chunkSize = c
+    }
+}
+
 // ParallelizeUntil is a framework that allows for parallelizing N
 // independent pieces of work until done or the context is canceled.
-func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc) {
-    var stop <-chan struct{}
-    if ctx != nil {
-        stop = ctx.Done()
+func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc, opts ...Options) {
+    if pieces == 0 {
+        return
+    }
+    o := options{}
+    for _, opt := range opts {
+        opt(&o)
+    }
+    chunkSize := o.chunkSize
+    if chunkSize < 1 {
+        chunkSize = 1
     }
-    toProcess := make(chan int, pieces)
-    for i := 0; i < pieces; i++ {
+
+    chunks := ceilDiv(pieces, chunkSize)
+    toProcess := make(chan int, chunks)
+    for i := 0; i < chunks; i++ {
         toProcess <- i
     }
     close(toProcess)
 
-    if pieces < workers {
-        workers = pieces
+    var stop <-chan struct{}
+    if ctx != nil {
+        stop = ctx.Done()
+    }
+    if chunks < workers {
+        workers = chunks
     }
-
     wg := sync.WaitGroup{}
     wg.Add(workers)
     for i := 0; i < workers; i++ {
         go func() {
             defer utilruntime.HandleCrash()
             defer wg.Done()
-            for piece := range toProcess {
-                select {
-                case <-stop:
-                    return
-                default:
-                    doWorkPiece(piece)
+            for chunk := range toProcess {
+                start := chunk * chunkSize
+                end := start + chunkSize
+                if end > pieces {
+                    end = pieces
+                }
+                for p := start; p < end; p++ {
+                    select {
+                    case <-stop:
+                        return
+                    default:
+                        doWorkPiece(p)
+                    }
                 }
             }
         }()
     }
     wg.Wait()
 }
+
+func ceilDiv(a, b int) int {
+    return (a + b - 1) / b
+}
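Note (editor's addition, not part of the patch): because opts is variadic, every existing ParallelizeUntil call site compiles unchanged, and an absent or non-positive chunk size falls back to 1, which is exactly the old piece-at-a-time behavior. A minimal usage sketch of the new option; the summing workload is invented for illustration.

package main

import (
	"context"
	"fmt"
	"sync/atomic"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	const pieces = 1000
	var sum int64

	// 16 workers pull 40 chunks of 25 indices each from the internal
	// channel, instead of contending on 1000 single-piece receives.
	workqueue.ParallelizeUntil(context.Background(), 16, pieces, func(i int) {
		atomic.AddInt64(&sum, int64(i))
	}, workqueue.WithChunkSize(25))

	fmt.Println(sum) // 499500, i.e. 0+1+...+999
}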
diff --git a/staging/src/k8s.io/client-go/util/workqueue/parallelizer_test.go b/staging/src/k8s.io/client-go/util/workqueue/parallelizer_test.go
new file mode 100644
index 00000000000..24438346b18
--- /dev/null
+++ b/staging/src/k8s.io/client-go/util/workqueue/parallelizer_test.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2020 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package workqueue
+
+import (
+    "context"
+    "fmt"
+    "sync/atomic"
+    "testing"
+
+    "github.com/google/go-cmp/cmp"
+)
+
+type testCase struct {
+    pieces    int
+    workers   int
+    chunkSize int
+}
+
+func (c testCase) String() string {
+    return fmt.Sprintf("pieces:%d,workers:%d,chunkSize:%d", c.pieces, c.workers, c.chunkSize)
+}
+
+var cases = []testCase{
+    {
+        pieces:    1000,
+        workers:   10,
+        chunkSize: 1,
+    },
+    {
+        pieces:    1000,
+        workers:   10,
+        chunkSize: 10,
+    },
+    {
+        pieces:    1000,
+        workers:   10,
+        chunkSize: 100,
+    },
+    {
+        pieces:    999,
+        workers:   10,
+        chunkSize: 13,
+    },
+}
+
+func TestParallelizeUntil(t *testing.T) {
+    for _, tc := range cases {
+        t.Run(tc.String(), func(t *testing.T) {
+            seen := make([]int32, tc.pieces)
+            ctx := context.Background()
+            ParallelizeUntil(ctx, tc.workers, tc.pieces, func(p int) {
+                atomic.AddInt32(&seen[p], 1)
+            }, WithChunkSize(tc.chunkSize))
+
+            wantSeen := make([]int32, tc.pieces)
+            for i := 0; i < tc.pieces; i++ {
+                wantSeen[i] = 1
+            }
+            if diff := cmp.Diff(wantSeen, seen); diff != "" {
+                t.Errorf("bad number of visits (-want,+got):\n%s", diff)
+            }
+        })
+    }
+}
+
+func BenchmarkParallelizeUntil(b *testing.B) {
+    for _, tc := range cases {
+        b.Run(tc.String(), func(b *testing.B) {
+            ctx := context.Background()
+            isPrime := make([]bool, tc.pieces)
+            b.ResetTimer()
+            for c := 0; c < b.N; c++ {
+                ParallelizeUntil(ctx, tc.workers, tc.pieces, func(p int) {
+                    isPrime[p] = calPrime(p)
+                }, WithChunkSize(tc.chunkSize))
+            }
+            b.StopTimer()
+            want := []bool{false, false, true, true, false, true, false, true, false, false, false, true}
+            if diff := cmp.Diff(want, isPrime[:len(want)]); diff != "" {
+                b.Errorf("miscalculated isPrime (-want,+got):\n%s", diff)
+            }
+        })
+    }
+}
+
+func calPrime(p int) bool {
+    if p <= 1 {
+        return false
+    }
+    for i := 2; i*i <= p; i++ {
+        if p%i == 0 {
+            return false
+        }
+    }
+    return true
+}
diff --git a/test/e2e/framework/.import-restrictions b/test/e2e/framework/.import-restrictions
index de7fe003641..6236be3b033 100644
--- a/test/e2e/framework/.import-restrictions
+++ b/test/e2e/framework/.import-restrictions
@@ -187,6 +187,7 @@
       "k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports",
       "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources",
       "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1",
+      "k8s.io/kubernetes/pkg/scheduler/internal/parallelize",
       "k8s.io/kubernetes/pkg/scheduler/listers",
       "k8s.io/kubernetes/pkg/scheduler/metrics",
       "k8s.io/kubernetes/pkg/scheduler/nodeinfo",
diff --git a/test/integration/scheduler_perf/scheduler_bench_test.go b/test/integration/scheduler_perf/scheduler_bench_test.go
index 8a12044e273..6f0e8c21214 100644
--- a/test/integration/scheduler_perf/scheduler_bench_test.go
+++ b/test/integration/scheduler_perf/scheduler_bench_test.go
@@ -47,6 +47,7 @@ var (
     defaultTests = []struct{ nodes, existingPods, minPods int }{
         {nodes: 500, existingPods: 500, minPods: 1000},
+        {nodes: 600, existingPods: 10000, minPods: 1000},
         {nodes: 5000, existingPods: 5000, minPods: 1000},
     }
     testNamespace = "sched-test"
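Note (editor's addition, not part of the patch): the select on the stop channel sits inside the per-chunk loop, so cancellation is still observed between individual pieces rather than only between chunks. A small sketch of early exit; the exact count printed depends on goroutine scheduling.

package main

import (
	"context"
	"fmt"
	"sync/atomic"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	var done int32

	workqueue.ParallelizeUntil(ctx, 4, 100000, func(i int) {
		if atomic.AddInt32(&done, 1) == 10 {
			cancel() // each worker returns at the next piece it attempts
		}
	}, workqueue.WithChunkSize(100))

	// Far fewer than 100000 pieces run: workers observe ctx.Done()
	// before every doWorkPiece call, even mid-chunk.
	fmt.Printf("processed %d of 100000 pieces\n", atomic.LoadInt32(&done))
}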