From 6a14203658c88ae196e154166752d6748fe06455 Mon Sep 17 00:00:00 2001
From: Aldo Culquicondor
Date: Mon, 13 Jan 2020 14:38:25 -0500
Subject: [PATCH 1/2] Rename cache's Snapshot to Dump

Signed-off-by: Aldo Culquicondor
---
 pkg/scheduler/internal/cache/cache.go             | 4 ++--
 pkg/scheduler/internal/cache/cache_test.go        | 2 +-
 pkg/scheduler/internal/cache/debugger/comparer.go | 6 +++---
 pkg/scheduler/internal/cache/debugger/dumper.go   | 4 ++--
 pkg/scheduler/internal/cache/fake/fake_cache.go   | 4 ++--
 pkg/scheduler/internal/cache/interface.go         | 8 ++++----
 6 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/pkg/scheduler/internal/cache/cache.go b/pkg/scheduler/internal/cache/cache.go
index 1d3fbb420ad..75ada0dfd29 100644
--- a/pkg/scheduler/internal/cache/cache.go
+++ b/pkg/scheduler/internal/cache/cache.go
@@ -177,7 +177,7 @@ func (cache *schedulerCache) removeNodeInfoFromList(name string) {
 // debugging purposes only and shouldn't be confused with UpdateNodeInfoSnapshot
 // function.
 // This method is expensive, and should be only used in non-critical path.
-func (cache *schedulerCache) Snapshot() *Snapshot {
+func (cache *schedulerCache) Dump() *Dump {
 	cache.mu.RLock()
 	defer cache.mu.RUnlock()

@@ -191,7 +191,7 @@ func (cache *schedulerCache) Snapshot() *Snapshot {
 		assumedPods[k] = v
 	}

-	return &Snapshot{
+	return &Dump{
 		Nodes: nodes,
 		AssumedPods: assumedPods,
 	}
diff --git a/pkg/scheduler/internal/cache/cache_test.go b/pkg/scheduler/internal/cache/cache_test.go
index c6630b4ce3a..d3abfe53033 100644
--- a/pkg/scheduler/internal/cache/cache_test.go
+++ b/pkg/scheduler/internal/cache/cache_test.go
@@ -398,7 +398,7 @@ func TestSnapshot(t *testing.T) {
 		}
 	}

-	snapshot := cache.Snapshot()
+	snapshot := cache.Dump()
 	if len(snapshot.Nodes) != len(cache.nodes) {
 		t.Errorf("Unequal number of nodes in the cache and its snapshot. expected: %v, got: %v", len(cache.nodes), len(snapshot.Nodes))
 	}
diff --git a/pkg/scheduler/internal/cache/debugger/comparer.go b/pkg/scheduler/internal/cache/debugger/comparer.go
index fce703d87fe..38c7cd7311b 100644
--- a/pkg/scheduler/internal/cache/debugger/comparer.go
+++ b/pkg/scheduler/internal/cache/debugger/comparer.go
@@ -52,15 +52,15 @@ func (c *CacheComparer) Compare() error {
 		return err
 	}

-	snapshot := c.Cache.Snapshot()
+	dump := c.Cache.Dump()

 	pendingPods := c.PodQueue.PendingPods()

-	if missed, redundant := c.CompareNodes(nodes, snapshot.Nodes); len(missed)+len(redundant) != 0 {
+	if missed, redundant := c.CompareNodes(nodes, dump.Nodes); len(missed)+len(redundant) != 0 {
 		klog.Warningf("cache mismatch: missed nodes: %s; redundant nodes: %s", missed, redundant)
 	}

-	if missed, redundant := c.ComparePods(pods, pendingPods, snapshot.Nodes); len(missed)+len(redundant) != 0 {
+	if missed, redundant := c.ComparePods(pods, pendingPods, dump.Nodes); len(missed)+len(redundant) != 0 {
 		klog.Warningf("cache mismatch: missed pods: %s; redundant pods: %s", missed, redundant)
 	}

diff --git a/pkg/scheduler/internal/cache/debugger/dumper.go b/pkg/scheduler/internal/cache/debugger/dumper.go
index 6291b1f0629..601e13c9011 100644
--- a/pkg/scheduler/internal/cache/debugger/dumper.go
+++ b/pkg/scheduler/internal/cache/debugger/dumper.go
@@ -43,9 +43,9 @@ func (d *CacheDumper) DumpAll() {

 // dumpNodes writes NodeInfo to the scheduler logs.
func (d *CacheDumper) dumpNodes() { - snapshot := d.cache.Snapshot() + dump := d.cache.Dump() klog.Info("Dump of cached NodeInfo") - for _, nodeInfo := range snapshot.Nodes { + for _, nodeInfo := range dump.Nodes { klog.Info(d.printNodeInfo(nodeInfo)) } } diff --git a/pkg/scheduler/internal/cache/fake/fake_cache.go b/pkg/scheduler/internal/cache/fake/fake_cache.go index e13c2f06ae6..ad14bd6bf79 100644 --- a/pkg/scheduler/internal/cache/fake/fake_cache.go +++ b/pkg/scheduler/internal/cache/fake/fake_cache.go @@ -89,8 +89,8 @@ func (c *Cache) FilteredList(filter schedulerlisters.PodFilter, selector labels. } // Snapshot is a fake method for testing -func (c *Cache) Snapshot() *internalcache.Snapshot { - return &internalcache.Snapshot{} +func (c *Cache) Dump() *internalcache.Dump { + return &internalcache.Dump{} } // GetNodeInfo is a fake method for testing. diff --git a/pkg/scheduler/internal/cache/interface.go b/pkg/scheduler/internal/cache/interface.go index a4c47cc0cc3..ac8f935f9a9 100644 --- a/pkg/scheduler/internal/cache/interface.go +++ b/pkg/scheduler/internal/cache/interface.go @@ -102,12 +102,12 @@ type Cache interface { // on this node. UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) error - // Snapshot takes a snapshot on current cache - Snapshot() *Snapshot + // Dump produces a dump of the current cache. + Dump() *Dump } -// Snapshot is a snapshot of cache state -type Snapshot struct { +// Dump is a dump of the cache state. +type Dump struct { AssumedPods map[string]bool Nodes map[string]*schedulernodeinfo.NodeInfo } From f53d7e55dfff45e15327fc3608c356930b008996 Mon Sep 17 00:00:00 2001 From: Aldo Culquicondor Date: Mon, 13 Jan 2020 15:49:57 -0500 Subject: [PATCH 2/2] Move Snapshot from nodeinfo/snapshot to internal/cache Signed-off-by: Aldo Culquicondor --- pkg/scheduler/BUILD | 2 - pkg/scheduler/core/BUILD | 2 - pkg/scheduler/core/generic_scheduler.go | 7 +- pkg/scheduler/core/generic_scheduler_test.go | 19 +++-- pkg/scheduler/eventhandlers_test.go | 1 - pkg/scheduler/factory.go | 3 +- pkg/scheduler/factory_test.go | 3 +- .../plugins/defaultpodtopologyspread/BUILD | 2 +- .../default_pod_topology_spread_perf_test.go | 4 +- .../default_pod_topology_spread_test.go | 6 +- .../framework/plugins/imagelocality/BUILD | 2 +- .../imagelocality/image_locality_test.go | 4 +- .../framework/plugins/interpodaffinity/BUILD | 2 +- .../interpodaffinity/filtering_test.go | 16 ++-- .../plugins/interpodaffinity/scoring_test.go | 6 +- .../framework/plugins/nodeaffinity/BUILD | 2 +- .../nodeaffinity/node_affinity_test.go | 4 +- .../framework/plugins/nodelabel/BUILD | 2 +- .../plugins/nodelabel/node_label_test.go | 6 +- .../plugins/nodepreferavoidpods/BUILD | 2 +- .../node_prefer_avoid_pods_test.go | 4 +- .../framework/plugins/noderesources/BUILD | 2 +- .../noderesources/balanced_allocation_test.go | 4 +- .../noderesources/least_allocated_test.go | 4 +- .../noderesources/most_allocated_test.go | 4 +- .../requested_to_capacity_ratio_test.go | 8 +- .../noderesources/resource_limits_test.go | 4 +- .../framework/plugins/podtopologyspread/BUILD | 2 +- .../podtopologyspread/filtering_test.go | 14 ++-- .../plugins/podtopologyspread/scoring_test.go | 8 +- .../framework/plugins/serviceaffinity/BUILD | 2 +- .../serviceaffinity/service_affinity_test.go | 12 +-- .../framework/plugins/tainttoleration/BUILD | 2 +- .../tainttoleration/taint_toleration_test.go | 4 +- pkg/scheduler/internal/cache/BUILD | 5 +- pkg/scheduler/internal/cache/cache.go | 47 ++++++------ 
pkg/scheduler/internal/cache/cache_test.go | 48 ++++++------ pkg/scheduler/internal/cache/fake/BUILD | 1 - .../internal/cache/fake/fake_cache.go | 7 +- pkg/scheduler/internal/cache/interface.go | 5 +- .../snapshot => internal/cache}/snapshot.go | 74 +++++++++---------- .../cache}/snapshot_test.go | 2 +- pkg/scheduler/internal/queue/BUILD | 2 +- .../internal/queue/scheduling_queue_test.go | 4 +- pkg/scheduler/nodeinfo/BUILD | 5 +- pkg/scheduler/nodeinfo/snapshot/BUILD | 41 ---------- pkg/scheduler/scheduler.go | 3 +- pkg/scheduler/scheduler_test.go | 5 +- test/integration/scheduler/util.go | 4 +- 49 files changed, 179 insertions(+), 243 deletions(-) rename pkg/scheduler/{nodeinfo/snapshot => internal/cache}/snapshot.go (69%) rename pkg/scheduler/{nodeinfo/snapshot => internal/cache}/snapshot_test.go (99%) delete mode 100644 pkg/scheduler/nodeinfo/snapshot/BUILD diff --git a/pkg/scheduler/BUILD b/pkg/scheduler/BUILD index 7ec0a0c0fea..f307cb86699 100644 --- a/pkg/scheduler/BUILD +++ b/pkg/scheduler/BUILD @@ -27,7 +27,6 @@ go_library( "//pkg/scheduler/internal/cache/debugger:go_default_library", "//pkg/scheduler/internal/queue:go_default_library", "//pkg/scheduler/metrics:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", @@ -81,7 +80,6 @@ go_test( "//pkg/scheduler/internal/queue:go_default_library", "//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//pkg/scheduler/testing:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/scheduler/core/BUILD b/pkg/scheduler/core/BUILD index 4dd2633e627..54c1bb834ef 100644 --- a/pkg/scheduler/core/BUILD +++ b/pkg/scheduler/core/BUILD @@ -19,7 +19,6 @@ go_library( "//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//pkg/scheduler/util:go_default_library", "//pkg/scheduler/volumebinder:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", @@ -68,7 +67,6 @@ go_test( "//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/listers/fake:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//pkg/scheduler/testing:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go index 7490b71562b..339aa9ecdcf 100644 --- a/pkg/scheduler/core/generic_scheduler.go +++ b/pkg/scheduler/core/generic_scheduler.go @@ -45,7 +45,6 @@ import ( "k8s.io/kubernetes/pkg/scheduler/listers" "k8s.io/kubernetes/pkg/scheduler/metrics" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" "k8s.io/kubernetes/pkg/scheduler/util" "k8s.io/kubernetes/pkg/scheduler/volumebinder" utiltrace "k8s.io/utils/trace" @@ -138,7 +137,7 @@ type genericScheduler struct { schedulingQueue internalqueue.SchedulingQueue framework framework.Framework extenders []algorithm.SchedulerExtender - nodeInfoSnapshot *nodeinfosnapshot.Snapshot + nodeInfoSnapshot 
*internalcache.Snapshot volumeBinder *volumebinder.VolumeBinder pvcLister corelisters.PersistentVolumeClaimLister pdbLister policylisters.PodDisruptionBudgetLister @@ -152,7 +151,7 @@ type genericScheduler struct { // functions. func (g *genericScheduler) Snapshot() error { // Used for all fit and priority funcs. - return g.cache.UpdateNodeInfoSnapshot(g.nodeInfoSnapshot) + return g.cache.UpdateSnapshot(g.nodeInfoSnapshot) } // Framework returns the framework instance. @@ -1097,7 +1096,7 @@ func podPassesBasicChecks(pod *v1.Pod, pvcLister corelisters.PersistentVolumeCla func NewGenericScheduler( cache internalcache.Cache, podQueue internalqueue.SchedulingQueue, - nodeInfoSnapshot *nodeinfosnapshot.Snapshot, + nodeInfoSnapshot *internalcache.Snapshot, framework framework.Framework, extenders []algorithm.SchedulerExtender, volumeBinder *volumebinder.VolumeBinder, diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go index 5e8f8cd8ab1..974bec21547 100644 --- a/pkg/scheduler/core/generic_scheduler_test.go +++ b/pkg/scheduler/core/generic_scheduler_test.go @@ -58,7 +58,6 @@ import ( fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake" "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" st "k8s.io/kubernetes/pkg/scheduler/testing" schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) @@ -293,7 +292,7 @@ func (pl *falseMapPlugin) ScoreExtensions() framework.ScoreExtensions { return nil } -var emptySnapshot = nodeinfosnapshot.NewEmptySnapshot() +var emptySnapshot = internalcache.NewEmptySnapshot() func makeNodeList(nodeNames []string) []*v1.Node { result := make([]*v1.Node, 0, len(nodeNames)) @@ -796,7 +795,7 @@ func TestGenericScheduler(t *testing.T) { for _, f := range test.registerPlugins { f(®istry, plugins, pluginConfigs) } - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes)) + snapshot := internalcache.NewSnapshot(test.pods, nodes) fwk, _ := framework.NewFramework(registry, plugins, pluginConfigs, framework.WithSnapshotSharedLister(snapshot)) var pvcs []v1.PersistentVolumeClaim @@ -855,7 +854,7 @@ func makeScheduler(nodes []*v1.Node, fns ...st.RegisterPluginFunc) *genericSched fwk, nil, nil, nil, nil, false, schedulerapi.DefaultPercentageOfNodesToScore, false) - cache.UpdateNodeInfoSnapshot(s.(*genericScheduler).nodeInfoSnapshot) + cache.UpdateSnapshot(s.(*genericScheduler).nodeInfoSnapshot) return s.(*genericScheduler) } @@ -984,7 +983,7 @@ func TestFindFitPredicateCallCounts(t *testing.T) { fwk, nil, nil, nil, nil, false, schedulerapi.DefaultPercentageOfNodesToScore, false).(*genericScheduler) - cache.UpdateNodeInfoSnapshot(scheduler.nodeInfoSnapshot) + cache.UpdateSnapshot(scheduler.nodeInfoSnapshot) queue.UpdateNominatedPodForNode(&v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("nominated")}, Spec: v1.PodSpec{Priority: &midPriority}}, "1") _, _, err := scheduler.findNodesThatFitPod(context.Background(), framework.NewCycleState(), test.pod) @@ -1115,7 +1114,7 @@ func TestZeroRequest(t *testing.T) { client := clientsetfake.NewSimpleClientset() informerFactory := informers.NewSharedInformerFactory(client, 0) - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes)) + snapshot := internalcache.NewSnapshot(test.pods, test.nodes) registry := framework.Registry{} // TODO: instantiate the plugins dynamically. 
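// Editor's note: an illustrative sketch, not part of the patch. It shows the
// snapshot constructor the tests above now call: internalcache.NewSnapshot
// builds a Snapshot directly from pods and nodes, replacing the old
// nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(pods, nodes))
// two-step pattern. The pod and node names are made up for the example.
package core // assumes the sketch lives under pkg/scheduler, where internal/cache is visible

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
)

// buildTestSnapshot returns a snapshot holding one node that runs one pod.
func buildTestSnapshot() *internalcache.Snapshot {
	nodes := []*v1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}
	pods := []*v1.Pod{{
		ObjectMeta: metav1.ObjectMeta{Name: "p1"},
		Spec:       v1.PodSpec{NodeName: "node-1"},
	}}
	return internalcache.NewSnapshot(pods, nodes)
}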
@@ -1604,7 +1603,7 @@ func TestSelectNodesForPreemption(t *testing.T) { f(®istry, plugins, pluginConfigs) } // Use a real snapshot since it's needed in some Filter Plugin (e.g., PodAffinity) - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes)) + snapshot := internalcache.NewSnapshot(test.pods, nodes) fwk, _ := framework.NewFramework(registry, plugins, pluginConfigs, framework.WithSnapshotSharedLister(snapshot)) scheduler := NewGenericScheduler( @@ -1880,7 +1879,7 @@ func TestPickOneNodeForPreemption(t *testing.T) { for _, n := range test.nodes { nodes = append(nodes, makeNode(n, schedutil.DefaultMilliCPURequest*5, schedutil.DefaultMemoryRequest*5)) } - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes)) + snapshot := internalcache.NewSnapshot(test.pods, nodes) registry := framework.Registry{} // TODO: instantiate the plugins dynamically. plugins := &schedulerapi.Plugins{ @@ -2393,7 +2392,7 @@ func TestPreempt(t *testing.T) { for _, f := range test.registerPlugins { f(®istry, plugins, pluginConfigs) } - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes)) + snapshot := internalcache.NewSnapshot(test.pods, nodes) fwk, _ := framework.NewFramework(registry, plugins, pluginConfigs, framework.WithSnapshotSharedLister(snapshot)) scheduler := NewGenericScheduler( @@ -2555,7 +2554,7 @@ func TestFairEvaluationForNodes(t *testing.T) { } } -func nodesToNodeInfos(nodes []*v1.Node, snapshot *nodeinfosnapshot.Snapshot) ([]*schedulernodeinfo.NodeInfo, error) { +func nodesToNodeInfos(nodes []*v1.Node, snapshot *internalcache.Snapshot) ([]*schedulernodeinfo.NodeInfo, error) { var nodeInfos []*schedulernodeinfo.NodeInfo for _, n := range nodes { nodeInfo, err := snapshot.NodeInfos().Get(n.Name) diff --git a/pkg/scheduler/eventhandlers_test.go b/pkg/scheduler/eventhandlers_test.go index 0b47cc92dd3..1d267ba8ffc 100644 --- a/pkg/scheduler/eventhandlers_test.go +++ b/pkg/scheduler/eventhandlers_test.go @@ -23,7 +23,6 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake" ) diff --git a/pkg/scheduler/factory.go b/pkg/scheduler/factory.go index a84f8aac987..808ddc3f496 100644 --- a/pkg/scheduler/factory.go +++ b/pkg/scheduler/factory.go @@ -50,7 +50,6 @@ import ( internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" cachedebugger "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" "k8s.io/kubernetes/pkg/scheduler/volumebinder" ) @@ -107,7 +106,7 @@ type Configurator struct { registry framework.Registry plugins *schedulerapi.Plugins pluginConfig []schedulerapi.PluginConfig - nodeInfoSnapshot *nodeinfosnapshot.Snapshot + nodeInfoSnapshot *internalcache.Snapshot } // create a scheduler from a set of registered plugins. 
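// Editor's note: an illustrative sketch, not part of the patch. After the move,
// the Configurator and the generic scheduler keep a *internalcache.Snapshot that
// is allocated empty once (NewEmptySnapshot) and refreshed from the cache at the
// start of every scheduling cycle via UpdateSnapshot, the renamed
// UpdateNodeInfoSnapshot. The cycleRunner type below is hypothetical.
package core // assumes the sketch lives under pkg/scheduler, where internal/cache is visible

import (
	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
)

type cycleRunner struct {
	cache    internalcache.Cache
	snapshot *internalcache.Snapshot
}

func newCycleRunner(c internalcache.Cache) *cycleRunner {
	return &cycleRunner{cache: c, snapshot: internalcache.NewEmptySnapshot()}
}

// beginCycle copies into the snapshot only the NodeInfo entries whose generation
// changed since the last call, mirroring g.cache.UpdateSnapshot(g.nodeInfoSnapshot).
func (r *cycleRunner) beginCycle() error {
	return r.cache.UpdateSnapshot(r.snapshot)
}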
diff --git a/pkg/scheduler/factory_test.go b/pkg/scheduler/factory_test.go index 060d4901d1d..dc2a1c6da23 100644 --- a/pkg/scheduler/factory_test.go +++ b/pkg/scheduler/factory_test.go @@ -51,7 +51,6 @@ import ( internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" "k8s.io/kubernetes/pkg/scheduler/listers" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" ) const ( @@ -432,7 +431,7 @@ func newConfigFactoryWithFrameworkRegistry( client clientset.Interface, hardPodAffinitySymmetricWeight int32, stopCh <-chan struct{}, registry framework.Registry) *Configurator { informerFactory := informers.NewSharedInformerFactory(client, 0) - snapshot := nodeinfosnapshot.NewEmptySnapshot() + snapshot := internalcache.NewEmptySnapshot() return &Configurator{ client: client, informerFactory: informerFactory, diff --git a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD index 93bd626ee57..b63d0938c67 100644 --- a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD +++ b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD @@ -29,7 +29,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/testing:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_perf_test.go b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_perf_test.go index 636d026afd0..82300746a0c 100644 --- a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_perf_test.go +++ b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_perf_test.go @@ -24,7 +24,7 @@ import ( "k8s.io/client-go/informers" clientsetfake "k8s.io/client-go/kubernetes/fake" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" st "k8s.io/kubernetes/pkg/scheduler/testing" ) @@ -52,7 +52,7 @@ func BenchmarkTestSelectorSpreadPriority(b *testing.B) { b.Run(tt.name, func(b *testing.B) { pod := st.MakePod().Name("p").Label("foo", "").Obj() existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.allNodesNum) - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes)) + snapshot := cache.NewSnapshot(existingPods, allNodes) services := &v1.ServiceList{ Items: []v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": ""}}}}, } diff --git a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_test.go b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_test.go index 83262493cb8..71e5cda6dc8 100644 --- a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_test.go +++ b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_test.go @@ -30,7 +30,7 @@ import ( "k8s.io/client-go/informers" clientsetfake "k8s.io/client-go/kubernetes/fake" framework 
"k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" ) func controllerRef(kind, name, uid string) []metav1.OwnerReference { @@ -343,7 +343,7 @@ func TestDefaultPodTopologySpreadScore(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { nodes := makeNodeList(test.nodes) - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes)) + snapshot := cache.NewSnapshot(test.pods, nodes) ctx := context.Background() informerFactory, err := populateAndStartInformers(ctx, test.rcs, test.rss, test.services, test.sss) if err != nil { @@ -596,7 +596,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { nodes := makeLabeledNodeList(labeledNodes) - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes)) + snapshot := cache.NewSnapshot(test.pods, nodes) ctx := context.Background() informerFactory, err := populateAndStartInformers(ctx, test.rcs, test.rss, test.services, test.sss) if err != nil { diff --git a/pkg/scheduler/framework/plugins/imagelocality/BUILD b/pkg/scheduler/framework/plugins/imagelocality/BUILD index 89abade0a69..f9fe7982073 100644 --- a/pkg/scheduler/framework/plugins/imagelocality/BUILD +++ b/pkg/scheduler/framework/plugins/imagelocality/BUILD @@ -20,7 +20,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", "//pkg/util/parsers:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go b/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go index 84694932e06..f30f272e597 100644 --- a/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go +++ b/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go @@ -26,7 +26,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" "k8s.io/kubernetes/pkg/util/parsers" ) @@ -186,7 +186,7 @@ func TestImageLocalityPriority(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes)) + snapshot := cache.NewSnapshot(nil, test.nodes) state := framework.NewCycleState() diff --git a/pkg/scheduler/framework/plugins/interpodaffinity/BUILD b/pkg/scheduler/framework/plugins/interpodaffinity/BUILD index 02bb2dc9079..539d2d9bea0 100644 --- a/pkg/scheduler/framework/plugins/interpodaffinity/BUILD +++ b/pkg/scheduler/framework/plugins/interpodaffinity/BUILD @@ -33,8 +33,8 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/scheduler/framework/plugins/interpodaffinity/filtering_test.go b/pkg/scheduler/framework/plugins/interpodaffinity/filtering_test.go index 144762778b5..af3ed27c926 100644 --- a/pkg/scheduler/framework/plugins/interpodaffinity/filtering_test.go +++ b/pkg/scheduler/framework/plugins/interpodaffinity/filtering_test.go @@ -24,8 +24,8 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" ) var ( @@ -777,7 +777,7 @@ func TestRequiredAffinitySingleNode(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, []*v1.Node{test.node})) + snapshot := cache.NewSnapshot(test.pods, []*v1.Node{test.node}) p := &InterPodAffinity{ sharedLister: snapshot, } @@ -1614,7 +1614,7 @@ func TestRequiredAffinityMultipleNodes(t *testing.T) { for indexTest, test := range tests { t.Run(test.name, func(t *testing.T) { - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes)) + snapshot := cache.NewSnapshot(test.pods, test.nodes) for indexNode, node := range test.nodes { p := &InterPodAffinity{ sharedLister: snapshot, @@ -1736,7 +1736,7 @@ func TestPreFilterDisabled(t *testing.T) { for indexTest, test := range tests { t.Run(test.name, func(t *testing.T) { - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes)) + snapshot := cache.NewSnapshot(test.pods, test.nodes) for indexNode, node := range test.nodes { p := &InterPodAffinity{ sharedLister: snapshot, @@ -2011,8 +2011,8 @@ func TestPreFilterStateAddRemovePod(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { // getMeta creates predicate meta data given the list of pods. 
- getState := func(pods []*v1.Pod) (*InterPodAffinity, *framework.CycleState, *preFilterState, *nodeinfosnapshot.Snapshot) { - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(pods, test.nodes)) + getState := func(pods []*v1.Pod) (*InterPodAffinity, *framework.CycleState, *preFilterState, *cache.Snapshot) { + snapshot := cache.NewSnapshot(pods, test.nodes) p := &InterPodAffinity{ sharedLister: snapshot, @@ -2298,7 +2298,7 @@ func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes)) + s := cache.NewSnapshot(tt.existingPods, tt.nodes) l, _ := s.NodeInfos().List() gotAffinityPodsMap, gotAntiAffinityPodsMap, err := getTPMapMatchingIncomingAffinityAntiAffinity(tt.pod, l) if (err != nil) != tt.wantErr { @@ -2315,7 +2315,7 @@ func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) { } } -func mustGetNodeInfo(t *testing.T, snapshot *nodeinfosnapshot.Snapshot, name string) *nodeinfo.NodeInfo { +func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *nodeinfo.NodeInfo { t.Helper() nodeInfo, err := snapshot.NodeInfos().Get(name) if err != nil { diff --git a/pkg/scheduler/framework/plugins/interpodaffinity/scoring_test.go b/pkg/scheduler/framework/plugins/interpodaffinity/scoring_test.go index a1a8e1e38ee..9a1c71d33e0 100644 --- a/pkg/scheduler/framework/plugins/interpodaffinity/scoring_test.go +++ b/pkg/scheduler/framework/plugins/interpodaffinity/scoring_test.go @@ -26,7 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" ) func TestPreferredAffinity(t *testing.T) { @@ -517,7 +517,7 @@ func TestPreferredAffinity(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { state := framework.NewCycleState() - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes)) + snapshot := cache.NewSnapshot(test.pods, test.nodes) p := &InterPodAffinity{ sharedLister: snapshot, hardPodAffinityWeight: 1, @@ -623,7 +623,7 @@ func TestPreferredAffinityWithHardPodAffinitySymmetricWeight(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { state := framework.NewCycleState() - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes)) + snapshot := cache.NewSnapshot(test.pods, test.nodes) fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot)) args := &runtime.Unknown{Raw: []byte(fmt.Sprintf(`{"hardPodAffinityWeight":%d}`, test.hardPodAffinityWeight))} diff --git a/pkg/scheduler/framework/plugins/nodeaffinity/BUILD b/pkg/scheduler/framework/plugins/nodeaffinity/BUILD index c5c36f6541a..1cd808e4ae6 100644 --- a/pkg/scheduler/framework/plugins/nodeaffinity/BUILD +++ b/pkg/scheduler/framework/plugins/nodeaffinity/BUILD @@ -37,8 +37,8 @@ go_test( deps = [ "//pkg/apis/core:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], diff --git a/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go b/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go index 00768953fb1..4968bba20bb 100644 --- a/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go +++ b/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go @@ -25,8 +25,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" api "k8s.io/kubernetes/pkg/apis/core" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" ) // TODO: Add test case for RequiredDuringSchedulingRequiredDuringExecution after it's implemented. @@ -849,7 +849,7 @@ func TestNodeAffinityPriority(t *testing.T) { t.Run(test.name, func(t *testing.T) { state := framework.NewCycleState() - fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes)))) + fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewSnapshot(nil, test.nodes))) p, _ := New(nil, fh) var gotList framework.NodeScoreList for _, n := range test.nodes { diff --git a/pkg/scheduler/framework/plugins/nodelabel/BUILD b/pkg/scheduler/framework/plugins/nodelabel/BUILD index d143017c914..34252117507 100644 --- a/pkg/scheduler/framework/plugins/nodelabel/BUILD +++ b/pkg/scheduler/framework/plugins/nodelabel/BUILD @@ -20,8 +20,8 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/scheduler/framework/plugins/nodelabel/node_label_test.go b/pkg/scheduler/framework/plugins/nodelabel/node_label_test.go index 3a4b31be136..c662cf24baf 100644 --- a/pkg/scheduler/framework/plugins/nodelabel/node_label_test.go +++ b/pkg/scheduler/framework/plugins/nodelabel/node_label_test.go @@ -24,8 +24,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" ) func TestValidateNodeLabelArgs(t *testing.T) { @@ -227,7 +227,7 @@ func TestNodeLabelScore(t *testing.T) { t.Run(test.name, func(t *testing.T) { state := framework.NewCycleState() node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: map[string]string{"foo": "", "bar": ""}}} - fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, []*v1.Node{node})))) + fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewSnapshot(nil, []*v1.Node{node}))) args := &runtime.Unknown{Raw: []byte(test.rawArgs)} p, err := New(args, fh) if err != nil { @@ -262,7 +262,7 @@ func TestNodeLabelFilterWithoutNode(t *testing.T) { func 
TestNodeLabelScoreWithoutNode(t *testing.T) { t.Run("node does not exist", func(t *testing.T) { - fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewEmptySnapshot())) + fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewEmptySnapshot())) p, err := New(nil, fh) if err != nil { t.Fatalf("Failed to create plugin: %+v", err) diff --git a/pkg/scheduler/framework/plugins/nodepreferavoidpods/BUILD b/pkg/scheduler/framework/plugins/nodepreferavoidpods/BUILD index 93926ed1494..7beedb8fed2 100644 --- a/pkg/scheduler/framework/plugins/nodepreferavoidpods/BUILD +++ b/pkg/scheduler/framework/plugins/nodepreferavoidpods/BUILD @@ -20,7 +20,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], diff --git a/pkg/scheduler/framework/plugins/nodepreferavoidpods/node_prefer_avoid_pods_test.go b/pkg/scheduler/framework/plugins/nodepreferavoidpods/node_prefer_avoid_pods_test.go index 7839700c21e..e7d85fb608c 100644 --- a/pkg/scheduler/framework/plugins/nodepreferavoidpods/node_prefer_avoid_pods_test.go +++ b/pkg/scheduler/framework/plugins/nodepreferavoidpods/node_prefer_avoid_pods_test.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" ) func TestNodePreferAvoidPods(t *testing.T) { @@ -143,7 +143,7 @@ func TestNodePreferAvoidPods(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { state := framework.NewCycleState() - fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes)))) + fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewSnapshot(nil, test.nodes))) p, _ := New(nil, fh) var gotList framework.NodeScoreList for _, n := range test.nodes { diff --git a/pkg/scheduler/framework/plugins/noderesources/BUILD b/pkg/scheduler/framework/plugins/noderesources/BUILD index 97867314695..f049b383872 100644 --- a/pkg/scheduler/framework/plugins/noderesources/BUILD +++ b/pkg/scheduler/framework/plugins/noderesources/BUILD @@ -60,8 +60,8 @@ go_test( "//pkg/apis/core/v1/helper:go_default_library", "//pkg/features:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go b/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go index b3abd4ad5e7..fd6a4427e46 100644 --- a/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go @@ -28,7 +28,7 @@ import ( 
featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/kubernetes/pkg/features" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" ) // getExistingVolumeCountForNode gets the current number of volumes on node. @@ -379,7 +379,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes)) + snapshot := cache.NewSnapshot(test.pods, test.nodes) if len(test.pod.Spec.Volumes) > 0 { maxVolumes := 5 nodeInfoList, _ := snapshot.NodeInfos().List() diff --git a/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go b/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go index 169eea9bdbc..9924fede062 100644 --- a/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" ) func TestNodeResourcesLeastAllocated(t *testing.T) { @@ -233,7 +233,7 @@ func TestNodeResourcesLeastAllocated(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes)) + snapshot := cache.NewSnapshot(test.pods, test.nodes) fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot)) p, _ := NewLeastAllocated(nil, fh) for i := range test.nodes { diff --git a/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go b/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go index 1b6d2fa3595..2ebbfeff3c1 100644 --- a/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" ) func TestNodeResourcesMostAllocated(t *testing.T) { @@ -196,7 +196,7 @@ func TestNodeResourcesMostAllocated(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes)) + snapshot := cache.NewSnapshot(test.pods, test.nodes) fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot)) p, _ := NewMostAllocated(nil, fh) for i := range test.nodes { diff --git a/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio_test.go b/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio_test.go index 7caf2ca0dba..f386f1ce153 100644 --- a/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/requested_to_capacity_ratio_test.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/runtime" 
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" ) func TestRequestedToCapacityRatio(t *testing.T) { @@ -65,7 +65,7 @@ func TestRequestedToCapacityRatio(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { state := framework.NewCycleState() - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.scheduledPods, test.nodes)) + snapshot := cache.NewSnapshot(test.scheduledPods, test.nodes) fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot)) args := &runtime.Unknown{Raw: []byte(`{"shape" : [{"utilization" : 0, "score" : 10}, {"utilization" : 100, "score" : 0}], "resources" : [{"name" : "memory", "weight" : 1}, {"name" : "cpu", "weight" : 1}]}`)} p, err := NewRequestedToCapacityRatio(args, fh) @@ -349,7 +349,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { state := framework.NewCycleState() - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes)) + snapshot := cache.NewSnapshot(test.pods, test.nodes) fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot)) args := &runtime.Unknown{Raw: []byte(`{"shape" : [{"utilization" : 0, "score" : 0}, {"utilization" : 100, "score" : 1}], "resources" : [{"name" : "intel.com/foo", "weight" : 1}]}`)} p, err := NewRequestedToCapacityRatio(args, fh) @@ -584,7 +584,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { state := framework.NewCycleState() - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes)) + snapshot := cache.NewSnapshot(test.pods, test.nodes) fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot)) args := &runtime.Unknown{Raw: []byte(`{"shape" : [{"utilization" : 0, "score" : 0}, {"utilization" : 100, "score" : 1}], "resources" : [{"name" : "intel.com/foo", "weight" : 3}, {"name" : "intel.com/bar", "weight": 5}]}`)} p, err := NewRequestedToCapacityRatio(args, fh) diff --git a/pkg/scheduler/framework/plugins/noderesources/resource_limits_test.go b/pkg/scheduler/framework/plugins/noderesources/resource_limits_test.go index 894308ca4e9..c88fc58be4a 100644 --- a/pkg/scheduler/framework/plugins/noderesources/resource_limits_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/resource_limits_test.go @@ -23,7 +23,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" ) func TestResourceLimits(t *testing.T) { @@ -146,7 +146,7 @@ func TestResourceLimits(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes)) + snapshot := cache.NewSnapshot(nil, test.nodes) fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot)) p := &ResourceLimits{handle: fh} for i := range test.nodes { diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/BUILD b/pkg/scheduler/framework/plugins/podtopologyspread/BUILD index a1ca007dda8..bf67b3c4bfe 100644 --- 
a/pkg/scheduler/framework/plugins/podtopologyspread/BUILD +++ b/pkg/scheduler/framework/plugins/podtopologyspread/BUILD @@ -34,7 +34,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/testing:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go index 9ca27409f7e..5e54837815f 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go @@ -25,7 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" st "k8s.io/kubernetes/pkg/scheduler/testing" ) @@ -414,7 +414,7 @@ func TestCalPreFilterState(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes)) + s := cache.NewSnapshot(tt.existingPods, tt.nodes) l, _ := s.NodeInfos().List() got, _ := calPreFilterState(tt.pod, l) got.sortCriticalPaths() @@ -712,7 +712,7 @@ func TestPreFilterStateAddPod(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes)) + s := cache.NewSnapshot(tt.existingPods, tt.nodes) l, _ := s.NodeInfos().List() state, _ := calPreFilterState(tt.preemptor, l) state.updateWithPod(tt.addedPod, tt.preemptor, tt.nodes[tt.nodeIdx], 1) @@ -902,7 +902,7 @@ func TestPreFilterStateRemovePod(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes)) + s := cache.NewSnapshot(tt.existingPods, tt.nodes) l, _ := s.NodeInfos().List() state, _ := calPreFilterState(tt.preemptor, l) @@ -961,7 +961,7 @@ func BenchmarkTestCalPreFilterState(b *testing.B) { for _, tt := range tests { b.Run(tt.name, func(b *testing.B) { existingPods, allNodes, _ := st.MakeNodesAndPodsForEvenPodsSpread(tt.pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum) - s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes)) + s := cache.NewSnapshot(existingPods, allNodes) l, _ := s.NodeInfos().List() b.ResetTimer() for i := 0; i < b.N; i++ { @@ -1228,7 +1228,7 @@ func TestSingleConstraint(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes)) + snapshot := cache.NewSnapshot(tt.existingPods, tt.nodes) p := &PodTopologySpread{sharedLister: snapshot} state := framework.NewCycleState() preFilterStatus := p.PreFilter(context.Background(), state, tt.pod) @@ -1428,7 +1428,7 @@ func TestMultipleConstraints(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes)) + snapshot 
:= cache.NewSnapshot(tt.existingPods, tt.nodes) p := &PodTopologySpread{sharedLister: snapshot} state := framework.NewCycleState() preFilterStatus := p.PreFilter(context.Background(), state, tt.pod) diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go index 2330390b3c7..41433962f06 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go @@ -24,7 +24,7 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/sets" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" st "k8s.io/kubernetes/pkg/scheduler/testing" ) @@ -435,7 +435,7 @@ func TestPodTopologySpreadScore(t *testing.T) { allNodes := append([]*v1.Node{}, tt.nodes...) allNodes = append(allNodes, tt.failedNodes...) state := framework.NewCycleState() - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, allNodes)) + snapshot := cache.NewSnapshot(tt.existingPods, allNodes) p := &PodTopologySpread{sharedLister: snapshot} status := p.PostFilter(context.Background(), state, tt.pod, tt.nodes, nil) @@ -505,7 +505,7 @@ func BenchmarkTestPodTopologySpreadScore(b *testing.B) { b.Run(tt.name, func(b *testing.B) { existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(tt.pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum) state := framework.NewCycleState() - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes)) + snapshot := cache.NewSnapshot(existingPods, allNodes) p := &PodTopologySpread{sharedLister: snapshot} status := p.PostFilter(context.Background(), state, tt.pod, filteredNodes, nil) @@ -564,7 +564,7 @@ func BenchmarkTestDefaultEvenPodsSpreadPriority(b *testing.B) { SpreadConstraint(1, v1.LabelZoneFailureDomain, softSpread, st.MakeLabelSelector().Exists("foo").Obj()).Obj() existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.allNodesNum) state := framework.NewCycleState() - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes)) + snapshot := cache.NewSnapshot(existingPods, allNodes) p := &PodTopologySpread{sharedLister: snapshot} status := p.PostFilter(context.Background(), state, pod, filteredNodes, nil) diff --git a/pkg/scheduler/framework/plugins/serviceaffinity/BUILD b/pkg/scheduler/framework/plugins/serviceaffinity/BUILD index d3fd46547ca..406ac179833 100644 --- a/pkg/scheduler/framework/plugins/serviceaffinity/BUILD +++ b/pkg/scheduler/framework/plugins/serviceaffinity/BUILD @@ -23,9 +23,9 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/listers/fake:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], diff --git a/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity_test.go b/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity_test.go index 96138c5f53e..4aacc8528d1 100644 --- 
a/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity_test.go +++ b/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity_test.go @@ -25,9 +25,9 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake" "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" ) func TestServiceAffinity(t *testing.T) { @@ -160,7 +160,7 @@ func TestServiceAffinity(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { nodes := []*v1.Node{&node1, &node2, &node3, &node4, &node5} - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes)) + snapshot := cache.NewSnapshot(test.pods, nodes) p := &ServiceAffinity{ sharedLister: snapshot, @@ -383,7 +383,7 @@ func TestServiceAffinityScore(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { nodes := makeLabeledNodeList(test.nodes) - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes)) + snapshot := cache.NewSnapshot(test.pods, nodes) serviceLister := fakelisters.ServiceLister(test.services) p := &ServiceAffinity{ @@ -494,8 +494,8 @@ func TestPreFilterStateAddRemovePod(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { // getMeta creates predicate meta data given the list of pods. - getState := func(pods []*v1.Pod) (*ServiceAffinity, *framework.CycleState, *preFilterState, *nodeinfosnapshot.Snapshot) { - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(pods, test.nodes)) + getState := func(pods []*v1.Pod) (*ServiceAffinity, *framework.CycleState, *preFilterState, *cache.Snapshot) { + snapshot := cache.NewSnapshot(pods, test.nodes) p := &ServiceAffinity{ sharedLister: snapshot, @@ -591,7 +591,7 @@ func sortNodeScoreList(out framework.NodeScoreList) { }) } -func mustGetNodeInfo(t *testing.T, snapshot *nodeinfosnapshot.Snapshot, name string) *nodeinfo.NodeInfo { +func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *nodeinfo.NodeInfo { t.Helper() nodeInfo, err := snapshot.NodeInfos().Get(name) if err != nil { diff --git a/pkg/scheduler/framework/plugins/tainttoleration/BUILD b/pkg/scheduler/framework/plugins/tainttoleration/BUILD index ed399b6d345..92b150da4e1 100644 --- a/pkg/scheduler/framework/plugins/tainttoleration/BUILD +++ b/pkg/scheduler/framework/plugins/tainttoleration/BUILD @@ -35,8 +35,8 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], diff --git a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go index 48c6f937f29..6a5fda91357 100644 --- a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go +++ b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go @@ -24,8 +24,8 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" framework 
"k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" ) func nodeWithTaints(nodeName string, taints []v1.Taint) *v1.Node { @@ -229,7 +229,7 @@ func TestTaintTolerationScore(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { state := framework.NewCycleState() - snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes)) + snapshot := cache.NewSnapshot(nil, test.nodes) fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot)) p, _ := New(nil, fh) diff --git a/pkg/scheduler/internal/cache/BUILD b/pkg/scheduler/internal/cache/BUILD index bb3d8466299..b9484a3320a 100644 --- a/pkg/scheduler/internal/cache/BUILD +++ b/pkg/scheduler/internal/cache/BUILD @@ -6,6 +6,7 @@ go_library( "cache.go", "interface.go", "node_tree.go", + "snapshot.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache", visibility = ["//pkg/scheduler:__subpackages__"], @@ -14,7 +15,6 @@ go_library( "//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -30,17 +30,18 @@ go_test( srcs = [ "cache_test.go", "node_tree_test.go", + "snapshot_test.go", ], embed = [":go_default_library"], deps = [ "//pkg/features:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", ], diff --git a/pkg/scheduler/internal/cache/cache.go b/pkg/scheduler/internal/cache/cache.go index 75ada0dfd29..6f8c68866af 100644 --- a/pkg/scheduler/internal/cache/cache.go +++ b/pkg/scheduler/internal/cache/cache.go @@ -31,7 +31,6 @@ import ( schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" "k8s.io/kubernetes/pkg/scheduler/metrics" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" ) var ( @@ -174,7 +173,7 @@ func (cache *schedulerCache) removeNodeInfoFromList(name string) { } // Snapshot takes a snapshot of the current scheduler cache. This is used for -// debugging purposes only and shouldn't be confused with UpdateNodeInfoSnapshot +// debugging purposes only and shouldn't be confused with UpdateSnapshot // function. // This method is expensive, and should be only used in non-critical path. func (cache *schedulerCache) Dump() *Dump { @@ -197,17 +196,17 @@ func (cache *schedulerCache) Dump() *Dump { } } -// UpdateNodeInfoSnapshot takes a snapshot of cached NodeInfo map. This is called at +// UpdateSnapshot takes a snapshot of cached NodeInfo map. 
This is called at // beginning of every scheduling cycle. // This function tracks generation number of NodeInfo and updates only the // entries of an existing snapshot that have changed after the snapshot was taken. -func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) error { +func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error { cache.mu.Lock() defer cache.mu.Unlock() balancedVolumesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) // Get the last generation of the snapshot. - snapshotGeneration := nodeSnapshot.Generation + snapshotGeneration := nodeSnapshot.generation // NodeInfoList and HavePodsWithAffinityNodeInfoList must be re-created if a node was added // or removed from the cache. @@ -229,11 +228,11 @@ func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapsh node.info.TransientInfo.ResetTransientSchedulerInfo() } if np := node.info.Node(); np != nil { - existing, ok := nodeSnapshot.NodeInfoMap[np.Name] + existing, ok := nodeSnapshot.nodeInfoMap[np.Name] if !ok { updateAllLists = true existing = &schedulernodeinfo.NodeInfo{} - nodeSnapshot.NodeInfoMap[np.Name] = existing + nodeSnapshot.nodeInfoMap[np.Name] = existing } clone := node.info.Clone() // We track nodes that have pods with affinity, here we check if this node changed its @@ -249,10 +248,10 @@ func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapsh } // Update the snapshot generation with the latest NodeInfo generation. if cache.headNode != nil { - nodeSnapshot.Generation = cache.headNode.info.GetGeneration() + nodeSnapshot.generation = cache.headNode.info.GetGeneration() } - if len(nodeSnapshot.NodeInfoMap) > len(cache.nodes) { + if len(nodeSnapshot.nodeInfoMap) > len(cache.nodes) { cache.removeDeletedNodesFromSnapshot(nodeSnapshot) updateAllLists = true } @@ -261,12 +260,12 @@ func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapsh cache.updateNodeInfoSnapshotList(nodeSnapshot, updateAllLists) } - if len(nodeSnapshot.NodeInfoList) != cache.nodeTree.numNodes { + if len(nodeSnapshot.nodeInfoList) != cache.nodeTree.numNodes { errMsg := fmt.Sprintf("snapshot state is not consistent, length of NodeInfoList=%v not equal to length of nodes in tree=%v "+ ", length of NodeInfoMap=%v, length of nodes in cache=%v"+ ", trying to recover", - len(nodeSnapshot.NodeInfoList), cache.nodeTree.numNodes, - len(nodeSnapshot.NodeInfoMap), len(cache.nodes)) + len(nodeSnapshot.nodeInfoList), cache.nodeTree.numNodes, + len(nodeSnapshot.nodeInfoMap), len(cache.nodes)) klog.Error(errMsg) // We will try to recover by re-creating the lists for the next scheduling cycle, but still return an // error to surface the problem, the error will likely cause a failure to the current scheduling cycle. 
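The renames in the hunks above leave the incremental-update strategy itself unchanged: the cache keeps its NodeInfo entries ordered by a monotonically increasing generation, records the head generation in the snapshot, and on the next UpdateSnapshot call copies only the entries that are newer than that recorded generation. A minimal standalone sketch of that idea follows, using toy types rather than the scheduler's own structs; names like nodeInfo and updateSnapshot here are illustrative only.

package main

import "fmt"

// nodeInfo stands in for the cache's per-node entry; entries are linked
// newest-first, like the cache's headNode list.
type nodeInfo struct {
	name       string
	generation int64
	next       *nodeInfo
}

// snapshot stands in for the scheduler's Snapshot: a map of copies plus the
// generation it was last brought up to date with.
type snapshot struct {
	nodes      map[string]nodeInfo
	generation int64
}

// updateSnapshot copies only the entries whose generation is newer than the
// snapshot's recorded generation, then advances that generation to the head's.
func updateSnapshot(head *nodeInfo, s *snapshot) {
	for n := head; n != nil; n = n.next {
		if n.generation <= s.generation {
			// Entries are ordered newest-first, so everything from here on is
			// already reflected in the snapshot.
			break
		}
		s.nodes[n.name] = *n
	}
	if head != nil {
		s.generation = head.generation
	}
}

func main() {
	older := &nodeInfo{name: "node-2", generation: 1}
	newer := &nodeInfo{name: "node-1", generation: 2, next: older}
	s := &snapshot{nodes: map[string]nodeInfo{}}

	updateSnapshot(newer, s)
	fmt.Println(len(s.nodes), s.generation) // 2 2

	// With no cache changes in between, a second call copies nothing new.
	updateSnapshot(newer, s)
	fmt.Println(len(s.nodes), s.generation) // 2 2
}

The real UpdateSnapshot additionally rebuilds the ordered node lists when nodes were added or removed (the updateAllLists path above) and falls back to recreating them when the length consistency check fails.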
@@ -277,40 +276,40 @@ func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapsh return nil } -func (cache *schedulerCache) updateNodeInfoSnapshotList(nodeSnapshot *nodeinfosnapshot.Snapshot, updateAll bool) { - nodeSnapshot.HavePodsWithAffinityNodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) +func (cache *schedulerCache) updateNodeInfoSnapshotList(snapshot *Snapshot, updateAll bool) { + snapshot.havePodsWithAffinityNodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) if updateAll { // Take a snapshot of the nodes order in the tree - nodeSnapshot.NodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) + snapshot.nodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) for i := 0; i < cache.nodeTree.numNodes; i++ { nodeName := cache.nodeTree.next() - if n := nodeSnapshot.NodeInfoMap[nodeName]; n != nil { - nodeSnapshot.NodeInfoList = append(nodeSnapshot.NodeInfoList, n) + if n := snapshot.nodeInfoMap[nodeName]; n != nil { + snapshot.nodeInfoList = append(snapshot.nodeInfoList, n) if len(n.PodsWithAffinity()) > 0 { - nodeSnapshot.HavePodsWithAffinityNodeInfoList = append(nodeSnapshot.HavePodsWithAffinityNodeInfoList, n) + snapshot.havePodsWithAffinityNodeInfoList = append(snapshot.havePodsWithAffinityNodeInfoList, n) } } else { klog.Errorf("node %q exist in nodeTree but not in NodeInfoMap, this should not happen.", nodeName) } } } else { - for _, n := range nodeSnapshot.NodeInfoList { + for _, n := range snapshot.nodeInfoList { if len(n.PodsWithAffinity()) > 0 { - nodeSnapshot.HavePodsWithAffinityNodeInfoList = append(nodeSnapshot.HavePodsWithAffinityNodeInfoList, n) + snapshot.havePodsWithAffinityNodeInfoList = append(snapshot.havePodsWithAffinityNodeInfoList, n) } } } } // If certain nodes were deleted after the last snapshot was taken, we should remove them from the snapshot. -func (cache *schedulerCache) removeDeletedNodesFromSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) { - toDelete := len(nodeSnapshot.NodeInfoMap) - len(cache.nodes) - for name := range nodeSnapshot.NodeInfoMap { +func (cache *schedulerCache) removeDeletedNodesFromSnapshot(snapshot *Snapshot) { + toDelete := len(snapshot.nodeInfoMap) - len(cache.nodes) + for name := range snapshot.nodeInfoMap { if toDelete <= 0 { break } if _, ok := cache.nodes[name]; !ok { - delete(nodeSnapshot.NodeInfoMap, name) + delete(snapshot.nodeInfoMap, name) toDelete-- } } diff --git a/pkg/scheduler/internal/cache/cache_test.go b/pkg/scheduler/internal/cache/cache_test.go index d3abfe53033..f4de7038339 100644 --- a/pkg/scheduler/internal/cache/cache_test.go +++ b/pkg/scheduler/internal/cache/cache_test.go @@ -32,7 +32,6 @@ import ( featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/kubernetes/pkg/features" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) @@ -1121,12 +1120,12 @@ func TestNodeOperators(t *testing.T) { } // Step 2: dump cached nodes successfully. 
- cachedNodes := nodeinfosnapshot.NewEmptySnapshot() - if err := cache.UpdateNodeInfoSnapshot(cachedNodes); err != nil { + cachedNodes := NewEmptySnapshot() + if err := cache.UpdateSnapshot(cachedNodes); err != nil { t.Error(err) } - newNode, found := cachedNodes.NodeInfoMap[node.Name] - if !found || len(cachedNodes.NodeInfoMap) != 1 { + newNode, found := cachedNodes.nodeInfoMap[node.Name] + if !found || len(cachedNodes.nodeInfoMap) != 1 { t.Errorf("failed to dump cached nodes:\n got: %v \nexpected: %v", cachedNodes, cache.nodes) } expected.SetGeneration(newNode.GetGeneration()) @@ -1192,8 +1191,7 @@ func TestNodeOperators(t *testing.T) { } } -// TestSchedulerCache_UpdateNodeInfoSnapshot tests UpdateNodeInfoSnapshot function of cache. -func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) { +func TestSchedulerCache_UpdateSnapshot(t *testing.T) { // Create a few nodes to be used in tests. nodes := []*v1.Node{} for i := 0; i < 10; i++ { @@ -1266,7 +1264,7 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) { } var cache *schedulerCache - var snapshot *nodeinfosnapshot.Snapshot + var snapshot *Snapshot type operation = func() addNode := func(i int) operation { @@ -1320,7 +1318,7 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) { } updateSnapshot := func() operation { return func() { - cache.UpdateNodeInfoSnapshot(snapshot) + cache.UpdateSnapshot(snapshot) if err := compareCacheWithNodeInfoSnapshot(cache, snapshot); err != nil { t.Error(err) } @@ -1470,7 +1468,7 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { cache = newSchedulerCache(time.Second, time.Second, nil) - snapshot = nodeinfosnapshot.NewEmptySnapshot() + snapshot = NewEmptySnapshot() for _, op := range test.operations { op() @@ -1493,12 +1491,12 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) { } // Check number of nodes with pods with affinity - if len(snapshot.HavePodsWithAffinityNodeInfoList) != test.expectedHavePodsWithAffinity { - t.Errorf("unexpected number of HavePodsWithAffinity nodes. Expected: %v, got: %v", test.expectedHavePodsWithAffinity, len(snapshot.HavePodsWithAffinityNodeInfoList)) + if len(snapshot.havePodsWithAffinityNodeInfoList) != test.expectedHavePodsWithAffinity { + t.Errorf("unexpected number of HavePodsWithAffinity nodes. Expected: %v, got: %v", test.expectedHavePodsWithAffinity, len(snapshot.havePodsWithAffinityNodeInfoList)) } // Always update the snapshot at the end of operations and compare it. - if err := cache.UpdateNodeInfoSnapshot(snapshot); err != nil { + if err := cache.UpdateSnapshot(snapshot); err != nil { t.Error(err) } if err := compareCacheWithNodeInfoSnapshot(cache, snapshot); err != nil { @@ -1508,27 +1506,27 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) { } } -func compareCacheWithNodeInfoSnapshot(cache *schedulerCache, snapshot *nodeinfosnapshot.Snapshot) error { +func compareCacheWithNodeInfoSnapshot(cache *schedulerCache, snapshot *Snapshot) error { // Compare the map. - if len(snapshot.NodeInfoMap) != len(cache.nodes) { - return fmt.Errorf("unexpected number of nodes in the snapshot. Expected: %v, got: %v", len(cache.nodes), len(snapshot.NodeInfoMap)) + if len(snapshot.nodeInfoMap) != len(cache.nodes) { + return fmt.Errorf("unexpected number of nodes in the snapshot. 
Expected: %v, got: %v", len(cache.nodes), len(snapshot.nodeInfoMap)) } for name, ni := range cache.nodes { - if !reflect.DeepEqual(snapshot.NodeInfoMap[name], ni.info) { - return fmt.Errorf("unexpected node info for node %q. Expected: %v, got: %v", name, ni.info, snapshot.NodeInfoMap[name]) + if !reflect.DeepEqual(snapshot.nodeInfoMap[name], ni.info) { + return fmt.Errorf("unexpected node info for node %q. Expected: %v, got: %v", name, ni.info, snapshot.nodeInfoMap[name]) } } // Compare the lists. - if len(snapshot.NodeInfoList) != len(cache.nodes) { - return fmt.Errorf("unexpected number of nodes in NodeInfoList. Expected: %v, got: %v", len(cache.nodes), len(snapshot.NodeInfoList)) + if len(snapshot.nodeInfoList) != len(cache.nodes) { + return fmt.Errorf("unexpected number of nodes in NodeInfoList. Expected: %v, got: %v", len(cache.nodes), len(snapshot.nodeInfoList)) } expectedNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) expectedHavePodsWithAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) for i := 0; i < cache.nodeTree.numNodes; i++ { nodeName := cache.nodeTree.next() - if n := snapshot.NodeInfoMap[nodeName]; n != nil { + if n := snapshot.nodeInfoMap[nodeName]; n != nil { expectedNodeInfoList = append(expectedNodeInfoList, n) if len(n.PodsWithAffinity()) > 0 { expectedHavePodsWithAffinityNodeInfoList = append(expectedHavePodsWithAffinityNodeInfoList, n) @@ -1539,14 +1537,14 @@ func compareCacheWithNodeInfoSnapshot(cache *schedulerCache, snapshot *nodeinfos } for i, expected := range expectedNodeInfoList { - got := snapshot.NodeInfoList[i] + got := snapshot.nodeInfoList[i] if expected != got { return fmt.Errorf("unexpected NodeInfo pointer in NodeInfoList. Expected: %p, got: %p", expected, got) } } for i, expected := range expectedHavePodsWithAffinityNodeInfoList { - got := snapshot.HavePodsWithAffinityNodeInfoList[i] + got := snapshot.havePodsWithAffinityNodeInfoList[i] if expected != got { return fmt.Errorf("unexpected NodeInfo pointer in HavePodsWithAffinityNodeInfoList. 
Expected: %p, got: %p", expected, got) } @@ -1561,8 +1559,8 @@ func BenchmarkUpdate1kNodes30kPods(b *testing.B) { cache := setupCacheOf1kNodes30kPods(b) b.ResetTimer() for n := 0; n < b.N; n++ { - cachedNodes := nodeinfosnapshot.NewEmptySnapshot() - cache.UpdateNodeInfoSnapshot(cachedNodes) + cachedNodes := NewEmptySnapshot() + cache.UpdateSnapshot(cachedNodes) } } diff --git a/pkg/scheduler/internal/cache/fake/BUILD b/pkg/scheduler/internal/cache/fake/BUILD index 34ef48e4e5c..4eb6c41e533 100644 --- a/pkg/scheduler/internal/cache/fake/BUILD +++ b/pkg/scheduler/internal/cache/fake/BUILD @@ -8,7 +8,6 @@ go_library( deps = [ "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/listers:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", ], diff --git a/pkg/scheduler/internal/cache/fake/fake_cache.go b/pkg/scheduler/internal/cache/fake/fake_cache.go index ad14bd6bf79..40010dbc793 100644 --- a/pkg/scheduler/internal/cache/fake/fake_cache.go +++ b/pkg/scheduler/internal/cache/fake/fake_cache.go @@ -21,7 +21,6 @@ import ( "k8s.io/apimachinery/pkg/labels" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" ) // Cache is used for testing @@ -75,8 +74,8 @@ func (c *Cache) UpdateNode(oldNode, newNode *v1.Node) error { return nil } // RemoveNode is a fake method for testing. func (c *Cache) RemoveNode(node *v1.Node) error { return nil } -// UpdateNodeInfoSnapshot is a fake method for testing. -func (c *Cache) UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) error { +// UpdateSnapshot is a fake method for testing. +func (c *Cache) UpdateSnapshot(snapshot *internalcache.Snapshot) error { return nil } @@ -88,7 +87,7 @@ func (c *Cache) FilteredList(filter schedulerlisters.PodFilter, selector labels. return nil, nil } -// Snapshot is a fake method for testing +// Dump is a fake method for testing. func (c *Cache) Dump() *internalcache.Dump { return &internalcache.Dump{} } diff --git a/pkg/scheduler/internal/cache/interface.go b/pkg/scheduler/internal/cache/interface.go index ac8f935f9a9..ceb3e3ae3b0 100644 --- a/pkg/scheduler/internal/cache/interface.go +++ b/pkg/scheduler/internal/cache/interface.go @@ -20,7 +20,6 @@ import ( v1 "k8s.io/api/core/v1" schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" ) // Cache collects pods' information and provides node-level aggregated information. @@ -97,10 +96,10 @@ type Cache interface { // RemoveNode removes overall information about node. RemoveNode(node *v1.Node) error - // UpdateNodeInfoSnapshot updates the passed infoSnapshot to the current contents of Cache. + // UpdateSnapshot updates the passed infoSnapshot to the current contents of Cache. // The node info contains aggregated information of pods scheduled (including assumed to be) // on this node. - UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) error + UpdateSnapshot(nodeSnapshot *Snapshot) error // Dump produces a dump of the current cache. 
Dump() *Dump diff --git a/pkg/scheduler/nodeinfo/snapshot/snapshot.go b/pkg/scheduler/internal/cache/snapshot.go similarity index 69% rename from pkg/scheduler/nodeinfo/snapshot/snapshot.go rename to pkg/scheduler/internal/cache/snapshot.go index 94e2d50c897..3300bb9ac6f 100644 --- a/pkg/scheduler/nodeinfo/snapshot/snapshot.go +++ b/pkg/scheduler/internal/cache/snapshot.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package snapshot +package cache import ( "fmt" @@ -29,13 +29,13 @@ import ( // Snapshot is a snapshot of cache NodeInfo and NodeTree order. The scheduler takes a // snapshot at the beginning of each scheduling cycle and uses it for its operations in that cycle. type Snapshot struct { - // NodeInfoMap a map of node name to a snapshot of its NodeInfo. - NodeInfoMap map[string]*schedulernodeinfo.NodeInfo - // NodeInfoList is the list of nodes as ordered in the cache's nodeTree. - NodeInfoList []*schedulernodeinfo.NodeInfo - // HavePodsWithAffinityNodeInfoList is the list of nodes with at least one pod declaring affinity terms. - HavePodsWithAffinityNodeInfoList []*schedulernodeinfo.NodeInfo - Generation int64 + // nodeInfoMap a map of node name to a snapshot of its NodeInfo. + nodeInfoMap map[string]*schedulernodeinfo.NodeInfo + // nodeInfoList is the list of nodes as ordered in the cache's nodeTree. + nodeInfoList []*schedulernodeinfo.NodeInfo + // havePodsWithAffinityNodeInfoList is the list of nodes with at least one pod declaring affinity terms. + havePodsWithAffinityNodeInfoList []*schedulernodeinfo.NodeInfo + generation int64 } var _ schedulerlisters.SharedLister = &Snapshot{} @@ -43,12 +43,13 @@ var _ schedulerlisters.SharedLister = &Snapshot{} // NewEmptySnapshot initializes a Snapshot struct and returns it. func NewEmptySnapshot() *Snapshot { return &Snapshot{ - NodeInfoMap: make(map[string]*schedulernodeinfo.NodeInfo), + nodeInfoMap: make(map[string]*schedulernodeinfo.NodeInfo), } } // NewSnapshot initializes a Snapshot struct and returns it. -func NewSnapshot(nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) *Snapshot { +func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot { + nodeInfoMap := createNodeInfoMap(pods, nodes) nodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap)) havePodsWithAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap)) for _, v := range nodeInfoMap { @@ -59,16 +60,17 @@ func NewSnapshot(nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) *Snapshot { } s := NewEmptySnapshot() - s.NodeInfoMap = nodeInfoMap - s.NodeInfoList = nodeInfoList - s.HavePodsWithAffinityNodeInfoList = havePodsWithAffinityNodeInfoList + s.nodeInfoMap = nodeInfoMap + s.nodeInfoList = nodeInfoList + s.havePodsWithAffinityNodeInfoList = havePodsWithAffinityNodeInfoList return s } -// CreateNodeInfoMap obtains a list of pods and pivots that list into a map where the keys are node names -// and the values are the aggregated information for that node. -func CreateNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulernodeinfo.NodeInfo { +// createNodeInfoMap obtains a list of pods and pivots that list into a map +// where the keys are node names and the values are the aggregated information +// for that node. 
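For the plugin and queue tests touched earlier in this patch, the practical effect of unexporting the Snapshot fields is that a snapshot is now built through the constructor alone. A small sketch of the new test-side construction, as in the updated plugin tests; the node and pod literals are placeholders.

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
)

func exampleSnapshot() *cache.Snapshot {
	nodes := []*v1.Node{
		{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}},
	}
	pods := []*v1.Pod{
		{
			ObjectMeta: metav1.ObjectMeta{Name: "pod-1", Namespace: "ns"},
			// Assign the pod to node-1 so it lands in that node's NodeInfo.
			Spec: v1.PodSpec{NodeName: "node-1"},
		},
	}
	// One call replaces the old NewSnapshot(CreateNodeInfoMap(pods, nodes)) pair.
	return cache.NewSnapshot(pods, nodes)
}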
+func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulernodeinfo.NodeInfo { nodeNameToInfo := make(map[string]*schedulernodeinfo.NodeInfo) for _, pod := range pods { nodeName := pod.Spec.NodeName @@ -124,42 +126,40 @@ func createImageExistenceMap(nodes []*v1.Node) map[string]sets.String { // Pods returns a PodLister func (s *Snapshot) Pods() schedulerlisters.PodLister { - return &podLister{snapshot: s} + return podLister(s.nodeInfoList) } // NodeInfos returns a NodeInfoLister. func (s *Snapshot) NodeInfos() schedulerlisters.NodeInfoLister { - return &nodeInfoLister{snapshot: s} + return s } // NumNodes returns the number of nodes in the snapshot. func (s *Snapshot) NumNodes() int { - return len(s.NodeInfoList) + return len(s.nodeInfoList) } -type podLister struct { - snapshot *Snapshot -} +type podLister []*schedulernodeinfo.NodeInfo // List returns the list of pods in the snapshot. -func (p *podLister) List(selector labels.Selector) ([]*v1.Pod, error) { - alwaysTrue := func(p *v1.Pod) bool { return true } +func (p podLister) List(selector labels.Selector) ([]*v1.Pod, error) { + alwaysTrue := func(*v1.Pod) bool { return true } return p.FilteredList(alwaysTrue, selector) } // FilteredList returns a filtered list of pods in the snapshot. -func (p *podLister) FilteredList(podFilter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { +func (p podLister) FilteredList(filter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { // podFilter is expected to return true for most or all of the pods. We // can avoid expensive array growth without wasting too much memory by // pre-allocating capacity. maxSize := 0 - for _, n := range p.snapshot.NodeInfoList { + for _, n := range p { maxSize += len(n.Pods()) } pods := make([]*v1.Pod, 0, maxSize) - for _, n := range p.snapshot.NodeInfoList { + for _, n := range p { for _, pod := range n.Pods() { - if podFilter(pod) && selector.Matches(labels.Set(pod.Labels)) { + if filter(pod) && selector.Matches(labels.Set(pod.Labels)) { pods = append(pods, pod) } } @@ -167,23 +167,19 @@ func (p *podLister) FilteredList(podFilter schedulerlisters.PodFilter, selector return pods, nil } -type nodeInfoLister struct { - snapshot *Snapshot -} - // List returns the list of nodes in the snapshot. -func (n *nodeInfoLister) List() ([]*schedulernodeinfo.NodeInfo, error) { - return n.snapshot.NodeInfoList, nil +func (s *Snapshot) List() ([]*schedulernodeinfo.NodeInfo, error) { + return s.nodeInfoList, nil } // HavePodsWithAffinityList returns the list of nodes with at least one pods with inter-pod affinity -func (n *nodeInfoLister) HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error) { - return n.snapshot.HavePodsWithAffinityNodeInfoList, nil +func (s *Snapshot) HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error) { + return s.havePodsWithAffinityNodeInfoList, nil } -// Returns the NodeInfo of the given node name. -func (n *nodeInfoLister) Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) { - if v, ok := n.snapshot.NodeInfoMap[nodeName]; ok && v.Node() != nil { +// Get returns the NodeInfo of the given node name. 
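Reading from the snapshot after this refactor goes through the lister methods only, since the map and lists are no longer exported; *Snapshot itself now satisfies the NodeInfoLister interface. A hedged sketch of the read path, with an illustrative node name and trimmed error handling:

import (
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
)

func readFromSnapshot(s *cache.Snapshot) {
	// NodeInfos() simply returns s, because *Snapshot now implements List,
	// HavePodsWithAffinityList and Get directly.
	if nodeInfo, err := s.NodeInfos().Get("node-1"); err == nil {
		_ = nodeInfo.Node()
	}

	// Pods() is backed by the ordered node list; List walks every node's pods.
	if pods, err := s.Pods().List(labels.Everything()); err == nil {
		_ = pods
	}
}

Typing podLister as a plain slice of NodeInfo reflects that the pod lister only needs the ordered node list, not a back-reference to the whole Snapshot.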
+func (s *Snapshot) Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) { + if v, ok := s.nodeInfoMap[nodeName]; ok && v.Node() != nil { return v, nil } return nil, fmt.Errorf("nodeinfo not found for node name %q", nodeName) diff --git a/pkg/scheduler/nodeinfo/snapshot/snapshot_test.go b/pkg/scheduler/internal/cache/snapshot_test.go similarity index 99% rename from pkg/scheduler/nodeinfo/snapshot/snapshot_test.go rename to pkg/scheduler/internal/cache/snapshot_test.go index 5457a2dd1fe..8c72d51d7d4 100644 --- a/pkg/scheduler/nodeinfo/snapshot/snapshot_test.go +++ b/pkg/scheduler/internal/cache/snapshot_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package snapshot +package cache import ( "reflect" diff --git a/pkg/scheduler/internal/queue/BUILD b/pkg/scheduler/internal/queue/BUILD index e5b150cde41..5722266c2ff 100644 --- a/pkg/scheduler/internal/queue/BUILD +++ b/pkg/scheduler/internal/queue/BUILD @@ -36,8 +36,8 @@ go_test( "//pkg/scheduler/apis/config:go_default_library", "//pkg/scheduler/framework/plugins:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/metrics:go_default_library", - "//pkg/scheduler/nodeinfo/snapshot:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/scheduler/internal/queue/scheduling_queue_test.go b/pkg/scheduler/internal/queue/scheduling_queue_test.go index 2242a9e9629..f820def41a7 100644 --- a/pkg/scheduler/internal/queue/scheduling_queue_test.go +++ b/pkg/scheduler/internal/queue/scheduling_queue_test.go @@ -36,8 +36,8 @@ import ( schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config" frameworkplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + "k8s.io/kubernetes/pkg/scheduler/internal/cache" "k8s.io/kubernetes/pkg/scheduler/metrics" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" "k8s.io/kubernetes/pkg/scheduler/util" ) @@ -174,7 +174,7 @@ func newDefaultFramework() framework.Framework { pls, framework.WithClientSet(fakeClient), framework.WithInformerFactory(informers.NewSharedInformerFactory(fakeClient, 0)), - framework.WithSnapshotSharedLister(nodeinfosnapshot.NewEmptySnapshot()), + framework.WithSnapshotSharedLister(cache.NewEmptySnapshot()), ) if err != nil { panic(err) diff --git a/pkg/scheduler/nodeinfo/BUILD b/pkg/scheduler/nodeinfo/BUILD index 7e8951b3d30..c270b724723 100644 --- a/pkg/scheduler/nodeinfo/BUILD +++ b/pkg/scheduler/nodeinfo/BUILD @@ -46,10 +46,7 @@ filegroup( filegroup( name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/scheduler/nodeinfo/snapshot:all-srcs", - ], + srcs = [":package-srcs"], tags = ["automanaged"], visibility = ["//visibility:public"], ) diff --git a/pkg/scheduler/nodeinfo/snapshot/BUILD b/pkg/scheduler/nodeinfo/snapshot/BUILD deleted file mode 100644 index c89f823f782..00000000000 --- a/pkg/scheduler/nodeinfo/snapshot/BUILD +++ /dev/null @@ -1,41 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) - 
-go_test( - name = "go_default_test", - srcs = ["snapshot_test.go"], - embed = [":go_default_library"], - deps = [ - "//pkg/scheduler/nodeinfo:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) - -go_library( - name = "go_default_library", - srcs = ["snapshot.go"], - importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot", - visibility = ["//visibility:public"], - deps = [ - "//pkg/scheduler/listers:go_default_library", - "//pkg/scheduler/nodeinfo:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", - ], -) diff --git a/pkg/scheduler/scheduler.go b/pkg/scheduler/scheduler.go index 8b385ae136a..4b8809cccc5 100644 --- a/pkg/scheduler/scheduler.go +++ b/pkg/scheduler/scheduler.go @@ -45,7 +45,6 @@ import ( internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" "k8s.io/kubernetes/pkg/scheduler/metrics" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" "k8s.io/kubernetes/pkg/scheduler/volumebinder" ) @@ -273,7 +272,7 @@ func New(client clientset.Interface, return nil, err } - snapshot := nodeinfosnapshot.NewEmptySnapshot() + snapshot := internalcache.NewEmptySnapshot() configurator := &Configurator{ client: client, diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index 8c79f5a1e2d..d1a05a3cb65 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -55,7 +55,6 @@ import ( internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" - nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot" st "k8s.io/kubernetes/pkg/scheduler/testing" "k8s.io/kubernetes/pkg/scheduler/volumebinder" ) @@ -686,7 +685,7 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.C algo := core.NewGenericScheduler( scache, internalqueue.NewSchedulingQueue(nil), - nodeinfosnapshot.NewEmptySnapshot(), + internalcache.NewEmptySnapshot(), fwk, []algorithm.SchedulerExtender{}, nil, @@ -745,7 +744,7 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc algo := core.NewGenericScheduler( scache, queue, - nodeinfosnapshot.NewEmptySnapshot(), + internalcache.NewEmptySnapshot(), fwk, []algorithm.SchedulerExtender{}, nil, diff --git a/test/integration/scheduler/util.go b/test/integration/scheduler/util.go index abb43771a38..72274017be5 100644 --- a/test/integration/scheduler/util.go +++ b/test/integration/scheduler/util.go @@ -774,9 +774,9 @@ func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) { func waitForSchedulerCacheCleanup(sched *scheduler.Scheduler, t *testing.T) { schedulerCacheIsEmpty := func() (bool, error) { - snapshot := sched.Cache().Snapshot() + dump := sched.Cache().Dump() - return len(snapshot.Nodes) == 0 && len(snapshot.AssumedPods) == 0, nil + return len(dump.Nodes) == 0 && len(dump.AssumedPods) == 0, nil } if err := wait.Poll(time.Second, wait.ForeverTestTimeout, schedulerCacheIsEmpty); err != nil {