diff --git a/pkg/scheduler/algorithm/predicates/metadata_test.go b/pkg/scheduler/algorithm/predicates/metadata_test.go
index 39827026fc0..a359fb7d433 100644
--- a/pkg/scheduler/algorithm/predicates/metadata_test.go
+++ b/pkg/scheduler/algorithm/predicates/metadata_test.go
@@ -355,7 +355,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
 			allPodLister := fakelisters.PodLister(append(test.existingPods, test.addedPod))
 			// getMeta creates predicate meta data given the list of pods.
 			getMeta := func(pods []*v1.Pod) (*predicateMetadata, map[string]*schedulernodeinfo.NodeInfo) {
-				s := nodeinfosnapshot.NewSnapshot(pods, test.nodes)
+				s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(pods, test.nodes))
 				_, precompute := NewServiceAffinityPredicate(s.NodeInfos(), s.Pods(), fakelisters.ServiceLister(test.services), nil)
 				RegisterPredicateMetadataProducer("ServiceAffinityMetaProducer", precompute)
 				factory := &MetadataProducerFactory{}
@@ -786,7 +786,7 @@ func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			s := nodeinfosnapshot.NewSnapshot(tt.existingPods, tt.nodes)
+			s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
 			l, _ := s.NodeInfos().List()
 			gotAffinityPodsMaps, gotAntiAffinityPodsMaps, err := getTPMapMatchingIncomingAffinityAntiAffinity(tt.pod, l)
 			if (err != nil) != tt.wantErr {
@@ -1182,7 +1182,7 @@ func TestGetTPMapMatchingSpreadConstraints(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			s := nodeinfosnapshot.NewSnapshot(tt.existingPods, tt.nodes)
+			s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
 			l, _ := s.NodeInfos().List()
 			got, _ := getEvenPodsSpreadMetadata(tt.pod, l)
 			got.sortCriticalPaths()
@@ -1450,7 +1450,7 @@ func TestPodSpreadCache_addPod(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			s := nodeinfosnapshot.NewSnapshot(tt.existingPods, tt.nodes)
+			s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
 			l, _ := s.NodeInfos().List()
 			evenPodsSpreadMetadata, _ := getEvenPodsSpreadMetadata(tt.preemptor, l)
 			evenPodsSpreadMetadata.addPod(tt.addedPod, tt.preemptor, tt.nodes[tt.nodeIdx])
@@ -1628,7 +1628,7 @@ func TestPodSpreadCache_removePod(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			s := nodeinfosnapshot.NewSnapshot(tt.existingPods, tt.nodes)
+			s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
 			l, _ := s.NodeInfos().List()
 			evenPodsSpreadMetadata, _ := getEvenPodsSpreadMetadata(tt.preemptor, l)
@@ -1687,7 +1687,7 @@ func BenchmarkTestGetTPMapMatchingSpreadConstraints(b *testing.B) {
 	for _, tt := range tests {
 		b.Run(tt.name, func(b *testing.B) {
 			existingPods, allNodes, _ := st.MakeNodesAndPodsForEvenPodsSpread(tt.pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum)
-			s := nodeinfosnapshot.NewSnapshot(existingPods, allNodes)
+			s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes))
 			l, _ := s.NodeInfos().List()
 			b.ResetTimer()
 			for i := 0; i < b.N; i++ {
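Every test hunk above (and in the files that follow) applies the same mechanical rewrite: the pod/node pivot that `NewSnapshot` used to perform internally is now done explicitly by the caller via `CreateNodeInfoMap`. A minimal sketch of the before/after call shape; the `buildSnapshot` wrapper and its arguments are illustrative, not part of the patch:

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
)

// buildSnapshot captures the rewritten call shape used throughout these tests.
func buildSnapshot(pods []*v1.Pod, nodes []*v1.Node) *nodeinfosnapshot.Snapshot {
	// Before: nodeinfosnapshot.NewSnapshot(pods, nodes)
	// After:  the caller pivots pods/nodes into a per-node-name map first.
	return nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(pods, nodes))
}
```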
diff --git a/pkg/scheduler/algorithm/predicates/predicates_test.go b/pkg/scheduler/algorithm/predicates/predicates_test.go
index 8d4c9890208..fcbc7080ccb 100644
--- a/pkg/scheduler/algorithm/predicates/predicates_test.go
+++ b/pkg/scheduler/algorithm/predicates/predicates_test.go
@@ -1860,7 +1860,7 @@ func TestServiceAffinity(t *testing.T) {
 		testIt := func(skipPrecompute bool) {
 			t.Run(fmt.Sprintf("%v/skipPrecompute/%v", test.name, skipPrecompute), func(t *testing.T) {
 				nodes := []*v1.Node{&node1, &node2, &node3, &node4, &node5}
-				s := nodeinfosnapshot.NewSnapshot(test.pods, nodes)
+				s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
 				// Reimplementing the logic that the scheduler implements: Any time it makes a predicate, it registers any precomputations.
 				predicate, precompute := NewServiceAffinityPredicate(s.NodeInfos(), s.Pods(), fakelisters.ServiceLister(test.services), test.labels)
@@ -2929,7 +2929,7 @@ func TestInterPodAffinity(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			s := nodeinfosnapshot.NewSnapshot(test.pods, []*v1.Node{test.node})
+			s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, []*v1.Node{test.node}))
 			fit := PodAffinityChecker{
 				nodeInfoLister: s.NodeInfos(),
@@ -4025,7 +4025,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 	for indexTest, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			for indexNode, node := range test.nodes {
 				testFit := PodAffinityChecker{
 					nodeInfoLister: snapshot.NodeInfos(),
@@ -4044,7 +4044,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 			}
 			affinity := test.pod.Spec.Affinity
 			if affinity != nil && affinity.NodeAffinity != nil {
-				s := nodeinfosnapshot.NewSnapshot(nil, []*v1.Node{node})
+				s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, []*v1.Node{node}))
 				factory := &MetadataProducerFactory{}
 				fits2, reasons, err := PodMatchNodeSelector(test.pod, factory.GetPredicateMetadata(test.pod, s), s.NodeInfoMap[node.Name])
 				if err != nil {
@@ -4967,7 +4967,7 @@ func TestEvenPodsSpreadPredicate_SingleConstraint(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			s := nodeinfosnapshot.NewSnapshot(tt.existingPods, tt.nodes)
+			s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
 			factory := &MetadataProducerFactory{}
 			meta := factory.GetPredicateMetadata(tt.pod, s)
 			for _, node := range tt.nodes {
@@ -5161,7 +5161,7 @@ func TestEvenPodsSpreadPredicate_MultipleConstraints(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			s := nodeinfosnapshot.NewSnapshot(tt.existingPods, tt.nodes)
+			s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
 			factory := &MetadataProducerFactory{}
 			meta := factory.GetPredicateMetadata(tt.pod, s)
 			for _, node := range tt.nodes {
diff --git a/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go b/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go
index 2605ed83222..0e7d13bd3d2 100644
--- a/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go
+++ b/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go
@@ -401,7 +401,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			if len(test.pod.Spec.Volumes) > 0 {
 				maxVolumes := 5
 				for _, info := range snapshot.NodeInfoMap {
diff --git a/pkg/scheduler/algorithm/priorities/even_pods_spread_test.go b/pkg/scheduler/algorithm/priorities/even_pods_spread_test.go
index 5d541b3c332..0af2c3668cf 100644
--- a/pkg/scheduler/algorithm/priorities/even_pods_spread_test.go
+++ b/pkg/scheduler/algorithm/priorities/even_pods_spread_test.go
@@ -434,7 +434,7 @@ func TestCalculateEvenPodsSpreadPriority(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			allNodes := append([]*v1.Node{}, tt.nodes...)
 			allNodes = append(allNodes, tt.failedNodes...)
-			snapshot := nodeinfosnapshot.NewSnapshot(tt.existingPods, allNodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, allNodes))
 			meta := &priorityMetadata{
 				podTopologySpreadMap: buildPodTopologySpreadMap(tt.pod, tt.nodes, snapshot.NodeInfoList),
@@ -497,7 +497,7 @@ func BenchmarkTestCalculateEvenPodsSpreadPriority(b *testing.B) {
 	for _, tt := range tests {
 		b.Run(tt.name, func(b *testing.B) {
 			existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(tt.pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum)
-			snapshot := nodeinfosnapshot.NewSnapshot(existingPods, allNodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes))
 			meta := &priorityMetadata{
 				podTopologySpreadMap: buildPodTopologySpreadMap(tt.pod, filteredNodes, snapshot.NodeInfoList),
 			}
diff --git a/pkg/scheduler/algorithm/priorities/image_locality_test.go b/pkg/scheduler/algorithm/priorities/image_locality_test.go
index a7cdc596e24..e79c8e3aac3 100644
--- a/pkg/scheduler/algorithm/priorities/image_locality_test.go
+++ b/pkg/scheduler/algorithm/priorities/image_locality_test.go
@@ -184,7 +184,7 @@ func TestImageLocalityPriority(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			list, err := runMapReducePriority(ImageLocalityPriorityMap, nil, &priorityMetadata{totalNumNodes: len(test.nodes)}, test.pod, snapshot, test.nodes)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
diff --git a/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go b/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go
index fa2f8368c3c..5783a27ca7a 100644
--- a/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go
+++ b/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go
@@ -515,7 +515,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			allNodes := append([]*v1.Node{}, test.nodes...)
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			meta := &priorityMetadata{
 				topologyScore: buildTopologyPairToScore(test.pod, snapshot, allNodes, v1.DefaultHardPodAffinitySymmetricWeight),
@@ -611,7 +611,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			allNodes := append([]*v1.Node{}, test.nodes...)
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			meta := &priorityMetadata{
 				topologyScore: buildTopologyPairToScore(test.pod, snapshot, allNodes, test.hardPodAffinityWeight),
@@ -675,7 +675,7 @@ func BenchmarkInterPodAffinityPriority(b *testing.B) {
 	for _, test := range tests {
 		b.Run(test.name, func(b *testing.B) {
 			existingPods, allNodes := test.prepFunc(test.existingPodsNum, test.allNodesNum)
-			snapshot := nodeinfosnapshot.NewSnapshot(existingPods, allNodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes))
 			meta := &priorityMetadata{
 				topologyScore: buildTopologyPairToScore(test.pod, snapshot, allNodes, v1.DefaultHardPodAffinitySymmetricWeight),
diff --git a/pkg/scheduler/algorithm/priorities/least_requested_test.go b/pkg/scheduler/algorithm/priorities/least_requested_test.go
index 3ff6d7e4c61..ecec1232457 100644
--- a/pkg/scheduler/algorithm/priorities/least_requested_test.go
+++ b/pkg/scheduler/algorithm/priorities/least_requested_test.go
@@ -253,7 +253,7 @@ func TestLeastRequested(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			list, err := runMapReducePriority(LeastRequestedPriorityMap, nil, nil, test.pod, snapshot, test.nodes)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
diff --git a/pkg/scheduler/algorithm/priorities/most_requested_test.go b/pkg/scheduler/algorithm/priorities/most_requested_test.go
index 1ed240c047f..946782efb70 100644
--- a/pkg/scheduler/algorithm/priorities/most_requested_test.go
+++ b/pkg/scheduler/algorithm/priorities/most_requested_test.go
@@ -210,7 +210,7 @@ func TestMostRequested(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			list, err := runMapReducePriority(MostRequestedPriorityMap, nil, nil, test.pod, snapshot, test.nodes)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
diff --git a/pkg/scheduler/algorithm/priorities/node_affinity_test.go b/pkg/scheduler/algorithm/priorities/node_affinity_test.go
index c61f90afafc..5860e2965ce 100644
--- a/pkg/scheduler/algorithm/priorities/node_affinity_test.go
+++ b/pkg/scheduler/algorithm/priorities/node_affinity_test.go
@@ -167,7 +167,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(nil, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
 			list, err := runMapReducePriority(CalculateNodeAffinityPriorityMap, CalculateNodeAffinityPriorityReduce, nil, test.pod, snapshot, test.nodes)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
diff --git a/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go b/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go
index f6ca0b3289f..dcdf95bdc17 100644
--- a/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go
+++ b/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go
@@ -141,7 +141,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(nil, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
 			list, err := runMapReducePriority(CalculateNodePreferAvoidPodsPriorityMap, nil, nil, test.pod, snapshot, test.nodes)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
diff --git a/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio_test.go b/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio_test.go
index 0f7e5bf42f9..958c5c31b8b 100644
--- a/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio_test.go
+++ b/pkg/scheduler/algorithm/priorities/requested_to_capacity_ratio_test.go
@@ -240,7 +240,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
 	newPod := buildResourcesPod("", test.requested)
-	snapshot := nodeinfosnapshot.NewSnapshot(scheduledPods, nodes)
+	snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(scheduledPods, nodes))
 	list, err := runMapReducePriority(RequestedToCapacityRatioResourceAllocationPriorityDefault().PriorityMap, nil, nil, newPod, snapshot, nodes)
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
@@ -386,7 +386,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			functionShape, _ := NewFunctionShape([]FunctionShapePoint{{0, 0}, {100, 10}})
 			resourceToWeightMap := ResourceToWeightMap{v1.ResourceName("intel.com/foo"): 1}
 			prior := RequestedToCapacityRatioResourceAllocationPriority(functionShape, resourceToWeightMap)
@@ -611,7 +611,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			functionShape, _ := NewFunctionShape([]FunctionShapePoint{{0, 0}, {100, 10}})
 			resourceToWeightMap := ResourceToWeightMap{v1.ResourceName("intel.com/foo"): 3, v1.ResourceName("intel.com/bar"): 5}
 			prior := RequestedToCapacityRatioResourceAllocationPriority(functionShape, resourceToWeightMap)
diff --git a/pkg/scheduler/algorithm/priorities/resource_limits_test.go b/pkg/scheduler/algorithm/priorities/resource_limits_test.go
index 73005ef7418..22273bae529 100644
--- a/pkg/scheduler/algorithm/priorities/resource_limits_test.go
+++ b/pkg/scheduler/algorithm/priorities/resource_limits_test.go
@@ -138,7 +138,7 @@ func TestResourceLimitsPriority(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(nil, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
 			metadata := &priorityMetadata{
 				podLimits: getResourceLimits(test.pod),
 			}
diff --git a/pkg/scheduler/algorithm/priorities/selector_spreading_test.go b/pkg/scheduler/algorithm/priorities/selector_spreading_test.go
index ba78fce2960..eee79f97a48 100644
--- a/pkg/scheduler/algorithm/priorities/selector_spreading_test.go
+++ b/pkg/scheduler/algorithm/priorities/selector_spreading_test.go
@@ -339,7 +339,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			nodes := makeNodeList(test.nodes)
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
 			selectorSpread := SelectorSpread{
 				serviceLister:    fakelisters.ServiceLister(test.services),
 				controllerLister: fakelisters.ControllerLister(test.rcs),
@@ -577,7 +577,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			nodes := makeLabeledNodeList(labeledNodes)
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
 			selectorSpread := SelectorSpread{
 				serviceLister:    fakelisters.ServiceLister(test.services),
 				controllerLister: fakelisters.ControllerLister(test.rcs),
@@ -771,7 +771,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			nodes := makeLabeledNodeList(labeledNodes)
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
 			zoneSpread := ServiceAntiAffinity{podLister: snapshot.Pods(), serviceLister: fakelisters.ServiceLister(test.services), labels: []string{"zone"}}
 			metaDataProducer := NewMetadataFactory(
diff --git a/pkg/scheduler/algorithm/priorities/spreading_perf_test.go b/pkg/scheduler/algorithm/priorities/spreading_perf_test.go
index 65d8ac8ecec..ce4d7acc2d1 100644
--- a/pkg/scheduler/algorithm/priorities/spreading_perf_test.go
+++ b/pkg/scheduler/algorithm/priorities/spreading_perf_test.go
@@ -55,7 +55,7 @@ func BenchmarkTestDefaultEvenPodsSpreadPriority(b *testing.B) {
 				SpreadConstraint(1, v1.LabelHostname, softSpread, st.MakeLabelSelector().Exists("foo").Obj()).
 				SpreadConstraint(1, v1.LabelZoneFailureDomain, softSpread, st.MakeLabelSelector().Exists("foo").Obj()).Obj()
 			existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.allNodesNum)
-			snapshot := nodeinfosnapshot.NewSnapshot(existingPods, allNodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes))
 			b.ResetTimer()
 			for i := 0; i < b.N; i++ {
@@ -84,7 +84,7 @@ func BenchmarkTestSelectorSpreadPriority(b *testing.B) {
 		b.Run(tt.name, func(b *testing.B) {
 			pod := st.MakePod().Name("p").Label("foo", "").Obj()
 			existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.allNodesNum)
-			snapshot := nodeinfosnapshot.NewSnapshot(existingPods, allNodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes))
 			services := []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": ""}}}}
 			ss := SelectorSpread{
 				serviceLister: fake.ServiceLister(services),
diff --git a/pkg/scheduler/algorithm/priorities/taint_toleration_test.go b/pkg/scheduler/algorithm/priorities/taint_toleration_test.go
index cca211287bf..a6a0fc9b39b 100644
--- a/pkg/scheduler/algorithm/priorities/taint_toleration_test.go
+++ b/pkg/scheduler/algorithm/priorities/taint_toleration_test.go
@@ -227,7 +227,7 @@ func TestTaintAndToleration(t *testing.T) {
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(nil, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
 			list, err := runMapReducePriority(ComputeTaintTolerationPriorityMap, ComputeTaintTolerationPriorityReduce, nil, test.pod, snapshot, test.nodes)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
diff --git a/pkg/scheduler/algorithm/priorities/types_test.go b/pkg/scheduler/algorithm/priorities/types_test.go
index 8e0631586bc..191ed4f4a4b 100644
--- a/pkg/scheduler/algorithm/priorities/types_test.go
+++ b/pkg/scheduler/algorithm/priorities/types_test.go
@@ -32,7 +32,7 @@ func TestEmptyPriorityMetadataProducer(t *testing.T) {
 	fakeLabelSelector := labels.SelectorFromSet(labels.Set{"foo": "bar"})
 	fakeNodes := []*v1.Node{st.MakeNode().Name("node1").Obj(), st.MakeNode().Name("node-a").Obj()}
-	snapshot := nodeinfosnapshot.NewSnapshot([]*v1.Pod{fakePod}, fakeNodes)
+	snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap([]*v1.Pod{fakePod}, fakeNodes))
 	// Test EmptyMetadataProducer
 	metadata := EmptyMetadataProducer(fakePod, fakeNodes, snapshot)
 	if metadata != nil {
diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go
index b3754d5e89a..f18d9c49774 100644
--- a/pkg/scheduler/core/generic_scheduler_test.go
+++ b/pkg/scheduler/core/generic_scheduler_test.go
@@ -996,7 +996,7 @@ func TestZeroRequest(t *testing.T) {
 			pc := priorities.PriorityConfig{Map: selectorSpreadPriorityMap, Reduce: selectorSpreadPriorityReduce, Weight: 1}
 			priorityConfigs = append(priorityConfigs, pc)
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			metaDataProducer := priorities.NewMetadataFactory(
 				informerFactory.Core().V1().Services().Lister(),
@@ -1449,7 +1449,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 			test.predicates[algorithmpredicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(n, p)
 		}
-		g.nodeInfoSnapshot = nodeinfosnapshot.NewSnapshot(test.pods, nodes)
+		g.nodeInfoSnapshot = nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
 		// newnode simulate a case that a new node is added to the cluster, but nodeNameToInfo
 		// doesn't have it yet.
 		newnode := makeNode("newnode", 1000*5, priorityutil.DefaultMemoryRequest*5)
@@ -1673,7 +1673,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 		for _, n := range test.nodes {
 			nodes = append(nodes, makeNode(n, priorityutil.DefaultMilliCPURequest*5, priorityutil.DefaultMemoryRequest*5))
 		}
-		snapshot := nodeinfosnapshot.NewSnapshot(test.pods, nodes)
+		snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
 		fwk, _ := framework.NewFramework(emptyPluginRegistry, nil, []schedulerapi.PluginConfig{}, framework.WithSnapshotSharedLister(snapshot))
 		factory := algorithmpredicates.MetadataProducerFactory{}
@@ -1830,7 +1830,7 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 			FailedPredicates:      test.failedPredMap,
 			FilteredNodesStatuses: test.nodesStatuses,
 		}
-		nodes := nodesWherePreemptionMightHelp(schedulernodeinfo.CreateNodeNameToInfoMap(nil, makeNodeList(nodeNames)), &fitErr)
+		nodes := nodesWherePreemptionMightHelp(nodeinfosnapshot.CreateNodeInfoMap(nil, makeNodeList(nodeNames)), &fitErr)
 		if len(test.expected) != len(nodes) {
 			t.Errorf("number of nodes is not the same as expected. exptectd: %d, got: %d. Nodes: %v", len(test.expected), len(nodes), nodes)
Nodes: %v", len(test.expected), len(nodes), nodes) } diff --git a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_test.go b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_test.go index 4804cfc6472..2c8d3d2e838 100644 --- a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_test.go +++ b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_test.go @@ -342,7 +342,7 @@ func TestDefaultPodTopologySpreadScore(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { nodes := makeNodeList(test.nodes) - snapshot := nodeinfosnapshot.NewSnapshot(test.pods, nodes) + snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes)) fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot)) mapFunction, reduceFunction := priorities.NewSelectorSpreadPriority( @@ -601,7 +601,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { nodes := makeLabeledNodeList(labeledNodes) - snapshot := nodeinfosnapshot.NewSnapshot(test.pods, nodes) + snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes)) fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot)) mapFunction, reduceFunction := priorities.NewSelectorSpreadPriority( diff --git a/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go b/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go index a6cb30112b0..80f1cfcc9e3 100644 --- a/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go +++ b/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go @@ -201,7 +201,7 @@ func TestImageLocalityPriority(t *testing.T) { 1, ) - snapshot := nodeinfosnapshot.NewSnapshot(nil, test.nodes) + snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes)) meta := metaDataProducer(test.pod, test.nodes, snapshot) state := framework.NewCycleState() diff --git a/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity_test.go b/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity_test.go index 41b900f30d0..68433917560 100644 --- a/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity_test.go +++ b/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity_test.go @@ -735,7 +735,7 @@ func TestSingleNode(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - snapshot := nodeinfosnapshot.NewSnapshot(test.pods, []*v1.Node{test.node}) + snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, []*v1.Node{test.node})) factory := &predicates.MetadataProducerFactory{} meta := factory.GetPredicateMetadata(test.pod, snapshot) state := framework.NewCycleState() @@ -1436,7 +1436,7 @@ func TestMultipleNodes(t *testing.T) { for indexTest, test := range tests { t.Run(test.name, func(t *testing.T) { - snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes) + snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes)) for indexNode, node := range test.nodes { factory := &predicates.MetadataProducerFactory{} meta := factory.GetPredicateMetadata(test.pod, snapshot) @@ -1943,7 +1943,7 @@ func TestInterPodAffinityPriority(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t 
diff --git a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_test.go b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_test.go
index 4804cfc6472..2c8d3d2e838 100644
--- a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_test.go
+++ b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread_test.go
@@ -342,7 +342,7 @@ func TestDefaultPodTopologySpreadScore(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			nodes := makeNodeList(test.nodes)
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			mapFunction, reduceFunction := priorities.NewSelectorSpreadPriority(
@@ -601,7 +601,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			nodes := makeLabeledNodeList(labeledNodes)
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			mapFunction, reduceFunction := priorities.NewSelectorSpreadPriority(
diff --git a/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go b/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go
index a6cb30112b0..80f1cfcc9e3 100644
--- a/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go
+++ b/pkg/scheduler/framework/plugins/imagelocality/image_locality_test.go
@@ -201,7 +201,7 @@ func TestImageLocalityPriority(t *testing.T) {
 				1,
 			)
-			snapshot := nodeinfosnapshot.NewSnapshot(nil, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
 			meta := metaDataProducer(test.pod, test.nodes, snapshot)
 			state := framework.NewCycleState()
diff --git a/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity_test.go b/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity_test.go
index 41b900f30d0..68433917560 100644
--- a/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity_test.go
+++ b/pkg/scheduler/framework/plugins/interpodaffinity/interpod_affinity_test.go
@@ -735,7 +735,7 @@ func TestSingleNode(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, []*v1.Node{test.node})
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, []*v1.Node{test.node}))
 			factory := &predicates.MetadataProducerFactory{}
 			meta := factory.GetPredicateMetadata(test.pod, snapshot)
 			state := framework.NewCycleState()
@@ -1436,7 +1436,7 @@ func TestMultipleNodes(t *testing.T) {
 	for indexTest, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			for indexNode, node := range test.nodes {
 				factory := &predicates.MetadataProducerFactory{}
 				meta := factory.GetPredicateMetadata(test.pod, snapshot)
@@ -1943,7 +1943,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			state := framework.NewCycleState()
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			client := clientsetfake.NewSimpleClientset()
@@ -2058,7 +2058,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			state := framework.NewCycleState()
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			client := clientsetfake.NewSimpleClientset()
diff --git a/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go b/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go
index 96eeea9d501..88d653cd2f6 100644
--- a/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go
+++ b/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go
@@ -850,7 +850,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			state := framework.NewCycleState()
-			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewSnapshot(nil, test.nodes)))
+			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))))
 			p, _ := New(nil, fh)
 			var gotList framework.NodeScoreList
 			for _, n := range test.nodes {
diff --git a/pkg/scheduler/framework/plugins/nodelabel/node_label_test.go b/pkg/scheduler/framework/plugins/nodelabel/node_label_test.go
index 8d552b599f4..c03edff20b8 100644
--- a/pkg/scheduler/framework/plugins/nodelabel/node_label_test.go
+++ b/pkg/scheduler/framework/plugins/nodelabel/node_label_test.go
@@ -227,7 +227,7 @@ func TestNodeLabelScore(t *testing.T) {
 		t.Run(test.name, func(t *testing.T) {
 			state := framework.NewCycleState()
 			node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: map[string]string{"foo": "", "bar": ""}}}
-			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewSnapshot(nil, []*v1.Node{node})))
+			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, []*v1.Node{node}))))
 			args := &runtime.Unknown{Raw: []byte(test.rawArgs)}
 			p, err := New(args, fh)
 			if err != nil {
diff --git a/pkg/scheduler/framework/plugins/nodepreferavoidpods/node_prefer_avoid_pods_test.go b/pkg/scheduler/framework/plugins/nodepreferavoidpods/node_prefer_avoid_pods_test.go
index 13a9462f9b3..7839700c21e 100644
--- a/pkg/scheduler/framework/plugins/nodepreferavoidpods/node_prefer_avoid_pods_test.go
+++ b/pkg/scheduler/framework/plugins/nodepreferavoidpods/node_prefer_avoid_pods_test.go
@@ -143,7 +143,7 @@ func TestNodePreferAvoidPods(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			state := framework.NewCycleState()
-			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewSnapshot(nil, test.nodes)))
+			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))))
 			p, _ := New(nil, fh)
 			var gotList framework.NodeScoreList
 			for _, n := range test.nodes {
diff --git a/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go b/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go
index c2b3692dc94..b3abd4ad5e7 100644
--- a/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go
+++ b/pkg/scheduler/framework/plugins/noderesources/balanced_allocation_test.go
@@ -379,7 +379,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			if len(test.pod.Spec.Volumes) > 0 {
 				maxVolumes := 5
 				nodeInfoList, _ := snapshot.NodeInfos().List()
diff --git a/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go b/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go
index d72151a82f3..169eea9bdbc 100644
--- a/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go
+++ b/pkg/scheduler/framework/plugins/noderesources/least_allocated_test.go
@@ -233,7 +233,7 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			p, _ := NewLeastAllocated(nil, fh)
 			for i := range test.nodes {
diff --git a/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go b/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go
index e19fb4514aa..1b6d2fa3595 100644
--- a/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go
+++ b/pkg/scheduler/framework/plugins/noderesources/most_allocated_test.go
@@ -196,7 +196,7 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			p, _ := NewMostAllocated(nil, fh)
 			for i := range test.nodes {
diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/pod_topology_spread_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/pod_topology_spread_test.go
index 4ab15201033..06630d90088 100644
--- a/pkg/scheduler/framework/plugins/podtopologyspread/pod_topology_spread_test.go
+++ b/pkg/scheduler/framework/plugins/podtopologyspread/pod_topology_spread_test.go
@@ -269,7 +269,7 @@ func TestPodTopologySpread_Filter_SingleConstraint(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(tt.existingPods, tt.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
 			factory := &predicates.MetadataProducerFactory{}
 			meta := factory.GetPredicateMetadata(tt.pod, snapshot)
 			state := framework.NewCycleState()
@@ -467,7 +467,7 @@ func TestPodTopologySpread_Filter_MultipleConstraints(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			snapshot := nodeinfosnapshot.NewSnapshot(tt.existingPods, tt.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
 			factory := &predicates.MetadataProducerFactory{}
 			meta := factory.GetPredicateMetadata(tt.pod, snapshot)
 			state := framework.NewCycleState()
diff --git a/pkg/scheduler/framework/plugins/requestedtocapacityratio/requested_to_capacity_ratio_test.go b/pkg/scheduler/framework/plugins/requestedtocapacityratio/requested_to_capacity_ratio_test.go
index 5896ab69397..300da199c78 100644
--- a/pkg/scheduler/framework/plugins/requestedtocapacityratio/requested_to_capacity_ratio_test.go
+++ b/pkg/scheduler/framework/plugins/requestedtocapacityratio/requested_to_capacity_ratio_test.go
@@ -65,7 +65,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			state := framework.NewCycleState()
-			snapshot := nodeinfosnapshot.NewSnapshot(test.scheduledPods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.scheduledPods, test.nodes))
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			args := &runtime.Unknown{Raw: []byte(`{"FunctionShape" : [{"Utilization" : 0, "Score" : 100}, {"Utilization" : 100, "Score" : 0}], "ResourceToWeightMap" : {"memory" : 1, "cpu" : 1}}`)}
 			p, _ := New(args, fh)
diff --git a/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity_test.go b/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity_test.go
index 6a943133426..f7fa458f36c 100644
--- a/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity_test.go
+++ b/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity_test.go
@@ -163,7 +163,7 @@ func TestServiceAffinity(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			nodes := []*v1.Node{&node1, &node2, &node3, &node4, &node5}
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
 			predicate, precompute := predicates.NewServiceAffinityPredicate(snapshot.NodeInfos(), snapshot.Pods(), fakelisters.ServiceLister(test.services), test.labels)
 			predicates.RegisterPredicateMetadataProducer("ServiceAffinityMetaProducer", precompute)
@@ -390,7 +390,7 @@ func TestServiceAffinityScore(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			nodes := makeLabeledNodeList(test.nodes)
-			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			serviceLister := fakelisters.ServiceLister(test.services)
 			priorityMapFunction, priorityReduceFunction := priorities.NewServiceAntiAffinityPriority(snapshot.Pods(), serviceLister, test.labels)
diff --git a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go
index ff63f1a37ae..44e486b7a10 100644
--- a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go
+++ b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go
@@ -230,7 +230,7 @@ func TestTaintTolerationScore(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			state := framework.NewCycleState()
-			snapshot := nodeinfosnapshot.NewSnapshot(nil, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
 			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 			p, _ := New(nil, fh)
diff --git a/pkg/scheduler/nodeinfo/BUILD b/pkg/scheduler/nodeinfo/BUILD
index 1189a4c8020..a33c84cdee3 100644
--- a/pkg/scheduler/nodeinfo/BUILD
+++ b/pkg/scheduler/nodeinfo/BUILD
@@ -5,7 +5,6 @@ go_library(
     srcs = [
        "host_ports.go",
        "node_info.go",
-        "util.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo",
    visibility = ["//visibility:public"],
@@ -15,7 +14,6 @@
        "//pkg/scheduler/algorithm/priorities/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
@@ -26,7 +24,6 @@ go_test(
    srcs = [
        "host_ports_test.go",
        "node_info_test.go",
-        "util_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
@@ -35,7 +32,6 @@
        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
    ],
diff --git a/pkg/scheduler/nodeinfo/snapshot/BUILD b/pkg/scheduler/nodeinfo/snapshot/BUILD
index 870614f9397..c89f823f782 100644
--- a/pkg/scheduler/nodeinfo/snapshot/BUILD
+++ b/pkg/scheduler/nodeinfo/snapshot/BUILD
@@ -1,17 +1,4 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "go_default_library",
-    srcs = ["snapshot.go"],
-    importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot",
-    visibility = ["//visibility:public"],
-    deps = [
-        "//pkg/scheduler/listers:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
-        "//staging/src/k8s.io/api/core/v1:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
-    ],
-)
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
 
 filegroup(
     name = "package-srcs",
@@ -26,3 +13,29 @@
     tags = ["automanaged"],
     visibility = ["//visibility:public"],
 )
+
+go_test(
+    name = "go_default_test",
+    srcs = ["snapshot_test.go"],
+    embed = [":go_default_library"],
+    deps = [
+        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+    ],
+)
+
+go_library(
+    name = "go_default_library",
+    srcs = ["snapshot.go"],
+    importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/scheduler/listers:go_default_library",
+        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+    ],
+)
diff --git a/pkg/scheduler/nodeinfo/snapshot/snapshot.go b/pkg/scheduler/nodeinfo/snapshot/snapshot.go
index 68e9cf82f29..46134a76f32 100644
--- a/pkg/scheduler/nodeinfo/snapshot/snapshot.go
+++ b/pkg/scheduler/nodeinfo/snapshot/snapshot.go
@@ -14,13 +14,14 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package nodeinfo
+package snapshot
 
 import (
 	"fmt"
 
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/util/sets"
 	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
@@ -47,10 +48,9 @@ func NewEmptySnapshot() *Snapshot {
 }
 
 // NewSnapshot initializes a Snapshot struct and returns it.
-func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot {
-	nodeInfoMap := schedulernodeinfo.CreateNodeNameToInfoMap(pods, nodes)
-	nodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodes))
-	havePodsWithAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodes))
+func NewSnapshot(nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) *Snapshot {
+	nodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap))
+	havePodsWithAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap))
 	for _, v := range nodeInfoMap {
 		nodeInfoList = append(nodeInfoList, v)
 		if len(v.PodsWithAffinity()) > 0 {
@@ -66,6 +66,62 @@
 	return s
 }
 
+// CreateNodeInfoMap obtains a list of pods and pivots that list into a map where the keys are node names
+// and the values are the aggregated information for that node.
+func CreateNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulernodeinfo.NodeInfo {
+	nodeNameToInfo := make(map[string]*schedulernodeinfo.NodeInfo)
+	for _, pod := range pods {
+		nodeName := pod.Spec.NodeName
+		if _, ok := nodeNameToInfo[nodeName]; !ok {
+			nodeNameToInfo[nodeName] = schedulernodeinfo.NewNodeInfo()
+		}
+		nodeNameToInfo[nodeName].AddPod(pod)
+	}
+	imageExistenceMap := createImageExistenceMap(nodes)
+
+	for _, node := range nodes {
+		if _, ok := nodeNameToInfo[node.Name]; !ok {
+			nodeNameToInfo[node.Name] = schedulernodeinfo.NewNodeInfo()
+		}
+		nodeInfo := nodeNameToInfo[node.Name]
+		nodeInfo.SetNode(node)
+		nodeInfo.SetImageStates(getNodeImageStates(node, imageExistenceMap))
+	}
+	return nodeNameToInfo
+}
+
+// getNodeImageStates returns the given node's image states based on the given imageExistence map.
+func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*schedulernodeinfo.ImageStateSummary {
+	imageStates := make(map[string]*schedulernodeinfo.ImageStateSummary)
+
+	for _, image := range node.Status.Images {
+		for _, name := range image.Names {
+			imageStates[name] = &schedulernodeinfo.ImageStateSummary{
+				Size:     image.SizeBytes,
+				NumNodes: len(imageExistenceMap[name]),
+			}
+		}
+	}
+	return imageStates
+}
+
+// createImageExistenceMap returns a map recording on which nodes the images exist, keyed by the images' names.
+func createImageExistenceMap(nodes []*v1.Node) map[string]sets.String {
+	imageExistenceMap := make(map[string]sets.String)
+	for _, node := range nodes {
+		for _, image := range node.Status.Images {
+			for _, name := range image.Names {
+				if _, ok := imageExistenceMap[name]; !ok {
+					imageExistenceMap[name] = sets.NewString(node.Name)
+				} else {
+					imageExistenceMap[name].Insert(node.Name)
+				}
+			}
+		}
+	}
+	return imageExistenceMap
+}
+
 // Pods returns a PodLister
 func (s *Snapshot) Pods() schedulerlisters.PodLister {
 	return &podLister{snapshot: s}
 }
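With the helpers relocated, the snapshot package is self-contained: `CreateNodeInfoMap` aggregates pods and image state onto per-node `NodeInfo` objects, and `NewSnapshot` derives the flattened node lists from that map. A runnable sketch of how the pieces compose; the node/pod fixtures are illustrative, not from the patch:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
)

func main() {
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-0"}}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "p", Namespace: "default"},
		Spec:       v1.PodSpec{NodeName: "node-0"},
	}

	// Pivot pods/nodes into the per-node map, then snapshot it.
	infoMap := nodeinfosnapshot.CreateNodeInfoMap([]*v1.Pod{pod}, []*v1.Node{node})
	s := nodeinfosnapshot.NewSnapshot(infoMap)

	infos, _ := s.NodeInfos().List()
	fmt.Println(len(infos)) // 1: a single NodeInfo carrying both the node and its pod
}
```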
diff --git a/pkg/scheduler/nodeinfo/util_test.go b/pkg/scheduler/nodeinfo/snapshot/snapshot_test.go
similarity index 93%
rename from pkg/scheduler/nodeinfo/util_test.go
rename to pkg/scheduler/nodeinfo/snapshot/snapshot_test.go
index becd8d0442b..5457a2dd1fe 100644
--- a/pkg/scheduler/nodeinfo/util_test.go
+++ b/pkg/scheduler/nodeinfo/snapshot/snapshot_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package nodeinfo
+package snapshot
 
 import (
 	"reflect"
@@ -23,6 +23,7 @@ import (
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
+	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 const mb int64 = 1024 * 1024
@@ -31,7 +32,7 @@ func TestGetNodeImageStates(t *testing.T) {
 	tests := []struct {
 		node              *v1.Node
 		imageExistenceMap map[string]sets.String
-		expected          map[string]*ImageStateSummary
+		expected          map[string]*schedulernodeinfo.ImageStateSummary
 	}{
 		{
 			node: &v1.Node{
@@ -57,7 +58,7 @@ func TestGetNodeImageStates(t *testing.T) {
 				"gcr.io/10:v1":  sets.NewString("node-0", "node-1"),
 				"gcr.io/200:v1": sets.NewString("node-0"),
 			},
-			expected: map[string]*ImageStateSummary{
+			expected: map[string]*schedulernodeinfo.ImageStateSummary{
 				"gcr.io/10:v1": {
 					Size:     int64(10 * mb),
 					NumNodes: 2,
@@ -77,7 +78,7 @@ func TestGetNodeImageStates(t *testing.T) {
 				"gcr.io/10:v1":  sets.NewString("node-1"),
 				"gcr.io/200:v1": sets.NewString(),
 			},
-			expected: map[string]*ImageStateSummary{},
+			expected: map[string]*schedulernodeinfo.ImageStateSummary{},
 		},
 	}
diff --git a/pkg/scheduler/nodeinfo/util.go b/pkg/scheduler/nodeinfo/util.go
deleted file mode 100644
index bb1fd0ce612..00000000000
--- a/pkg/scheduler/nodeinfo/util.go
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package nodeinfo
-
-import (
-	"k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/util/sets"
-)
-
-// CreateNodeNameToInfoMap obtains a list of pods and pivots that list into a map where the keys are node names
-// and the values are the aggregated information for that node.
-func CreateNodeNameToInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*NodeInfo {
-	nodeNameToInfo := make(map[string]*NodeInfo)
-	for _, pod := range pods {
-		nodeName := pod.Spec.NodeName
-		if _, ok := nodeNameToInfo[nodeName]; !ok {
-			nodeNameToInfo[nodeName] = NewNodeInfo()
-		}
-		nodeNameToInfo[nodeName].AddPod(pod)
-	}
-	imageExistenceMap := createImageExistenceMap(nodes)
-
-	for _, node := range nodes {
-		if _, ok := nodeNameToInfo[node.Name]; !ok {
-			nodeNameToInfo[node.Name] = NewNodeInfo()
-		}
-		nodeInfo := nodeNameToInfo[node.Name]
-		nodeInfo.SetNode(node)
-		nodeInfo.imageStates = getNodeImageStates(node, imageExistenceMap)
-	}
-	return nodeNameToInfo
-}
-
-// getNodeImageStates returns the given node's image states based on the given imageExistence map.
-func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*ImageStateSummary {
-	imageStates := make(map[string]*ImageStateSummary)
-
-	for _, image := range node.Status.Images {
-		for _, name := range image.Names {
-			imageStates[name] = &ImageStateSummary{
-				Size:     image.SizeBytes,
-				NumNodes: len(imageExistenceMap[name]),
-			}
-		}
-	}
-	return imageStates
-}
-
-// createImageExistenceMap returns a map recording on which nodes the images exist, keyed by the images' names.
-func createImageExistenceMap(nodes []*v1.Node) map[string]sets.String {
-	imageExistenceMap := make(map[string]sets.String)
-	for _, node := range nodes {
-		for _, image := range node.Status.Images {
-			for _, name := range image.Names {
-				if _, ok := imageExistenceMap[name]; !ok {
-					imageExistenceMap[name] = sets.NewString(node.Name)
-				} else {
-					imageExistenceMap[name].Insert(node.Name)
-				}
-			}
-		}
-	}
-	return imageExistenceMap
-}
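The deleted util.go helpers are carried over to snapshot.go essentially verbatim; the one substantive edit forced by the move is visibility. Inside package `nodeinfo`, the old `CreateNodeNameToInfoMap` could assign the private field directly (`nodeInfo.imageStates = ...`), whereas the relocated `CreateNodeInfoMap` lives outside that package and must go through the exported accessor (`nodeInfo.SetImageStates(...)`).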