Merge pull request #87165 from alculquicondor/cleanup/mv_snapshot_2

Move Snapshot to internal/cache
Commit 3538320d74, authored by Kubernetes Prow Robot on 2020-01-17 17:14:07 -08:00 and committed by GitHub.
51 changed files with 193 additions and 257 deletions
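
In short: the scheduler's Snapshot type moves from pkg/scheduler/nodeinfo/snapshot into the internal pkg/scheduler/internal/cache package, the cache method UpdateNodeInfoSnapshot is renamed to UpdateSnapshot, and the former two-step construction nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(pods, nodes)) collapses into the one-step internalcache.NewSnapshot(pods, nodes). The sketch below illustrates that one-step constructor pattern; Pod, Node, NodeInfo and Snapshot here are simplified stand-ins, not the real scheduler types.

// Minimal, self-contained sketch of the consolidated constructor.
// All types are illustrative stand-ins for the scheduler's own.
package main

import "fmt"

type Node struct{ Name string }

type Pod struct {
    Name     string
    NodeName string // node the pod is bound to
}

// NodeInfo holds the per-node state that a scheduling snapshot captures.
type NodeInfo struct {
    Node *Node
    Pods []*Pod
}

// Snapshot is a point-in-time view of nodes and their pods.
type Snapshot struct {
    NodeInfoMap map[string]*NodeInfo
}

// NewSnapshot builds the node-info map internally, replacing the old
// two-step NewSnapshot(CreateNodeInfoMap(pods, nodes)) call sites.
func NewSnapshot(pods []*Pod, nodes []*Node) *Snapshot {
    m := make(map[string]*NodeInfo, len(nodes))
    for _, n := range nodes {
        m[n.Name] = &NodeInfo{Node: n}
    }
    for _, p := range pods {
        if ni, ok := m[p.NodeName]; ok {
            ni.Pods = append(ni.Pods, p)
        }
    }
    return &Snapshot{NodeInfoMap: m}
}

func main() {
    nodes := []*Node{{Name: "node-1"}, {Name: "node-2"}}
    pods := []*Pod{{Name: "p1", NodeName: "node-1"}}
    s := NewSnapshot(pods, nodes)
    fmt.Printf("node-1 has %d pod(s)\n", len(s.NodeInfoMap["node-1"].Pods))
}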


@@ -27,7 +27,6 @@ go_library(
         "//pkg/scheduler/internal/cache/debugger:go_default_library",
         "//pkg/scheduler/internal/queue:go_default_library",
         "//pkg/scheduler/metrics:go_default_library",
-        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
         "//pkg/scheduler/volumebinder:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/api/storage/v1:go_default_library",
@@ -81,7 +80,6 @@ go_test(
         "//pkg/scheduler/internal/queue:go_default_library",
         "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
-        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
         "//pkg/scheduler/testing:go_default_library",
         "//pkg/scheduler/volumebinder:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",


@@ -19,7 +19,6 @@ go_library(
         "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/metrics:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
-        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
         "//pkg/scheduler/util:go_default_library",
         "//pkg/scheduler/volumebinder:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -68,7 +67,6 @@ go_test(
         "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/listers/fake:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
-        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
         "//pkg/scheduler/testing:go_default_library",
         "//pkg/scheduler/util:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",


@@ -45,7 +45,6 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/listers"
     "k8s.io/kubernetes/pkg/scheduler/metrics"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
     "k8s.io/kubernetes/pkg/scheduler/util"
     "k8s.io/kubernetes/pkg/scheduler/volumebinder"
     utiltrace "k8s.io/utils/trace"
@@ -138,7 +137,7 @@ type genericScheduler struct {
     schedulingQueue internalqueue.SchedulingQueue
     framework framework.Framework
     extenders []algorithm.SchedulerExtender
-    nodeInfoSnapshot *nodeinfosnapshot.Snapshot
+    nodeInfoSnapshot *internalcache.Snapshot
     volumeBinder *volumebinder.VolumeBinder
     pvcLister corelisters.PersistentVolumeClaimLister
     pdbLister policylisters.PodDisruptionBudgetLister
@@ -152,7 +151,7 @@ type genericScheduler struct {
 // functions.
 func (g *genericScheduler) Snapshot() error {
     // Used for all fit and priority funcs.
-    return g.cache.UpdateNodeInfoSnapshot(g.nodeInfoSnapshot)
+    return g.cache.UpdateSnapshot(g.nodeInfoSnapshot)
 }

 // Framework returns the framework instance.
@@ -1097,7 +1096,7 @@ func podPassesBasicChecks(pod *v1.Pod, pvcLister corelisters.PersistentVolumeCla
 func NewGenericScheduler(
     cache internalcache.Cache,
     podQueue internalqueue.SchedulingQueue,
-    nodeInfoSnapshot *nodeinfosnapshot.Snapshot,
+    nodeInfoSnapshot *internalcache.Snapshot,
     framework framework.Framework,
     extenders []algorithm.SchedulerExtender,
     volumeBinder *volumebinder.VolumeBinder,
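
Read as an interface, the rename above amounts to the following abridged sketch; the method shown is inferred only from the call sites in this diff, and the real internal cache interface has many more methods.

// Abridged, illustrative sketch of the renamed cache method.
package cache

// Snapshot stands in for the snapshot type this PR moves into
// pkg/scheduler/internal/cache.
type Snapshot struct{}

type Cache interface {
    // UpdateSnapshot (formerly UpdateNodeInfoSnapshot) refreshes the
    // given snapshot to the cache's current state; genericScheduler.Snapshot()
    // calls it before running the fit and priority functions.
    UpdateSnapshot(nodeSnapshot *Snapshot) error
}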


@@ -58,7 +58,6 @@ import (
     fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
     "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
     st "k8s.io/kubernetes/pkg/scheduler/testing"
     schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )
@@ -293,7 +292,7 @@ func (pl *falseMapPlugin) ScoreExtensions() framework.ScoreExtensions {
     return nil
 }

-var emptySnapshot = nodeinfosnapshot.NewEmptySnapshot()
+var emptySnapshot = internalcache.NewEmptySnapshot()

 func makeNodeList(nodeNames []string) []*v1.Node {
     result := make([]*v1.Node, 0, len(nodeNames))
@@ -796,7 +795,7 @@ func TestGenericScheduler(t *testing.T) {
             for _, f := range test.registerPlugins {
                 f(&registry, plugins, pluginConfigs)
             }
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+            snapshot := internalcache.NewSnapshot(test.pods, nodes)
             fwk, _ := framework.NewFramework(registry, plugins, pluginConfigs, framework.WithSnapshotSharedLister(snapshot))
             var pvcs []v1.PersistentVolumeClaim
@@ -855,7 +854,7 @@ func makeScheduler(nodes []*v1.Node, fns ...st.RegisterPluginFunc) *genericSched
         fwk,
         nil, nil, nil, nil, false,
         schedulerapi.DefaultPercentageOfNodesToScore, false)
-    cache.UpdateNodeInfoSnapshot(s.(*genericScheduler).nodeInfoSnapshot)
+    cache.UpdateSnapshot(s.(*genericScheduler).nodeInfoSnapshot)
     return s.(*genericScheduler)
 }
@@ -984,7 +983,7 @@ func TestFindFitPredicateCallCounts(t *testing.T) {
         fwk,
         nil, nil, nil, nil, false,
         schedulerapi.DefaultPercentageOfNodesToScore, false).(*genericScheduler)
-    cache.UpdateNodeInfoSnapshot(scheduler.nodeInfoSnapshot)
+    cache.UpdateSnapshot(scheduler.nodeInfoSnapshot)
     queue.UpdateNominatedPodForNode(&v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("nominated")}, Spec: v1.PodSpec{Priority: &midPriority}}, "1")
     _, _, err := scheduler.findNodesThatFitPod(context.Background(), framework.NewCycleState(), test.pod)
@@ -1115,7 +1114,7 @@ func TestZeroRequest(t *testing.T) {
             client := clientsetfake.NewSimpleClientset()
             informerFactory := informers.NewSharedInformerFactory(client, 0)
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+            snapshot := internalcache.NewSnapshot(test.pods, test.nodes)
             registry := framework.Registry{}
             // TODO: instantiate the plugins dynamically.
@@ -1604,7 +1603,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
                 f(&registry, plugins, pluginConfigs)
             }
             // Use a real snapshot since it's needed in some Filter Plugin (e.g., PodAffinity)
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+            snapshot := internalcache.NewSnapshot(test.pods, nodes)
             fwk, _ := framework.NewFramework(registry, plugins, pluginConfigs, framework.WithSnapshotSharedLister(snapshot))
             scheduler := NewGenericScheduler(
@@ -1880,7 +1879,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
             for _, n := range test.nodes {
                 nodes = append(nodes, makeNode(n, schedutil.DefaultMilliCPURequest*5, schedutil.DefaultMemoryRequest*5))
             }
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+            snapshot := internalcache.NewSnapshot(test.pods, nodes)
             registry := framework.Registry{}
             // TODO: instantiate the plugins dynamically.
             plugins := &schedulerapi.Plugins{
@@ -2393,7 +2392,7 @@ func TestPreempt(t *testing.T) {
             for _, f := range test.registerPlugins {
                 f(&registry, plugins, pluginConfigs)
             }
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+            snapshot := internalcache.NewSnapshot(test.pods, nodes)
             fwk, _ := framework.NewFramework(registry, plugins, pluginConfigs, framework.WithSnapshotSharedLister(snapshot))
             scheduler := NewGenericScheduler(
@@ -2555,7 +2554,7 @@ func TestFairEvaluationForNodes(t *testing.T) {
         }
     }

-func nodesToNodeInfos(nodes []*v1.Node, snapshot *nodeinfosnapshot.Snapshot) ([]*schedulernodeinfo.NodeInfo, error) {
+func nodesToNodeInfos(nodes []*v1.Node, snapshot *internalcache.Snapshot) ([]*schedulernodeinfo.NodeInfo, error) {
     var nodeInfos []*schedulernodeinfo.NodeInfo
     for _, n := range nodes {
         nodeInfo, err := snapshot.NodeInfos().Get(n.Name)


@@ -23,7 +23,6 @@ import (
     "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake"
 )


@@ -50,7 +50,6 @@ import (
     internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     cachedebugger "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger"
     internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
     "k8s.io/kubernetes/pkg/scheduler/volumebinder"
 )
@@ -107,7 +106,7 @@ type Configurator struct {
     registry framework.Registry
     plugins *schedulerapi.Plugins
     pluginConfig []schedulerapi.PluginConfig
-    nodeInfoSnapshot *nodeinfosnapshot.Snapshot
+    nodeInfoSnapshot *internalcache.Snapshot
 }

 // create a scheduler from a set of registered plugins.


@@ -51,7 +51,6 @@ import (
     internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
     "k8s.io/kubernetes/pkg/scheduler/listers"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 )

 const (
@@ -432,7 +431,7 @@ func newConfigFactoryWithFrameworkRegistry(
     client clientset.Interface, hardPodAffinitySymmetricWeight int32, stopCh <-chan struct{},
     registry framework.Registry) *Configurator {
     informerFactory := informers.NewSharedInformerFactory(client, 0)
-    snapshot := nodeinfosnapshot.NewEmptySnapshot()
+    snapshot := internalcache.NewEmptySnapshot()
     return &Configurator{
         client: client,
         informerFactory: informerFactory,


@@ -29,7 +29,7 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
-        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
+        "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/testing:go_default_library",
         "//staging/src/k8s.io/api/apps/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",


@@ -24,7 +24,7 @@ import (
     "k8s.io/client-go/informers"
     clientsetfake "k8s.io/client-go/kubernetes/fake"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     st "k8s.io/kubernetes/pkg/scheduler/testing"
 )
@@ -52,7 +52,7 @@ func BenchmarkTestSelectorSpreadPriority(b *testing.B) {
         b.Run(tt.name, func(b *testing.B) {
             pod := st.MakePod().Name("p").Label("foo", "").Obj()
             existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.allNodesNum)
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes))
+            snapshot := cache.NewSnapshot(existingPods, allNodes)
             services := &v1.ServiceList{
                 Items: []v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": ""}}}},
             }


@@ -30,7 +30,7 @@ import (
     "k8s.io/client-go/informers"
     clientsetfake "k8s.io/client-go/kubernetes/fake"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 func controllerRef(kind, name, uid string) []metav1.OwnerReference {
@@ -343,7 +342,7 @@ func TestDefaultPodTopologySpreadScore(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             nodes := makeNodeList(test.nodes)
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+            snapshot := cache.NewSnapshot(test.pods, nodes)
             ctx := context.Background()
             informerFactory, err := populateAndStartInformers(ctx, test.rcs, test.rss, test.services, test.sss)
             if err != nil {
@@ -596,7 +596,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             nodes := makeLabeledNodeList(labeledNodes)
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+            snapshot := cache.NewSnapshot(test.pods, nodes)
             ctx := context.Background()
             informerFactory, err := populateAndStartInformers(ctx, test.rcs, test.rss, test.services, test.sss)
             if err != nil {


@@ -20,7 +20,7 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
-        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
+        "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/util/parsers:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",


@@ -26,7 +26,7 @@ import (
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     "k8s.io/kubernetes/pkg/util/parsers"
 )
@@ -186,7 +186,7 @@ func TestImageLocalityPriority(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
+            snapshot := cache.NewSnapshot(nil, test.nodes)
             state := framework.NewCycleState()


@@ -33,8 +33,8 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
-        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",


@@ -24,8 +24,8 @@ import (
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 )

 var (
@@ -777,7 +777,7 @@ func TestRequiredAffinitySingleNode(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, []*v1.Node{test.node}))
+            snapshot := cache.NewSnapshot(test.pods, []*v1.Node{test.node})
             p := &InterPodAffinity{
                 sharedLister: snapshot,
             }
@@ -1614,7 +1614,7 @@ func TestRequiredAffinityMultipleNodes(t *testing.T) {
     for indexTest, test := range tests {
         t.Run(test.name, func(t *testing.T) {
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+            snapshot := cache.NewSnapshot(test.pods, test.nodes)
             for indexNode, node := range test.nodes {
                 p := &InterPodAffinity{
                     sharedLister: snapshot,
@@ -1736,7 +1736,7 @@ func TestPreFilterDisabled(t *testing.T) {
     for indexTest, test := range tests {
         t.Run(test.name, func(t *testing.T) {
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+            snapshot := cache.NewSnapshot(test.pods, test.nodes)
             for indexNode, node := range test.nodes {
                 p := &InterPodAffinity{
                     sharedLister: snapshot,
@@ -2011,8 +2011,8 @@ func TestPreFilterStateAddRemovePod(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             // getMeta creates predicate meta data given the list of pods.
-            getState := func(pods []*v1.Pod) (*InterPodAffinity, *framework.CycleState, *preFilterState, *nodeinfosnapshot.Snapshot) {
-                snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(pods, test.nodes))
+            getState := func(pods []*v1.Pod) (*InterPodAffinity, *framework.CycleState, *preFilterState, *cache.Snapshot) {
+                snapshot := cache.NewSnapshot(pods, test.nodes)
                 p := &InterPodAffinity{
                     sharedLister: snapshot,
@@ -2298,7 +2298,7 @@ func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) {
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
+            s := cache.NewSnapshot(tt.existingPods, tt.nodes)
             l, _ := s.NodeInfos().List()
             gotAffinityPodsMap, gotAntiAffinityPodsMap, err := getTPMapMatchingIncomingAffinityAntiAffinity(tt.pod, l)
             if (err != nil) != tt.wantErr {
@@ -2315,7 +2315,7 @@ func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) {
     }
 }

-func mustGetNodeInfo(t *testing.T, snapshot *nodeinfosnapshot.Snapshot, name string) *nodeinfo.NodeInfo {
+func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *nodeinfo.NodeInfo {
     t.Helper()
     nodeInfo, err := snapshot.NodeInfos().Get(name)
     if err != nil {


@@ -26,7 +26,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 func TestPreferredAffinity(t *testing.T) {
@@ -517,7 +517,7 @@ func TestPreferredAffinity(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             state := framework.NewCycleState()
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+            snapshot := cache.NewSnapshot(test.pods, test.nodes)
             p := &InterPodAffinity{
                 sharedLister: snapshot,
                 hardPodAffinityWeight: 1,
@@ -623,7 +623,7 @@ func TestPreferredAffinityWithHardPodAffinitySymmetricWeight(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             state := framework.NewCycleState()
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+            snapshot := cache.NewSnapshot(test.pods, test.nodes)
             fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
             args := &runtime.Unknown{Raw: []byte(fmt.Sprintf(`{"hardPodAffinityWeight":%d}`, test.hardPodAffinityWeight))}


@@ -37,8 +37,8 @@ go_test(
     deps = [
         "//pkg/apis/core:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
-        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
     ],


@@ -25,8 +25,8 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     api "k8s.io/kubernetes/pkg/apis/core"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 )

 // TODO: Add test case for RequiredDuringSchedulingRequiredDuringExecution after it's implemented.
@@ -849,7 +849,7 @@ func TestNodeAffinityPriority(t *testing.T) {
         t.Run(test.name, func(t *testing.T) {
             state := framework.NewCycleState()
-            fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))))
+            fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewSnapshot(nil, test.nodes)))
             p, _ := New(nil, fh)
             var gotList framework.NodeScoreList
             for _, n := range test.nodes {


@@ -20,8 +20,8 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
-        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",


@@ -24,8 +24,8 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 )

 func TestValidateNodeLabelArgs(t *testing.T) {
@@ -227,7 +227,7 @@ func TestNodeLabelScore(t *testing.T) {
         t.Run(test.name, func(t *testing.T) {
             state := framework.NewCycleState()
             node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: map[string]string{"foo": "", "bar": ""}}}
-            fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, []*v1.Node{node}))))
+            fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewSnapshot(nil, []*v1.Node{node})))
             args := &runtime.Unknown{Raw: []byte(test.rawArgs)}
             p, err := New(args, fh)
             if err != nil {
@@ -262,7 +262,7 @@ func TestNodeLabelFilterWithoutNode(t *testing.T) {
 func TestNodeLabelScoreWithoutNode(t *testing.T) {
     t.Run("node does not exist", func(t *testing.T) {
-        fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewEmptySnapshot()))
+        fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewEmptySnapshot()))
         p, err := New(nil, fh)
         if err != nil {
             t.Fatalf("Failed to create plugin: %+v", err)


@@ -20,7 +20,7 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
-        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
+        "//pkg/scheduler/internal/cache:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
     ],


@@ -24,7 +24,7 @@ import (
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 func TestNodePreferAvoidPods(t *testing.T) {
@@ -143,7 +143,7 @@ func TestNodePreferAvoidPods(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             state := framework.NewCycleState()
-            fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))))
+            fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewSnapshot(nil, test.nodes)))
             p, _ := New(nil, fh)
             var gotList framework.NodeScoreList
             for _, n := range test.nodes {


@@ -60,8 +60,8 @@ go_test(
         "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
-        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",


@@ -28,7 +28,7 @@ import (
     featuregatetesting "k8s.io/component-base/featuregate/testing"
     "k8s.io/kubernetes/pkg/features"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 // getExistingVolumeCountForNode gets the current number of volumes on node.
@@ -379,7 +379,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+            snapshot := cache.NewSnapshot(test.pods, test.nodes)
             if len(test.pod.Spec.Volumes) > 0 {
                 maxVolumes := 5
                 nodeInfoList, _ := snapshot.NodeInfos().List()


@@ -25,7 +25,7 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 func TestNodeResourcesLeastAllocated(t *testing.T) {
@@ -233,7 +233,7 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+            snapshot := cache.NewSnapshot(test.pods, test.nodes)
             fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
             p, _ := NewLeastAllocated(nil, fh)
             for i := range test.nodes {


@@ -25,7 +25,7 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 func TestNodeResourcesMostAllocated(t *testing.T) {
@@ -196,7 +196,7 @@ func TestNodeResourcesMostAllocated(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+            snapshot := cache.NewSnapshot(test.pods, test.nodes)
             fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
             p, _ := NewMostAllocated(nil, fh)
             for i := range test.nodes {


@@ -26,7 +26,7 @@ import (
     "k8s.io/apimachinery/pkg/api/resource"
     "k8s.io/apimachinery/pkg/runtime"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 func TestRequestedToCapacityRatio(t *testing.T) {
@@ -65,7 +65,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             state := framework.NewCycleState()
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.scheduledPods, test.nodes))
+            snapshot := cache.NewSnapshot(test.scheduledPods, test.nodes)
             fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
             args := &runtime.Unknown{Raw: []byte(`{"shape" : [{"utilization" : 0, "score" : 10}, {"utilization" : 100, "score" : 0}], "resources" : [{"name" : "memory", "weight" : 1}, {"name" : "cpu", "weight" : 1}]}`)}
             p, err := NewRequestedToCapacityRatio(args, fh)
@@ -349,7 +349,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             state := framework.NewCycleState()
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+            snapshot := cache.NewSnapshot(test.pods, test.nodes)
             fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
             args := &runtime.Unknown{Raw: []byte(`{"shape" : [{"utilization" : 0, "score" : 0}, {"utilization" : 100, "score" : 1}], "resources" : [{"name" : "intel.com/foo", "weight" : 1}]}`)}
             p, err := NewRequestedToCapacityRatio(args, fh)
@@ -584,7 +584,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             state := framework.NewCycleState()
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+            snapshot := cache.NewSnapshot(test.pods, test.nodes)
             fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
             args := &runtime.Unknown{Raw: []byte(`{"shape" : [{"utilization" : 0, "score" : 0}, {"utilization" : 100, "score" : 1}], "resources" : [{"name" : "intel.com/foo", "weight" : 3}, {"name" : "intel.com/bar", "weight": 5}]}`)}
             p, err := NewRequestedToCapacityRatio(args, fh)


@@ -23,7 +23,7 @@ import (
     v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 func TestResourceLimits(t *testing.T) {
@@ -146,7 +146,7 @@ func TestResourceLimits(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
+            snapshot := cache.NewSnapshot(nil, test.nodes)
             fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
             p := &ResourceLimits{handle: fh}
             for i := range test.nodes {


@@ -34,7 +34,7 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
-        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
+        "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/testing:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",


@@ -25,7 +25,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     st "k8s.io/kubernetes/pkg/scheduler/testing"
 )
@@ -414,7 +414,7 @@ func TestCalPreFilterState(t *testing.T) {
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
+            s := cache.NewSnapshot(tt.existingPods, tt.nodes)
             l, _ := s.NodeInfos().List()
             got, _ := calPreFilterState(tt.pod, l)
             got.sortCriticalPaths()
@@ -712,7 +712,7 @@ func TestPreFilterStateAddPod(t *testing.T) {
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
+            s := cache.NewSnapshot(tt.existingPods, tt.nodes)
             l, _ := s.NodeInfos().List()
             state, _ := calPreFilterState(tt.preemptor, l)
             state.updateWithPod(tt.addedPod, tt.preemptor, tt.nodes[tt.nodeIdx], 1)
@@ -902,7 +902,7 @@ func TestPreFilterStateRemovePod(t *testing.T) {
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
+            s := cache.NewSnapshot(tt.existingPods, tt.nodes)
             l, _ := s.NodeInfos().List()
             state, _ := calPreFilterState(tt.preemptor, l)
@@ -961,7 +961,7 @@ func BenchmarkTestCalPreFilterState(b *testing.B) {
     for _, tt := range tests {
         b.Run(tt.name, func(b *testing.B) {
             existingPods, allNodes, _ := st.MakeNodesAndPodsForEvenPodsSpread(tt.pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum)
-            s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes))
+            s := cache.NewSnapshot(existingPods, allNodes)
             l, _ := s.NodeInfos().List()
             b.ResetTimer()
             for i := 0; i < b.N; i++ {
@@ -1228,7 +1228,7 @@ func TestSingleConstraint(t *testing.T) {
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
+            snapshot := cache.NewSnapshot(tt.existingPods, tt.nodes)
             p := &PodTopologySpread{sharedLister: snapshot}
             state := framework.NewCycleState()
             preFilterStatus := p.PreFilter(context.Background(), state, tt.pod)
@@ -1428,7 +1428,7 @@ func TestMultipleConstraints(t *testing.T) {
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
+            snapshot := cache.NewSnapshot(tt.existingPods, tt.nodes)
             p := &PodTopologySpread{sharedLister: snapshot}
             state := framework.NewCycleState()
             preFilterStatus := p.PreFilter(context.Background(), state, tt.pod)


@@ -24,7 +24,7 @@ import (
     v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/util/sets"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     st "k8s.io/kubernetes/pkg/scheduler/testing"
 )
@@ -435,7 +435,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
             allNodes := append([]*v1.Node{}, tt.nodes...)
             allNodes = append(allNodes, tt.failedNodes...)
             state := framework.NewCycleState()
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, allNodes))
+            snapshot := cache.NewSnapshot(tt.existingPods, allNodes)
             p := &PodTopologySpread{sharedLister: snapshot}
             status := p.PostFilter(context.Background(), state, tt.pod, tt.nodes, nil)
@@ -505,7 +505,7 @@ func BenchmarkTestPodTopologySpreadScore(b *testing.B) {
         b.Run(tt.name, func(b *testing.B) {
             existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(tt.pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum)
             state := framework.NewCycleState()
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes))
+            snapshot := cache.NewSnapshot(existingPods, allNodes)
             p := &PodTopologySpread{sharedLister: snapshot}
             status := p.PostFilter(context.Background(), state, tt.pod, filteredNodes, nil)
@@ -564,7 +564,7 @@ func BenchmarkTestDefaultEvenPodsSpreadPriority(b *testing.B) {
                 SpreadConstraint(1, v1.LabelZoneFailureDomain, softSpread, st.MakeLabelSelector().Exists("foo").Obj()).Obj()
             existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.allNodesNum)
             state := framework.NewCycleState()
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes))
+            snapshot := cache.NewSnapshot(existingPods, allNodes)
             p := &PodTopologySpread{sharedLister: snapshot}
             status := p.PostFilter(context.Background(), state, pod, filteredNodes, nil)


@@ -23,9 +23,9 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/listers/fake:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
-        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
     ],


@@ -25,9 +25,9 @@ import (
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
     "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 )

 func TestServiceAffinity(t *testing.T) {
@@ -160,7 +160,7 @@ func TestServiceAffinity(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             nodes := []*v1.Node{&node1, &node2, &node3, &node4, &node5}
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+            snapshot := cache.NewSnapshot(test.pods, nodes)
             p := &ServiceAffinity{
                 sharedLister: snapshot,
@@ -383,7 +383,7 @@ func TestServiceAffinityScore(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             nodes := makeLabeledNodeList(test.nodes)
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+            snapshot := cache.NewSnapshot(test.pods, nodes)
             serviceLister := fakelisters.ServiceLister(test.services)
             p := &ServiceAffinity{
@@ -494,8 +494,8 @@ func TestPreFilterStateAddRemovePod(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             // getMeta creates predicate meta data given the list of pods.
-            getState := func(pods []*v1.Pod) (*ServiceAffinity, *framework.CycleState, *preFilterState, *nodeinfosnapshot.Snapshot) {
-                snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(pods, test.nodes))
+            getState := func(pods []*v1.Pod) (*ServiceAffinity, *framework.CycleState, *preFilterState, *cache.Snapshot) {
+                snapshot := cache.NewSnapshot(pods, test.nodes)
                 p := &ServiceAffinity{
                     sharedLister: snapshot,
@@ -591,7 +591,7 @@ func sortNodeScoreList(out framework.NodeScoreList) {
     })
 }

-func mustGetNodeInfo(t *testing.T, snapshot *nodeinfosnapshot.Snapshot, name string) *nodeinfo.NodeInfo {
+func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *nodeinfo.NodeInfo {
     t.Helper()
     nodeInfo, err := snapshot.NodeInfos().Get(name)
     if err != nil {


@@ -35,8 +35,8 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
-        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
     ],


@@ -24,8 +24,8 @@ import (
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-    nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 )

 func nodeWithTaints(nodeName string, taints []v1.Taint) *v1.Node {
@@ -229,7 +229,7 @@ func TestTaintTolerationScore(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             state := framework.NewCycleState()
-            snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
+            snapshot := cache.NewSnapshot(nil, test.nodes)
             fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
             p, _ := New(nil, fh)

View File

@ -6,6 +6,7 @@ go_library(
"cache.go", "cache.go",
"interface.go", "interface.go",
"node_tree.go", "node_tree.go",
"snapshot.go",
], ],
importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache", importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache",
visibility = ["//pkg/scheduler:__subpackages__"], visibility = ["//pkg/scheduler:__subpackages__"],
@ -14,7 +15,6 @@ go_library(
"//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/metrics:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
"//pkg/util/node:go_default_library", "//pkg/util/node:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
@ -30,17 +30,18 @@ go_test(
srcs = [ srcs = [
"cache_test.go", "cache_test.go",
"node_tree_test.go", "node_tree_test.go",
"snapshot_test.go",
], ],
embed = [":go_default_library"], embed = [":go_default_library"],
deps = [ deps = [
"//pkg/features:go_default_library", "//pkg/features:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
"//pkg/scheduler/util:go_default_library", "//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
], ],

View File

@ -31,7 +31,6 @@ import (
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
"k8s.io/kubernetes/pkg/scheduler/metrics" "k8s.io/kubernetes/pkg/scheduler/metrics"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
) )
var ( var (
@ -174,10 +173,10 @@ func (cache *schedulerCache) removeNodeInfoFromList(name string) {
} }
// Snapshot takes a snapshot of the current scheduler cache. This is used for // Snapshot takes a snapshot of the current scheduler cache. This is used for
// debugging purposes only and shouldn't be confused with UpdateNodeInfoSnapshot // debugging purposes only and shouldn't be confused with UpdateSnapshot
// function. // function.
// This method is expensive, and should be only used in non-critical path. // This method is expensive, and should be only used in non-critical path.
func (cache *schedulerCache) Snapshot() *Snapshot { func (cache *schedulerCache) Dump() *Dump {
cache.mu.RLock() cache.mu.RLock()
defer cache.mu.RUnlock() defer cache.mu.RUnlock()
@ -191,23 +190,23 @@ func (cache *schedulerCache) Snapshot() *Snapshot {
assumedPods[k] = v assumedPods[k] = v
} }
return &Snapshot{ return &Dump{
Nodes: nodes, Nodes: nodes,
AssumedPods: assumedPods, AssumedPods: assumedPods,
} }
} }
// UpdateNodeInfoSnapshot takes a snapshot of cached NodeInfo map. This is called at // UpdateSnapshot takes a snapshot of the cached NodeInfo map. This is called at
// the beginning of every scheduling cycle. // the beginning of every scheduling cycle.
// This function tracks the generation number of NodeInfo and updates only the // This function tracks the generation number of NodeInfo and updates only the
// entries of an existing snapshot that have changed after the snapshot was taken. // entries of an existing snapshot that have changed after the snapshot was taken.
func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) error { func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {
cache.mu.Lock() cache.mu.Lock()
defer cache.mu.Unlock() defer cache.mu.Unlock()
balancedVolumesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes) balancedVolumesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes)
// Get the last generation of the snapshot. // Get the last generation of the snapshot.
snapshotGeneration := nodeSnapshot.Generation snapshotGeneration := nodeSnapshot.generation
// NodeInfoList and HavePodsWithAffinityNodeInfoList must be re-created if a node was added // NodeInfoList and HavePodsWithAffinityNodeInfoList must be re-created if a node was added
// or removed from the cache. // or removed from the cache.
@ -229,11 +228,11 @@ func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapsh
node.info.TransientInfo.ResetTransientSchedulerInfo() node.info.TransientInfo.ResetTransientSchedulerInfo()
} }
if np := node.info.Node(); np != nil { if np := node.info.Node(); np != nil {
existing, ok := nodeSnapshot.NodeInfoMap[np.Name] existing, ok := nodeSnapshot.nodeInfoMap[np.Name]
if !ok { if !ok {
updateAllLists = true updateAllLists = true
existing = &schedulernodeinfo.NodeInfo{} existing = &schedulernodeinfo.NodeInfo{}
nodeSnapshot.NodeInfoMap[np.Name] = existing nodeSnapshot.nodeInfoMap[np.Name] = existing
} }
clone := node.info.Clone() clone := node.info.Clone()
// We track nodes that have pods with affinity, here we check if this node changed its // We track nodes that have pods with affinity, here we check if this node changed its
@ -249,10 +248,10 @@ func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapsh
} }
// Update the snapshot generation with the latest NodeInfo generation. // Update the snapshot generation with the latest NodeInfo generation.
if cache.headNode != nil { if cache.headNode != nil {
nodeSnapshot.Generation = cache.headNode.info.GetGeneration() nodeSnapshot.generation = cache.headNode.info.GetGeneration()
} }
if len(nodeSnapshot.NodeInfoMap) > len(cache.nodes) { if len(nodeSnapshot.nodeInfoMap) > len(cache.nodes) {
cache.removeDeletedNodesFromSnapshot(nodeSnapshot) cache.removeDeletedNodesFromSnapshot(nodeSnapshot)
updateAllLists = true updateAllLists = true
} }
@ -261,12 +260,12 @@ func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapsh
cache.updateNodeInfoSnapshotList(nodeSnapshot, updateAllLists) cache.updateNodeInfoSnapshotList(nodeSnapshot, updateAllLists)
} }
if len(nodeSnapshot.NodeInfoList) != cache.nodeTree.numNodes { if len(nodeSnapshot.nodeInfoList) != cache.nodeTree.numNodes {
errMsg := fmt.Sprintf("snapshot state is not consistent, length of NodeInfoList=%v not equal to length of nodes in tree=%v "+ errMsg := fmt.Sprintf("snapshot state is not consistent, length of NodeInfoList=%v not equal to length of nodes in tree=%v "+
", length of NodeInfoMap=%v, length of nodes in cache=%v"+ ", length of NodeInfoMap=%v, length of nodes in cache=%v"+
", trying to recover", ", trying to recover",
len(nodeSnapshot.NodeInfoList), cache.nodeTree.numNodes, len(nodeSnapshot.nodeInfoList), cache.nodeTree.numNodes,
len(nodeSnapshot.NodeInfoMap), len(cache.nodes)) len(nodeSnapshot.nodeInfoMap), len(cache.nodes))
klog.Error(errMsg) klog.Error(errMsg)
// We will try to recover by re-creating the lists for the next scheduling cycle, but still return an // We will try to recover by re-creating the lists for the next scheduling cycle, but still return an
// error to surface the problem, the error will likely cause a failure to the current scheduling cycle. // error to surface the problem, the error will likely cause a failure to the current scheduling cycle.
@ -277,40 +276,40 @@ func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapsh
return nil return nil
} }
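
To make the generation-based contract concrete, here is a sketch (not part of this diff) of the per-cycle call pattern. The helper name, the cycle count parameter, and the elided loop body are illustrative; only Cache, NewEmptySnapshot, and UpdateSnapshot come from this package:

package scheduler // sketch only

import (
	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
)

// snapshotPerCycle illustrates the intended call pattern: one long-lived
// Snapshot refreshed at the start of every scheduling cycle. Generation
// tracking makes each refresh incremental; only NodeInfos whose generation
// advanced since the previous call are re-cloned into the snapshot.
func snapshotPerCycle(c internalcache.Cache, cycles int) error {
	snapshot := internalcache.NewEmptySnapshot()
	for i := 0; i < cycles; i++ {
		if err := c.UpdateSnapshot(snapshot); err != nil {
			return err // e.g. the list/tree length mismatch handled above
		}
		// ... run predicates and priorities against the refreshed snapshot ...
	}
	return nil
}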
func (cache *schedulerCache) updateNodeInfoSnapshotList(nodeSnapshot *nodeinfosnapshot.Snapshot, updateAll bool) { func (cache *schedulerCache) updateNodeInfoSnapshotList(snapshot *Snapshot, updateAll bool) {
nodeSnapshot.HavePodsWithAffinityNodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) snapshot.havePodsWithAffinityNodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)
if updateAll { if updateAll {
// Take a snapshot of the nodes order in the tree // Take a snapshot of the nodes order in the tree
nodeSnapshot.NodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) snapshot.nodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)
for i := 0; i < cache.nodeTree.numNodes; i++ { for i := 0; i < cache.nodeTree.numNodes; i++ {
nodeName := cache.nodeTree.next() nodeName := cache.nodeTree.next()
if n := nodeSnapshot.NodeInfoMap[nodeName]; n != nil { if n := snapshot.nodeInfoMap[nodeName]; n != nil {
nodeSnapshot.NodeInfoList = append(nodeSnapshot.NodeInfoList, n) snapshot.nodeInfoList = append(snapshot.nodeInfoList, n)
if len(n.PodsWithAffinity()) > 0 { if len(n.PodsWithAffinity()) > 0 {
nodeSnapshot.HavePodsWithAffinityNodeInfoList = append(nodeSnapshot.HavePodsWithAffinityNodeInfoList, n) snapshot.havePodsWithAffinityNodeInfoList = append(snapshot.havePodsWithAffinityNodeInfoList, n)
} }
} else { } else {
klog.Errorf("node %q exist in nodeTree but not in NodeInfoMap, this should not happen.", nodeName) klog.Errorf("node %q exist in nodeTree but not in NodeInfoMap, this should not happen.", nodeName)
} }
} }
} else { } else {
for _, n := range nodeSnapshot.NodeInfoList { for _, n := range snapshot.nodeInfoList {
if len(n.PodsWithAffinity()) > 0 { if len(n.PodsWithAffinity()) > 0 {
nodeSnapshot.HavePodsWithAffinityNodeInfoList = append(nodeSnapshot.HavePodsWithAffinityNodeInfoList, n) snapshot.havePodsWithAffinityNodeInfoList = append(snapshot.havePodsWithAffinityNodeInfoList, n)
} }
} }
} }
} }
// If certain nodes were deleted after the last snapshot was taken, we should remove them from the snapshot. // If certain nodes were deleted after the last snapshot was taken, we should remove them from the snapshot.
func (cache *schedulerCache) removeDeletedNodesFromSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) { func (cache *schedulerCache) removeDeletedNodesFromSnapshot(snapshot *Snapshot) {
toDelete := len(nodeSnapshot.NodeInfoMap) - len(cache.nodes) toDelete := len(snapshot.nodeInfoMap) - len(cache.nodes)
for name := range nodeSnapshot.NodeInfoMap { for name := range snapshot.nodeInfoMap {
if toDelete <= 0 { if toDelete <= 0 {
break break
} }
if _, ok := cache.nodes[name]; !ok { if _, ok := cache.nodes[name]; !ok {
delete(nodeSnapshot.NodeInfoMap, name) delete(snapshot.nodeInfoMap, name)
toDelete-- toDelete--
} }
} }

View File

@ -32,7 +32,6 @@ import (
featuregatetesting "k8s.io/component-base/featuregate/testing" featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
schedutil "k8s.io/kubernetes/pkg/scheduler/util" schedutil "k8s.io/kubernetes/pkg/scheduler/util"
) )
@ -398,7 +397,7 @@ func TestSnapshot(t *testing.T) {
} }
} }
snapshot := cache.Snapshot() snapshot := cache.Dump()
if len(snapshot.Nodes) != len(cache.nodes) { if len(snapshot.Nodes) != len(cache.nodes) {
t.Errorf("Unequal number of nodes in the cache and its snapshot. expected: %v, got: %v", len(cache.nodes), len(snapshot.Nodes)) t.Errorf("Unequal number of nodes in the cache and its snapshot. expected: %v, got: %v", len(cache.nodes), len(snapshot.Nodes))
} }
@ -1121,12 +1120,12 @@ func TestNodeOperators(t *testing.T) {
} }
// Step 2: dump cached nodes successfully. // Step 2: dump cached nodes successfully.
cachedNodes := nodeinfosnapshot.NewEmptySnapshot() cachedNodes := NewEmptySnapshot()
if err := cache.UpdateNodeInfoSnapshot(cachedNodes); err != nil { if err := cache.UpdateSnapshot(cachedNodes); err != nil {
t.Error(err) t.Error(err)
} }
newNode, found := cachedNodes.NodeInfoMap[node.Name] newNode, found := cachedNodes.nodeInfoMap[node.Name]
if !found || len(cachedNodes.NodeInfoMap) != 1 { if !found || len(cachedNodes.nodeInfoMap) != 1 {
t.Errorf("failed to dump cached nodes:\n got: %v \nexpected: %v", cachedNodes, cache.nodes) t.Errorf("failed to dump cached nodes:\n got: %v \nexpected: %v", cachedNodes, cache.nodes)
} }
expected.SetGeneration(newNode.GetGeneration()) expected.SetGeneration(newNode.GetGeneration())
@ -1192,8 +1191,7 @@ func TestNodeOperators(t *testing.T) {
} }
} }
// TestSchedulerCache_UpdateNodeInfoSnapshot tests UpdateNodeInfoSnapshot function of cache. func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) {
// Create a few nodes to be used in tests. // Create a few nodes to be used in tests.
nodes := []*v1.Node{} nodes := []*v1.Node{}
for i := 0; i < 10; i++ { for i := 0; i < 10; i++ {
@ -1266,7 +1264,7 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) {
} }
var cache *schedulerCache var cache *schedulerCache
var snapshot *nodeinfosnapshot.Snapshot var snapshot *Snapshot
type operation = func() type operation = func()
addNode := func(i int) operation { addNode := func(i int) operation {
@ -1320,7 +1318,7 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) {
} }
updateSnapshot := func() operation { updateSnapshot := func() operation {
return func() { return func() {
cache.UpdateNodeInfoSnapshot(snapshot) cache.UpdateSnapshot(snapshot)
if err := compareCacheWithNodeInfoSnapshot(cache, snapshot); err != nil { if err := compareCacheWithNodeInfoSnapshot(cache, snapshot); err != nil {
t.Error(err) t.Error(err)
} }
@ -1470,7 +1468,7 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) {
for _, test := range tests { for _, test := range tests {
t.Run(test.name, func(t *testing.T) { t.Run(test.name, func(t *testing.T) {
cache = newSchedulerCache(time.Second, time.Second, nil) cache = newSchedulerCache(time.Second, time.Second, nil)
snapshot = nodeinfosnapshot.NewEmptySnapshot() snapshot = NewEmptySnapshot()
for _, op := range test.operations { for _, op := range test.operations {
op() op()
@ -1493,12 +1491,12 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) {
} }
// Check number of nodes with pods with affinity // Check number of nodes with pods with affinity
if len(snapshot.HavePodsWithAffinityNodeInfoList) != test.expectedHavePodsWithAffinity { if len(snapshot.havePodsWithAffinityNodeInfoList) != test.expectedHavePodsWithAffinity {
t.Errorf("unexpected number of HavePodsWithAffinity nodes. Expected: %v, got: %v", test.expectedHavePodsWithAffinity, len(snapshot.HavePodsWithAffinityNodeInfoList)) t.Errorf("unexpected number of HavePodsWithAffinity nodes. Expected: %v, got: %v", test.expectedHavePodsWithAffinity, len(snapshot.havePodsWithAffinityNodeInfoList))
} }
// Always update the snapshot at the end of operations and compare it. // Always update the snapshot at the end of operations and compare it.
if err := cache.UpdateNodeInfoSnapshot(snapshot); err != nil { if err := cache.UpdateSnapshot(snapshot); err != nil {
t.Error(err) t.Error(err)
} }
if err := compareCacheWithNodeInfoSnapshot(cache, snapshot); err != nil { if err := compareCacheWithNodeInfoSnapshot(cache, snapshot); err != nil {
@ -1508,27 +1506,27 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) {
} }
} }
func compareCacheWithNodeInfoSnapshot(cache *schedulerCache, snapshot *nodeinfosnapshot.Snapshot) error { func compareCacheWithNodeInfoSnapshot(cache *schedulerCache, snapshot *Snapshot) error {
// Compare the map. // Compare the map.
if len(snapshot.NodeInfoMap) != len(cache.nodes) { if len(snapshot.nodeInfoMap) != len(cache.nodes) {
return fmt.Errorf("unexpected number of nodes in the snapshot. Expected: %v, got: %v", len(cache.nodes), len(snapshot.NodeInfoMap)) return fmt.Errorf("unexpected number of nodes in the snapshot. Expected: %v, got: %v", len(cache.nodes), len(snapshot.nodeInfoMap))
} }
for name, ni := range cache.nodes { for name, ni := range cache.nodes {
if !reflect.DeepEqual(snapshot.NodeInfoMap[name], ni.info) { if !reflect.DeepEqual(snapshot.nodeInfoMap[name], ni.info) {
return fmt.Errorf("unexpected node info for node %q. Expected: %v, got: %v", name, ni.info, snapshot.NodeInfoMap[name]) return fmt.Errorf("unexpected node info for node %q. Expected: %v, got: %v", name, ni.info, snapshot.nodeInfoMap[name])
} }
} }
// Compare the lists. // Compare the lists.
if len(snapshot.NodeInfoList) != len(cache.nodes) { if len(snapshot.nodeInfoList) != len(cache.nodes) {
return fmt.Errorf("unexpected number of nodes in NodeInfoList. Expected: %v, got: %v", len(cache.nodes), len(snapshot.NodeInfoList)) return fmt.Errorf("unexpected number of nodes in NodeInfoList. Expected: %v, got: %v", len(cache.nodes), len(snapshot.nodeInfoList))
} }
expectedNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) expectedNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)
expectedHavePodsWithAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) expectedHavePodsWithAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)
for i := 0; i < cache.nodeTree.numNodes; i++ { for i := 0; i < cache.nodeTree.numNodes; i++ {
nodeName := cache.nodeTree.next() nodeName := cache.nodeTree.next()
if n := snapshot.NodeInfoMap[nodeName]; n != nil { if n := snapshot.nodeInfoMap[nodeName]; n != nil {
expectedNodeInfoList = append(expectedNodeInfoList, n) expectedNodeInfoList = append(expectedNodeInfoList, n)
if len(n.PodsWithAffinity()) > 0 { if len(n.PodsWithAffinity()) > 0 {
expectedHavePodsWithAffinityNodeInfoList = append(expectedHavePodsWithAffinityNodeInfoList, n) expectedHavePodsWithAffinityNodeInfoList = append(expectedHavePodsWithAffinityNodeInfoList, n)
@ -1539,14 +1537,14 @@ func compareCacheWithNodeInfoSnapshot(cache *schedulerCache, snapshot *nodeinfos
} }
for i, expected := range expectedNodeInfoList { for i, expected := range expectedNodeInfoList {
got := snapshot.NodeInfoList[i] got := snapshot.nodeInfoList[i]
if expected != got { if expected != got {
return fmt.Errorf("unexpected NodeInfo pointer in NodeInfoList. Expected: %p, got: %p", expected, got) return fmt.Errorf("unexpected NodeInfo pointer in NodeInfoList. Expected: %p, got: %p", expected, got)
} }
} }
for i, expected := range expectedHavePodsWithAffinityNodeInfoList { for i, expected := range expectedHavePodsWithAffinityNodeInfoList {
got := snapshot.HavePodsWithAffinityNodeInfoList[i] got := snapshot.havePodsWithAffinityNodeInfoList[i]
if expected != got { if expected != got {
return fmt.Errorf("unexpected NodeInfo pointer in HavePodsWithAffinityNodeInfoList. Expected: %p, got: %p", expected, got) return fmt.Errorf("unexpected NodeInfo pointer in HavePodsWithAffinityNodeInfoList. Expected: %p, got: %p", expected, got)
} }
@ -1561,8 +1559,8 @@ func BenchmarkUpdate1kNodes30kPods(b *testing.B) {
cache := setupCacheOf1kNodes30kPods(b) cache := setupCacheOf1kNodes30kPods(b)
b.ResetTimer() b.ResetTimer()
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
cachedNodes := nodeinfosnapshot.NewEmptySnapshot() cachedNodes := NewEmptySnapshot()
cache.UpdateNodeInfoSnapshot(cachedNodes) cache.UpdateSnapshot(cachedNodes)
} }
} }

View File

@ -52,15 +52,15 @@ func (c *CacheComparer) Compare() error {
return err return err
} }
snapshot := c.Cache.Snapshot() dump := c.Cache.Dump()
pendingPods := c.PodQueue.PendingPods() pendingPods := c.PodQueue.PendingPods()
if missed, redundant := c.CompareNodes(nodes, snapshot.Nodes); len(missed)+len(redundant) != 0 { if missed, redundant := c.CompareNodes(nodes, dump.Nodes); len(missed)+len(redundant) != 0 {
klog.Warningf("cache mismatch: missed nodes: %s; redundant nodes: %s", missed, redundant) klog.Warningf("cache mismatch: missed nodes: %s; redundant nodes: %s", missed, redundant)
} }
if missed, redundant := c.ComparePods(pods, pendingPods, snapshot.Nodes); len(missed)+len(redundant) != 0 { if missed, redundant := c.ComparePods(pods, pendingPods, dump.Nodes); len(missed)+len(redundant) != 0 {
klog.Warningf("cache mismatch: missed pods: %s; redundant pods: %s", missed, redundant) klog.Warningf("cache mismatch: missed pods: %s; redundant pods: %s", missed, redundant)
} }

View File

@ -43,9 +43,9 @@ func (d *CacheDumper) DumpAll() {
// dumpNodes writes NodeInfo to the scheduler logs. // dumpNodes writes NodeInfo to the scheduler logs.
func (d *CacheDumper) dumpNodes() { func (d *CacheDumper) dumpNodes() {
snapshot := d.cache.Snapshot() dump := d.cache.Dump()
klog.Info("Dump of cached NodeInfo") klog.Info("Dump of cached NodeInfo")
for _, nodeInfo := range snapshot.Nodes { for _, nodeInfo := range dump.Nodes {
klog.Info(d.printNodeInfo(nodeInfo)) klog.Info(d.printNodeInfo(nodeInfo))
} }
} }

View File

@ -8,7 +8,6 @@ go_library(
deps = [ deps = [
"//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
], ],

View File

@ -21,7 +21,6 @@ import (
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
) )
// Cache is used for testing // Cache is used for testing
@ -75,8 +74,8 @@ func (c *Cache) UpdateNode(oldNode, newNode *v1.Node) error { return nil }
// RemoveNode is a fake method for testing. // RemoveNode is a fake method for testing.
func (c *Cache) RemoveNode(node *v1.Node) error { return nil } func (c *Cache) RemoveNode(node *v1.Node) error { return nil }
// UpdateNodeInfoSnapshot is a fake method for testing. // UpdateSnapshot is a fake method for testing.
func (c *Cache) UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) error { func (c *Cache) UpdateSnapshot(snapshot *internalcache.Snapshot) error {
return nil return nil
} }
@ -88,9 +87,9 @@ func (c *Cache) FilteredList(filter schedulerlisters.PodFilter, selector labels.
return nil, nil return nil, nil
} }
// Snapshot is a fake method for testing // Dump is a fake method for testing.
func (c *Cache) Snapshot() *internalcache.Snapshot { func (c *Cache) Dump() *internalcache.Dump {
return &internalcache.Snapshot{} return &internalcache.Dump{}
} }
// GetNodeInfo is a fake method for testing. // GetNodeInfo is a fake method for testing.

View File

@ -20,7 +20,6 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
) )
// Cache collects pods' information and provides node-level aggregated information. // Cache collects pods' information and provides node-level aggregated information.
@ -97,17 +96,17 @@ type Cache interface {
// RemoveNode removes overall information about node. // RemoveNode removes overall information about node.
RemoveNode(node *v1.Node) error RemoveNode(node *v1.Node) error
// UpdateNodeInfoSnapshot updates the passed infoSnapshot to the current contents of Cache. // UpdateSnapshot updates the passed nodeSnapshot to the current contents of Cache.
// The node info contains aggregated information of pods scheduled on this // The node info contains aggregated information of pods scheduled on this
// node, including pods assumed to be scheduled there. // node, including pods assumed to be scheduled there.
UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) error UpdateSnapshot(nodeSnapshot *Snapshot) error
// Snapshot takes a snapshot on current cache // Dump produces a dump of the current cache.
Snapshot() *Snapshot Dump() *Dump
} }
// Snapshot is a snapshot of cache state // Dump is a dump of the cache state.
type Snapshot struct { type Dump struct {
AssumedPods map[string]bool AssumedPods map[string]bool
Nodes map[string]*schedulernodeinfo.NodeInfo Nodes map[string]*schedulernodeinfo.NodeInfo
} }
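
A sketch (not part of this diff) of the debugging-only path the rename is meant to clarify. The helper name is hypothetical; Dump, Nodes, and AssumedPods come from the interface above:

package scheduler // sketch only

import (
	"k8s.io/klog"

	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
)

// logCacheDump shows the intended use of Dump: an expensive, point-in-time
// copy of cache state for debugging, never for the scheduling hot path
// (that is UpdateSnapshot's job).
func logCacheDump(c internalcache.Cache) {
	dump := c.Dump()
	klog.Infof("cache: %d nodes, %d assumed pods", len(dump.Nodes), len(dump.AssumedPods))
}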

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package snapshot package cache
import ( import (
"fmt" "fmt"
@ -29,13 +29,13 @@ import (
// Snapshot is a snapshot of cache NodeInfo and NodeTree order. The scheduler takes a // Snapshot is a snapshot of cache NodeInfo and NodeTree order. The scheduler takes a
// snapshot at the beginning of each scheduling cycle and uses it for its operations in that cycle. // snapshot at the beginning of each scheduling cycle and uses it for its operations in that cycle.
type Snapshot struct { type Snapshot struct {
// NodeInfoMap a map of node name to a snapshot of its NodeInfo. // nodeInfoMap is a map of node name to a snapshot of its NodeInfo.
NodeInfoMap map[string]*schedulernodeinfo.NodeInfo nodeInfoMap map[string]*schedulernodeinfo.NodeInfo
// NodeInfoList is the list of nodes as ordered in the cache's nodeTree. // nodeInfoList is the list of nodes as ordered in the cache's nodeTree.
NodeInfoList []*schedulernodeinfo.NodeInfo nodeInfoList []*schedulernodeinfo.NodeInfo
// HavePodsWithAffinityNodeInfoList is the list of nodes with at least one pod declaring affinity terms. // havePodsWithAffinityNodeInfoList is the list of nodes with at least one pod declaring affinity terms.
HavePodsWithAffinityNodeInfoList []*schedulernodeinfo.NodeInfo havePodsWithAffinityNodeInfoList []*schedulernodeinfo.NodeInfo
Generation int64 generation int64
} }
var _ schedulerlisters.SharedLister = &Snapshot{} var _ schedulerlisters.SharedLister = &Snapshot{}
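
Since the struct fields are now unexported, outside callers go through the lister accessors. A minimal sketch (not part of this diff; the helper name is hypothetical):

package scheduler // sketch only

import (
	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// snapshotNodes demonstrates the accessor style: callers outside the package
// reach node data through the NodeInfos and Pods listers instead of touching
// nodeInfoMap or nodeInfoList directly.
func snapshotNodes(s *internalcache.Snapshot) ([]*schedulernodeinfo.NodeInfo, error) {
	return s.NodeInfos().List()
}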
@ -43,12 +43,13 @@ var _ schedulerlisters.SharedLister = &Snapshot{}
// NewEmptySnapshot initializes a Snapshot struct and returns it. // NewEmptySnapshot initializes a Snapshot struct and returns it.
func NewEmptySnapshot() *Snapshot { func NewEmptySnapshot() *Snapshot {
return &Snapshot{ return &Snapshot{
NodeInfoMap: make(map[string]*schedulernodeinfo.NodeInfo), nodeInfoMap: make(map[string]*schedulernodeinfo.NodeInfo),
} }
} }
// NewSnapshot initializes a Snapshot struct and returns it. // NewSnapshot initializes a Snapshot struct and returns it.
func NewSnapshot(nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) *Snapshot { func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot {
nodeInfoMap := createNodeInfoMap(pods, nodes)
nodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap)) nodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap))
havePodsWithAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap)) havePodsWithAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap))
for _, v := range nodeInfoMap { for _, v := range nodeInfoMap {
@ -59,16 +60,17 @@ func NewSnapshot(nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) *Snapshot {
} }
s := NewEmptySnapshot() s := NewEmptySnapshot()
s.NodeInfoMap = nodeInfoMap s.nodeInfoMap = nodeInfoMap
s.NodeInfoList = nodeInfoList s.nodeInfoList = nodeInfoList
s.HavePodsWithAffinityNodeInfoList = havePodsWithAffinityNodeInfoList s.havePodsWithAffinityNodeInfoList = havePodsWithAffinityNodeInfoList
return s return s
} }
// CreateNodeInfoMap obtains a list of pods and pivots that list into a map where the keys are node names // createNodeInfoMap obtains a list of pods and pivots that list into a map
// and the values are the aggregated information for that node. // where the keys are node names and the values are the aggregated information
func CreateNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulernodeinfo.NodeInfo { // for that node.
func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulernodeinfo.NodeInfo {
nodeNameToInfo := make(map[string]*schedulernodeinfo.NodeInfo) nodeNameToInfo := make(map[string]*schedulernodeinfo.NodeInfo)
for _, pod := range pods { for _, pod := range pods {
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
@ -124,42 +126,40 @@ func createImageExistenceMap(nodes []*v1.Node) map[string]sets.String {
// Pods returns a PodLister // Pods returns a PodLister
func (s *Snapshot) Pods() schedulerlisters.PodLister { func (s *Snapshot) Pods() schedulerlisters.PodLister {
return &podLister{snapshot: s} return podLister(s.nodeInfoList)
} }
// NodeInfos returns a NodeInfoLister. // NodeInfos returns a NodeInfoLister.
func (s *Snapshot) NodeInfos() schedulerlisters.NodeInfoLister { func (s *Snapshot) NodeInfos() schedulerlisters.NodeInfoLister {
return &nodeInfoLister{snapshot: s} return s
} }
// NumNodes returns the number of nodes in the snapshot. // NumNodes returns the number of nodes in the snapshot.
func (s *Snapshot) NumNodes() int { func (s *Snapshot) NumNodes() int {
return len(s.NodeInfoList) return len(s.nodeInfoList)
} }
type podLister struct { type podLister []*schedulernodeinfo.NodeInfo
snapshot *Snapshot
}
// List returns the list of pods in the snapshot. // List returns the list of pods in the snapshot.
func (p *podLister) List(selector labels.Selector) ([]*v1.Pod, error) { func (p podLister) List(selector labels.Selector) ([]*v1.Pod, error) {
alwaysTrue := func(p *v1.Pod) bool { return true } alwaysTrue := func(*v1.Pod) bool { return true }
return p.FilteredList(alwaysTrue, selector) return p.FilteredList(alwaysTrue, selector)
} }
// FilteredList returns a filtered list of pods in the snapshot. // FilteredList returns a filtered list of pods in the snapshot.
func (p *podLister) FilteredList(podFilter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { func (p podLister) FilteredList(filter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
// podFilter is expected to return true for most or all of the pods. We // filter is expected to return true for most or all of the pods. We
// can avoid expensive array growth without wasting too much memory by // can avoid expensive array growth without wasting too much memory by
// pre-allocating capacity. // pre-allocating capacity.
maxSize := 0 maxSize := 0
for _, n := range p.snapshot.NodeInfoList { for _, n := range p {
maxSize += len(n.Pods()) maxSize += len(n.Pods())
} }
pods := make([]*v1.Pod, 0, maxSize) pods := make([]*v1.Pod, 0, maxSize)
for _, n := range p.snapshot.NodeInfoList { for _, n := range p {
for _, pod := range n.Pods() { for _, pod := range n.Pods() {
if podFilter(pod) && selector.Matches(labels.Set(pod.Labels)) { if filter(pod) && selector.Matches(labels.Set(pod.Labels)) {
pods = append(pods, pod) pods = append(pods, pod)
} }
} }
@ -167,23 +167,19 @@ func (p *podLister) FilteredList(podFilter schedulerlisters.PodFilter, selector
return pods, nil return pods, nil
} }
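
A caller-side sketch of the lister (not part of this diff; the helper name and filter are illustrative), assuming the pre-allocation behavior documented above:

package scheduler // sketch only

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
)

// runningPods sketches a caller of FilteredList: the lister walks every
// NodeInfo in the snapshot once and pre-allocates capacity on the assumption
// that the filter accepts most pods.
func runningPods(s *internalcache.Snapshot) ([]*v1.Pod, error) {
	isRunning := func(p *v1.Pod) bool { return p.Status.Phase == v1.PodRunning }
	return s.Pods().FilteredList(isRunning, labels.Everything())
}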
type nodeInfoLister struct {
snapshot *Snapshot
}
// List returns the list of nodes in the snapshot. // List returns the list of nodes in the snapshot.
func (n *nodeInfoLister) List() ([]*schedulernodeinfo.NodeInfo, error) { func (s *Snapshot) List() ([]*schedulernodeinfo.NodeInfo, error) {
return n.snapshot.NodeInfoList, nil return s.nodeInfoList, nil
} }
// HavePodsWithAffinityList returns the list of nodes with at least one pod with inter-pod affinity. // HavePodsWithAffinityList returns the list of nodes with at least one pod with inter-pod affinity.
func (n *nodeInfoLister) HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error) { func (s *Snapshot) HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error) {
return n.snapshot.HavePodsWithAffinityNodeInfoList, nil return s.havePodsWithAffinityNodeInfoList, nil
} }
// Returns the NodeInfo of the given node name. // Get returns the NodeInfo of the given node name.
func (n *nodeInfoLister) Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) { func (s *Snapshot) Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) {
if v, ok := n.snapshot.NodeInfoMap[nodeName]; ok && v.Node() != nil { if v, ok := s.nodeInfoMap[nodeName]; ok && v.Node() != nil {
return v, nil return v, nil
} }
return nil, fmt.Errorf("nodeinfo not found for node name %q", nodeName) return nil, fmt.Errorf("nodeinfo not found for node name %q", nodeName)

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package snapshot package cache
import ( import (
"reflect" "reflect"

View File

@ -36,8 +36,8 @@ go_test(
"//pkg/scheduler/apis/config:go_default_library", "//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/plugins:go_default_library", "//pkg/scheduler/framework/plugins:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/metrics:go_default_library", "//pkg/scheduler/metrics:go_default_library",
"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
"//pkg/scheduler/util:go_default_library", "//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@ -36,8 +36,8 @@ import (
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config" schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
frameworkplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins" frameworkplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
"k8s.io/kubernetes/pkg/scheduler/metrics" "k8s.io/kubernetes/pkg/scheduler/metrics"
nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
"k8s.io/kubernetes/pkg/scheduler/util" "k8s.io/kubernetes/pkg/scheduler/util"
) )
@ -174,7 +174,7 @@ func newDefaultFramework() framework.Framework {
pls, pls,
framework.WithClientSet(fakeClient), framework.WithClientSet(fakeClient),
framework.WithInformerFactory(informers.NewSharedInformerFactory(fakeClient, 0)), framework.WithInformerFactory(informers.NewSharedInformerFactory(fakeClient, 0)),
framework.WithSnapshotSharedLister(nodeinfosnapshot.NewEmptySnapshot()), framework.WithSnapshotSharedLister(cache.NewEmptySnapshot()),
) )
if err != nil { if err != nil {
panic(err) panic(err)

View File

@ -46,10 +46,7 @@ filegroup(
filegroup( filegroup(
name = "all-srcs", name = "all-srcs",
srcs = [ srcs = [":package-srcs"],
":package-srcs",
"//pkg/scheduler/nodeinfo/snapshot:all-srcs",
],
tags = ["automanaged"], tags = ["automanaged"],
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
) )

View File

@ -1,41 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["snapshot_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = ["snapshot.go"],
importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot",
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)

View File

@ -45,7 +45,6 @@ import (
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
"k8s.io/kubernetes/pkg/scheduler/metrics" "k8s.io/kubernetes/pkg/scheduler/metrics"
nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
"k8s.io/kubernetes/pkg/scheduler/volumebinder" "k8s.io/kubernetes/pkg/scheduler/volumebinder"
) )
@ -273,7 +272,7 @@ func New(client clientset.Interface,
return nil, err return nil, err
} }
snapshot := nodeinfosnapshot.NewEmptySnapshot() snapshot := internalcache.NewEmptySnapshot()
configurator := &Configurator{ configurator := &Configurator{
client: client, client: client,

View File

@ -55,7 +55,6 @@ import (
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake" fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
st "k8s.io/kubernetes/pkg/scheduler/testing" st "k8s.io/kubernetes/pkg/scheduler/testing"
"k8s.io/kubernetes/pkg/scheduler/volumebinder" "k8s.io/kubernetes/pkg/scheduler/volumebinder"
) )
@ -686,7 +685,7 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.C
algo := core.NewGenericScheduler( algo := core.NewGenericScheduler(
scache, scache,
internalqueue.NewSchedulingQueue(nil), internalqueue.NewSchedulingQueue(nil),
nodeinfosnapshot.NewEmptySnapshot(), internalcache.NewEmptySnapshot(),
fwk, fwk,
[]algorithm.SchedulerExtender{}, []algorithm.SchedulerExtender{},
nil, nil,
@ -745,7 +744,7 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc
algo := core.NewGenericScheduler( algo := core.NewGenericScheduler(
scache, scache,
queue, queue,
nodeinfosnapshot.NewEmptySnapshot(), internalcache.NewEmptySnapshot(),
fwk, fwk,
[]algorithm.SchedulerExtender{}, []algorithm.SchedulerExtender{},
nil, nil,

View File

@ -774,9 +774,9 @@ func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) {
func waitForSchedulerCacheCleanup(sched *scheduler.Scheduler, t *testing.T) { func waitForSchedulerCacheCleanup(sched *scheduler.Scheduler, t *testing.T) {
schedulerCacheIsEmpty := func() (bool, error) { schedulerCacheIsEmpty := func() (bool, error) {
snapshot := sched.Cache().Snapshot() dump := sched.Cache().Dump()
return len(snapshot.Nodes) == 0 && len(snapshot.AssumedPods) == 0, nil return len(dump.Nodes) == 0 && len(dump.AssumedPods) == 0, nil
} }
if err := wait.Poll(time.Second, wait.ForeverTestTimeout, schedulerCacheIsEmpty); err != nil { if err := wait.Poll(time.Second, wait.ForeverTestTimeout, schedulerCacheIsEmpty); err != nil {