Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 02:41:25 +00:00)

Merge pull request #87165 from alculquicondor/cleanup/mv_snapshot_2

Move Snapshot to internal/cache

Commit 3538320d74
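At call sites, the change collapses the old two-step construction `nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(pods, nodes))` into a single `internalcache.NewSnapshot(pods, nodes)` call, and renames `Cache.UpdateNodeInfoSnapshot` to `Cache.UpdateSnapshot`. The sketch below only mirrors the shape of the new constructor with simplified stand-in types; `Pod`, `Node`, and `NodeInfo` here are toy structs for illustration, not the real scheduler types.

```go
package main

import "fmt"

// Stand-ins for the scheduler's types; simplified for illustration.
type Pod struct{ Name, Node string }
type Node struct{ Name string }
type NodeInfo struct{ Pods []Pod }

// Snapshot now lives alongside the cache (internal/cache) instead of
// nodeinfo/snapshot, and is built directly from pods and nodes.
type Snapshot struct{ nodeInfoMap map[string]*NodeInfo }

// NewSnapshot replaces the old two-step pattern
// NewSnapshot(CreateNodeInfoMap(pods, nodes)) with a single call.
func NewSnapshot(pods []Pod, nodes []Node) *Snapshot {
	m := make(map[string]*NodeInfo, len(nodes))
	for _, n := range nodes {
		m[n.Name] = &NodeInfo{}
	}
	for _, p := range pods {
		if ni, ok := m[p.Node]; ok {
			ni.Pods = append(ni.Pods, p)
		}
	}
	return &Snapshot{nodeInfoMap: m}
}

func main() {
	nodes := []Node{{Name: "node-1"}, {Name: "node-2"}}
	pods := []Pod{{Name: "p1", Node: "node-1"}}
	// New call shape used throughout the diff:
	//   snapshot := internalcache.NewSnapshot(test.pods, nodes)
	s := NewSnapshot(pods, nodes)
	fmt.Println(len(s.nodeInfoMap), "nodes in snapshot")
}
```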
@@ -27,7 +27,6 @@ go_library(
 "//pkg/scheduler/internal/cache/debugger:go_default_library",
 "//pkg/scheduler/internal/queue:go_default_library",
 "//pkg/scheduler/metrics:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
 "//pkg/scheduler/volumebinder:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/api/storage/v1:go_default_library",
@@ -81,7 +80,6 @@ go_test(
 "//pkg/scheduler/internal/queue:go_default_library",
 "//pkg/scheduler/listers:go_default_library",
 "//pkg/scheduler/nodeinfo:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
 "//pkg/scheduler/testing:go_default_library",
 "//pkg/scheduler/volumebinder:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -19,7 +19,6 @@ go_library(
 "//pkg/scheduler/listers:go_default_library",
 "//pkg/scheduler/metrics:go_default_library",
 "//pkg/scheduler/nodeinfo:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
 "//pkg/scheduler/util:go_default_library",
 "//pkg/scheduler/volumebinder:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -68,7 +67,6 @@ go_test(
 "//pkg/scheduler/listers:go_default_library",
 "//pkg/scheduler/listers/fake:go_default_library",
 "//pkg/scheduler/nodeinfo:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
 "//pkg/scheduler/testing:go_default_library",
 "//pkg/scheduler/util:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -45,7 +45,6 @@ import (
 "k8s.io/kubernetes/pkg/scheduler/listers"
 "k8s.io/kubernetes/pkg/scheduler/metrics"
 schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 "k8s.io/kubernetes/pkg/scheduler/util"
 "k8s.io/kubernetes/pkg/scheduler/volumebinder"
 utiltrace "k8s.io/utils/trace"
@@ -138,7 +137,7 @@ type genericScheduler struct {
 schedulingQueue internalqueue.SchedulingQueue
 framework framework.Framework
 extenders []algorithm.SchedulerExtender
-nodeInfoSnapshot *nodeinfosnapshot.Snapshot
+nodeInfoSnapshot *internalcache.Snapshot
 volumeBinder *volumebinder.VolumeBinder
 pvcLister corelisters.PersistentVolumeClaimLister
 pdbLister policylisters.PodDisruptionBudgetLister
@@ -152,7 +151,7 @@ type genericScheduler struct {
 // functions.
 func (g *genericScheduler) Snapshot() error {
 // Used for all fit and priority funcs.
-return g.cache.UpdateNodeInfoSnapshot(g.nodeInfoSnapshot)
+return g.cache.UpdateSnapshot(g.nodeInfoSnapshot)
 }

 // Framework returns the framework instance.
@@ -1097,7 +1096,7 @@ func podPassesBasicChecks(pod *v1.Pod, pvcLister corelisters.PersistentVolumeCla
 func NewGenericScheduler(
 cache internalcache.Cache,
 podQueue internalqueue.SchedulingQueue,
-nodeInfoSnapshot *nodeinfosnapshot.Snapshot,
+nodeInfoSnapshot *internalcache.Snapshot,
 framework framework.Framework,
 extenders []algorithm.SchedulerExtender,
 volumeBinder *volumebinder.VolumeBinder,
@@ -58,7 +58,6 @@ import (
 fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
 "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 st "k8s.io/kubernetes/pkg/scheduler/testing"
 schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )
@@ -293,7 +292,7 @@ func (pl *falseMapPlugin) ScoreExtensions() framework.ScoreExtensions {
 return nil
 }

-var emptySnapshot = nodeinfosnapshot.NewEmptySnapshot()
+var emptySnapshot = internalcache.NewEmptySnapshot()

 func makeNodeList(nodeNames []string) []*v1.Node {
 result := make([]*v1.Node, 0, len(nodeNames))
@@ -796,7 +795,7 @@ func TestGenericScheduler(t *testing.T) {
 for _, f := range test.registerPlugins {
 f(&registry, plugins, pluginConfigs)
 }
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+snapshot := internalcache.NewSnapshot(test.pods, nodes)
 fwk, _ := framework.NewFramework(registry, plugins, pluginConfigs, framework.WithSnapshotSharedLister(snapshot))

 var pvcs []v1.PersistentVolumeClaim
@@ -855,7 +854,7 @@ func makeScheduler(nodes []*v1.Node, fns ...st.RegisterPluginFunc) *genericSched
 fwk,
 nil, nil, nil, nil, false,
 schedulerapi.DefaultPercentageOfNodesToScore, false)
-cache.UpdateNodeInfoSnapshot(s.(*genericScheduler).nodeInfoSnapshot)
+cache.UpdateSnapshot(s.(*genericScheduler).nodeInfoSnapshot)
 return s.(*genericScheduler)

 }
@@ -984,7 +983,7 @@ func TestFindFitPredicateCallCounts(t *testing.T) {
 fwk,
 nil, nil, nil, nil, false,
 schedulerapi.DefaultPercentageOfNodesToScore, false).(*genericScheduler)
-cache.UpdateNodeInfoSnapshot(scheduler.nodeInfoSnapshot)
+cache.UpdateSnapshot(scheduler.nodeInfoSnapshot)
 queue.UpdateNominatedPodForNode(&v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("nominated")}, Spec: v1.PodSpec{Priority: &midPriority}}, "1")

 _, _, err := scheduler.findNodesThatFitPod(context.Background(), framework.NewCycleState(), test.pod)
@@ -1115,7 +1114,7 @@ func TestZeroRequest(t *testing.T) {
 client := clientsetfake.NewSimpleClientset()
 informerFactory := informers.NewSharedInformerFactory(client, 0)

-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+snapshot := internalcache.NewSnapshot(test.pods, test.nodes)

 registry := framework.Registry{}
 // TODO: instantiate the plugins dynamically.
@@ -1604,7 +1603,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 f(&registry, plugins, pluginConfigs)
 }
 // Use a real snapshot since it's needed in some Filter Plugin (e.g., PodAffinity)
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+snapshot := internalcache.NewSnapshot(test.pods, nodes)
 fwk, _ := framework.NewFramework(registry, plugins, pluginConfigs, framework.WithSnapshotSharedLister(snapshot))

 scheduler := NewGenericScheduler(
@@ -1880,7 +1879,7 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 for _, n := range test.nodes {
 nodes = append(nodes, makeNode(n, schedutil.DefaultMilliCPURequest*5, schedutil.DefaultMemoryRequest*5))
 }
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+snapshot := internalcache.NewSnapshot(test.pods, nodes)
 registry := framework.Registry{}
 // TODO: instantiate the plugins dynamically.
 plugins := &schedulerapi.Plugins{
@@ -2393,7 +2392,7 @@ func TestPreempt(t *testing.T) {
 for _, f := range test.registerPlugins {
 f(&registry, plugins, pluginConfigs)
 }
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+snapshot := internalcache.NewSnapshot(test.pods, nodes)
 fwk, _ := framework.NewFramework(registry, plugins, pluginConfigs, framework.WithSnapshotSharedLister(snapshot))

 scheduler := NewGenericScheduler(
@@ -2555,7 +2554,7 @@ func TestFairEvaluationForNodes(t *testing.T) {
 }
 }

-func nodesToNodeInfos(nodes []*v1.Node, snapshot *nodeinfosnapshot.Snapshot) ([]*schedulernodeinfo.NodeInfo, error) {
+func nodesToNodeInfos(nodes []*v1.Node, snapshot *internalcache.Snapshot) ([]*schedulernodeinfo.NodeInfo, error) {
 var nodeInfos []*schedulernodeinfo.NodeInfo
 for _, n := range nodes {
 nodeInfo, err := snapshot.NodeInfos().Get(n.Name)
@@ -23,7 +23,6 @@ import (
 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake"
 )
@@ -50,7 +50,6 @@ import (
 internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 cachedebugger "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger"
 internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 "k8s.io/kubernetes/pkg/scheduler/volumebinder"
 )

@@ -107,7 +106,7 @@ type Configurator struct {
 registry framework.Registry
 plugins *schedulerapi.Plugins
 pluginConfig []schedulerapi.PluginConfig
-nodeInfoSnapshot *nodeinfosnapshot.Snapshot
+nodeInfoSnapshot *internalcache.Snapshot
 }

 // create a scheduler from a set of registered plugins.
@@ -51,7 +51,6 @@ import (
 internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 "k8s.io/kubernetes/pkg/scheduler/listers"
 schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 )

 const (
@@ -432,7 +431,7 @@ func newConfigFactoryWithFrameworkRegistry(
 client clientset.Interface, hardPodAffinitySymmetricWeight int32, stopCh <-chan struct{},
 registry framework.Registry) *Configurator {
 informerFactory := informers.NewSharedInformerFactory(client, 0)
-snapshot := nodeinfosnapshot.NewEmptySnapshot()
+snapshot := internalcache.NewEmptySnapshot()
 return &Configurator{
 client: client,
 informerFactory: informerFactory,
@@ -29,7 +29,7 @@ go_test(
 embed = [":go_default_library"],
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
+"//pkg/scheduler/internal/cache:go_default_library",
 "//pkg/scheduler/testing:go_default_library",
 "//staging/src/k8s.io/api/apps/v1:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",

@@ -24,7 +24,7 @@ import (
 "k8s.io/client-go/informers"
 clientsetfake "k8s.io/client-go/kubernetes/fake"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 st "k8s.io/kubernetes/pkg/scheduler/testing"
 )

@@ -52,7 +52,7 @@ func BenchmarkTestSelectorSpreadPriority(b *testing.B) {
 b.Run(tt.name, func(b *testing.B) {
 pod := st.MakePod().Name("p").Label("foo", "").Obj()
 existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.allNodesNum)
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes))
+snapshot := cache.NewSnapshot(existingPods, allNodes)
 services := &v1.ServiceList{
 Items: []v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": ""}}}},
 }
@@ -30,7 +30,7 @@ import (
 "k8s.io/client-go/informers"
 clientsetfake "k8s.io/client-go/kubernetes/fake"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 func controllerRef(kind, name, uid string) []metav1.OwnerReference {
@@ -343,7 +343,7 @@ func TestDefaultPodTopologySpreadScore(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 nodes := makeNodeList(test.nodes)
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+snapshot := cache.NewSnapshot(test.pods, nodes)
 ctx := context.Background()
 informerFactory, err := populateAndStartInformers(ctx, test.rcs, test.rss, test.services, test.sss)
 if err != nil {
@@ -596,7 +596,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 nodes := makeLabeledNodeList(labeledNodes)
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+snapshot := cache.NewSnapshot(test.pods, nodes)
 ctx := context.Background()
 informerFactory, err := populateAndStartInformers(ctx, test.rcs, test.rss, test.services, test.sss)
 if err != nil {

@@ -20,7 +20,7 @@ go_test(
 embed = [":go_default_library"],
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
+"//pkg/scheduler/internal/cache:go_default_library",
 "//pkg/util/parsers:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -26,7 +26,7 @@ import (
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 "k8s.io/kubernetes/pkg/util/parsers"
 )

@@ -186,7 +186,7 @@ func TestImageLocalityPriority(t *testing.T) {

 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
+snapshot := cache.NewSnapshot(nil, test.nodes)

 state := framework.NewCycleState()

@@ -33,8 +33,8 @@ go_test(
 embed = [":go_default_library"],
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
+"//pkg/scheduler/internal/cache:go_default_library",
 "//pkg/scheduler/nodeinfo:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@@ -24,8 +24,8 @@ import (
 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 )

 var (
@@ -777,7 +777,7 @@ func TestRequiredAffinitySingleNode(t *testing.T) {

 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, []*v1.Node{test.node}))
+snapshot := cache.NewSnapshot(test.pods, []*v1.Node{test.node})
 p := &InterPodAffinity{
 sharedLister: snapshot,
 }
@@ -1614,7 +1614,7 @@ func TestRequiredAffinityMultipleNodes(t *testing.T) {

 for indexTest, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+snapshot := cache.NewSnapshot(test.pods, test.nodes)
 for indexNode, node := range test.nodes {
 p := &InterPodAffinity{
 sharedLister: snapshot,
@@ -1736,7 +1736,7 @@ func TestPreFilterDisabled(t *testing.T) {

 for indexTest, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+snapshot := cache.NewSnapshot(test.pods, test.nodes)
 for indexNode, node := range test.nodes {
 p := &InterPodAffinity{
 sharedLister: snapshot,
@@ -2011,8 +2011,8 @@ func TestPreFilterStateAddRemovePod(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 // getMeta creates predicate meta data given the list of pods.
-getState := func(pods []*v1.Pod) (*InterPodAffinity, *framework.CycleState, *preFilterState, *nodeinfosnapshot.Snapshot) {
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(pods, test.nodes))
+getState := func(pods []*v1.Pod) (*InterPodAffinity, *framework.CycleState, *preFilterState, *cache.Snapshot) {
+snapshot := cache.NewSnapshot(pods, test.nodes)

 p := &InterPodAffinity{
 sharedLister: snapshot,
@@ -2298,7 +2298,7 @@ func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) {
 }
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
+s := cache.NewSnapshot(tt.existingPods, tt.nodes)
 l, _ := s.NodeInfos().List()
 gotAffinityPodsMap, gotAntiAffinityPodsMap, err := getTPMapMatchingIncomingAffinityAntiAffinity(tt.pod, l)
 if (err != nil) != tt.wantErr {
@@ -2315,7 +2315,7 @@ func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) {
 }
 }

-func mustGetNodeInfo(t *testing.T, snapshot *nodeinfosnapshot.Snapshot, name string) *nodeinfo.NodeInfo {
+func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *nodeinfo.NodeInfo {
 t.Helper()
 nodeInfo, err := snapshot.NodeInfos().Get(name)
 if err != nil {
@@ -26,7 +26,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 func TestPreferredAffinity(t *testing.T) {
@@ -517,7 +517,7 @@ func TestPreferredAffinity(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 state := framework.NewCycleState()
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+snapshot := cache.NewSnapshot(test.pods, test.nodes)
 p := &InterPodAffinity{
 sharedLister: snapshot,
 hardPodAffinityWeight: 1,
@@ -623,7 +623,7 @@ func TestPreferredAffinityWithHardPodAffinitySymmetricWeight(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 state := framework.NewCycleState()
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+snapshot := cache.NewSnapshot(test.pods, test.nodes)
 fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))

 args := &runtime.Unknown{Raw: []byte(fmt.Sprintf(`{"hardPodAffinityWeight":%d}`, test.hardPodAffinityWeight))}

@@ -37,8 +37,8 @@ go_test(
 deps = [
 "//pkg/apis/core:go_default_library",
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
+"//pkg/scheduler/internal/cache:go_default_library",
 "//pkg/scheduler/nodeinfo:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 ],
@@ -25,8 +25,8 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 api "k8s.io/kubernetes/pkg/apis/core"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 )

 // TODO: Add test case for RequiredDuringSchedulingRequiredDuringExecution after it's implemented.
@@ -849,7 +849,7 @@ func TestNodeAffinityPriority(t *testing.T) {
 t.Run(test.name, func(t *testing.T) {
 state := framework.NewCycleState()

-fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))))
+fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewSnapshot(nil, test.nodes)))
 p, _ := New(nil, fh)
 var gotList framework.NodeScoreList
 for _, n := range test.nodes {

@@ -20,8 +20,8 @@ go_test(
 embed = [":go_default_library"],
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
+"//pkg/scheduler/internal/cache:go_default_library",
 "//pkg/scheduler/nodeinfo:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@@ -24,8 +24,8 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/runtime"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 )

 func TestValidateNodeLabelArgs(t *testing.T) {
@@ -227,7 +227,7 @@ func TestNodeLabelScore(t *testing.T) {
 t.Run(test.name, func(t *testing.T) {
 state := framework.NewCycleState()
 node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: map[string]string{"foo": "", "bar": ""}}}
-fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, []*v1.Node{node}))))
+fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewSnapshot(nil, []*v1.Node{node})))
 args := &runtime.Unknown{Raw: []byte(test.rawArgs)}
 p, err := New(args, fh)
 if err != nil {
@@ -262,7 +262,7 @@ func TestNodeLabelFilterWithoutNode(t *testing.T) {

 func TestNodeLabelScoreWithoutNode(t *testing.T) {
 t.Run("node does not exist", func(t *testing.T) {
-fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewEmptySnapshot()))
+fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewEmptySnapshot()))
 p, err := New(nil, fh)
 if err != nil {
 t.Fatalf("Failed to create plugin: %+v", err)

@@ -20,7 +20,7 @@ go_test(
 embed = [":go_default_library"],
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
+"//pkg/scheduler/internal/cache:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 ],
@@ -24,7 +24,7 @@ import (
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 func TestNodePreferAvoidPods(t *testing.T) {
@@ -143,7 +143,7 @@ func TestNodePreferAvoidPods(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 state := framework.NewCycleState()
-fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))))
+fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(cache.NewSnapshot(nil, test.nodes)))
 p, _ := New(nil, fh)
 var gotList framework.NodeScoreList
 for _, n := range test.nodes {

@@ -60,8 +60,8 @@ go_test(
 "//pkg/apis/core/v1/helper:go_default_library",
 "//pkg/features:go_default_library",
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
+"//pkg/scheduler/internal/cache:go_default_library",
 "//pkg/scheduler/nodeinfo:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -28,7 +28,7 @@ import (
 featuregatetesting "k8s.io/component-base/featuregate/testing"
 "k8s.io/kubernetes/pkg/features"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 // getExistingVolumeCountForNode gets the current number of volumes on node.
@@ -379,7 +379,7 @@ func TestNodeResourcesBalancedAllocation(t *testing.T) {

 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+snapshot := cache.NewSnapshot(test.pods, test.nodes)
 if len(test.pod.Spec.Volumes) > 0 {
 maxVolumes := 5
 nodeInfoList, _ := snapshot.NodeInfos().List()

@@ -25,7 +25,7 @@ import (
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 func TestNodeResourcesLeastAllocated(t *testing.T) {
@@ -233,7 +233,7 @@ func TestNodeResourcesLeastAllocated(t *testing.T) {

 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+snapshot := cache.NewSnapshot(test.pods, test.nodes)
 fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 p, _ := NewLeastAllocated(nil, fh)
 for i := range test.nodes {
@@ -25,7 +25,7 @@ import (
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 func TestNodeResourcesMostAllocated(t *testing.T) {
@@ -196,7 +196,7 @@ func TestNodeResourcesMostAllocated(t *testing.T) {

 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+snapshot := cache.NewSnapshot(test.pods, test.nodes)
 fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 p, _ := NewMostAllocated(nil, fh)
 for i := range test.nodes {

@@ -26,7 +26,7 @@ import (
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/apimachinery/pkg/runtime"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 func TestRequestedToCapacityRatio(t *testing.T) {
@@ -65,7 +65,7 @@ func TestRequestedToCapacityRatio(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 state := framework.NewCycleState()
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.scheduledPods, test.nodes))
+snapshot := cache.NewSnapshot(test.scheduledPods, test.nodes)
 fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 args := &runtime.Unknown{Raw: []byte(`{"shape" : [{"utilization" : 0, "score" : 10}, {"utilization" : 100, "score" : 0}], "resources" : [{"name" : "memory", "weight" : 1}, {"name" : "cpu", "weight" : 1}]}`)}
 p, err := NewRequestedToCapacityRatio(args, fh)
@@ -349,7 +349,7 @@ func TestResourceBinPackingSingleExtended(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 state := framework.NewCycleState()
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+snapshot := cache.NewSnapshot(test.pods, test.nodes)
 fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 args := &runtime.Unknown{Raw: []byte(`{"shape" : [{"utilization" : 0, "score" : 0}, {"utilization" : 100, "score" : 1}], "resources" : [{"name" : "intel.com/foo", "weight" : 1}]}`)}
 p, err := NewRequestedToCapacityRatio(args, fh)
@@ -584,7 +584,7 @@ func TestResourceBinPackingMultipleExtended(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 state := framework.NewCycleState()
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
+snapshot := cache.NewSnapshot(test.pods, test.nodes)
 fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 args := &runtime.Unknown{Raw: []byte(`{"shape" : [{"utilization" : 0, "score" : 0}, {"utilization" : 100, "score" : 1}], "resources" : [{"name" : "intel.com/foo", "weight" : 3}, {"name" : "intel.com/bar", "weight": 5}]}`)}
 p, err := NewRequestedToCapacityRatio(args, fh)
@@ -23,7 +23,7 @@ import (
 v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 )

 func TestResourceLimits(t *testing.T) {
@@ -146,7 +146,7 @@ func TestResourceLimits(t *testing.T) {

 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
+snapshot := cache.NewSnapshot(nil, test.nodes)
 fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
 p := &ResourceLimits{handle: fh}
 for i := range test.nodes {

@@ -34,7 +34,7 @@ go_test(
 embed = [":go_default_library"],
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
+"//pkg/scheduler/internal/cache:go_default_library",
 "//pkg/scheduler/testing:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -25,7 +25,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 st "k8s.io/kubernetes/pkg/scheduler/testing"
 )

@@ -414,7 +414,7 @@ func TestCalPreFilterState(t *testing.T) {
 }
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
+s := cache.NewSnapshot(tt.existingPods, tt.nodes)
 l, _ := s.NodeInfos().List()
 got, _ := calPreFilterState(tt.pod, l)
 got.sortCriticalPaths()
@@ -712,7 +712,7 @@ func TestPreFilterStateAddPod(t *testing.T) {
 }
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
+s := cache.NewSnapshot(tt.existingPods, tt.nodes)
 l, _ := s.NodeInfos().List()
 state, _ := calPreFilterState(tt.preemptor, l)
 state.updateWithPod(tt.addedPod, tt.preemptor, tt.nodes[tt.nodeIdx], 1)
@@ -902,7 +902,7 @@ func TestPreFilterStateRemovePod(t *testing.T) {
 }
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
+s := cache.NewSnapshot(tt.existingPods, tt.nodes)
 l, _ := s.NodeInfos().List()
 state, _ := calPreFilterState(tt.preemptor, l)

@@ -961,7 +961,7 @@ func BenchmarkTestCalPreFilterState(b *testing.B) {
 for _, tt := range tests {
 b.Run(tt.name, func(b *testing.B) {
 existingPods, allNodes, _ := st.MakeNodesAndPodsForEvenPodsSpread(tt.pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum)
-s := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes))
+s := cache.NewSnapshot(existingPods, allNodes)
 l, _ := s.NodeInfos().List()
 b.ResetTimer()
 for i := 0; i < b.N; i++ {
@@ -1228,7 +1228,7 @@ func TestSingleConstraint(t *testing.T) {
 }
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
+snapshot := cache.NewSnapshot(tt.existingPods, tt.nodes)
 p := &PodTopologySpread{sharedLister: snapshot}
 state := framework.NewCycleState()
 preFilterStatus := p.PreFilter(context.Background(), state, tt.pod)
@@ -1428,7 +1428,7 @@ func TestMultipleConstraints(t *testing.T) {
 }
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, tt.nodes))
+snapshot := cache.NewSnapshot(tt.existingPods, tt.nodes)
 p := &PodTopologySpread{sharedLister: snapshot}
 state := framework.NewCycleState()
 preFilterStatus := p.PreFilter(context.Background(), state, tt.pod)
@@ -24,7 +24,7 @@ import (
 v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/util/sets"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 st "k8s.io/kubernetes/pkg/scheduler/testing"
 )

@@ -435,7 +435,7 @@ func TestPodTopologySpreadScore(t *testing.T) {
 allNodes := append([]*v1.Node{}, tt.nodes...)
 allNodes = append(allNodes, tt.failedNodes...)
 state := framework.NewCycleState()
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(tt.existingPods, allNodes))
+snapshot := cache.NewSnapshot(tt.existingPods, allNodes)
 p := &PodTopologySpread{sharedLister: snapshot}

 status := p.PostFilter(context.Background(), state, tt.pod, tt.nodes, nil)
@@ -505,7 +505,7 @@ func BenchmarkTestPodTopologySpreadScore(b *testing.B) {
 b.Run(tt.name, func(b *testing.B) {
 existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(tt.pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.filteredNodesNum)
 state := framework.NewCycleState()
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes))
+snapshot := cache.NewSnapshot(existingPods, allNodes)
 p := &PodTopologySpread{sharedLister: snapshot}

 status := p.PostFilter(context.Background(), state, tt.pod, filteredNodes, nil)
@@ -564,7 +564,7 @@ func BenchmarkTestDefaultEvenPodsSpreadPriority(b *testing.B) {
 SpreadConstraint(1, v1.LabelZoneFailureDomain, softSpread, st.MakeLabelSelector().Exists("foo").Obj()).Obj()
 existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.allNodesNum)
 state := framework.NewCycleState()
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(existingPods, allNodes))
+snapshot := cache.NewSnapshot(existingPods, allNodes)
 p := &PodTopologySpread{sharedLister: snapshot}

 status := p.PostFilter(context.Background(), state, pod, filteredNodes, nil)
@@ -23,9 +23,9 @@ go_test(
 embed = [":go_default_library"],
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
+"//pkg/scheduler/internal/cache:go_default_library",
 "//pkg/scheduler/listers/fake:go_default_library",
 "//pkg/scheduler/nodeinfo:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 ],

@@ -25,9 +25,9 @@ import (
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
 "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 )

 func TestServiceAffinity(t *testing.T) {
@@ -160,7 +160,7 @@ func TestServiceAffinity(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 nodes := []*v1.Node{&node1, &node2, &node3, &node4, &node5}
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+snapshot := cache.NewSnapshot(test.pods, nodes)

 p := &ServiceAffinity{
 sharedLister: snapshot,
@@ -383,7 +383,7 @@ func TestServiceAffinityScore(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 nodes := makeLabeledNodeList(test.nodes)
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
+snapshot := cache.NewSnapshot(test.pods, nodes)
 serviceLister := fakelisters.ServiceLister(test.services)

 p := &ServiceAffinity{
@@ -494,8 +494,8 @@ func TestPreFilterStateAddRemovePod(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 // getMeta creates predicate meta data given the list of pods.
-getState := func(pods []*v1.Pod) (*ServiceAffinity, *framework.CycleState, *preFilterState, *nodeinfosnapshot.Snapshot) {
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(pods, test.nodes))
+getState := func(pods []*v1.Pod) (*ServiceAffinity, *framework.CycleState, *preFilterState, *cache.Snapshot) {
+snapshot := cache.NewSnapshot(pods, test.nodes)

 p := &ServiceAffinity{
 sharedLister: snapshot,
@@ -591,7 +591,7 @@ func sortNodeScoreList(out framework.NodeScoreList) {
 })
 }

-func mustGetNodeInfo(t *testing.T, snapshot *nodeinfosnapshot.Snapshot, name string) *nodeinfo.NodeInfo {
+func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *nodeinfo.NodeInfo {
 t.Helper()
 nodeInfo, err := snapshot.NodeInfos().Get(name)
 if err != nil {
@@ -35,8 +35,8 @@ go_test(
 embed = [":go_default_library"],
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
+"//pkg/scheduler/internal/cache:go_default_library",
 "//pkg/scheduler/nodeinfo:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 ],

@@ -24,8 +24,8 @@ import (
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
 schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 )

 func nodeWithTaints(nodeName string, taints []v1.Taint) *v1.Node {
@@ -229,7 +229,7 @@ func TestTaintTolerationScore(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 state := framework.NewCycleState()
-snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(nil, test.nodes))
+snapshot := cache.NewSnapshot(nil, test.nodes)
 fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))

 p, _ := New(nil, fh)
pkg/scheduler/internal/cache/BUILD (vendored, 5 changes)
@@ -6,6 +6,7 @@ go_library(
 "cache.go",
 "interface.go",
 "node_tree.go",
+"snapshot.go",
 ],
 importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache",
 visibility = ["//pkg/scheduler:__subpackages__"],
@@ -14,7 +15,6 @@ go_library(
 "//pkg/scheduler/listers:go_default_library",
 "//pkg/scheduler/metrics:go_default_library",
 "//pkg/scheduler/nodeinfo:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
 "//pkg/util/node:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -30,17 +30,18 @@ go_test(
 srcs = [
 "cache_test.go",
 "node_tree_test.go",
+"snapshot_test.go",
 ],
 embed = [":go_default_library"],
 deps = [
 "//pkg/features:go_default_library",
 "//pkg/scheduler/nodeinfo:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
 "//pkg/scheduler/util:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
 "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
 "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
 ],
pkg/scheduler/internal/cache/cache.go (vendored, 51 changes)
@@ -31,7 +31,6 @@ import (
 schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
 "k8s.io/kubernetes/pkg/scheduler/metrics"
 schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 )

 var (
@@ -174,10 +173,10 @@ func (cache *schedulerCache) removeNodeInfoFromList(name string) {
 }

 // Snapshot takes a snapshot of the current scheduler cache. This is used for
-// debugging purposes only and shouldn't be confused with UpdateNodeInfoSnapshot
+// debugging purposes only and shouldn't be confused with UpdateSnapshot
 // function.
 // This method is expensive, and should be only used in non-critical path.
-func (cache *schedulerCache) Snapshot() *Snapshot {
+func (cache *schedulerCache) Dump() *Dump {
 cache.mu.RLock()
 defer cache.mu.RUnlock()

@@ -191,23 +190,23 @@ func (cache *schedulerCache) Snapshot() *Snapshot {
 assumedPods[k] = v
 }

-return &Snapshot{
+return &Dump{
 Nodes: nodes,
 AssumedPods: assumedPods,
 }
 }

-// UpdateNodeInfoSnapshot takes a snapshot of cached NodeInfo map. This is called at
+// UpdateSnapshot takes a snapshot of cached NodeInfo map. This is called at
 // beginning of every scheduling cycle.
 // This function tracks generation number of NodeInfo and updates only the
 // entries of an existing snapshot that have changed after the snapshot was taken.
-func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) error {
+func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {
 cache.mu.Lock()
 defer cache.mu.Unlock()
 balancedVolumesEnabled := utilfeature.DefaultFeatureGate.Enabled(features.BalanceAttachedNodeVolumes)

 // Get the last generation of the snapshot.
-snapshotGeneration := nodeSnapshot.Generation
+snapshotGeneration := nodeSnapshot.generation

 // NodeInfoList and HavePodsWithAffinityNodeInfoList must be re-created if a node was added
 // or removed from the cache.
@@ -229,11 +228,11 @@ func (cache *schedulerCache) UpdateNodeInfoSnapshot(nodeSnapsh
 node.info.TransientInfo.ResetTransientSchedulerInfo()
 }
 if np := node.info.Node(); np != nil {
-existing, ok := nodeSnapshot.NodeInfoMap[np.Name]
+existing, ok := nodeSnapshot.nodeInfoMap[np.Name]
 if !ok {
 updateAllLists = true
 existing = &schedulernodeinfo.NodeInfo{}
-nodeSnapshot.NodeInfoMap[np.Name] = existing
+nodeSnapshot.nodeInfoMap[np.Name] = existing
 }
 clone := node.info.Clone()
 // We track nodes that have pods with affinity, here we check if this node changed its
@@ -249,10 +248,10 @@
 }
 // Update the snapshot generation with the latest NodeInfo generation.
 if cache.headNode != nil {
-nodeSnapshot.Generation = cache.headNode.info.GetGeneration()
+nodeSnapshot.generation = cache.headNode.info.GetGeneration()
 }

-if len(nodeSnapshot.NodeInfoMap) > len(cache.nodes) {
+if len(nodeSnapshot.nodeInfoMap) > len(cache.nodes) {
 cache.removeDeletedNodesFromSnapshot(nodeSnapshot)
 updateAllLists = true
 }
@@ -261,12 +260,12 @@
 cache.updateNodeInfoSnapshotList(nodeSnapshot, updateAllLists)
 }

-if len(nodeSnapshot.NodeInfoList) != cache.nodeTree.numNodes {
+if len(nodeSnapshot.nodeInfoList) != cache.nodeTree.numNodes {
 errMsg := fmt.Sprintf("snapshot state is not consistent, length of NodeInfoList=%v not equal to length of nodes in tree=%v "+
 ", length of NodeInfoMap=%v, length of nodes in cache=%v"+
 ", trying to recover",
-len(nodeSnapshot.NodeInfoList), cache.nodeTree.numNodes,
-len(nodeSnapshot.NodeInfoMap), len(cache.nodes))
+len(nodeSnapshot.nodeInfoList), cache.nodeTree.numNodes,
+len(nodeSnapshot.nodeInfoMap), len(cache.nodes))
 klog.Error(errMsg)
 // We will try to recover by re-creating the lists for the next scheduling cycle, but still return an
 // error to surface the problem, the error will likely cause a failure to the current scheduling cycle.
@@ -277,40 +276,40 @@
 return nil
 }

-func (cache *schedulerCache) updateNodeInfoSnapshotList(nodeSnapshot *nodeinfosnapshot.Snapshot, updateAll bool) {
-nodeSnapshot.HavePodsWithAffinityNodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)
+func (cache *schedulerCache) updateNodeInfoSnapshotList(snapshot *Snapshot, updateAll bool) {
+snapshot.havePodsWithAffinityNodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)
 if updateAll {
 // Take a snapshot of the nodes order in the tree
-nodeSnapshot.NodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)
+snapshot.nodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)
 for i := 0; i < cache.nodeTree.numNodes; i++ {
 nodeName := cache.nodeTree.next()
-if n := nodeSnapshot.NodeInfoMap[nodeName]; n != nil {
-nodeSnapshot.NodeInfoList = append(nodeSnapshot.NodeInfoList, n)
+if n := snapshot.nodeInfoMap[nodeName]; n != nil {
+snapshot.nodeInfoList = append(snapshot.nodeInfoList, n)
 if len(n.PodsWithAffinity()) > 0 {
-nodeSnapshot.HavePodsWithAffinityNodeInfoList = append(nodeSnapshot.HavePodsWithAffinityNodeInfoList, n)
+snapshot.havePodsWithAffinityNodeInfoList = append(snapshot.havePodsWithAffinityNodeInfoList, n)
 }
 } else {
 klog.Errorf("node %q exist in nodeTree but not in NodeInfoMap, this should not happen.", nodeName)
 }
 }
 } else {
-for _, n := range nodeSnapshot.NodeInfoList {
+for _, n := range snapshot.nodeInfoList {
 if len(n.PodsWithAffinity()) > 0 {
-nodeSnapshot.HavePodsWithAffinityNodeInfoList = append(nodeSnapshot.HavePodsWithAffinityNodeInfoList, n)
+snapshot.havePodsWithAffinityNodeInfoList = append(snapshot.havePodsWithAffinityNodeInfoList, n)
 }
 }
 }
 }

 // If certain nodes were deleted after the last snapshot was taken, we should remove them from the snapshot.
-func (cache *schedulerCache) removeDeletedNodesFromSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) {
-toDelete := len(nodeSnapshot.NodeInfoMap) - len(cache.nodes)
-for name := range nodeSnapshot.NodeInfoMap {
+func (cache *schedulerCache) removeDeletedNodesFromSnapshot(snapshot *Snapshot) {
+toDelete := len(snapshot.nodeInfoMap) - len(cache.nodes)
+for name := range snapshot.nodeInfoMap {
 if toDelete <= 0 {
 break
 }
 if _, ok := cache.nodes[name]; !ok {
-delete(nodeSnapshot.NodeInfoMap, name)
+delete(snapshot.nodeInfoMap, name)
 toDelete--
 }
 }
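The comments in the hunks above describe the renamed cache API: `Dump` replaces the old debug-only `Snapshot()` method, while `UpdateSnapshot` (formerly `UpdateNodeInfoSnapshot`) refreshes the scheduling snapshot at the start of every cycle and copies only entries whose NodeInfo generation is newer than the generation recorded in the snapshot. The toy sketch below illustrates that generation-based incremental copy with stand-in types; the field and function names are illustrative and the real cache walks its node list and handles deletions and the affinity lists as shown in the diff, which this sketch omits.

```go
package main

import "fmt"

// nodeInfo stands in for the cache's per-node entry; generation increases
// whenever the entry changes.
type nodeInfo struct {
	name       string
	generation int64
	data       string
}

// snapshot mirrors the renamed, now-unexported fields (nodeInfoMap, generation)
// since Snapshot lives in the same package as the cache after this change.
type snapshot struct {
	nodeInfoMap map[string]*nodeInfo
	generation  int64
}

// updateSnapshot copies only entries newer than the snapshot's recorded
// generation, the incremental behavior described in the diff's comments.
// Node deletions and list rebuilding are intentionally left out.
func updateSnapshot(entries []*nodeInfo, snap *snapshot) {
	latest := snap.generation
	for _, e := range entries {
		if e.generation <= snap.generation {
			continue // unchanged since the last snapshot
		}
		snap.nodeInfoMap[e.name] = &nodeInfo{name: e.name, generation: e.generation, data: e.data}
		if e.generation > latest {
			latest = e.generation
		}
	}
	snap.generation = latest
}

func main() {
	entries := []*nodeInfo{
		{name: "node-1", generation: 1, data: "v1"},
		{name: "node-2", generation: 2, data: "v1"},
	}
	snap := &snapshot{nodeInfoMap: map[string]*nodeInfo{}}
	updateSnapshot(entries, snap)
	entries[0].generation, entries[0].data = 3, "v2" // only node-1 changes
	updateSnapshot(entries, snap)                    // only node-1 is re-copied
	fmt.Println(snap.nodeInfoMap["node-1"].data, snap.generation)
}
```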
pkg/scheduler/internal/cache/cache_test.go (vendored, 50 changes)
@ -32,7 +32,6 @@ import (
|
||||
featuregatetesting "k8s.io/component-base/featuregate/testing"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
|
||||
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
|
||||
)
|
||||
|
||||
@ -398,7 +397,7 @@ func TestSnapshot(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
snapshot := cache.Snapshot()
|
||||
snapshot := cache.Dump()
|
||||
if len(snapshot.Nodes) != len(cache.nodes) {
|
||||
t.Errorf("Unequal number of nodes in the cache and its snapshot. expected: %v, got: %v", len(cache.nodes), len(snapshot.Nodes))
|
||||
}
|
||||
@ -1121,12 +1120,12 @@ func TestNodeOperators(t *testing.T) {
|
||||
}
|
||||
|
||||
// Step 2: dump cached nodes successfully.
|
||||
cachedNodes := nodeinfosnapshot.NewEmptySnapshot()
|
||||
if err := cache.UpdateNodeInfoSnapshot(cachedNodes); err != nil {
|
||||
cachedNodes := NewEmptySnapshot()
|
||||
if err := cache.UpdateSnapshot(cachedNodes); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
newNode, found := cachedNodes.NodeInfoMap[node.Name]
|
||||
if !found || len(cachedNodes.NodeInfoMap) != 1 {
|
||||
newNode, found := cachedNodes.nodeInfoMap[node.Name]
|
||||
if !found || len(cachedNodes.nodeInfoMap) != 1 {
|
||||
t.Errorf("failed to dump cached nodes:\n got: %v \nexpected: %v", cachedNodes, cache.nodes)
|
||||
}
|
||||
expected.SetGeneration(newNode.GetGeneration())
@@ -1192,8 +1191,7 @@ func TestNodeOperators(t *testing.T) {
}
}

-// TestSchedulerCache_UpdateNodeInfoSnapshot tests UpdateNodeInfoSnapshot function of cache.
-func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) {
+func TestSchedulerCache_UpdateSnapshot(t *testing.T) {
// Create a few nodes to be used in tests.
nodes := []*v1.Node{}
for i := 0; i < 10; i++ {
@@ -1266,7 +1264,7 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) {
}

var cache *schedulerCache
-var snapshot *nodeinfosnapshot.Snapshot
+var snapshot *Snapshot
type operation = func()

addNode := func(i int) operation {
@@ -1320,7 +1318,7 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) {
}
updateSnapshot := func() operation {
return func() {
-cache.UpdateNodeInfoSnapshot(snapshot)
+cache.UpdateSnapshot(snapshot)
if err := compareCacheWithNodeInfoSnapshot(cache, snapshot); err != nil {
t.Error(err)
}
@@ -1470,7 +1468,7 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
cache = newSchedulerCache(time.Second, time.Second, nil)
-snapshot = nodeinfosnapshot.NewEmptySnapshot()
+snapshot = NewEmptySnapshot()

for _, op := range test.operations {
op()
@@ -1493,12 +1491,12 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) {
}

// Check number of nodes with pods with affinity
-if len(snapshot.HavePodsWithAffinityNodeInfoList) != test.expectedHavePodsWithAffinity {
-t.Errorf("unexpected number of HavePodsWithAffinity nodes. Expected: %v, got: %v", test.expectedHavePodsWithAffinity, len(snapshot.HavePodsWithAffinityNodeInfoList))
+if len(snapshot.havePodsWithAffinityNodeInfoList) != test.expectedHavePodsWithAffinity {
+t.Errorf("unexpected number of HavePodsWithAffinity nodes. Expected: %v, got: %v", test.expectedHavePodsWithAffinity, len(snapshot.havePodsWithAffinityNodeInfoList))
}

// Always update the snapshot at the end of operations and compare it.
-if err := cache.UpdateNodeInfoSnapshot(snapshot); err != nil {
+if err := cache.UpdateSnapshot(snapshot); err != nil {
t.Error(err)
}
if err := compareCacheWithNodeInfoSnapshot(cache, snapshot); err != nil {
@@ -1508,27 +1506,27 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) {
}
}

-func compareCacheWithNodeInfoSnapshot(cache *schedulerCache, snapshot *nodeinfosnapshot.Snapshot) error {
+func compareCacheWithNodeInfoSnapshot(cache *schedulerCache, snapshot *Snapshot) error {
// Compare the map.
-if len(snapshot.NodeInfoMap) != len(cache.nodes) {
-return fmt.Errorf("unexpected number of nodes in the snapshot. Expected: %v, got: %v", len(cache.nodes), len(snapshot.NodeInfoMap))
+if len(snapshot.nodeInfoMap) != len(cache.nodes) {
+return fmt.Errorf("unexpected number of nodes in the snapshot. Expected: %v, got: %v", len(cache.nodes), len(snapshot.nodeInfoMap))
}
for name, ni := range cache.nodes {
-if !reflect.DeepEqual(snapshot.NodeInfoMap[name], ni.info) {
-return fmt.Errorf("unexpected node info for node %q. Expected: %v, got: %v", name, ni.info, snapshot.NodeInfoMap[name])
+if !reflect.DeepEqual(snapshot.nodeInfoMap[name], ni.info) {
+return fmt.Errorf("unexpected node info for node %q. Expected: %v, got: %v", name, ni.info, snapshot.nodeInfoMap[name])
}
}

// Compare the lists.
-if len(snapshot.NodeInfoList) != len(cache.nodes) {
-return fmt.Errorf("unexpected number of nodes in NodeInfoList. Expected: %v, got: %v", len(cache.nodes), len(snapshot.NodeInfoList))
+if len(snapshot.nodeInfoList) != len(cache.nodes) {
+return fmt.Errorf("unexpected number of nodes in NodeInfoList. Expected: %v, got: %v", len(cache.nodes), len(snapshot.nodeInfoList))
}

expectedNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)
expectedHavePodsWithAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)
for i := 0; i < cache.nodeTree.numNodes; i++ {
nodeName := cache.nodeTree.next()
-if n := snapshot.NodeInfoMap[nodeName]; n != nil {
+if n := snapshot.nodeInfoMap[nodeName]; n != nil {
expectedNodeInfoList = append(expectedNodeInfoList, n)
if len(n.PodsWithAffinity()) > 0 {
expectedHavePodsWithAffinityNodeInfoList = append(expectedHavePodsWithAffinityNodeInfoList, n)
@@ -1539,14 +1537,14 @@ func compareCacheWithNodeInfoSnapshot(cache *schedulerCache, snapshot *nodeinfos
}

for i, expected := range expectedNodeInfoList {
-got := snapshot.NodeInfoList[i]
+got := snapshot.nodeInfoList[i]
if expected != got {
return fmt.Errorf("unexpected NodeInfo pointer in NodeInfoList. Expected: %p, got: %p", expected, got)
}
}

for i, expected := range expectedHavePodsWithAffinityNodeInfoList {
-got := snapshot.HavePodsWithAffinityNodeInfoList[i]
+got := snapshot.havePodsWithAffinityNodeInfoList[i]
if expected != got {
return fmt.Errorf("unexpected NodeInfo pointer in HavePodsWithAffinityNodeInfoList. Expected: %p, got: %p", expected, got)
}
@@ -1561,8 +1559,8 @@ func BenchmarkUpdate1kNodes30kPods(b *testing.B) {
cache := setupCacheOf1kNodes30kPods(b)
b.ResetTimer()
for n := 0; n < b.N; n++ {
-cachedNodes := nodeinfosnapshot.NewEmptySnapshot()
-cache.UpdateNodeInfoSnapshot(cachedNodes)
+cachedNodes := NewEmptySnapshot()
+cache.UpdateSnapshot(cachedNodes)
}
}

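Context for the test and benchmark above: the cache keeps the authoritative node state, and a long-lived Snapshot is brought up to date once per scheduling cycle via UpdateSnapshot. What follows is a minimal, self-contained sketch of that pattern; the toyCache and toySnapshot types and the generation-based copy are illustrative stand-ins, not the scheduler's real implementation.

package main

import "fmt"

// toyNodeInfo stands in for the scheduler's NodeInfo; generation marks the last change.
type toyNodeInfo struct {
	name       string
	generation int64
}

// toyCache stands in for schedulerCache.
type toyCache struct {
	nodes map[string]*toyNodeInfo
}

// toySnapshot stands in for the Snapshot updated by the test above.
type toySnapshot struct {
	nodeInfoMap map[string]*toyNodeInfo
	generation  int64
}

// UpdateSnapshot copies nodes whose generation is newer than the snapshot's,
// and drops nodes that no longer exist in the cache.
func (c *toyCache) UpdateSnapshot(s *toySnapshot) error {
	maxGen := s.generation
	for name, n := range c.nodes {
		if n.generation > s.generation {
			copied := *n
			s.nodeInfoMap[name] = &copied
		}
		if n.generation > maxGen {
			maxGen = n.generation
		}
	}
	for name := range s.nodeInfoMap {
		if _, ok := c.nodes[name]; !ok {
			delete(s.nodeInfoMap, name)
		}
	}
	s.generation = maxGen
	return nil
}

func main() {
	cache := &toyCache{nodes: map[string]*toyNodeInfo{
		"node-0": {name: "node-0", generation: 1},
	}}
	snapshot := &toySnapshot{nodeInfoMap: map[string]*toyNodeInfo{}}
	_ = cache.UpdateSnapshot(snapshot)                          // one call per scheduling cycle
	fmt.Println(len(snapshot.nodeInfoMap), snapshot.generation) // 1 1
}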
@@ -52,15 +52,15 @@ func (c *CacheComparer) Compare() error {
return err
}

-snapshot := c.Cache.Snapshot()
+dump := c.Cache.Dump()

pendingPods := c.PodQueue.PendingPods()

-if missed, redundant := c.CompareNodes(nodes, snapshot.Nodes); len(missed)+len(redundant) != 0 {
+if missed, redundant := c.CompareNodes(nodes, dump.Nodes); len(missed)+len(redundant) != 0 {
klog.Warningf("cache mismatch: missed nodes: %s; redundant nodes: %s", missed, redundant)
}

-if missed, redundant := c.ComparePods(pods, pendingPods, snapshot.Nodes); len(missed)+len(redundant) != 0 {
+if missed, redundant := c.ComparePods(pods, pendingPods, dump.Nodes); len(missed)+len(redundant) != 0 {
klog.Warningf("cache mismatch: missed pods: %s; redundant pods: %s", missed, redundant)
}

@@ -43,9 +43,9 @@ func (d *CacheDumper) DumpAll() {

// dumpNodes writes NodeInfo to the scheduler logs.
func (d *CacheDumper) dumpNodes() {
-snapshot := d.cache.Snapshot()
+dump := d.cache.Dump()
klog.Info("Dump of cached NodeInfo")
-for _, nodeInfo := range snapshot.Nodes {
+for _, nodeInfo := range dump.Nodes {
klog.Info(d.printNodeInfo(nodeInfo))
}
}
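The debugger hunks above switch from Cache.Snapshot() to Cache.Dump(): the dump is a plain copy of cache state meant only for logging and comparison, not for scheduling decisions. A small self-contained sketch of that usage, with toy types standing in for the real Dump and NodeInfo:

package main

import "fmt"

// toyNodeInfo stands in for schedulernodeinfo.NodeInfo.
type toyNodeInfo struct{ numPods int }

// toyDump mirrors the shape of the Dump struct: assumed pods plus per-node info.
type toyDump struct {
	AssumedPods map[string]bool
	Nodes       map[string]*toyNodeInfo
}

// dumpNodes walks the dump and prints one line per node, the way
// CacheDumper.dumpNodes logs each cached NodeInfo above.
func dumpNodes(d *toyDump) {
	fmt.Println("Dump of cached NodeInfo")
	for name, n := range d.Nodes {
		fmt.Printf("node %q: %d pods\n", name, n.numPods)
	}
}

func main() {
	d := &toyDump{
		AssumedPods: map[string]bool{"default/pod-a": true},
		Nodes:       map[string]*toyNodeInfo{"node-0": {numPods: 3}},
	}
	dumpNodes(d)
	fmt.Println("assumed pods:", len(d.AssumedPods))
}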
1 pkg/scheduler/internal/cache/fake/BUILD vendored
@@ -8,7 +8,6 @@ go_library(
deps = [
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/listers:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
],
11 pkg/scheduler/internal/cache/fake/fake_cache.go vendored
@@ -21,7 +21,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
)

// Cache is used for testing
@@ -75,8 +74,8 @@ func (c *Cache) UpdateNode(oldNode, newNode *v1.Node) error { return nil }
// RemoveNode is a fake method for testing.
func (c *Cache) RemoveNode(node *v1.Node) error { return nil }

-// UpdateNodeInfoSnapshot is a fake method for testing.
-func (c *Cache) UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) error {
+// UpdateSnapshot is a fake method for testing.
+func (c *Cache) UpdateSnapshot(snapshot *internalcache.Snapshot) error {
return nil
}

@@ -88,9 +87,9 @@ func (c *Cache) FilteredList(filter schedulerlisters.PodFilter, selector labels.
return nil, nil
}

-// Snapshot is a fake method for testing
-func (c *Cache) Snapshot() *internalcache.Snapshot {
-return &internalcache.Snapshot{}
+// Dump is a fake method for testing.
+func (c *Cache) Dump() *internalcache.Dump {
+return &internalcache.Dump{}
}

// GetNodeInfo is a fake method for testing.
13 pkg/scheduler/internal/cache/interface.go vendored
@@ -20,7 +20,6 @@ import (
v1 "k8s.io/api/core/v1"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
)

// Cache collects pods' information and provides node-level aggregated information.
@@ -97,17 +96,17 @@ type Cache interface {
// RemoveNode removes overall information about node.
RemoveNode(node *v1.Node) error

-// UpdateNodeInfoSnapshot updates the passed infoSnapshot to the current contents of Cache.
+// UpdateSnapshot updates the passed infoSnapshot to the current contents of Cache.
// The node info contains aggregated information of pods scheduled (including assumed to be)
// on this node.
-UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) error
+UpdateSnapshot(nodeSnapshot *Snapshot) error

-// Snapshot takes a snapshot on current cache
-Snapshot() *Snapshot
+// Dump produces a dump of the current cache.
+Dump() *Dump
}

-// Snapshot is a snapshot of cache state
-type Snapshot struct {
+// Dump is a dump of the cache state.
+type Dump struct {
AssumedPods map[string]bool
Nodes map[string]*schedulernodeinfo.NodeInfo
}
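After this hunk, the Cache interface exposes two distinct read paths: UpdateSnapshot refreshes a caller-owned Snapshot that is reused every scheduling cycle, while Dump returns a throwaway copy for debugging. A minimal sketch of that split, using simplified local types rather than the scheduler's real ones:

package main

import "fmt"

type toyNodeInfo struct{ name string }

// toySnapshot is reused across cycles and updated in place.
type toySnapshot struct{ nodeInfoMap map[string]*toyNodeInfo }

// toyDump is a one-shot copy for debugging.
type toyDump struct {
	AssumedPods map[string]bool
	Nodes       map[string]*toyNodeInfo
}

// toyCacheReader captures the two read paths of the Cache interface above.
type toyCacheReader interface {
	UpdateSnapshot(s *toySnapshot) error
	Dump() *toyDump
}

type toyCache struct {
	nodes   map[string]*toyNodeInfo
	assumed map[string]bool
}

func (c *toyCache) UpdateSnapshot(s *toySnapshot) error {
	if s.nodeInfoMap == nil {
		s.nodeInfoMap = make(map[string]*toyNodeInfo, len(c.nodes))
	}
	for name, n := range c.nodes {
		s.nodeInfoMap[name] = n // the real cache copies incrementally, by generation
	}
	return nil
}

func (c *toyCache) Dump() *toyDump {
	d := &toyDump{AssumedPods: map[string]bool{}, Nodes: map[string]*toyNodeInfo{}}
	for k, v := range c.assumed {
		d.AssumedPods[k] = v
	}
	for k, v := range c.nodes {
		d.Nodes[k] = v
	}
	return d
}

func main() {
	var c toyCacheReader = &toyCache{
		nodes:   map[string]*toyNodeInfo{"node-0": {name: "node-0"}},
		assumed: map[string]bool{},
	}
	s := &toySnapshot{}
	_ = c.UpdateSnapshot(s)
	fmt.Println(len(s.nodeInfoMap), len(c.Dump().Nodes)) // 1 1
}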
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

-package snapshot
+package cache

import (
"fmt"
@@ -29,13 +29,13 @@ import (
// Snapshot is a snapshot of cache NodeInfo and NodeTree order. The scheduler takes a
// snapshot at the beginning of each scheduling cycle and uses it for its operations in that cycle.
type Snapshot struct {
-// NodeInfoMap a map of node name to a snapshot of its NodeInfo.
-NodeInfoMap map[string]*schedulernodeinfo.NodeInfo
-// NodeInfoList is the list of nodes as ordered in the cache's nodeTree.
-NodeInfoList []*schedulernodeinfo.NodeInfo
-// HavePodsWithAffinityNodeInfoList is the list of nodes with at least one pod declaring affinity terms.
-HavePodsWithAffinityNodeInfoList []*schedulernodeinfo.NodeInfo
-Generation int64
+// nodeInfoMap a map of node name to a snapshot of its NodeInfo.
+nodeInfoMap map[string]*schedulernodeinfo.NodeInfo
+// nodeInfoList is the list of nodes as ordered in the cache's nodeTree.
+nodeInfoList []*schedulernodeinfo.NodeInfo
+// havePodsWithAffinityNodeInfoList is the list of nodes with at least one pod declaring affinity terms.
+havePodsWithAffinityNodeInfoList []*schedulernodeinfo.NodeInfo
+generation int64
}

var _ schedulerlisters.SharedLister = &Snapshot{}
@@ -43,12 +43,13 @@ var _ schedulerlisters.SharedLister = &Snapshot{}
// NewEmptySnapshot initializes a Snapshot struct and returns it.
func NewEmptySnapshot() *Snapshot {
return &Snapshot{
-NodeInfoMap: make(map[string]*schedulernodeinfo.NodeInfo),
+nodeInfoMap: make(map[string]*schedulernodeinfo.NodeInfo),
}
}

// NewSnapshot initializes a Snapshot struct and returns it.
-func NewSnapshot(nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) *Snapshot {
+func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot {
+nodeInfoMap := createNodeInfoMap(pods, nodes)
nodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap))
havePodsWithAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap))
for _, v := range nodeInfoMap {
@@ -59,16 +60,17 @@ func NewSnapshot(nodeInfoMap map[string]*schedulernodeinfo.NodeInfo) *Snapshot {
}

s := NewEmptySnapshot()
-s.NodeInfoMap = nodeInfoMap
-s.NodeInfoList = nodeInfoList
-s.HavePodsWithAffinityNodeInfoList = havePodsWithAffinityNodeInfoList
+s.nodeInfoMap = nodeInfoMap
+s.nodeInfoList = nodeInfoList
+s.havePodsWithAffinityNodeInfoList = havePodsWithAffinityNodeInfoList

return s
}

-// CreateNodeInfoMap obtains a list of pods and pivots that list into a map where the keys are node names
-// and the values are the aggregated information for that node.
-func CreateNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulernodeinfo.NodeInfo {
+// createNodeInfoMap obtains a list of pods and pivots that list into a map
+// where the keys are node names and the values are the aggregated information
+// for that node.
+func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulernodeinfo.NodeInfo {
nodeNameToInfo := make(map[string]*schedulernodeinfo.NodeInfo)
for _, pod := range pods {
nodeName := pod.Spec.NodeName
@@ -124,42 +126,40 @@ func createImageExistenceMap(nodes []*v1.Node) map[string]sets.String {

// Pods returns a PodLister
func (s *Snapshot) Pods() schedulerlisters.PodLister {
-return &podLister{snapshot: s}
+return podLister(s.nodeInfoList)
}

// NodeInfos returns a NodeInfoLister.
func (s *Snapshot) NodeInfos() schedulerlisters.NodeInfoLister {
-return &nodeInfoLister{snapshot: s}
+return s
}

// NumNodes returns the number of nodes in the snapshot.
func (s *Snapshot) NumNodes() int {
-return len(s.NodeInfoList)
+return len(s.nodeInfoList)
}

-type podLister struct {
-snapshot *Snapshot
-}
+type podLister []*schedulernodeinfo.NodeInfo

// List returns the list of pods in the snapshot.
-func (p *podLister) List(selector labels.Selector) ([]*v1.Pod, error) {
-alwaysTrue := func(p *v1.Pod) bool { return true }
+func (p podLister) List(selector labels.Selector) ([]*v1.Pod, error) {
+alwaysTrue := func(*v1.Pod) bool { return true }
return p.FilteredList(alwaysTrue, selector)
}

// FilteredList returns a filtered list of pods in the snapshot.
-func (p *podLister) FilteredList(podFilter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
+func (p podLister) FilteredList(filter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
// podFilter is expected to return true for most or all of the pods. We
// can avoid expensive array growth without wasting too much memory by
// pre-allocating capacity.
maxSize := 0
-for _, n := range p.snapshot.NodeInfoList {
+for _, n := range p {
maxSize += len(n.Pods())
}
pods := make([]*v1.Pod, 0, maxSize)
-for _, n := range p.snapshot.NodeInfoList {
+for _, n := range p {
for _, pod := range n.Pods() {
-if podFilter(pod) && selector.Matches(labels.Set(pod.Labels)) {
+if filter(pod) && selector.Matches(labels.Set(pod.Labels)) {
pods = append(pods, pod)
}
}
@@ -167,23 +167,19 @@ func (p *podLister) FilteredList(podFilter schedulerlisters.PodFilter, selector
return pods, nil
}

-type nodeInfoLister struct {
-snapshot *Snapshot
-}
-
// List returns the list of nodes in the snapshot.
-func (n *nodeInfoLister) List() ([]*schedulernodeinfo.NodeInfo, error) {
-return n.snapshot.NodeInfoList, nil
+func (s *Snapshot) List() ([]*schedulernodeinfo.NodeInfo, error) {
+return s.nodeInfoList, nil
}

// HavePodsWithAffinityList returns the list of nodes with at least one pods with inter-pod affinity
-func (n *nodeInfoLister) HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error) {
-return n.snapshot.HavePodsWithAffinityNodeInfoList, nil
+func (s *Snapshot) HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error) {
+return s.havePodsWithAffinityNodeInfoList, nil
}

-// Returns the NodeInfo of the given node name.
-func (n *nodeInfoLister) Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) {
-if v, ok := n.snapshot.NodeInfoMap[nodeName]; ok && v.Node() != nil {
+// Get returns the NodeInfo of the given node name.
+func (s *Snapshot) Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) {
+if v, ok := s.nodeInfoMap[nodeName]; ok && v.Node() != nil {
return v, nil
}
return nil, fmt.Errorf("nodeinfo not found for node name %q", nodeName)
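The refactor above removes the intermediate podLister and nodeInfoLister structs: podLister becomes a named slice of the snapshot's node list, and Snapshot satisfies the node-info lister itself by gaining List, HavePodsWithAffinityList, and Get methods. A compact, self-contained sketch of the same shape, with simplified types rather than the scheduler's real listers:

package main

import "fmt"

// toyPod and toyNodeInfo stand in for v1.Pod and schedulernodeinfo.NodeInfo.
type toyPod struct{ name string }

type toyNodeInfo struct{ pods []*toyPod }

func (n *toyNodeInfo) Pods() []*toyPod { return n.pods }

// toyPodLister mirrors the new podLister: it is just the snapshot's node list.
type toyPodLister []*toyNodeInfo

// FilteredList pre-sizes the result because the filter is expected to keep
// most pods, matching the comment in the snapshot code above.
func (p toyPodLister) FilteredList(keep func(*toyPod) bool) []*toyPod {
	maxSize := 0
	for _, n := range p {
		maxSize += len(n.Pods())
	}
	pods := make([]*toyPod, 0, maxSize)
	for _, n := range p {
		for _, pod := range n.Pods() {
			if keep(pod) {
				pods = append(pods, pod)
			}
		}
	}
	return pods
}

// toySnapshot acts as its own node-info lister: List and Get are methods on
// the snapshot itself instead of on a separate nodeInfoLister type.
type toySnapshot struct {
	nodeInfoMap  map[string]*toyNodeInfo
	nodeInfoList []*toyNodeInfo
}

func (s *toySnapshot) List() []*toyNodeInfo { return s.nodeInfoList }

func (s *toySnapshot) Get(name string) (*toyNodeInfo, error) {
	if n, ok := s.nodeInfoMap[name]; ok {
		return n, nil
	}
	return nil, fmt.Errorf("nodeinfo not found for node name %q", name)
}

func (s *toySnapshot) Pods() toyPodLister { return toyPodLister(s.nodeInfoList) }

func main() {
	n := &toyNodeInfo{pods: []*toyPod{{name: "a"}, {name: "b"}}}
	s := &toySnapshot{nodeInfoMap: map[string]*toyNodeInfo{"node-0": n}, nodeInfoList: []*toyNodeInfo{n}}
	all := s.Pods().FilteredList(func(*toyPod) bool { return true })
	fmt.Println(len(all)) // 2
	if _, err := s.Get("node-1"); err != nil {
		fmt.Println(err)
	}
}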
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

-package snapshot
+package cache

import (
"reflect"
@@ -36,8 +36,8 @@ go_test(
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/plugins:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
+"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
-"//pkg/scheduler/nodeinfo/snapshot:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -36,8 +36,8 @@ import (
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
frameworkplugins "k8s.io/kubernetes/pkg/scheduler/framework/plugins"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+"k8s.io/kubernetes/pkg/scheduler/internal/cache"
"k8s.io/kubernetes/pkg/scheduler/metrics"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
"k8s.io/kubernetes/pkg/scheduler/util"
)

@@ -174,7 +174,7 @@ func newDefaultFramework() framework.Framework {
pls,
framework.WithClientSet(fakeClient),
framework.WithInformerFactory(informers.NewSharedInformerFactory(fakeClient, 0)),
-framework.WithSnapshotSharedLister(nodeinfosnapshot.NewEmptySnapshot()),
+framework.WithSnapshotSharedLister(cache.NewEmptySnapshot()),
)
if err != nil {
panic(err)
@@ -46,10 +46,7 @@ filegroup(

filegroup(
name = "all-srcs",
-srcs = [
-":package-srcs",
-"//pkg/scheduler/nodeinfo/snapshot:all-srcs",
-],
+srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
@@ -1,41 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
-
-filegroup(
-name = "package-srcs",
-srcs = glob(["**"]),
-tags = ["automanaged"],
-visibility = ["//visibility:private"],
-)
-
-filegroup(
-name = "all-srcs",
-srcs = [":package-srcs"],
-tags = ["automanaged"],
-visibility = ["//visibility:public"],
-)
-
-go_test(
-name = "go_default_test",
-srcs = ["snapshot_test.go"],
-embed = [":go_default_library"],
-deps = [
-"//pkg/scheduler/nodeinfo:go_default_library",
-"//staging/src/k8s.io/api/core/v1:go_default_library",
-"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-],
-)
-
-go_library(
-name = "go_default_library",
-srcs = ["snapshot.go"],
-importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot",
-visibility = ["//visibility:public"],
-deps = [
-"//pkg/scheduler/listers:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
-"//staging/src/k8s.io/api/core/v1:go_default_library",
-"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
-"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
-],
-)
@@ -45,7 +45,6 @@ import (
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
"k8s.io/kubernetes/pkg/scheduler/metrics"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
)

@@ -273,7 +272,7 @@ func New(client clientset.Interface,
return nil, err
}

-snapshot := nodeinfosnapshot.NewEmptySnapshot()
+snapshot := internalcache.NewEmptySnapshot()

configurator := &Configurator{
client: client,
@@ -55,7 +55,6 @@ import (
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
-nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
st "k8s.io/kubernetes/pkg/scheduler/testing"
"k8s.io/kubernetes/pkg/scheduler/volumebinder"
)
@@ -686,7 +685,7 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache internalcache.C
algo := core.NewGenericScheduler(
scache,
internalqueue.NewSchedulingQueue(nil),
-nodeinfosnapshot.NewEmptySnapshot(),
+internalcache.NewEmptySnapshot(),
fwk,
[]algorithm.SchedulerExtender{},
nil,
@@ -745,7 +744,7 @@ func setupTestSchedulerLongBindingWithRetry(queuedPodStore *clientcache.FIFO, sc
algo := core.NewGenericScheduler(
scache,
queue,
-nodeinfosnapshot.NewEmptySnapshot(),
+internalcache.NewEmptySnapshot(),
fwk,
[]algorithm.SchedulerExtender{},
nil,
@@ -774,9 +774,9 @@ func cleanupPodsInNamespace(cs clientset.Interface, t *testing.T, ns string) {

func waitForSchedulerCacheCleanup(sched *scheduler.Scheduler, t *testing.T) {
schedulerCacheIsEmpty := func() (bool, error) {
-snapshot := sched.Cache().Snapshot()
+dump := sched.Cache().Dump()

-return len(snapshot.Nodes) == 0 && len(snapshot.AssumedPods) == 0, nil
+return len(dump.Nodes) == 0 && len(dump.AssumedPods) == 0, nil
}

if err := wait.Poll(time.Second, wait.ForeverTestTimeout, schedulerCacheIsEmpty); err != nil {
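The integration helper above polls the scheduler cache's Dump until both the node set and the assumed-pod set are empty. A standalone sketch of the same wait loop; pollUntil is a hypothetical stand-in for wait.Poll, and toyDump for the real Dump:

package main

import (
	"errors"
	"fmt"
	"time"
)

// toyDump stands in for the cache Dump checked by the test helper above.
type toyDump struct {
	Nodes       map[string]struct{}
	AssumedPods map[string]bool
}

// pollUntil is a small stand-in for wait.Poll: retry cond every interval
// until it reports true or the timeout elapses.
func pollUntil(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		done, err := cond()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		time.Sleep(interval)
	}
	return errors.New("timed out waiting for the scheduler cache to drain")
}

func main() {
	current := &toyDump{} // pretend the cache has already been cleaned up
	cacheIsEmpty := func() (bool, error) {
		return len(current.Nodes) == 0 && len(current.AssumedPods) == 0, nil
	}
	fmt.Println(pollUntil(10*time.Millisecond, time.Second, cacheIsEmpty)) // <nil>
}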