k3s-io/kubernetes (mirror of https://github.com/k3s-io/kubernetes.git)

commit 8ec56aeafb (parent 4ee1e7510f)

    Change all scheduler's Node and Pod list/lookups to be from the snapshot rather than the cache.
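In outline, the commit replaces the cache-backed NodeLister (a single GetNodeInfo method returning *v1.Node) with a snapshot-backed NodeInfoLister (Get/List returning *schedulernodeinfo.NodeInfo), so every list/lookup a predicate, priority, or plugin performs during a scheduling cycle sees one consistent view. A minimal before/after sketch, condensed from the hunks below (the receiver s stands in for any of the structs touched here):

    // Before: resolve the node through the live scheduler cache.
    node, err := s.nodeLister.GetNodeInfo(pod.Spec.NodeName) // *v1.Node

    // After: resolve the node through the per-cycle snapshot.
    nodeInfo, err := s.nodeInfoLister.Get(pod.Spec.NodeName) // *schedulernodeinfo.NodeInfo
    node := nodeInfo.Node()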
@@ -28,6 +28,7 @@ go_library(
         "//pkg/scheduler/internal/queue:go_default_library",
         "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/metrics:go_default_library",
+        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
         "//pkg/scheduler/volumebinder:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/api/storage/v1:go_default_library",
@@ -355,7 +355,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
 			// getMeta creates predicate meta data given the list of pods.
 			getMeta := func(lister fakelisters.PodLister) (*predicateMetadata, map[string]*schedulernodeinfo.NodeInfo) {
 				nodeInfoMap := schedulernodeinfo.CreateNodeNameToInfoMap(lister, test.nodes)
-				_, precompute := NewServiceAffinityPredicate(fakelisters.NodeLister(test.nodes), lister, fakelisters.ServiceLister(test.services), nil)
+				_, precompute := NewServiceAffinityPredicate(fakelisters.NewNodeInfoLister(test.nodes), lister, fakelisters.ServiceLister(test.services), nil)
 				RegisterPredicateMetadataProducer("ServiceAffinityMetaProducer", precompute)
 				meta := GetPredicateMetadata(test.pendingPod, nodeInfoMap)
 				return meta.(*predicateMetadata), nodeInfoMap
@@ -991,7 +991,7 @@ func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta PredicateMet
 
 // ServiceAffinity defines a struct used for creating service affinity predicates.
 type ServiceAffinity struct {
-	nodeLister     schedulerlisters.NodeLister
+	nodeInfoLister schedulerlisters.NodeInfoLister
 	podLister      schedulerlisters.PodLister
 	serviceLister  corelisters.ServiceLister
 	labels         []string
@@ -1024,9 +1024,9 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata)
 }
 
 // NewServiceAffinityPredicate creates a ServiceAffinity.
-func NewServiceAffinityPredicate(nodeLister schedulerlisters.NodeLister, podLister schedulerlisters.PodLister, serviceLister corelisters.ServiceLister, labels []string) (FitPredicate, predicateMetadataProducer) {
+func NewServiceAffinityPredicate(nodeInfoLister schedulerlisters.NodeInfoLister, podLister schedulerlisters.PodLister, serviceLister corelisters.ServiceLister, labels []string) (FitPredicate, predicateMetadataProducer) {
 	affinity := &ServiceAffinity{
-		nodeLister:     nodeLister,
+		nodeInfoLister: nodeInfoLister,
 		podLister:      podLister,
 		serviceLister:  serviceLister,
 		labels:         labels,
@@ -1084,11 +1084,11 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta PredicateMetada
 	if len(s.labels) > len(affinityLabels) {
 		if len(services) > 0 {
 			if len(filteredPods) > 0 {
-				nodeWithAffinityLabels, err := s.nodeLister.GetNodeInfo(filteredPods[0].Spec.NodeName)
+				nodeWithAffinityLabels, err := s.nodeInfoLister.Get(filteredPods[0].Spec.NodeName)
 				if err != nil {
 					return false, nil, err
 				}
-				AddUnsetLabelsToMap(affinityLabels, s.labels, labels.Set(nodeWithAffinityLabels.Labels))
+				AddUnsetLabelsToMap(affinityLabels, s.labels, labels.Set(nodeWithAffinityLabels.Node().Labels))
 			}
 		}
 	}
@@ -1192,14 +1192,14 @@ func EssentialPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedule
 
 // PodAffinityChecker contains information to check pod affinity.
 type PodAffinityChecker struct {
-	nodeLister     schedulerlisters.NodeLister
+	nodeInfoLister schedulerlisters.NodeInfoLister
 	podLister      schedulerlisters.PodLister
 }
 
 // NewPodAffinityPredicate creates a PodAffinityChecker.
-func NewPodAffinityPredicate(nodeLister schedulerlisters.NodeLister, podLister schedulerlisters.PodLister) FitPredicate {
+func NewPodAffinityPredicate(nodeInfoLister schedulerlisters.NodeInfoLister, podLister schedulerlisters.PodLister) FitPredicate {
 	checker := &PodAffinityChecker{
-		nodeLister:     nodeLister,
+		nodeInfoLister: nodeInfoLister,
 		podLister:      podLister,
 	}
 	return checker.InterPodAffinityMatches
@@ -1254,7 +1254,7 @@ func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod,
 		return false, false, nil
 	}
 	// Namespace and selector of the terms have matched. Now we check topology of the terms.
-	targetPodNodeInfo, err := c.nodeLister.GetNodeInfo(targetPod.Spec.NodeName)
+	targetPodNodeInfo, err := c.nodeInfoLister.Get(targetPod.Spec.NodeName)
 	if err != nil {
 		return false, false, err
 	}
@@ -1262,7 +1262,7 @@ func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod,
 		if len(term.TopologyKey) == 0 {
 			return false, false, fmt.Errorf("empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity")
 		}
-		if !priorityutil.NodesHaveSameTopologyKey(nodeInfo.Node(), targetPodNodeInfo, term.TopologyKey) {
+		if !priorityutil.NodesHaveSameTopologyKey(nodeInfo.Node(), targetPodNodeInfo.Node(), term.TopologyKey) {
 			return false, true, nil
 		}
 	}
@@ -1327,12 +1327,12 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.
 	topologyMaps := newTopologyPairsMaps()
 
 	for _, existingPod := range existingPods {
-		existingPodNode, err := c.nodeLister.GetNodeInfo(existingPod.Spec.NodeName)
+		existingPodNodeInfo, err := c.nodeInfoLister.Get(existingPod.Spec.NodeName)
 		if err != nil {
 			klog.Errorf("Pod %s has NodeName %q but node is not found", podName(existingPod), existingPod.Spec.NodeName)
 			continue
 		}
-		existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, existingPodNode)
+		existingPodTopologyMaps, err := getMatchingAntiAffinityTopologyPairsOfPod(pod, existingPod, existingPodNodeInfo.Node())
 		if err != nil {
 			return nil, err
 		}
@@ -1858,7 +1858,7 @@ func TestServiceAffinity(t *testing.T) {
 			nodeInfo.SetNode(test.node)
 			nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{test.node.Name: nodeInfo}
 			// Reimplementing the logic that the scheduler implements: Any time it makes a predicate, it registers any precomputations.
-			predicate, precompute := NewServiceAffinityPredicate(fakelisters.NodeLister(nodes), fakelisters.PodLister(test.pods), fakelisters.ServiceLister(test.services), test.labels)
+			predicate, precompute := NewServiceAffinityPredicate(fakelisters.NewNodeInfoLister(nodes), fakelisters.PodLister(test.pods), fakelisters.ServiceLister(test.services), test.labels)
 			// Register a precomputation or Rewrite the precomputation to a no-op, depending on the state we want to test.
 			RegisterPredicateMetadataProducer("ServiceAffinityMetaProducer", func(pm *predicateMetadata) {
 				if !skipPrecompute {
@@ -2931,7 +2931,7 @@ func TestInterPodAffinity(t *testing.T) {
 			}
 
 			fit := PodAffinityChecker{
-				nodeLister:     fakelisters.NodeLister([]*v1.Node{node}),
+				nodeInfoLister: fakelisters.NewNodeInfoLister([]*v1.Node{node}),
 				podLister:      fakelisters.PodLister(test.pods),
 			}
 			nodeInfo := schedulernodeinfo.NewNodeInfo(podsOnNode...)
@@ -4044,7 +4044,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
 
 			for indexNode, node := range test.nodes {
 				testFit := PodAffinityChecker{
-					nodeLister:     fakelisters.NodeLister(test.nodes),
+					nodeInfoLister: fakelisters.NewNodeInfoLister(test.nodes),
 					podLister:      fakelisters.PodLister(test.pods),
 				}
 
@@ -80,6 +80,7 @@ go_test(
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/scheduler/listers/fake:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
         "//pkg/scheduler/testing:go_default_library",
         "//pkg/util/parsers:go_default_library",
         "//staging/src/k8s.io/api/apps/v1:go_default_library",
@@ -34,14 +34,14 @@ import (
 
 // InterPodAffinity contains information to calculate inter pod affinity.
 type InterPodAffinity struct {
-	nodeLister            schedulerlisters.NodeLister
+	nodeInfoLister        schedulerlisters.NodeInfoLister
 	hardPodAffinityWeight int32
 }
 
 // NewInterPodAffinityPriority creates an InterPodAffinity.
-func NewInterPodAffinityPriority(nodeLister schedulerlisters.NodeLister, hardPodAffinityWeight int32) PriorityFunction {
+func NewInterPodAffinityPriority(nodeInfoLister schedulerlisters.NodeInfoLister, hardPodAffinityWeight int32) PriorityFunction {
 	interPodAffinity := &InterPodAffinity{
-		nodeLister:            nodeLister,
+		nodeInfoLister:        nodeInfoLister,
 		hardPodAffinityWeight: hardPodAffinityWeight,
 	}
 	return interPodAffinity.CalculateInterPodAffinityPriority
@@ -118,7 +118,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 	var maxCount, minCount int64
 
 	processPod := func(existingPod *v1.Pod) error {
-		existingPodNode, err := ipa.nodeLister.GetNodeInfo(existingPod.Spec.NodeName)
+		existingPodNodeInfo, err := ipa.nodeInfoLister.Get(existingPod.Spec.NodeName)
 		if err != nil {
 			klog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
 			return nil
@@ -126,6 +126,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
 		existingPodAffinity := existingPod.Spec.Affinity
 		existingHasAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAffinity != nil
 		existingHasAntiAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAntiAffinity != nil
+		existingPodNode := existingPodNodeInfo.Node()
 
 		if hasAffinityConstraints {
 			// For every soft pod affinity term of <pod>, if <existingPod> matches the term,
@@ -23,8 +23,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-	fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
-	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 	st "k8s.io/kubernetes/pkg/scheduler/testing"
 )
 
@@ -515,12 +514,12 @@ func TestInterPodAffinityPriority(t *testing.T) {
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
 			interPodAffinity := InterPodAffinity{
-				nodeLister:            fakelisters.NodeLister(test.nodes),
+				nodeInfoLister:        snapshot.NodeInfos(),
 				hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
 			}
-			list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
+			list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, snapshot.NodeInfoMap, test.nodes)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}
@@ -603,12 +602,12 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
 			ipa := InterPodAffinity{
-				nodeLister:            fakelisters.NodeLister(test.nodes),
+				nodeInfoLister:        snapshot.NodeInfos(),
 				hardPodAffinityWeight: test.hardPodAffinityWeight,
 			}
-			list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
+			list, err := ipa.CalculateInterPodAffinityPriority(test.pod, snapshot.NodeInfoMap, test.nodes)
 			if err != nil {
 				t.Errorf("unexpected error: %v", err)
 			}
@@ -660,14 +659,14 @@ func BenchmarkInterPodAffinityPriority(b *testing.B) {
 	for _, tt := range tests {
 		b.Run(tt.name, func(b *testing.B) {
 			existingPods, allNodes := tt.prepFunc(tt.existingPodsNum, tt.allNodesNum)
-			nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(existingPods, allNodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(existingPods, allNodes)
 			interPodAffinity := InterPodAffinity{
-				nodeLister:            fakelisters.NodeLister(allNodes),
+				nodeInfoLister:        snapshot.NodeInfos(),
 				hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
 			}
 			b.ResetTimer()
 			for i := 0; i < b.N; i++ {
-				interPodAffinity.CalculateInterPodAffinityPriority(tt.pod, nodeNameToInfo, allNodes)
+				interPodAffinity.CalculateInterPodAffinityPriority(tt.pod, snapshot.NodeInfoMap, allNodes)
 			}
 		})
 	}
@@ -41,13 +41,13 @@ import (
 
 // PluginFactoryArgs are passed to all plugin factory functions.
 type PluginFactoryArgs struct {
+	NodeInfoLister    schedulerlisters.NodeInfoLister
 	PodLister         schedulerlisters.PodLister
 	ServiceLister     corelisters.ServiceLister
 	ControllerLister  corelisters.ReplicationControllerLister
 	ReplicaSetLister  appslisters.ReplicaSetLister
 	StatefulSetLister appslisters.StatefulSetLister
 	PDBLister         policylisters.PodDisruptionBudgetLister
-	NodeLister        schedulerlisters.NodeLister
 	CSINodeLister     v1beta1storagelisters.CSINodeLister
 	PVLister          corelisters.PersistentVolumeLister
 	PVCLister         corelisters.PersistentVolumeClaimLister
@@ -270,7 +270,7 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {
 	if policy.Argument.ServiceAffinity != nil {
 		predicateFactory = func(args PluginFactoryArgs) predicates.FitPredicate {
 			predicate, precomputationFunction := predicates.NewServiceAffinityPredicate(
-				args.NodeLister,
+				args.NodeInfoLister,
 				args.PodLister,
 				args.ServiceLister,
 				policy.Argument.ServiceAffinity.Labels,
@@ -92,7 +92,7 @@ func init() {
 	scheduler.RegisterFitPredicateFactory(
 		predicates.MatchInterPodAffinityPred,
 		func(args scheduler.PluginFactoryArgs) predicates.FitPredicate {
-			return predicates.NewPodAffinityPredicate(args.NodeLister, args.PodLister)
+			return predicates.NewPodAffinityPredicate(args.NodeInfoLister, args.PodLister)
 		},
 	)
 
@@ -70,7 +70,7 @@ func init() {
 		priorities.InterPodAffinityPriority,
 		scheduler.PriorityConfigFactory{
 			Function: func(args scheduler.PluginFactoryArgs) priorities.PriorityFunction {
-				return priorities.NewInterPodAffinityPriority(args.NodeLister, args.HardPodAffinitySymmetricWeight)
+				return priorities.NewInterPodAffinityPriority(args.NodeInfoLister, args.HardPodAffinitySymmetricWeight)
 			},
 			Weight: 1,
 		},
@@ -1420,7 +1420,7 @@ func TestSelectNodesForPreemption(t *testing.T) {
 				nodes = append(nodes, node)
 			}
 			if test.addAffinityPredicate {
-				n := fakelisters.NodeLister([]*v1.Node{nodes[0]})
+				n := fakelisters.NewNodeInfoLister([]*v1.Node{nodes[0]})
 				p := fakelisters.PodLister(test.pods)
 				test.predicates[algorithmpredicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(n, p)
 			}
@@ -57,6 +57,7 @@ import (
 	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	cachedebugger "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger"
 	internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
+	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 	"k8s.io/kubernetes/pkg/scheduler/volumebinder"
 )
 
@@ -175,6 +176,7 @@ type Configurator struct {
 	plugins                      *config.Plugins
 	pluginConfig                 []config.PluginConfig
 	pluginConfigProducerRegistry *plugins.ConfigProducerRegistry
+	nodeInfoSnapshot             *nodeinfosnapshot.Snapshot
 }
 
 // ConfigFactoryArgs is a set arguments passed to NewConfigFactory.
@@ -259,6 +261,7 @@ func NewConfigFactory(args *ConfigFactoryArgs) *Configurator {
 		plugins:                      args.Plugins,
 		pluginConfig:                 args.PluginConfig,
 		pluginConfigProducerRegistry: args.PluginConfigProducerRegistry,
+		nodeInfoSnapshot:             nodeinfosnapshot.NewEmptySnapshot(),
 	}
 
 	return c
@@ -407,6 +410,7 @@ func (c *Configurator) CreateFromKeys(predicateKeys, priorityKeys sets.String, e
 		pluginConfig,
 		framework.WithClientSet(c.client),
 		framework.WithInformerFactory(c.informerFactory),
+		framework.WithNodeInfoSnapshot(c.nodeInfoSnapshot),
 	)
 	if err != nil {
 		klog.Fatalf("error initializing the scheduling framework: %v", err)
@@ -582,13 +586,13 @@ func (c *Configurator) getPredicateConfigs(predicateKeys sets.String) (map[strin
 
 func (c *Configurator) getAlgorithmArgs() (*PluginFactoryArgs, *plugins.ConfigProducerArgs) {
 	return &PluginFactoryArgs{
-		PodLister:         c.schedulerCache,
+		NodeInfoLister:    c.nodeInfoSnapshot.NodeInfos(),
+		PodLister:         c.nodeInfoSnapshot.Pods(),
 		ServiceLister:     c.serviceLister,
 		ControllerLister:  c.controllerLister,
 		ReplicaSetLister:  c.replicaSetLister,
 		StatefulSetLister: c.statefulSetLister,
 		PDBLister:         c.pdbLister,
-		NodeLister:        c.schedulerCache,
 		CSINodeLister:     c.csiNodeLister,
 		PVLister:          c.pVLister,
 		PVCLister:         c.pVCLister,
@@ -24,7 +24,6 @@ go_library(
         "//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library",
         "//pkg/scheduler/framework/plugins/volumezone:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
-        "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/volumebinder:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
     ],
@@ -38,13 +38,11 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 	"k8s.io/kubernetes/pkg/scheduler/volumebinder"
 )
 
 // RegistryArgs arguments needed to create default plugin factories.
 type RegistryArgs struct {
-	SchedulerCache internalcache.Cache
 	VolumeBinder *volumebinder.VolumeBinder
 }
 
@@ -75,9 +73,7 @@ func NewDefaultRegistry(args *RegistryArgs) framework.Registry {
 		nodevolumelimits.GCEPDName:     nodevolumelimits.NewGCEPD,
 		nodevolumelimits.AzureDiskName: nodevolumelimits.NewAzureDisk,
 		nodevolumelimits.CinderName:    nodevolumelimits.NewCinder,
-		interpodaffinity.Name: func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
-			return interpodaffinity.New(args.SchedulerCache, args.SchedulerCache), nil
-		},
+		interpodaffinity.Name: interpodaffinity.New,
 	}
 }
 
@@ -9,9 +9,9 @@ go_library(
         "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/scheduler/framework/plugins/migration:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
-        "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
     ],
 )
 
@@ -23,8 +23,7 @@ go_test(
         "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/scheduler/framework/plugins/migration:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
-        "//pkg/scheduler/listers/fake:go_default_library",
-        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
     ],
@@ -21,10 +21,10 @@ import (
 	"fmt"
 
 	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
 	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -54,8 +54,8 @@ func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState *framework.Cy
 }
 
 // New initializes a new plugin and returns it.
-func New(nodeLister schedulerlisters.NodeLister, podLister schedulerlisters.PodLister) framework.Plugin {
+func New(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) {
 	return &InterPodAffinity{
-		predicate: predicates.NewPodAffinityPredicate(nodeLister, podLister),
-	}
+		predicate: predicates.NewPodAffinityPredicate(h.SnapshotSharedLister().NodeInfos(), h.SnapshotSharedLister().Pods()),
+	}, nil
 }
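Under the new factory signature, a plugin no longer receives listers directly; it pulls them from the FrameworkHandle. A short sketch of a hypothetical plugin doing the same wiring (ExamplePlugin and its package are illustrative, not part of this commit):

    package exampleplugin

    import (
    	"k8s.io/apimachinery/pkg/runtime"
    	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
    	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
    )

    // ExamplePlugin captures the snapshot-backed listers at construction time.
    type ExamplePlugin struct {
    	nodeInfoLister schedulerlisters.NodeInfoLister
    	podLister      schedulerlisters.PodLister
    }

    // Name implements framework.Plugin.
    func (pl *ExamplePlugin) Name() string { return "ExamplePlugin" }

    // New matches the factory signature the default registry now expects.
    func New(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) {
    	return &ExamplePlugin{
    		nodeInfoLister: h.SnapshotSharedLister().NodeInfos(),
    		podLister:      h.SnapshotSharedLister().Pods(),
    	}, nil
    }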
@@ -26,8 +26,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-	fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
-	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 )
 
 var (
@@ -733,14 +732,15 @@ func TestSingleNode(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			node := test.node
-			nodeInfoMap := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, []*v1.Node{node})
-			meta := predicates.GetPredicateMetadata(test.pod, nodeInfoMap)
+			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, []*v1.Node{test.node})
+			meta := predicates.GetPredicateMetadata(test.pod, snapshot.NodeInfoMap)
 			state := framework.NewCycleState()
 			state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})
 
-			p := New(fakelisters.NodeLister([]*v1.Node{node}), fakelisters.PodLister(test.pods))
-			gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), state, test.pod, nodeInfoMap[node.Name])
+			p := &InterPodAffinity{
+				predicate: predicates.NewPodAffinityPredicate(snapshot.NodeInfos(), snapshot.Pods()),
+			}
+			gotStatus := p.Filter(context.Background(), state, test.pod, snapshot.NodeInfoMap[test.node.Name])
 			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
 				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
 			}
@@ -1432,15 +1432,16 @@ func TestMultipleNodes(t *testing.T) {
 
 	for indexTest, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			nodeListInfo := fakelisters.NodeLister(test.nodes)
-			nodeInfoMap := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
+			snapshot := nodeinfosnapshot.NewSnapshot(test.pods, test.nodes)
 			for indexNode, node := range test.nodes {
-				meta := predicates.GetPredicateMetadata(test.pod, nodeInfoMap)
+				meta := predicates.GetPredicateMetadata(test.pod, snapshot.NodeInfoMap)
 				state := framework.NewCycleState()
 				state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})
 
-				p := New(nodeListInfo, fakelisters.PodLister(test.pods))
-				gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), state, test.pod, nodeInfoMap[node.Name])
+				p := &InterPodAffinity{
+					predicate: predicates.NewPodAffinityPredicate(snapshot.NodeInfos(), snapshot.Pods()),
+				}
+				gotStatus := p.Filter(context.Background(), state, test.pod, snapshot.NodeInfoMap[node.Name])
 				if !reflect.DeepEqual(gotStatus, test.wantStatuses[indexNode]) {
 					t.Errorf("index: %d status does not match: %v, want: %v", indexTest, gotStatus, test.wantStatuses[indexNode])
 				}
@@ -13,6 +13,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/scheduler/apis/config:go_default_library",
+        "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/metrics:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
@@ -31,6 +31,7 @@ import (
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
+	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
 	"k8s.io/kubernetes/pkg/scheduler/metrics"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
@@ -107,6 +108,7 @@ func (f *framework) getExtensionPoints(plugins *config.Plugins) []extensionPoint
 type frameworkOptions struct {
 	clientSet        clientset.Interface
 	informerFactory  informers.SharedInformerFactory
+	nodeInfoSnapshot *nodeinfosnapshot.Snapshot
 }
 
 // Option for the framework.
@@ -126,7 +128,16 @@ func WithInformerFactory(informerFactory informers.SharedInformerFactory) Option
 	}
 }
 
-var defaultFrameworkOptions = frameworkOptions{}
+// WithNodeInfoSnapshot sets the NodeInfo Snapshot.
+func WithNodeInfoSnapshot(nodeInfoSnapshot *nodeinfosnapshot.Snapshot) Option {
+	return func(o *frameworkOptions) {
+		o.nodeInfoSnapshot = nodeInfoSnapshot
+	}
+}
+
+var defaultFrameworkOptions = frameworkOptions{
+	nodeInfoSnapshot: nodeinfosnapshot.NewEmptySnapshot(),
+}
 
 var _ Framework = &framework{}
 
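For illustration, this is how a caller can share one snapshot between the configurator and the framework via the new functional option (registry, plugins, and pluginConfig are placeholders here; Configurator.CreateFromKeys above does the real wiring):

    snapshot := nodeinfosnapshot.NewEmptySnapshot()
    fw, err := framework.NewFramework(
    	registry, plugins, pluginConfig,
    	framework.WithNodeInfoSnapshot(snapshot), // both sides now read the same snapshot
    )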
@@ -139,7 +150,7 @@ func NewFramework(r Registry, plugins *config.Plugins, args []config.PluginConfi
 
 	f := &framework{
 		registry:              r,
-		nodeInfoSnapshot:      nodeinfosnapshot.NewSnapshot(),
+		nodeInfoSnapshot:      options.nodeInfoSnapshot,
 		pluginNameToWeightMap: make(map[string]int),
 		waitingPods:           newWaitingPodsMap(),
 		clientSet:             options.clientSet,
@@ -593,6 +604,14 @@ func (f *framework) RunPermitPlugins(
 	return nil
 }
 
+// SnapshotSharedLister returns the scheduler's SharedLister of the latest NodeInfo
+// snapshot. The snapshot is taken at the beginning of a scheduling cycle and remains
+// unchanged until a pod finishes "Reserve". There is no guarantee that the information
+// remains unchanged after "Reserve".
+func (f *framework) SnapshotSharedLister() schedulerlisters.SharedLister {
+	return f.nodeInfoSnapshot
+}
+
 // NodeInfoSnapshot returns the latest NodeInfo snapshot. The snapshot
 // is taken at the beginning of a scheduling cycle and remains unchanged until a
 // pod finishes "Reserve". There is no guarantee that the information remains
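As a usage sketch, a Filter plugin can consult the shared snapshot during the scheduling cycle; per the comment above, this is only safe up to "Reserve" (the ExamplePlugin type and its handle field are hypothetical):

    func (pl *ExamplePlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *framework.Status {
    	allNodes, err := pl.handle.SnapshotSharedLister().NodeInfos().List()
    	if err != nil {
    		return framework.NewStatus(framework.Error, err.Error())
    	}
    	_ = allNodes // e.g. inspect peer nodes for topology-style checks
    	return nil
    }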
@@ -29,6 +29,7 @@ import (
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
+	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
 )
@@ -452,6 +453,15 @@ type Framework interface {
 // passed to the plugin factories at the time of plugin initialization. Plugins
 // must store and use this handle to call framework functions.
 type FrameworkHandle interface {
+	// SnapshotSharedLister returns listers from the latest NodeInfo Snapshot. The snapshot
+	// is taken at the beginning of a scheduling cycle and remains unchanged until
+	// a pod finishes "Reserve" point. There is no guarantee that the information
+	// remains unchanged in the binding phase of scheduling, so plugins in the binding
+	// cycle (permit/pre-bind/bind/post-bind/un-reserve plugins) should not use it;
+	// otherwise a concurrent read/write error might occur. They should use the
+	// scheduler cache instead.
+	SnapshotSharedLister() schedulerlisters.SharedLister
+
 	// NodeInfoSnapshot return the latest NodeInfo snapshot. The snapshot
 	// is taken at the beginning of a scheduling cycle and remains unchanged until
 	// a pod finishes "Reserve" point. There is no guarantee that the information
pkg/scheduler/internal/cache/cache_test.go (vendored, 6 changed lines)
@@ -1077,7 +1077,7 @@ func TestNodeOperators(t *testing.T) {
 		}
 
 		// Case 2: dump cached nodes successfully.
-		cachedNodes := nodeinfosnapshot.NewSnapshot()
+		cachedNodes := nodeinfosnapshot.NewEmptySnapshot()
 		cache.UpdateNodeInfoSnapshot(cachedNodes)
 		newNode, found := cachedNodes.NodeInfoMap[node.Name]
 		if !found || len(cachedNodes.NodeInfoMap) != 1 {
@@ -1333,7 +1333,7 @@ func TestSchedulerCache_UpdateNodeInfoSnapshot(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			cache = newSchedulerCache(time.Second, time.Second, nil)
-			snapshot = nodeinfosnapshot.NewSnapshot()
+			snapshot = nodeinfosnapshot.NewEmptySnapshot()
 
 			for _, op := range test.operations {
 				op()
@@ -1382,7 +1382,7 @@ func BenchmarkUpdate1kNodes30kPods(b *testing.B) {
 	cache := setupCacheOf1kNodes30kPods(b)
 	b.ResetTimer()
 	for n := 0; n < b.N; n++ {
-		cachedNodes := nodeinfosnapshot.NewSnapshot()
+		cachedNodes := nodeinfosnapshot.NewEmptySnapshot()
 		cache.UpdateNodeInfoSnapshot(cachedNodes)
 	}
 }
pkg/scheduler/internal/cache/interface.go (vendored, 3 changed lines)
@@ -102,9 +102,6 @@ type Cache interface {
 	// on this node.
 	UpdateNodeInfoSnapshot(nodeSnapshot *nodeinfosnapshot.Snapshot) error
 
-	// GetNodeInfo returns the node object with node string.
-	GetNodeInfo(nodeName string) (*v1.Node, error)
-
 	// Snapshot takes a snapshot on current cache
 	Snapshot() *Snapshot
 }
@@ -37,6 +37,7 @@ go_test(
         "//pkg/api/v1/pod:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/metrics:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
@@ -35,6 +35,7 @@ import (
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
 	"k8s.io/kubernetes/pkg/scheduler/metrics"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
@@ -246,6 +247,10 @@ func (*fakeFramework) SharedInformerFactory() informers.SharedInformerFactory {
 	return nil
 }
 
+func (*fakeFramework) SnapshotSharedLister() schedulerlisters.SharedLister {
+	return nil
+}
+
 func TestPriorityQueue_AddWithReversePriorityLessFunc(t *testing.T) {
 	q := NewPriorityQueue(nil, &fakeFramework{})
 	if err := q.Add(&medPriorityPod); err != nil {
@@ -6,6 +6,7 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/scheduler/listers",
     visibility = ["//visibility:public"],
     deps = [
+        "//pkg/scheduler/nodeinfo:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
     ],
@@ -7,6 +7,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/scheduler/listers:go_default_library",
+        "//pkg/scheduler/nodeinfo:go_default_library",
         "//staging/src/k8s.io/api/apps/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/api/storage/v1:go_default_library",
@@ -30,6 +30,7 @@ import (
 	storagelisters "k8s.io/client-go/listers/storage/v1"
 	v1beta1storagelisters "k8s.io/client-go/listers/storage/v1beta1"
 	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
+	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 var _ schedulerlisters.PodLister = &PodLister{}
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(controllers) == 0 {
|
if len(controllers) == 0 {
|
||||||
err = fmt.Errorf("Could not find Replication Controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
|
err = fmt.Errorf("could not find Replication Controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
|
||||||
}
|
}
|
||||||
|
|
||||||
return
|
return
|
||||||
@@ -154,7 +155,7 @@ func (f ReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*appsv1.ReplicaS
 		}
 	}
 	if len(rss) == 0 {
-		err = fmt.Errorf("Could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
+		err = fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
 	}
 
 	return
@@ -192,7 +193,7 @@ func (f StatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*appsv1.Statef
 		}
 	}
 	if len(sss) == 0 {
-		err = fmt.Errorf("Could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
+		err = fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
 	}
 	return
 }
@@ -243,17 +244,34 @@ func (pvcs PersistentVolumeClaimLister) PersistentVolumeClaims(namespace string)
 	}
 }
 
-// NodeLister declares a *v1.Node type for testing.
-type NodeLister []*v1.Node
+// NodeInfoLister declares a schedulernodeinfo.NodeInfo type for testing.
+type NodeInfoLister []*schedulernodeinfo.NodeInfo
 
-// GetNodeInfo returns a fake node object in the fake nodes.
-func (nodes NodeLister) GetNodeInfo(nodeName string) (*v1.Node, error) {
+// Get returns a fake node object in the fake nodes.
+func (nodes NodeInfoLister) Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) {
 	for _, node := range nodes {
-		if node != nil && node.Name == nodeName {
+		if node != nil && node.Node().Name == nodeName {
 			return node, nil
 		}
 	}
-	return nil, fmt.Errorf("Unable to find node: %s", nodeName)
+	return nil, fmt.Errorf("unable to find node: %s", nodeName)
+}
+
+// List lists all nodes.
+func (nodes NodeInfoLister) List() ([]*schedulernodeinfo.NodeInfo, error) {
+	return nodes, nil
+}
+
+// NewNodeInfoLister creates a new fake NodeInfoLister from a slice of v1.Nodes.
+func NewNodeInfoLister(nodes []*v1.Node) schedulerlisters.NodeInfoLister {
+	// Allocate with zero length (capacity len(nodes)): allocating with
+	// make(..., len(nodes)) and then appending would leave nil entries
+	// at the front of the list.
+	nodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodes))
+	for _, node := range nodes {
+		nodeInfo := schedulernodeinfo.NewNodeInfo()
+		nodeInfo.SetNode(node)
+		nodeInfoList = append(nodeInfoList, nodeInfo)
+	}
+
+	return NodeInfoLister(nodeInfoList)
 }
 
 var _ v1beta1storagelisters.CSINodeLister = CSINodeLister{}
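A sketch of how a test can use the reworked fake (the node name and checker literal mirror the test hunks earlier in this commit):

    nodes := []*v1.Node{
    	{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}},
    }
    checker := PodAffinityChecker{
    	nodeInfoLister: fakelisters.NewNodeInfoLister(nodes),
    	podLister:      fakelisters.PodLister(nil), // no existing pods
    }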
@@ -19,6 +19,7 @@ package listers
 import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/labels"
+	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 // PodFilter is a function to filter a pod. If pod passed return true else return false.
@@ -33,8 +34,16 @@ type PodLister interface {
 	FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error)
 }
 
-// NodeLister interface represents anything that can list/get node object from node name.
-type NodeLister interface {
-	// TODO(ahg-g): rename to Get and add a List interface.
-	GetNodeInfo(nodeName string) (*v1.Node, error)
+// NodeInfoLister interface represents anything that can list/get NodeInfo objects from node name.
+type NodeInfoLister interface {
+	// Returns the list of NodeInfos.
+	List() ([]*schedulernodeinfo.NodeInfo, error)
+	// Returns the NodeInfo of the given node name.
+	Get(nodeName string) (*schedulernodeinfo.NodeInfo, error)
+}
+
+// SharedLister groups scheduler-specific listers.
+type SharedLister interface {
+	Pods() PodLister
+	NodeInfos() NodeInfoLister
 }
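For illustration, a hypothetical helper written purely against the new interfaces; any SharedLister implementation (such as the snapshot defined below) can back it:

    // podsPerTopology counts scheduled pods per value of a node topology label.
    func podsPerTopology(lister schedulerlisters.SharedLister, topologyKey string) (map[string]int, error) {
    	counts := map[string]int{}
    	pods, err := lister.Pods().List(labels.Everything())
    	if err != nil {
    		return nil, err
    	}
    	for _, pod := range pods {
    		nodeInfo, err := lister.NodeInfos().Get(pod.Spec.NodeName)
    		if err != nil {
    			continue // the pod's node is not in this lister's view
    		}
    		counts[nodeInfo.Node().Labels[topologyKey]]++
    	}
    	return counts, nil
    }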
@@ -6,8 +6,10 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot",
     visibility = ["//visibility:public"],
    deps = [
+        "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
     ],
 )
 
@@ -17,7 +17,11 @@ limitations under the License.
 package nodeinfo
 
 import (
+	"fmt"
+
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
 	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -31,13 +35,40 @@ type Snapshot struct {
 	Generation int64
 }
 
-// NewSnapshot initializes a Snapshot struct and returns it.
-func NewSnapshot() *Snapshot {
+var _ schedulerlisters.SharedLister = &Snapshot{}
+
+// NewEmptySnapshot initializes a Snapshot struct and returns it.
+func NewEmptySnapshot() *Snapshot {
 	return &Snapshot{
 		NodeInfoMap: make(map[string]*schedulernodeinfo.NodeInfo),
 	}
 }
 
+// NewSnapshot initializes a Snapshot struct and returns it.
+func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot {
+	nodeInfoMap := schedulernodeinfo.CreateNodeNameToInfoMap(pods, nodes)
+	nodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodes))
+	for _, v := range nodeInfoMap {
+		nodeInfoList = append(nodeInfoList, v)
+	}
+
+	s := NewEmptySnapshot()
+	s.NodeInfoMap = nodeInfoMap
+	s.NodeInfoList = nodeInfoList
+
+	return s
+}
+
+// Pods returns a PodLister.
+func (s *Snapshot) Pods() schedulerlisters.PodLister {
+	return &podLister{snapshot: s}
+}
+
+// NodeInfos returns a NodeInfoLister.
+func (s *Snapshot) NodeInfos() schedulerlisters.NodeInfoLister {
+	return &nodeInfoLister{snapshot: s}
+}
+
 // ListNodes returns the list of nodes in the snapshot.
 func (s *Snapshot) ListNodes() []*v1.Node {
 	nodes := make([]*v1.Node, 0, len(s.NodeInfoMap))
@@ -48,3 +79,50 @@ func (s *Snapshot) ListNodes() []*v1.Node {
 	}
 	return nodes
 }
+
+type podLister struct {
+	snapshot *Snapshot
+}
+
+// List returns the list of pods in the snapshot.
+func (p *podLister) List(selector labels.Selector) ([]*v1.Pod, error) {
+	alwaysTrue := func(p *v1.Pod) bool { return true }
+	return p.FilteredList(alwaysTrue, selector)
+}
+
+// FilteredList returns a filtered list of pods in the snapshot.
+func (p *podLister) FilteredList(podFilter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
+	// podFilter is expected to return true for most or all of the pods. We
+	// can avoid expensive array growth without wasting too much memory by
+	// pre-allocating capacity.
+	maxSize := 0
+	for _, n := range p.snapshot.NodeInfoMap {
+		maxSize += len(n.Pods())
+	}
+	pods := make([]*v1.Pod, 0, maxSize)
+	for _, n := range p.snapshot.NodeInfoMap {
+		for _, pod := range n.Pods() {
+			if podFilter(pod) && selector.Matches(labels.Set(pod.Labels)) {
+				pods = append(pods, pod)
+			}
+		}
+	}
+	return pods, nil
+}
+
+type nodeInfoLister struct {
+	snapshot *Snapshot
+}
+
+// List returns the list of nodes in the snapshot.
+func (n *nodeInfoLister) List() ([]*schedulernodeinfo.NodeInfo, error) {
+	return n.snapshot.NodeInfoList, nil
+}
+
+// Returns the NodeInfo of the given node name.
+func (n *nodeInfoLister) Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) {
+	if v, ok := n.snapshot.NodeInfoMap[nodeName]; ok {
+		return v, nil
+	}
+	return nil, fmt.Errorf("nodeinfo not found for node name %q", nodeName)
+}
@@ -277,7 +277,6 @@ func New(client clientset.Interface,
 	registry := options.frameworkDefaultRegistry
 	if registry == nil {
 		registry = frameworkplugins.NewDefaultRegistry(&frameworkplugins.RegistryArgs{
-			SchedulerCache: schedulerCache,
 			VolumeBinder: volumeBinder,
 		})
 	}