Merge pull request #84015 from ahg-g/ahg-filters

cleanup unnecessary func parameters in genericScheduler methods
Kubernetes Prow Robot 2019-10-17 19:50:56 -07:00 committed by GitHub
commit 9bf2ba7369
2 changed files with 25 additions and 38 deletions
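The pattern applied throughout this change is to stop threading scheduler-owned state (predicates, the predicate metadata producer, the scheduling queue, and the node-info snapshot) through method parameters and to read it from the *genericScheduler receiver instead. Below is a minimal, self-contained sketch of that pattern; it is an editorial illustration only, and the names (miniScheduler, fitsOnNode, predicateFn) are hypothetical, not part of the Kubernetes codebase.

// Hypothetical, simplified illustration of the refactor in this PR: the method
// reads shared state from its receiver rather than taking it as parameters.
package main

import (
	"fmt"
	"strings"
)

type predicateFn func(pod, node string) bool

type miniScheduler struct {
	predicates map[string]predicateFn // shared state, formerly passed as a parameter
	nodeInfo   map[string][]string    // node name -> pods on that node, formerly a parameter
}

// Before the cleanup this would have looked like:
//   fitsOnNode(pod, node string, preds map[string]predicateFn, nodeInfo map[string][]string) bool
// Afterwards the receiver's fields are used directly and the signature shrinks.
func (s *miniScheduler) fitsOnNode(pod, node string) bool {
	if _, ok := s.nodeInfo[node]; !ok {
		return false // unknown node; mirrors the nil NodeInfoMap check in the diff
	}
	for name, pred := range s.predicates {
		if !pred(pod, node) {
			fmt.Printf("pod %s fails predicate %q on node %s\n", pod, name, node)
			return false
		}
	}
	return true
}

func main() {
	s := &miniScheduler{
		predicates: map[string]predicateFn{
			// Toy predicate: pods whose name starts with "db-" never fit here.
			"no-db-pods": func(pod, node string) bool { return !strings.HasPrefix(pod, "db-") },
		},
		nodeInfo: map[string][]string{"node-a": {}},
	}
	fmt.Println(s.fitsOnNode("web-1", "node-a")) // true
	fmt.Println(s.fitsOnNode("db-1", "node-a"))  // false (fails "no-db-pods")
}

Callers of the trimmed methods (including the tests in the second file) are responsible for populating the receiver's state, e.g. the node-info snapshot, before calling.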

View File

@@ -334,8 +334,7 @@ func (g *genericScheduler) Preempt(ctx context.Context, state *framework.CycleState,
 	if err != nil {
 		return nil, nil, nil, err
 	}
-	nodeToVictims, err := g.selectNodesForPreemption(ctx, state, pod, g.nodeInfoSnapshot.NodeInfoMap, potentialNodes, g.predicates,
-		g.predicateMetaProducer, g.schedulingQueue, pdbs)
+	nodeToVictims, err := g.selectNodesForPreemption(ctx, state, pod, potentialNodes, pdbs)
 	if err != nil {
 		return nil, nil, nil, err
 	}
@@ -488,8 +487,6 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framework.CycleState,
 			pod,
 			meta,
 			g.nodeInfoSnapshot.NodeInfoMap[nodeName],
-			g.predicates,
-			g.schedulingQueue,
 			g.alwaysCheckAllPredicates,
 		)
 		if err != nil {
@@ -563,13 +560,13 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framework.CycleState,
 // to run on the node given in nodeInfo to meta and nodeInfo. It returns 1) whether
 // any pod was added, 2) augmented metadata, 3) augmented CycleState 4) augmented nodeInfo.
 func (g *genericScheduler) addNominatedPods(ctx context.Context, pod *v1.Pod, meta predicates.PredicateMetadata, state *framework.CycleState,
-	nodeInfo *schedulernodeinfo.NodeInfo, queue internalqueue.SchedulingQueue) (bool, predicates.PredicateMetadata,
+	nodeInfo *schedulernodeinfo.NodeInfo) (bool, predicates.PredicateMetadata,
 	*framework.CycleState, *schedulernodeinfo.NodeInfo, error) {
-	if queue == nil || nodeInfo == nil || nodeInfo.Node() == nil {
+	if g.schedulingQueue == nil || nodeInfo == nil || nodeInfo.Node() == nil {
 		// This may happen only in tests.
 		return false, meta, state, nodeInfo, nil
 	}
-	nominatedPods := queue.NominatedPodsForNode(nodeInfo.Node().Name)
+	nominatedPods := g.schedulingQueue.NominatedPodsForNode(nodeInfo.Node().Name)
 	if len(nominatedPods) == 0 {
 		return false, meta, state, nodeInfo, nil
 	}
@@ -615,8 +612,6 @@ func (g *genericScheduler) podFitsOnNode(
 	pod *v1.Pod,
 	meta predicates.PredicateMetadata,
 	info *schedulernodeinfo.NodeInfo,
-	predicateFuncs map[string]predicates.FitPredicate,
-	queue internalqueue.SchedulingQueue,
 	alwaysCheckAllPredicates bool,
 ) (bool, []predicates.PredicateFailureReason, *framework.Status, error) {
 	var failedPredicates []predicates.PredicateFailureReason
@@ -647,7 +642,7 @@ func (g *genericScheduler) podFitsOnNode(
 		nodeInfoToUse := info
 		if i == 0 {
 			var err error
-			podsAdded, metaToUse, stateToUse, nodeInfoToUse, err = g.addNominatedPods(ctx, pod, meta, state, info, queue)
+			podsAdded, metaToUse, stateToUse, nodeInfoToUse, err = g.addNominatedPods(ctx, pod, meta, state, info)
 			if err != nil {
 				return false, []predicates.PredicateFailureReason{}, nil, err
 			}
@@ -662,7 +657,7 @@ func (g *genericScheduler) podFitsOnNode(
 				err error
 			)
-			if predicate, exist := predicateFuncs[predicateKey]; exist {
+			if predicate, exist := g.predicates[predicateKey]; exist {
 				fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse)
 				if err != nil {
 					return false, []predicates.PredicateFailureReason{}, nil, err
@@ -732,7 +727,7 @@ func PrioritizeNodes(
 		errs = append(errs, err)
 	}
-	results := make([]framework.NodeScoreList, len(priorityConfigs), len(priorityConfigs))
+	results := make([]framework.NodeScoreList, len(priorityConfigs))
 	// DEPRECATED: we can remove this when all priorityConfigs implement the
 	// Map-Reduce pattern.
@@ -1010,32 +1005,27 @@ func (g *genericScheduler) selectNodesForPreemption(
 	ctx context.Context,
 	state *framework.CycleState,
 	pod *v1.Pod,
-	nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
 	potentialNodes []*v1.Node,
-	fitPredicates map[string]predicates.FitPredicate,
-	metadataProducer predicates.PredicateMetadataProducer,
-	queue internalqueue.SchedulingQueue,
 	pdbs []*policy.PodDisruptionBudget,
 ) (map[*v1.Node]*extenderv1.Victims, error) {
 	nodeToVictims := map[*v1.Node]*extenderv1.Victims{}
 	var resultLock sync.Mutex
 	// We can use the same metadata producer for all nodes.
-	meta := metadataProducer(pod, nodeNameToInfo)
+	meta := g.predicateMetaProducer(pod, g.nodeInfoSnapshot.NodeInfoMap)
 	checkNode := func(i int) {
 		nodeName := potentialNodes[i].Name
-		if nodeNameToInfo[nodeName] == nil {
+		if g.nodeInfoSnapshot.NodeInfoMap[nodeName] == nil {
 			return
 		}
-		nodeInfoCopy := nodeNameToInfo[nodeName].Clone()
+		nodeInfoCopy := g.nodeInfoSnapshot.NodeInfoMap[nodeName].Clone()
 		var metaCopy predicates.PredicateMetadata
 		if meta != nil {
 			metaCopy = meta.ShallowCopy()
 		}
 		stateCopy := state.Clone()
 		stateCopy.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: metaCopy})
-		pods, numPDBViolations, fits := g.selectVictimsOnNode(
-			ctx, stateCopy, pod, metaCopy, nodeInfoCopy, fitPredicates, queue, pdbs)
+		pods, numPDBViolations, fits := g.selectVictimsOnNode(ctx, stateCopy, pod, metaCopy, nodeInfoCopy, pdbs)
 		if fits {
 			resultLock.Lock()
 			victims := extenderv1.Victims{
@@ -1110,8 +1100,6 @@ func (g *genericScheduler) selectVictimsOnNode(
 	pod *v1.Pod,
 	meta predicates.PredicateMetadata,
 	nodeInfo *schedulernodeinfo.NodeInfo,
-	fitPredicates map[string]predicates.FitPredicate,
-	queue internalqueue.SchedulingQueue,
 	pdbs []*policy.PodDisruptionBudget,
 ) ([]*v1.Pod, int, bool) {
 	var potentialVictims []*v1.Pod
@@ -1161,7 +1149,7 @@ func (g *genericScheduler) selectVictimsOnNode(
 	// inter-pod affinity to one or more victims, but we have decided not to
 	// support this case for performance reasons. Having affinity to lower
 	// priority pods is not a recommended configuration anyway.
-	if fits, _, _, err := g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, fitPredicates, queue, false); !fits {
+	if fits, _, _, err := g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, false); !fits {
 		if err != nil {
 			klog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err)
 		}
@@ -1179,7 +1167,7 @@ func (g *genericScheduler) selectVictimsOnNode(
 		if err := addPod(p); err != nil {
 			return false, err
 		}
-		fits, _, _, _ := g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, fitPredicates, queue, false)
+		fits, _, _, _ := g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, false)
 		if !fits {
 			if err := removePod(p); err != nil {
 				return false, err
@@ -1215,7 +1203,8 @@ func nodesWherePreemptionMightHelp(nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
 		if fitErr.FilteredNodesStatuses[name].Code() == framework.UnschedulableAndUnresolvable {
 			continue
 		}
-		failedPredicates, _ := fitErr.FailedPredicates[name]
+		failedPredicates := fitErr.FailedPredicates[name]
 		// If we assume that scheduler looks at all nodes and populates the failedPredicateMap
 		// (which is the case today), the !found case should never happen, but we'd prefer
 		// to rely less on such assumptions in the code when checking does not impose
@@ -1297,8 +1286,7 @@ func NewGenericScheduler(
 	alwaysCheckAllPredicates bool,
 	disablePreemption bool,
 	percentageOfNodesToScore int32,
-	enableNonPreempting bool,
-) ScheduleAlgorithm {
+	enableNonPreempting bool) ScheduleAlgorithm {
 	return &genericScheduler{
 		cache: cache,
 		schedulingQueue: podQueue,

View File

@@ -1061,10 +1061,6 @@ func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
 	return &node, nil
 }
-func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithmpredicates.PredicateMetadata {
-	return algorithmpredicates.GetPredicateMetadata(p, nodeInfo)
-}
 var smallContainers = []v1.Container{
 	{
 		Resources: v1.ResourceRequirements{
@@ -1384,14 +1380,12 @@ func TestSelectNodesForPreemption(t *testing.T) {
 				cache.AddNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{"hostname": name}}})
 			}
-			predMetaProducer := algorithmpredicates.EmptyPredicateMetadataProducer
 			filterPlugin.failedNodeReturnCodeMap = filterFailedNodeReturnCodeMap
 			scheduler := NewGenericScheduler(
 				nil,
 				internalqueue.NewSchedulingQueue(nil, nil),
 				test.predicates,
-				predMetaProducer,
+				algorithmpredicates.GetPredicateMetadata,
 				nil,
 				priorities.EmptyPriorityMetadataProducer,
 				filterFramework,
@@ -1429,7 +1423,8 @@ func TestSelectNodesForPreemption(t *testing.T) {
 			newnode.ObjectMeta.Labels = map[string]string{"hostname": "newnode"}
 			nodes = append(nodes, newnode)
 			state := framework.NewCycleState()
-			nodeToPods, err := g.selectNodesForPreemption(context.Background(), state, test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
+			g.nodeInfoSnapshot.NodeInfoMap = nodeNameToInfo
+			nodeToPods, err := g.selectNodesForPreemption(context.Background(), state, test.pod, nodes, nil)
 			if err != nil {
 				t.Error(err)
 			}
@@ -1643,9 +1638,12 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			g := &genericScheduler{
 				framework: emptyFramework,
+				predicates: test.predicates,
+				predicateMetaProducer: algorithmpredicates.GetPredicateMetadata,
 			}
 			assignDefaultStartTime(test.pods)
+			g.nodeInfoSnapshot = g.framework.NodeInfoSnapshot()
 			nodes := []*v1.Node{}
 			for _, n := range test.nodes {
@@ -1653,7 +1651,8 @@ func TestPickOneNodeForPreemption(t *testing.T) {
 			}
 			nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, nodes)
 			state := framework.NewCycleState()
-			candidateNodes, _ := g.selectNodesForPreemption(context.Background(), state, test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
+			g.nodeInfoSnapshot.NodeInfoMap = nodeNameToInfo
+			candidateNodes, _ := g.selectNodesForPreemption(context.Background(), state, test.pod, nodes, nil)
 			node := pickOneNodeForPreemption(candidateNodes)
 			found := false
 			for _, nodeName := range test.expected {