Merge pull request #84015 from ahg-g/ahg-filters

cleanup unnecessary func parameters in genericScheduler methods
Kubernetes Prow Robot 2019-10-17 19:50:56 -07:00 committed by GitHub
commit 9bf2ba7369
2 changed files with 25 additions and 38 deletions
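
The change follows one pattern throughout: values that every call already had access to through the genericScheduler receiver (the fit predicates, the predicate metadata producer, the scheduling queue, and the node-info snapshot) are no longer threaded through as parameters; the methods read them from the receiver instead. Below is a minimal, self-contained Go sketch of that pattern; the types and names are illustrative only, not the real scheduler code.

package main

import "fmt"

type nodeInfo struct{ name string }

// genericScheduler owns the state that previously had to be passed to every method call.
type genericScheduler struct {
	predicates map[string]func(*nodeInfo) bool // shared across all calls
}

// Before the cleanup, a method like this would also take the predicate map
// (and a scheduling queue) as parameters; now it reads g.predicates directly.
func (g *genericScheduler) podFitsOnNode(n *nodeInfo) bool {
	for name, pred := range g.predicates {
		if !pred(n) {
			fmt.Printf("predicate %s failed on node %s\n", name, n.name)
			return false
		}
	}
	return true
}

func main() {
	g := &genericScheduler{predicates: map[string]func(*nodeInfo) bool{
		"always-fits": func(*nodeInfo) bool { return true },
	}}
	fmt.Println(g.podFitsOnNode(&nodeInfo{name: "node-1"})) // true
}

Call sites shrink accordingly, as the hunks below show: for example, g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, fitPredicates, queue, false) becomes g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, false).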

View File

@@ -334,8 +334,7 @@ func (g *genericScheduler) Preempt(ctx context.Context, state *framework.CycleSt
if err != nil {
return nil, nil, nil, err
}
-nodeToVictims, err := g.selectNodesForPreemption(ctx, state, pod, g.nodeInfoSnapshot.NodeInfoMap, potentialNodes, g.predicates,
-g.predicateMetaProducer, g.schedulingQueue, pdbs)
+nodeToVictims, err := g.selectNodesForPreemption(ctx, state, pod, potentialNodes, pdbs)
if err != nil {
return nil, nil, nil, err
}
@@ -488,8 +487,6 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
pod,
meta,
g.nodeInfoSnapshot.NodeInfoMap[nodeName],
-g.predicates,
-g.schedulingQueue,
g.alwaysCheckAllPredicates,
)
if err != nil {
@@ -563,13 +560,13 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
// to run on the node given in nodeInfo to meta and nodeInfo. It returns 1) whether
// any pod was added, 2) augmented metadata, 3) augmented CycleState 4) augmented nodeInfo.
func (g *genericScheduler) addNominatedPods(ctx context.Context, pod *v1.Pod, meta predicates.PredicateMetadata, state *framework.CycleState,
-nodeInfo *schedulernodeinfo.NodeInfo, queue internalqueue.SchedulingQueue) (bool, predicates.PredicateMetadata,
+nodeInfo *schedulernodeinfo.NodeInfo) (bool, predicates.PredicateMetadata,
*framework.CycleState, *schedulernodeinfo.NodeInfo, error) {
-if queue == nil || nodeInfo == nil || nodeInfo.Node() == nil {
+if g.schedulingQueue == nil || nodeInfo == nil || nodeInfo.Node() == nil {
// This may happen only in tests.
return false, meta, state, nodeInfo, nil
}
-nominatedPods := queue.NominatedPodsForNode(nodeInfo.Node().Name)
+nominatedPods := g.schedulingQueue.NominatedPodsForNode(nodeInfo.Node().Name)
if len(nominatedPods) == 0 {
return false, meta, state, nodeInfo, nil
}
@@ -615,8 +612,6 @@ func (g *genericScheduler) podFitsOnNode(
pod *v1.Pod,
meta predicates.PredicateMetadata,
info *schedulernodeinfo.NodeInfo,
-predicateFuncs map[string]predicates.FitPredicate,
-queue internalqueue.SchedulingQueue,
alwaysCheckAllPredicates bool,
) (bool, []predicates.PredicateFailureReason, *framework.Status, error) {
var failedPredicates []predicates.PredicateFailureReason
@@ -647,7 +642,7 @@ func (g *genericScheduler) podFitsOnNode(
nodeInfoToUse := info
if i == 0 {
var err error
-podsAdded, metaToUse, stateToUse, nodeInfoToUse, err = g.addNominatedPods(ctx, pod, meta, state, info, queue)
+podsAdded, metaToUse, stateToUse, nodeInfoToUse, err = g.addNominatedPods(ctx, pod, meta, state, info)
if err != nil {
return false, []predicates.PredicateFailureReason{}, nil, err
}
@@ -662,7 +657,7 @@ func (g *genericScheduler) podFitsOnNode(
err error
)
-if predicate, exist := predicateFuncs[predicateKey]; exist {
+if predicate, exist := g.predicates[predicateKey]; exist {
fit, reasons, err = predicate(pod, metaToUse, nodeInfoToUse)
if err != nil {
return false, []predicates.PredicateFailureReason{}, nil, err
@@ -732,7 +727,7 @@ func PrioritizeNodes(
errs = append(errs, err)
}
-results := make([]framework.NodeScoreList, len(priorityConfigs), len(priorityConfigs))
+results := make([]framework.NodeScoreList, len(priorityConfigs))
// DEPRECATED: we can remove this when all priorityConfigs implement the
// Map-Reduce pattern.
@@ -1010,32 +1005,27 @@ func (g *genericScheduler) selectNodesForPreemption(
ctx context.Context,
state *framework.CycleState,
pod *v1.Pod,
-nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo,
potentialNodes []*v1.Node,
-fitPredicates map[string]predicates.FitPredicate,
-metadataProducer predicates.PredicateMetadataProducer,
-queue internalqueue.SchedulingQueue,
pdbs []*policy.PodDisruptionBudget,
) (map[*v1.Node]*extenderv1.Victims, error) {
nodeToVictims := map[*v1.Node]*extenderv1.Victims{}
var resultLock sync.Mutex
// We can use the same metadata producer for all nodes.
-meta := metadataProducer(pod, nodeNameToInfo)
+meta := g.predicateMetaProducer(pod, g.nodeInfoSnapshot.NodeInfoMap)
checkNode := func(i int) {
nodeName := potentialNodes[i].Name
-if nodeNameToInfo[nodeName] == nil {
+if g.nodeInfoSnapshot.NodeInfoMap[nodeName] == nil {
return
}
-nodeInfoCopy := nodeNameToInfo[nodeName].Clone()
+nodeInfoCopy := g.nodeInfoSnapshot.NodeInfoMap[nodeName].Clone()
var metaCopy predicates.PredicateMetadata
if meta != nil {
metaCopy = meta.ShallowCopy()
}
stateCopy := state.Clone()
stateCopy.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: metaCopy})
-pods, numPDBViolations, fits := g.selectVictimsOnNode(
-ctx, stateCopy, pod, metaCopy, nodeInfoCopy, fitPredicates, queue, pdbs)
+pods, numPDBViolations, fits := g.selectVictimsOnNode(ctx, stateCopy, pod, metaCopy, nodeInfoCopy, pdbs)
if fits {
resultLock.Lock()
victims := extenderv1.Victims{
@@ -1110,8 +1100,6 @@ func (g *genericScheduler) selectVictimsOnNode(
pod *v1.Pod,
meta predicates.PredicateMetadata,
nodeInfo *schedulernodeinfo.NodeInfo,
-fitPredicates map[string]predicates.FitPredicate,
-queue internalqueue.SchedulingQueue,
pdbs []*policy.PodDisruptionBudget,
) ([]*v1.Pod, int, bool) {
var potentialVictims []*v1.Pod
@@ -1161,7 +1149,7 @@ func (g *genericScheduler) selectVictimsOnNode(
// inter-pod affinity to one or more victims, but we have decided not to
// support this case for performance reasons. Having affinity to lower
// priority pods is not a recommended configuration anyway.
-if fits, _, _, err := g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, fitPredicates, queue, false); !fits {
+if fits, _, _, err := g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, false); !fits {
if err != nil {
klog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err)
}
@@ -1179,7 +1167,7 @@ func (g *genericScheduler) selectVictimsOnNode(
if err := addPod(p); err != nil {
return false, err
}
-fits, _, _, _ := g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, fitPredicates, queue, false)
+fits, _, _, _ := g.podFitsOnNode(ctx, state, pod, meta, nodeInfo, false)
if !fits {
if err := removePod(p); err != nil {
return false, err
@@ -1215,7 +1203,8 @@ func nodesWherePreemptionMightHelp(nodeNameToInfo map[string]*schedulernodeinfo.
if fitErr.FilteredNodesStatuses[name].Code() == framework.UnschedulableAndUnresolvable {
continue
}
-failedPredicates, _ := fitErr.FailedPredicates[name]
+failedPredicates := fitErr.FailedPredicates[name]
// If we assume that scheduler looks at all nodes and populates the failedPredicateMap
// (which is the case today), the !found case should never happen, but we'd prefer
// to rely less on such assumptions in the code when checking does not impose
@@ -1297,8 +1286,7 @@ func NewGenericScheduler(
alwaysCheckAllPredicates bool,
disablePreemption bool,
percentageOfNodesToScore int32,
-enableNonPreempting bool,
-) ScheduleAlgorithm {
+enableNonPreempting bool) ScheduleAlgorithm {
return &genericScheduler{
cache: cache,
schedulingQueue: podQueue,

View File

@@ -1061,10 +1061,6 @@ func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
return &node, nil
}
-func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulernodeinfo.NodeInfo) algorithmpredicates.PredicateMetadata {
-return algorithmpredicates.GetPredicateMetadata(p, nodeInfo)
-}
var smallContainers = []v1.Container{
{
Resources: v1.ResourceRequirements{
@@ -1384,14 +1380,12 @@ func TestSelectNodesForPreemption(t *testing.T) {
cache.AddNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: name, Labels: map[string]string{"hostname": name}}})
}
-predMetaProducer := algorithmpredicates.EmptyPredicateMetadataProducer
filterPlugin.failedNodeReturnCodeMap = filterFailedNodeReturnCodeMap
scheduler := NewGenericScheduler(
nil,
internalqueue.NewSchedulingQueue(nil, nil),
test.predicates,
-predMetaProducer,
+algorithmpredicates.GetPredicateMetadata,
nil,
priorities.EmptyPriorityMetadataProducer,
filterFramework,
@@ -1429,7 +1423,8 @@ func TestSelectNodesForPreemption(t *testing.T) {
newnode.ObjectMeta.Labels = map[string]string{"hostname": "newnode"}
nodes = append(nodes, newnode)
state := framework.NewCycleState()
-nodeToPods, err := g.selectNodesForPreemption(context.Background(), state, test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
+g.nodeInfoSnapshot.NodeInfoMap = nodeNameToInfo
+nodeToPods, err := g.selectNodesForPreemption(context.Background(), state, test.pod, nodes, nil)
if err != nil {
t.Error(err)
}
@@ -1643,9 +1638,12 @@ func TestPickOneNodeForPreemption(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
g := &genericScheduler{
-framework: emptyFramework,
+framework:             emptyFramework,
+predicates:            test.predicates,
+predicateMetaProducer: algorithmpredicates.GetPredicateMetadata,
}
assignDefaultStartTime(test.pods)
+g.nodeInfoSnapshot = g.framework.NodeInfoSnapshot()
nodes := []*v1.Node{}
for _, n := range test.nodes {
@@ -1653,7 +1651,8 @@ func TestPickOneNodeForPreemption(t *testing.T) {
}
nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, nodes)
state := framework.NewCycleState()
-candidateNodes, _ := g.selectNodesForPreemption(context.Background(), state, test.pod, nodeNameToInfo, nodes, test.predicates, PredicateMetadata, nil, nil)
+g.nodeInfoSnapshot.NodeInfoMap = nodeNameToInfo
+candidateNodes, _ := g.selectNodesForPreemption(context.Background(), state, test.pod, nodes, nil)
node := pickOneNodeForPreemption(candidateNodes)
found := false
for _, nodeName := range test.expected {