Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-28 05:57:25 +00:00)
Merge pull request #86498 from Huang-Wei/deprecate-failedPredicateMap
Cleanup failedPredicateMap from generic_scheduler.go
This commit is contained in: commit ff975e865d
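The commit replaces the scheduler's FailedPredicateMap bookkeeping with framework.NodeToStatusMap: every node that fails filtering is recorded as a framework.Status keyed by node name, and FitError, findNodesThatFit, and podFitsOnNode drop their FailedPredicateMap parameters. As a rough orientation aid before the diff, here is a minimal standalone Go sketch of the pattern the reworked FitError.Error() follows. The types are simplified stand-ins, not the real pkg/scheduler/framework package, and the failure message is illustrative only.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// Simplified stand-ins for framework.Status and framework.NodeToStatusMap.
type Status struct{ reasons []string }

func (s *Status) Reasons() []string { return s.reasons }

type NodeToStatusMap map[string]*Status

// fitErrorMessage mirrors the reworked FitError.Error(): it builds a histogram
// of failure reasons from the per-node statuses alone, with no separate
// FailedPredicateMap to walk.
func fitErrorMessage(numAllNodes int, statuses NodeToStatusMap) string {
	reasons := make(map[string]int)
	for _, status := range statuses {
		for _, reason := range status.Reasons() {
			reasons[reason]++
		}
	}
	var reasonStrings []string
	for k, v := range reasons {
		reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k))
	}
	sort.Strings(reasonStrings)
	return fmt.Sprintf("0/%v nodes are available: %v.", numAllNodes, strings.Join(reasonStrings, ", "))
}

func main() {
	statuses := NodeToStatusMap{
		"machine1": {reasons: []string{"node(s) didn't match node selector"}},
		"machine2": {reasons: []string{"node(s) didn't match node selector"}},
	}
	fmt.Println(fitErrorMessage(2, statuses))
	// Prints: 0/2 nodes are available: 2 node(s) didn't match node selector.
}

The "0/N nodes are available" prefix matches the message format that TestHumanReadableFitError checks further down in the diff.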
@@ -66,15 +66,10 @@ const (
 	minFeasibleNodesPercentageToFind = 5
 )
 
-// FailedPredicateMap declares a map[string][]algorithm.PredicateFailureReason type.
-type FailedPredicateMap map[string][]predicates.PredicateFailureReason
-
 // FitError describes a fit error of a pod.
 type FitError struct {
 	Pod         *v1.Pod
 	NumAllNodes int
-	// TODO(Huang-Wei): remove 'FailedPredicates'
-	FailedPredicates      FailedPredicateMap
 	FilteredNodesStatuses framework.NodeToStatusMap
 }
 
@@ -89,12 +84,6 @@ const (
 // Error returns detailed information of why the pod failed to fit on each node
 func (f *FitError) Error() string {
 	reasons := make(map[string]int)
-	for _, predicates := range f.FailedPredicates {
-		for _, pred := range predicates {
-			reasons[pred.GetReason()]++
-		}
-	}
-
 	for _, status := range f.FilteredNodesStatuses {
 		for _, reason := range status.Reasons() {
 			reasons[reason]++
@@ -102,7 +91,7 @@ func (f *FitError) Error() string {
 	}
 
 	sortReasonsHistogram := func() []string {
-		reasonStrings := []string{}
+		var reasonStrings []string
 		for k, v := range reasons {
 			reasonStrings = append(reasonStrings, fmt.Sprintf("%v %v", v, k))
 		}
@@ -209,7 +198,7 @@ func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleS
 	trace.Step("Running prefilter plugins done")
 
 	startPredicateEvalTime := time.Now()
-	filteredNodes, failedPredicateMap, filteredNodesStatuses, err := g.findNodesThatFit(ctx, state, pod)
+	filteredNodes, filteredNodesStatuses, err := g.findNodesThatFit(ctx, state, pod)
 	if err != nil {
 		return result, err
 	}
@@ -225,7 +214,6 @@ func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleS
 		return result, &FitError{
 			Pod:                   pod,
 			NumAllNodes:           len(g.nodeInfoSnapshot.NodeInfoList),
-			FailedPredicates:      failedPredicateMap,
 			FilteredNodesStatuses: filteredNodesStatuses,
 		}
 	}
@@ -242,7 +230,7 @@ func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleS
 		metrics.DeprecatedSchedulingAlgorithmPriorityEvaluationDuration.Observe(metrics.SinceInMicroseconds(startPriorityEvalTime))
 		return ScheduleResult{
 			SuggestedHost:  filteredNodes[0].Name,
-			EvaluatedNodes: 1 + len(failedPredicateMap) + len(filteredNodesStatuses),
+			EvaluatedNodes: 1 + len(filteredNodesStatuses),
 			FeasibleNodes:  1,
 		}, nil
 	}
@@ -263,7 +251,7 @@ func (g *genericScheduler) Schedule(ctx context.Context, state *framework.CycleS
 
 	return ScheduleResult{
 		SuggestedHost:  host,
-		EvaluatedNodes: len(filteredNodes) + len(failedPredicateMap) + len(filteredNodesStatuses),
+		EvaluatedNodes: len(filteredNodes) + len(filteredNodesStatuses),
 		FeasibleNodes:  len(filteredNodes),
 	}, err
 }
@@ -470,10 +458,8 @@ func (g *genericScheduler) numFeasibleNodesToFind(numAllNodes int32) (numNodes i
 
 // Filters the nodes to find the ones that fit based on the given predicate functions
 // Each node is passed through the predicate functions to determine if it is a fit
-// TODO(Huang-Wei): remove 'FailedPredicateMap' from the return parameters.
-func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, FailedPredicateMap, framework.NodeToStatusMap, error) {
+func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framework.CycleState, pod *v1.Pod) ([]*v1.Node, framework.NodeToStatusMap, error) {
 	var filtered []*v1.Node
-	failedPredicateMap := FailedPredicateMap{}
 	filteredNodesStatuses := framework.NodeToStatusMap{}
 
 	if !g.framework.HasFilterPlugins() {
@@ -496,7 +482,7 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
 			// We check the nodes starting from where we left off in the previous scheduling cycle,
 			// this is to make sure all nodes have the same chance of being examined across pods.
 			nodeInfo := g.nodeInfoSnapshot.NodeInfoList[(g.nextStartNodeIndex+i)%allNodes]
-			fits, _, status, err := g.podFitsOnNode(ctx, state, pod, nodeInfo)
+			fits, status, err := g.podFitsOnNode(ctx, state, pod, nodeInfo)
 			if err != nil {
 				errCh.SendErrorWithCancel(err, cancel)
 				return
@@ -521,12 +507,12 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
 		// Stops searching for more nodes once the configured number of feasible nodes
 		// are found.
 		workqueue.ParallelizeUntil(ctx, 16, allNodes, checkNode)
-		processedNodes := int(filteredLen) + len(filteredNodesStatuses) + len(failedPredicateMap)
+		processedNodes := int(filteredLen) + len(filteredNodesStatuses)
 		g.nextStartNodeIndex = (g.nextStartNodeIndex + processedNodes) % allNodes
 
 		filtered = filtered[:filteredLen]
 		if err := errCh.ReceiveError(); err != nil {
-			return []*v1.Node{}, FailedPredicateMap{}, framework.NodeToStatusMap{}, err
+			return []*v1.Node{}, framework.NodeToStatusMap{}, err
 		}
 	}
 
@@ -543,15 +529,15 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
 					continue
 				}
 
-				return []*v1.Node{}, FailedPredicateMap{}, framework.NodeToStatusMap{}, err
+				return []*v1.Node{}, framework.NodeToStatusMap{}, err
 			}
 
-			// TODO(Huang-Wei): refactor this to fill 'filteredNodesStatuses' instead of 'failedPredicateMap'.
 			for failedNodeName, failedMsg := range failedMap {
-				if _, found := failedPredicateMap[failedNodeName]; !found {
-					failedPredicateMap[failedNodeName] = []predicates.PredicateFailureReason{}
+				if _, found := filteredNodesStatuses[failedNodeName]; !found {
+					filteredNodesStatuses[failedNodeName] = framework.NewStatus(framework.Unschedulable, failedMsg)
+				} else {
+					filteredNodesStatuses[failedNodeName].AppendReason(failedMsg)
 				}
-				failedPredicateMap[failedNodeName] = append(failedPredicateMap[failedNodeName], predicates.NewPredicateFailureError(extender.Name(), failedMsg))
 			}
 			filtered = filteredList
 			if len(filtered) == 0 {
@@ -559,7 +545,7 @@ func (g *genericScheduler) findNodesThatFit(ctx context.Context, state *framewor
 			}
 		}
 	}
-	return filtered, failedPredicateMap, filteredNodesStatuses, nil
+	return filtered, filteredNodesStatuses, nil
 }
 
 // addNominatedPods adds pods with equal or greater priority which are nominated
@@ -606,8 +592,7 @@ func (g *genericScheduler) podFitsOnNode(
 	state *framework.CycleState,
 	pod *v1.Pod,
 	info *schedulernodeinfo.NodeInfo,
-) (bool, []predicates.PredicateFailureReason, *framework.Status, error) {
-	var failedPredicates []predicates.PredicateFailureReason
+) (bool, *framework.Status, error) {
 	var status *framework.Status
 
 	podsAdded := false
@@ -636,19 +621,19 @@ func (g *genericScheduler) podFitsOnNode(
 			var err error
 			podsAdded, stateToUse, nodeInfoToUse, err = g.addNominatedPods(ctx, pod, state, info)
 			if err != nil {
-				return false, []predicates.PredicateFailureReason{}, nil, err
+				return false, nil, err
 			}
-		} else if !podsAdded || len(failedPredicates) != 0 || !status.IsSuccess() {
+		} else if !podsAdded || !status.IsSuccess() {
 			break
 		}
 
 		status = g.framework.RunFilterPlugins(ctx, stateToUse, pod, nodeInfoToUse)
 		if !status.IsSuccess() && !status.IsUnschedulable() {
-			return false, failedPredicates, status, status.AsError()
+			return false, status, status.AsError()
 		}
 	}
 
-	return len(failedPredicates) == 0 && status.IsSuccess(), failedPredicates, status, nil
+	return status.IsSuccess(), status, nil
 }
 
 // prioritizeNodes prioritizes the nodes by running the score plugins,
@@ -1011,7 +996,7 @@ func (g *genericScheduler) selectVictimsOnNode(
 	// inter-pod affinity to one or more victims, but we have decided not to
 	// support this case for performance reasons. Having affinity to lower
 	// priority pods is not a recommended configuration anyway.
-	if fits, _, _, err := g.podFitsOnNode(ctx, state, pod, nodeInfo); !fits {
+	if fits, _, err := g.podFitsOnNode(ctx, state, pod, nodeInfo); !fits {
 		if err != nil {
 			klog.Warningf("Encountered error while selecting victims on node %v: %v", nodeInfo.Node().Name, err)
 		}
@@ -1029,7 +1014,7 @@ func (g *genericScheduler) selectVictimsOnNode(
 		if err := addPod(p); err != nil {
 			return false, err
 		}
-		fits, _, _, _ := g.podFitsOnNode(ctx, state, pod, nodeInfo)
+		fits, _, _ := g.podFitsOnNode(ctx, state, pod, nodeInfo)
 		if !fits {
 			if err := removePod(p); err != nil {
 				return false, err
@@ -1060,22 +1045,15 @@ func (g *genericScheduler) selectVictimsOnNode(
 // nodesWherePreemptionMightHelp returns a list of nodes with failed predicates
 // that may be satisfied by removing pods from the node.
 func nodesWherePreemptionMightHelp(nodeNameToInfo map[string]*schedulernodeinfo.NodeInfo, fitErr *FitError) []*v1.Node {
-	potentialNodes := []*v1.Node{}
+	var potentialNodes []*v1.Node
	for name, node := range nodeNameToInfo {
+		// We reply on the status by each plugin - 'Unschedulable' or 'UnschedulableAndUnresolvable'
+		// to determine whether preemption may help or not on the node.
 		if fitErr.FilteredNodesStatuses[name].Code() == framework.UnschedulableAndUnresolvable {
 			continue
 		}
-		failedPredicates := fitErr.FailedPredicates[name]
-		// If we assume that scheduler looks at all nodes and populates the failedPredicateMap
-		// (which is the case today), the !found case should never happen, but we'd prefer
-		// to rely less on such assumptions in the code when checking does not impose
-		// significant overhead.
-		// Also, we currently assume all failures returned by extender as resolvable.
-		if !predicates.UnresolvablePredicateExists(failedPredicates) {
-			klog.V(3).Infof("Node %v is a potential node for preemption.", name)
-			potentialNodes = append(potentialNodes, node.Node())
-		}
+		klog.V(3).Infof("Node %v is a potential node for preemption.", name)
+		potentialNodes = append(potentialNodes, node.Node())
 	}
 	return potentialNodes
 }
@@ -387,9 +387,8 @@ func TestGenericScheduler(t *testing.T) {
 			pod:  &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 			name: "test 1",
 			wErr: &FitError{
 				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 				NumAllNodes: 2,
-				FailedPredicates: FailedPredicateMap{},
 				FilteredNodesStatuses: framework.NodeToStatusMap{
 					"machine1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
 					"machine2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
@@ -461,9 +460,8 @@ func TestGenericScheduler(t *testing.T) {
 			pod:  &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 			name: "test 7",
 			wErr: &FitError{
 				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 				NumAllNodes: 3,
-				FailedPredicates: FailedPredicateMap{},
 				FilteredNodesStatuses: framework.NodeToStatusMap{
 					"3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
 					"2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
@@ -492,9 +490,8 @@ func TestGenericScheduler(t *testing.T) {
 			nodes: []string{"1", "2"},
 			name:  "test 8",
 			wErr: &FitError{
 				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 				NumAllNodes: 2,
-				FailedPredicates: FailedPredicateMap{},
 				FilteredNodesStatuses: framework.NodeToStatusMap{
 					"1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
 					"2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
@@ -707,9 +704,8 @@ func TestGenericScheduler(t *testing.T) {
 			pod:           &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
 			expectedHosts: nil,
 			wErr: &FitError{
 				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
 				NumAllNodes: 1,
-				FailedPredicates: FailedPredicateMap{},
 				FilteredNodesStatuses: framework.NodeToStatusMap{
 					"3": framework.NewStatus(framework.Unschedulable, "injecting failure for pod test-filter"),
 				},
@@ -728,9 +724,8 @@ func TestGenericScheduler(t *testing.T) {
 			pod:           &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
 			expectedHosts: nil,
 			wErr: &FitError{
 				Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "test-filter", UID: types.UID("test-filter")}},
 				NumAllNodes: 1,
-				FailedPredicates: FailedPredicateMap{},
 				FilteredNodesStatuses: framework.NodeToStatusMap{
 					"3": framework.NewStatus(framework.UnschedulableAndUnresolvable, "injecting failure for pod test-filter"),
 				},
@@ -852,7 +847,7 @@ func TestFindFitAllError(t *testing.T) {
 		st.RegisterFilterPlugin("MatchFilter", NewMatchFilterPlugin),
 	)
 
-	_, _, nodeToStatusMap, err := scheduler.findNodesThatFit(context.Background(), framework.NewCycleState(), &v1.Pod{})
+	_, nodeToStatusMap, err := scheduler.findNodesThatFit(context.Background(), framework.NewCycleState(), &v1.Pod{})
 
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
@@ -886,7 +881,7 @@ func TestFindFitSomeError(t *testing.T) {
 	)
 
 	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "1", UID: types.UID("1")}}
-	_, _, nodeToStatusMap, err := scheduler.findNodesThatFit(context.Background(), framework.NewCycleState(), pod)
+	_, nodeToStatusMap, err := scheduler.findNodesThatFit(context.Background(), framework.NewCycleState(), pod)
 
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
@@ -969,7 +964,7 @@ func TestFindFitPredicateCallCounts(t *testing.T) {
 		cache.UpdateNodeInfoSnapshot(scheduler.nodeInfoSnapshot)
 		queue.UpdateNominatedPodForNode(&v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("nominated")}, Spec: v1.PodSpec{Priority: &midPriority}}, "1")
 
-		_, _, _, err := scheduler.findNodesThatFit(context.Background(), framework.NewCycleState(), test.pod)
+		_, _, err := scheduler.findNodesThatFit(context.Background(), framework.NewCycleState(), test.pod)
 
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
@@ -1003,10 +998,10 @@ func TestHumanReadableFitError(t *testing.T) {
 	err := &FitError{
 		Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 		NumAllNodes: 3,
-		FailedPredicates: FailedPredicateMap{
-			"1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure},
-			"2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
-			"3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
+		FilteredNodesStatuses: framework.NodeToStatusMap{
+			"1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrNodeUnderMemoryPressure.GetReason()),
+			"2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
+			"3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
 		},
 	}
 	if strings.Contains(err.Error(), "0/3 nodes are available") {
@@ -1884,94 +1879,91 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 
 	tests := []struct {
 		name          string
-		failedPredMap FailedPredicateMap
 		nodesStatuses framework.NodeToStatusMap
 		expected      map[string]bool // set of expected node names. Value is ignored.
 	}{
 		{
 			name: "No node should be attempted",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrTaintsTolerationsNotMatch},
-				"machine4": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeLabelPresenceViolated},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeSelectorNotMatch.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrPodNotMatchHostName.GetReason()),
+				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrTaintsTolerationsNotMatch.GetReason()),
+				"machine4": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeLabelPresenceViolated.GetReason()),
 			},
 			expected: map[string]bool{},
 		},
 		{
 			name: "ErrPodAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnschedulable},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrPodAffinityNotMatch.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrPodNotMatchHostName.GetReason()),
+				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnschedulable.GetReason()),
 			},
 			expected: map[string]bool{"machine1": true, "machine4": true},
 		},
 		{
 			name: "pod with both pod affinity and anti-affinity should be tried",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrPodAffinityNotMatch.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrPodNotMatchHostName.GetReason()),
 			},
 			expected: map[string]bool{"machine1": true, "machine3": true, "machine4": true},
 		},
 		{
 			name: "ErrPodAffinityRulesNotMatch should not be tried as it indicates that the pod is unschedulable due to inter-pod affinity, but ErrPodAffinityNotMatch should be tried as it indicates that the pod is unschedulable due to inter-pod affinity or anti-affinity",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityRulesNotMatch},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrPodAffinityRulesNotMatch.GetReason()),
+				"machine2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrPodAffinityNotMatch.GetReason()),
 			},
 			expected: map[string]bool{"machine2": true, "machine3": true, "machine4": true},
 		},
 		{
 			name: "Mix of failed predicates works fine",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeSelectorNotMatch, algorithmpredicates.ErrNodeUnderDiskPressure, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 500, 300)},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName, algorithmpredicates.ErrDiskConflict},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400)},
-				"machine4": []algorithmpredicates.PredicateFailureReason{},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrDiskConflict.GetReason()),
+				"machine3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400).GetReason()),
 			},
 			expected: map[string]bool{"machine3": true, "machine4": true},
 		},
 		{
 			name: "Node condition errors should be considered unresolvable",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderDiskPressure},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderPIDPressure},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnderMemoryPressure},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderPIDPressure.GetReason()),
+				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderMemoryPressure.GetReason()),
 			},
 			expected: map[string]bool{"machine4": true},
 		},
 		{
 			name: "Node condition errors and ErrNodeUnknownCondition should be considered unresolvable",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeNotReady},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeNetworkUnavailable},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrNodeUnknownCondition},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeNotReady.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeNetworkUnavailable.GetReason()),
+				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnknownCondition.GetReason()),
 			},
 			expected: map[string]bool{"machine4": true},
 		},
 		{
 			name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrVolumeZoneConflict},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrVolumeNodeConflict},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrVolumeBindConflict},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrVolumeZoneConflict.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrVolumeNodeConflict.GetReason()),
+				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrVolumeBindConflict.GetReason()),
 			},
 			expected: map[string]bool{"machine4": true},
 		},
 		{
 			name: "ErrTopologySpreadConstraintsNotMatch should be tried as it indicates that the pod is unschedulable due to topology spread constraints",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrTopologySpreadConstraintsNotMatch},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrTopologySpreadConstraintsNotMatch},
+			nodesStatuses: framework.NodeToStatusMap{
+				"machine1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrTopologySpreadConstraintsNotMatch.GetReason()),
+				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrPodNotMatchHostName.GetReason()),
+				"machine3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrTopologySpreadConstraintsNotMatch.GetReason()),
 			},
 			expected: map[string]bool{"machine1": true, "machine3": true, "machine4": true},
 		},
 		{
 			name: "UnschedulableAndUnresolvable status should be skipped but Unschedulable should be tried",
-			failedPredMap: FailedPredicateMap{},
 			nodesStatuses: framework.NodeToStatusMap{
 				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, ""),
 				"machine3": framework.NewStatus(framework.Unschedulable, ""),
@@ -1979,28 +1971,11 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 			},
 			expected: map[string]bool{"machine1": true, "machine3": true},
 		},
-		{
-			name: "Failed predicates and statuses should be evaluated",
-			failedPredMap: FailedPredicateMap{
-				"machine1": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
-				"machine2": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodAffinityNotMatch},
-				"machine3": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
-				"machine4": []algorithmpredicates.PredicateFailureReason{algorithmpredicates.ErrPodNotMatchHostName},
-			},
-			nodesStatuses: framework.NodeToStatusMap{
-				"machine1": framework.NewStatus(framework.Unschedulable, ""),
-				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, ""),
-				"machine3": framework.NewStatus(framework.Unschedulable, ""),
-				"machine4": framework.NewStatus(framework.UnschedulableAndUnresolvable, ""),
-			},
-			expected: map[string]bool{"machine1": true},
-		},
 	}
 
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			fitErr := FitError{
-				FailedPredicates:      test.failedPredMap,
 				FilteredNodesStatuses: test.nodesStatuses,
 			}
 			nodes := nodesWherePreemptionMightHelp(nodeinfosnapshot.CreateNodeInfoMap(nil, makeNodeList(nodeNames)), &fitErr)
@@ -2494,7 +2469,7 @@ func TestFairEvaluationForNodes(t *testing.T) {
 
 	// Iterating over all nodes more than twice
 	for i := 0; i < 2*(numAllNodes/nodesToFind+1); i++ {
-		nodesThatFit, _, _, err := g.findNodesThatFit(context.Background(), framework.NewCycleState(), &v1.Pod{})
+		nodesThatFit, _, err := g.findNodesThatFit(context.Background(), framework.NewCycleState(), &v1.Pod{})
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}
@@ -122,6 +122,11 @@ func (s *Status) Reasons() []string {
 	return s.reasons
 }
 
+// AppendReason appends given reason to the Status.
+func (s *Status) AppendReason(reason string) {
+	s.reasons = append(s.reasons, reason)
+}
+
 // IsSuccess returns true if and only if "Status" is nil or Code is "Success".
 func (s *Status) IsSuccess() bool {
 	return s.Code() == Success
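The AppendReason method added in the hunk above backs the extender loop in findNodesThatFit: a node that already carries an Unschedulable status from the filter plugins gets the extender's failure message appended rather than having its status overwritten. Below is a minimal standalone sketch of that accumulation, again with simplified stand-in types rather than the real framework package, and with made-up failure messages.

package main

import "fmt"

// Simplified stand-ins for the framework types touched by this commit.
type Code int

const (
	Success Code = iota
	Unschedulable
	UnschedulableAndUnresolvable
)

type Status struct {
	code    Code
	reasons []string
}

func NewStatus(code Code, reasons ...string) *Status {
	return &Status{code: code, reasons: reasons}
}

// AppendReason mirrors the method added in the hunk above.
func (s *Status) AppendReason(reason string) {
	s.reasons = append(s.reasons, reason)
}

func (s *Status) Reasons() []string { return s.reasons }

type NodeToStatusMap map[string]*Status

func main() {
	filteredNodesStatuses := NodeToStatusMap{}

	// A filter plugin rejects the node first (message is illustrative only).
	filteredNodesStatuses["machine1"] = NewStatus(Unschedulable, "node(s) had insufficient memory")

	// Later an extender rejects the same node; the new reason is appended,
	// matching the if/else branch added to findNodesThatFit.
	failedNodeName, failedMsg := "machine1", "extender rejected the node" // illustrative
	if _, found := filteredNodesStatuses[failedNodeName]; !found {
		filteredNodesStatuses[failedNodeName] = NewStatus(Unschedulable, failedMsg)
	} else {
		filteredNodesStatuses[failedNodeName].AppendReason(failedMsg)
	}

	fmt.Println(filteredNodesStatuses[failedNodeName].Reasons())
	// Prints: [node(s) had insufficient memory extender rejected the node]
}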
@@ -143,10 +143,6 @@ func podWithResources(id, desiredHost string, limits v1.ResourceList, requests v
 	return pod
 }
 
-func PredicateOne(pod *v1.Pod, meta predicates.Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []predicates.PredicateFailureReason, error) {
-	return true, nil, nil
-}
-
 func PriorityOne(pod *v1.Pod, meta interface{}, nodeInfo *schedulernodeinfo.NodeInfo) (framework.NodeScore, error) {
 	return framework.NodeScore{}, nil
 }
@@ -186,7 +182,6 @@ func TestSchedulerCreation(t *testing.T) {
 	testSource := "testProvider"
 	eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1beta1().Events("")})
 
-	RegisterFitPredicate("PredicateOne", PredicateOne)
 	RegisterPriorityMapReduceFunction("PriorityOne", PriorityOne, nil, 1)
 	RegisterAlgorithmProvider(testSource, sets.NewString("PredicateOne"), sets.NewString("PriorityOne"))
 
@@ -447,9 +442,8 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
 	select {
 	case err := <-errChan:
 		expectErr := &core.FitError{
 			Pod:         secondPod,
 			NumAllNodes: 1,
-			FailedPredicates: core.FailedPredicateMap{},
 			FilteredNodesStatuses: framework.NodeToStatusMap{
 				node.Name: framework.NewStatus(
 					framework.Unschedulable,
@@ -659,7 +653,6 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
 	expectErr := &core.FitError{
 		Pod:         podWithTooBigResourceRequests,
 		NumAllNodes: len(nodes),
-		FailedPredicates: core.FailedPredicateMap{},
 		FilteredNodesStatuses: failedNodeStatues,
 	}
 	if len(fmt.Sprint(expectErr)) > 150 {