From 06d3cd33b23f1c9e56c202bed272f5d3cbed3bc3 Mon Sep 17 00:00:00 2001
From: carlory
Date: Sun, 28 Apr 2024 18:59:23 +0800
Subject: [PATCH] use slices library instead

---
 .../dynamicresources/dynamicresources.go | 33 ++-----------------
 1 file changed, 3 insertions(+), 30 deletions(-)

diff --git a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go
index f54cf0d2bea..a3d17cb8f12 100644
--- a/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go
+++ b/pkg/scheduler/framework/plugins/dynamicresources/dynamicresources.go
@@ -731,7 +731,7 @@ func (pl *dynamicResources) isSchedulableAfterPodSchedulingContextChange(logger
 	// before moving DRA to beta.
 	if podScheduling.Spec.SelectedNode != "" {
 		for _, claimStatus := range podScheduling.Status.ResourceClaims {
-			if sliceContains(claimStatus.UnsuitableNodes, podScheduling.Spec.SelectedNode) {
+			if slices.Contains(claimStatus.UnsuitableNodes, podScheduling.Spec.SelectedNode) {
 				logger.V(5).Info("PodSchedulingContext has unsuitable selected node, schedule immediately", "pod", klog.KObj(pod), "selectedNode", podScheduling.Spec.SelectedNode, "podResourceName", claimStatus.Name)
 				return framework.Queue, nil
 			}
@@ -769,15 +769,6 @@ func podSchedulingHasClaimInfo(podScheduling *resourcev1alpha2.PodSchedulingCont
 	return false
 }
 
-func sliceContains(hay []string, needle string) bool {
-	for _, item := range hay {
-		if item == needle {
-			return true
-		}
-	}
-	return false
-}
-
 // podResourceClaims returns the ResourceClaims for all pod.Spec.PodResourceClaims.
 func (pl *dynamicResources) podResourceClaims(pod *v1.Pod) ([]*resourcev1alpha2.ResourceClaim, error) {
 	claims := make([]*resourcev1alpha2.ResourceClaim, 0, len(pod.Spec.ResourceClaims))
@@ -1330,22 +1321,13 @@ func haveAllPotentialNodes(schedulingCtx *resourcev1alpha2.PodSchedulingContext,
 		return false
 	}
 	for _, node := range nodes {
-		if !haveNode(schedulingCtx.Spec.PotentialNodes, node.Node().Name) {
+		if !slices.Contains(schedulingCtx.Spec.PotentialNodes, node.Node().Name) {
 			return false
 		}
 	}
 	return true
 }
 
-func haveNode(nodeNames []string, nodeName string) bool {
-	for _, n := range nodeNames {
-		if n == nodeName {
-			return true
-		}
-	}
-	return false
-}
-
 // Reserve reserves claims for the pod.
 func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) {
 	if !pl.enabled {
@@ -1402,7 +1384,7 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
 	// scheduler will pick it forever even when it cannot satisfy
 	// the claim.
 	if state.podSchedulingState.schedulingCtx == nil ||
-		!containsNode(state.podSchedulingState.schedulingCtx.Spec.PotentialNodes, nodeName) {
+		!slices.Contains(state.podSchedulingState.schedulingCtx.Spec.PotentialNodes, nodeName) {
 		potentialNodes := []string{nodeName}
 		state.podSchedulingState.potentialNodes = &potentialNodes
 		logger.V(5).Info("asking for information about single potential node", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
@@ -1478,15 +1460,6 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
 	return statusPending(logger, "waiting for resource driver to provide information", "pod", klog.KObj(pod))
 }
 
-func containsNode(hay []string, needle string) bool {
-	for _, node := range hay {
-		if node == needle {
-			return true
-		}
-	}
-	return false
-}
-
 // Unreserve clears the ReservedFor field for all claims.
 // It's idempotent, and does nothing if no state found for the given pod.
 func (pl *dynamicResources) Unreserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) {
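
For reference, a minimal standalone sketch (not part of the patch) of the standard-library call the patch switches to, slices.Contains from the "slices" package (Go 1.21+), which does the same linear scan with == comparison as the removed sliceContains/haveNode/containsNode helpers. The node names below are hypothetical, used only for illustration.

// Standalone sketch, not part of the patch above.
package main

import (
	"fmt"
	"slices"
)

func main() {
	// Hypothetical node names, standing in for claimStatus.UnsuitableNodes.
	unsuitableNodes := []string{"node-a", "node-b"}
	selectedNode := "node-b"

	// Equivalent to the removed loop-based helpers: linear scan, == comparison.
	if slices.Contains(unsuitableNodes, selectedNode) {
		fmt.Println("selected node is unsuitable, schedule immediately")
	}
}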