Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-04 09:49:50 +00:00)
Commit 06d3cd33b2: use slices library instead
Parent: 38c2a963b4
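Context for the change: Go 1.21 added the generic slices package to the standard library, and slices.Contains performs the same linear membership check as the hand-rolled helpers deleted below. A minimal standalone sketch of the pattern follows; it is not part of this commit, and the slice contents and node names are made up for illustration.

package main

import (
	"fmt"
	"slices" // standard library since Go 1.21
)

// sliceContains mirrors the kind of hand-rolled helper this commit removes.
func sliceContains(hay []string, needle string) bool {
	for _, item := range hay {
		if item == needle {
			return true
		}
	}
	return false
}

func main() {
	// Hypothetical node names, for illustration only.
	nodes := []string{"node-a", "node-b"}

	// The generic slices.Contains does the same membership check.
	fmt.Println(sliceContains(nodes, "node-b"))   // true
	fmt.Println(slices.Contains(nodes, "node-b")) // true
	fmt.Println(slices.Contains(nodes, "node-c")) // false
}

Because slices.Contains works for any comparable element type, a single standard-library call can replace the three nearly identical helpers (sliceContains, haveNode, containsNode) removed in the hunks below.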
@@ -731,7 +731,7 @@ func (pl *dynamicResources) isSchedulableAfterPodSchedulingContextChange(logger
 	// before moving DRA to beta.
 	if podScheduling.Spec.SelectedNode != "" {
 		for _, claimStatus := range podScheduling.Status.ResourceClaims {
-			if sliceContains(claimStatus.UnsuitableNodes, podScheduling.Spec.SelectedNode) {
+			if slices.Contains(claimStatus.UnsuitableNodes, podScheduling.Spec.SelectedNode) {
 				logger.V(5).Info("PodSchedulingContext has unsuitable selected node, schedule immediately", "pod", klog.KObj(pod), "selectedNode", podScheduling.Spec.SelectedNode, "podResourceName", claimStatus.Name)
 				return framework.Queue, nil
 			}
@@ -769,15 +769,6 @@ func podSchedulingHasClaimInfo(podScheduling *resourcev1alpha2.PodSchedulingCont
 	return false
 }
 
-func sliceContains(hay []string, needle string) bool {
-	for _, item := range hay {
-		if item == needle {
-			return true
-		}
-	}
-	return false
-}
-
 // podResourceClaims returns the ResourceClaims for all pod.Spec.PodResourceClaims.
 func (pl *dynamicResources) podResourceClaims(pod *v1.Pod) ([]*resourcev1alpha2.ResourceClaim, error) {
 	claims := make([]*resourcev1alpha2.ResourceClaim, 0, len(pod.Spec.ResourceClaims))
@@ -1330,22 +1321,13 @@ func haveAllPotentialNodes(schedulingCtx *resourcev1alpha2.PodSchedulingContext,
 		return false
 	}
 	for _, node := range nodes {
-		if !haveNode(schedulingCtx.Spec.PotentialNodes, node.Node().Name) {
+		if !slices.Contains(schedulingCtx.Spec.PotentialNodes, node.Node().Name) {
 			return false
 		}
 	}
 	return true
 }
 
-func haveNode(nodeNames []string, nodeName string) bool {
-	for _, n := range nodeNames {
-		if n == nodeName {
-			return true
-		}
-	}
-	return false
-}
-
 // Reserve reserves claims for the pod.
 func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) (status *framework.Status) {
 	if !pl.enabled {
@@ -1402,7 +1384,7 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
 		// scheduler will pick it forever even when it cannot satisfy
 		// the claim.
 		if state.podSchedulingState.schedulingCtx == nil ||
-			!containsNode(state.podSchedulingState.schedulingCtx.Spec.PotentialNodes, nodeName) {
+			!slices.Contains(state.podSchedulingState.schedulingCtx.Spec.PotentialNodes, nodeName) {
 			potentialNodes := []string{nodeName}
 			state.podSchedulingState.potentialNodes = &potentialNodes
 			logger.V(5).Info("asking for information about single potential node", "pod", klog.KObj(pod), "node", klog.ObjectRef{Name: nodeName})
@@ -1478,15 +1460,6 @@ func (pl *dynamicResources) Reserve(ctx context.Context, cs *framework.CycleStat
 	return statusPending(logger, "waiting for resource driver to provide information", "pod", klog.KObj(pod))
 }
 
-func containsNode(hay []string, needle string) bool {
-	for _, node := range hay {
-		if node == needle {
-			return true
-		}
-	}
-	return false
-}
-
 // Unreserve clears the ReservedFor field for all claims.
 // It's idempotent, and does nothing if no state found for the given pod.
 func (pl *dynamicResources) Unreserve(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeName string) {