Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 11:50:44 +00:00)
Avoid unnecessary copies
parent 989202c384
commit dcb2ca54ad
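The diff below applies the same micro-optimization in several scheduler code paths: `for _, x := range slice` copies each element into the loop variable, so the loops are rewritten to index the slice and take a pointer to the element instead. A minimal, self-contained sketch of that pattern, using an illustrative `Item` type rather than the real API structs:

```go
package main

import "fmt"

// Item is an illustrative stand-in for a struct that is expensive to copy,
// such as api.Toleration or api.Container in the real scheduler code.
type Item struct {
	Name    string
	Payload [64]byte // bulk that makes a per-iteration copy costly
}

// countNamedCopy ranges by value: every iteration copies a full Item.
func countNamedCopy(items []Item, name string) int {
	n := 0
	for _, it := range items { // it is a copy of items[i]
		if it.Name == name {
			n++
		}
	}
	return n
}

// countNamedNoCopy ranges by index and takes a pointer, avoiding the copy.
func countNamedNoCopy(items []Item, name string) int {
	n := 0
	for i := range items {
		it := &items[i] // points into the slice; nothing is copied
		if it.Name == name {
			n++
		}
	}
	return n
}

func main() {
	items := []Item{{Name: "a"}, {Name: "b"}, {Name: "a"}}
	fmt.Println(countNamedCopy(items, "a"), countNamedNoCopy(items, "a")) // 2 2
}
```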
@@ -470,7 +470,7 @@ func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]Taint, error
 }
 
 // TolerationToleratesTaint checks if the toleration tolerates the taint.
-func TolerationToleratesTaint(toleration Toleration, taint Taint) bool {
+func TolerationToleratesTaint(toleration *Toleration, taint *Taint) bool {
 	if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect {
 		return false
 	}
@@ -490,10 +490,10 @@ func TolerationToleratesTaint(toleration Toleration, taint Taint) bool {
 }
 
 // TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations.
-func TaintToleratedByTolerations(taint Taint, tolerations []Toleration) bool {
+func TaintToleratedByTolerations(taint *Taint, tolerations []Toleration) bool {
 	tolerated := false
-	for _, toleration := range tolerations {
-		if TolerationToleratesTaint(toleration, taint) {
+	for i := range tolerations {
+		if TolerationToleratesTaint(&tolerations[i], taint) {
 			tolerated = true
 			break
 		}
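The signature change above is the other half of the pattern: the helpers now take `*Toleration` and `*Taint`, so neither the caller nor the loop copies a struct per call. A rough, self-contained sketch of how such pointer-based helpers fit together; the types and matching logic here are simplified stand-ins, not the real `api` definitions:

```go
package main

import "fmt"

// Toleration and Taint are simplified stand-ins for the real API types;
// only the fields used below are included.
type Toleration struct {
	Key    string
	Value  string
	Effect string
}

type Taint struct {
	Key    string
	Value  string
	Effect string
}

// tolerationToleratesTaint mirrors the pointer-based form adopted by the commit:
// callers pass *Toleration and *Taint, so no struct is copied per call.
// The body is a simplified approximation of the real matching logic.
func tolerationToleratesTaint(toleration *Toleration, taint *Taint) bool {
	if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect {
		return false
	}
	return toleration.Key == taint.Key && toleration.Value == taint.Value
}

// taintToleratedByTolerations shows the matching call pattern: index the slice
// and pass the element's address instead of a copied loop variable.
func taintToleratedByTolerations(taint *Taint, tolerations []Toleration) bool {
	for i := range tolerations {
		if tolerationToleratesTaint(&tolerations[i], taint) {
			return true
		}
	}
	return false
}

func main() {
	taint := Taint{Key: "dedicated", Value: "gpu", Effect: "NoSchedule"}
	tols := []Toleration{{Key: "dedicated", Value: "gpu", Effect: "NoSchedule"}}
	fmt.Println(taintToleratedByTolerations(&taint, tols)) // true
}
```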
@@ -771,8 +771,10 @@ func getUsedPorts(pods ...*api.Pod) map[int]bool {
 	// TODO: Aggregate it at the NodeInfo level.
 	ports := make(map[int]bool)
 	for _, pod := range pods {
-		for _, container := range pod.Spec.Containers {
-			for _, podPort := range container.Ports {
+		for j := range pod.Spec.Containers {
+			container := &pod.Spec.Containers[j]
+			for k := range container.Ports {
+				podPort := &container.Ports[k]
 				// "0" is explicitly ignored in PodFitsHostPorts,
 				// which is the only function that uses this value.
 				if podPort.HostPort != 0 {
@@ -1045,7 +1047,8 @@ func tolerationsToleratesTaints(tolerations []api.Toleration, taints []api.Taint
 		return false
 	}
 
-	for _, taint := range taints {
+	for i := range taints {
+		taint := &taints[i]
 		// skip taints that have effect PreferNoSchedule, since it is for priorities
 		if taint.Effect == api.TaintEffectPreferNoSchedule {
 			continue
@@ -31,14 +31,14 @@ import (
 // the node satisfies and the more the preferredSchedulingTerm that is satisfied weights, the higher
 // score the node gets.
 func CalculateNodeAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
-	var maxCount int
-	counts := map[string]int{}
-
 	nodes, err := nodeLister.List()
 	if err != nil {
 		return nil, err
 	}
 
+	var maxCount int
+	counts := make(map[string]int, len(nodes.Items))
+
 	affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
 	if err != nil {
 		return nil, err
@@ -71,7 +71,7 @@ func CalculateNodeAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*sche
 		}
 	}
 
-	result := []schedulerapi.HostPriority{}
+	result := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
 	for i := range nodes.Items {
 		node := &nodes.Items[i]
 		fScore := float64(0)
@@ -26,7 +26,8 @@ import (
 
 // CountIntolerableTaintsPreferNoSchedule gives the count of intolerable taints of a pod with effect PreferNoSchedule
 func countIntolerableTaintsPreferNoSchedule(taints []api.Taint, tolerations []api.Toleration) (intolerableTaints int) {
-	for _, taint := range taints {
+	for i := range taints {
+		taint := &taints[i]
 		// check only on taints that have effect PreferNoSchedule
 		if taint.Effect != api.TaintEffectPreferNoSchedule {
 			continue
@@ -41,9 +42,10 @@ func countIntolerableTaintsPreferNoSchedule(taints []api.Taint, tolerations []ap
 
 // getAllTolerationEffectPreferNoSchedule gets the list of all Toleration with Effect PreferNoSchedule
 func getAllTolerationPreferNoSchedule(tolerations []api.Toleration) (tolerationList []api.Toleration) {
-	for _, toleration := range tolerations {
+	for i := range tolerations {
+		toleration := &tolerations[i]
 		if len(toleration.Effect) == 0 || toleration.Effect == api.TaintEffectPreferNoSchedule {
-			tolerationList = append(tolerationList, toleration)
+			tolerationList = append(tolerationList, *toleration)
 		}
 	}
 	return
@@ -51,17 +53,16 @@ func getAllTolerationPreferNoSchedule(tolerations []api.Toleration) (tolerationL
 
 // ComputeTaintTolerationPriority prepares the priority list for all the nodes based on the number of intolerable taints on the node
 func ComputeTaintTolerationPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodeLister algorithm.NodeLister) (schedulerapi.HostPriorityList, error) {
-	// counts hold the count of intolerable taints of a pod for a given node
-	counts := make(map[string]int)
-
-	// the max value of counts
-	var maxCount int
-
 	nodes, err := nodeLister.List()
 	if err != nil {
 		return nil, err
 	}
 
+	// the max value of counts
+	var maxCount int
+	// counts hold the count of intolerable taints of a pod for a given node
+	counts := make(map[string]int, len(nodes.Items))
+
 	tolerations, err := api.GetTolerationsFromPodAnnotations(pod.Annotations)
 	if err != nil {
 		return nil, err
@@ -88,7 +89,8 @@ func ComputeTaintTolerationPriority(pod *api.Pod, nodeNameToInfo map[string]*sch
 	// Priority values range from 0 - maxPriority
 	const maxPriority = 10
 	result := make(schedulerapi.HostPriorityList, 0, len(nodes.Items))
-	for _, node := range nodes.Items {
+	for i := range nodes.Items {
+		node := &nodes.Items[i]
 		fScore := float64(maxPriority)
 		if maxCount > 0 {
 			fScore = (1.0 - float64(counts[node.Name])/float64(maxCount)) * 10
@@ -433,7 +433,8 @@ func (f *ConfigFactory) responsibleForPod(pod *api.Pod) bool {
 
 func getNodeConditionPredicate() cache.NodeConditionPredicate {
 	return func(node *api.Node) bool {
-		for _, cond := range node.Status.Conditions {
+		for i := range node.Status.Conditions {
+			cond := &node.Status.Conditions[i]
 			// We consider the node for scheduling only when its:
 			// - NodeReady condition status is ConditionTrue,
 			// - NodeOutOfDisk condition status is ConditionFalse,
@@ -232,7 +232,7 @@ func PrioritizeNodes(
 	nodeLister algorithm.NodeLister,
 	extenders []algorithm.SchedulerExtender,
 ) (schedulerapi.HostPriorityList, error) {
-	result := schedulerapi.HostPriorityList{}
+	result := make(schedulerapi.HostPriorityList, 0, len(nodeNameToInfo))
 
 	// If no priority configs are provided, then the EqualPriority function is applied
 	// This is required to generate the priority list in the required format
@@ -243,7 +243,7 @@ func PrioritizeNodes(
 	var (
 		mu             = sync.Mutex{}
 		wg             = sync.WaitGroup{}
-		combinedScores = map[string]int{}
+		combinedScores = make(map[string]int, len(nodeNameToInfo))
 		errs           []error
 	)
 