From eb8f3f194fed16484162aebdaab69168e02f8cb4 Mon Sep 17 00:00:00 2001 From: weilaaa Date: Fri, 15 Dec 2023 15:09:11 +0800 Subject: [PATCH] use built-in max and min funcs instead of k8s.io/utils/integer funcs --- pkg/controller/controller_utils.go | 3 +-- pkg/controller/daemon/daemon_controller.go | 5 ++--- pkg/controller/deployment/rolling.go | 5 ++--- .../deployment/util/deployment_util.go | 6 ++--- pkg/controller/replicaset/replica_set.go | 3 +-- .../statefulset/stateful_set_control.go | 3 +-- pkg/controlplane/apiserver/options/options.go | 3 +-- pkg/kubelet/cm/cpumanager/cpu_assignment.go | 7 ------ pkg/kubelet/kubelet.go | 3 +-- pkg/scheduler/framework/types.go | 7 ------ .../apiserver/pkg/storage/cacher/util.go | 14 ------------ .../fairqueuing/queueset/queueset.go | 22 ++++--------------- .../client-go/util/flowcontrol/backoff.go | 3 +-- .../src/k8s.io/kubectl/pkg/cmd/get/sorter.go | 3 +-- .../kubectl/pkg/util/podutils/podutils.go | 3 +-- 15 files changed, 19 insertions(+), 71 deletions(-) diff --git a/pkg/controller/controller_utils.go b/pkg/controller/controller_utils.go index 6a44ec3c036..64eb3b317bb 100644 --- a/pkg/controller/controller_utils.go +++ b/pkg/controller/controller_utils.go @@ -52,7 +52,6 @@ import ( hashutil "k8s.io/kubernetes/pkg/util/hash" taintutils "k8s.io/kubernetes/pkg/util/taints" "k8s.io/utils/clock" - "k8s.io/utils/integer" "k8s.io/klog/v2" ) @@ -940,7 +939,7 @@ func podReadyTime(pod *v1.Pod) *metav1.Time { func maxContainerRestarts(pod *v1.Pod) int { maxRestarts := 0 for _, c := range pod.Status.ContainerStatuses { - maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount)) + maxRestarts = max(maxRestarts, int(c.RestartCount)) } return maxRestarts } diff --git a/pkg/controller/daemon/daemon_controller.go b/pkg/controller/daemon/daemon_controller.go index 40fec239a19..94393a7b03b 100644 --- a/pkg/controller/daemon/daemon_controller.go +++ b/pkg/controller/daemon/daemon_controller.go @@ -51,7 +51,6 @@ import ( podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/daemon/util" - "k8s.io/utils/integer" ) const ( @@ -999,8 +998,8 @@ func (dsc *DaemonSetsController) syncNodes(ctx context.Context, ds *apps.DaemonS // prevented from spamming the API service with the pod create requests // after one of its pods fails. Conveniently, this also prevents the // event spam that those failures would generate. - batchSize := integer.IntMin(createDiff, controller.SlowStartInitialBatchSize) - for pos := 0; createDiff > pos; batchSize, pos = integer.IntMin(2*batchSize, createDiff-(pos+batchSize)), pos+batchSize { + batchSize := min(createDiff, controller.SlowStartInitialBatchSize) + for pos := 0; createDiff > pos; batchSize, pos = min(2*batchSize, createDiff-(pos+batchSize)), pos+batchSize { errorCount := len(errCh) createWait.Add(batchSize) for i := pos; i < pos+batchSize; i++ { diff --git a/pkg/controller/deployment/rolling.go b/pkg/controller/deployment/rolling.go index 1d3446b5f7f..8da48469a23 100644 --- a/pkg/controller/deployment/rolling.go +++ b/pkg/controller/deployment/rolling.go @@ -25,7 +25,6 @@ import ( "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/controller" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" - "k8s.io/utils/integer" ) // rolloutRolling implements the logic for rolling a new replica set. 
@@ -174,7 +173,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(ctx context.Context, ol continue } - scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(*(targetRS.Spec.Replicas)-targetRS.Status.AvailableReplicas))) + scaledDownCount := min(maxCleanupCount-totalScaledDown, *(targetRS.Spec.Replicas)-targetRS.Status.AvailableReplicas) newReplicasCount := *(targetRS.Spec.Replicas) - scaledDownCount if newReplicasCount > *(targetRS.Spec.Replicas) { return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount) @@ -219,7 +218,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(ctx cont continue } // Scale down. - scaleDownCount := int32(integer.IntMin(int(*(targetRS.Spec.Replicas)), int(totalScaleDownCount-totalScaledDown))) + scaleDownCount := min(*(targetRS.Spec.Replicas), totalScaleDownCount-totalScaledDown) newReplicasCount := *(targetRS.Spec.Replicas) - scaleDownCount if newReplicasCount > *(targetRS.Spec.Replicas) { return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount) diff --git a/pkg/controller/deployment/util/deployment_util.go b/pkg/controller/deployment/util/deployment_util.go index d071dbfed09..772c25de25b 100644 --- a/pkg/controller/deployment/util/deployment_util.go +++ b/pkg/controller/deployment/util/deployment_util.go @@ -479,12 +479,12 @@ func GetProportion(logger klog.Logger, rs *apps.ReplicaSet, d apps.Deployment, d // Use the minimum between the replica set fraction and the maximum allowed replicas // when scaling up. This way we ensure we will not scale up more than the allowed // replicas we can add. - return integer.Int32Min(rsFraction, allowed) + return min(rsFraction, allowed) } // Use the maximum between the replica set fraction and the maximum allowed replicas // when scaling down. This way we ensure we will not scale down more than the allowed // replicas we can remove. - return integer.Int32Max(rsFraction, allowed) + return max(rsFraction, allowed) } // getReplicaSetFraction estimates the fraction of replicas a replica set can have in @@ -799,7 +799,7 @@ func NewRSNewReplicas(deployment *apps.Deployment, allRSs []*apps.ReplicaSet, ne // Scale up. scaleUpCount := maxTotalPods - currentPodCount // Do not exceed the number of desired replicas. 
- scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(*(deployment.Spec.Replicas)-*(newRS.Spec.Replicas)))) + scaleUpCount = min(scaleUpCount, *(deployment.Spec.Replicas)-*(newRS.Spec.Replicas)) return *(newRS.Spec.Replicas) + scaleUpCount, nil case apps.RecreateDeploymentStrategyType: return *(deployment.Spec.Replicas), nil diff --git a/pkg/controller/replicaset/replica_set.go b/pkg/controller/replicaset/replica_set.go index 1307d1b2903..0f688a8fce4 100644 --- a/pkg/controller/replicaset/replica_set.go +++ b/pkg/controller/replicaset/replica_set.go @@ -60,7 +60,6 @@ import ( podutil "k8s.io/kubernetes/pkg/api/v1/pod" "k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller/replicaset/metrics" - "k8s.io/utils/integer" ) const ( @@ -768,7 +767,7 @@ func (rsc *ReplicaSetController) claimPods(ctx context.Context, rs *apps.Replica func slowStartBatch(count int, initialBatchSize int, fn func() error) (int, error) { remaining := count successes := 0 - for batchSize := integer.IntMin(remaining, initialBatchSize); batchSize > 0; batchSize = integer.IntMin(2*batchSize, remaining) { + for batchSize := min(remaining, initialBatchSize); batchSize > 0; batchSize = min(2*batchSize, remaining) { errCh := make(chan error, batchSize) var wg sync.WaitGroup wg.Add(batchSize) diff --git a/pkg/controller/statefulset/stateful_set_control.go b/pkg/controller/statefulset/stateful_set_control.go index e007d89b802..ce8c693fc8b 100644 --- a/pkg/controller/statefulset/stateful_set_control.go +++ b/pkg/controller/statefulset/stateful_set_control.go @@ -31,7 +31,6 @@ import ( "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/controller/history" "k8s.io/kubernetes/pkg/features" - "k8s.io/utils/integer" ) // Realistic value for maximum in-flight requests when processing in parallel mode. 
@@ -281,7 +280,7 @@ func (ssc *defaultStatefulSetControl) getStatefulSetRevisions( func slowStartBatch(initialBatchSize int, remaining int, fn func(int) (bool, error)) (int, error) { successes := 0 j := 0 - for batchSize := integer.IntMin(remaining, initialBatchSize); batchSize > 0; batchSize = integer.IntMin(integer.IntMin(2*batchSize, remaining), MaxBatchSize) { + for batchSize := min(remaining, initialBatchSize); batchSize > 0; batchSize = min(min(2*batchSize, remaining), MaxBatchSize) { errCh := make(chan error, batchSize) var wg sync.WaitGroup wg.Add(batchSize) diff --git a/pkg/controlplane/apiserver/options/options.go b/pkg/controlplane/apiserver/options/options.go index 5a1c3ec9734..f4ef5be2831 100644 --- a/pkg/controlplane/apiserver/options/options.go +++ b/pkg/controlplane/apiserver/options/options.go @@ -33,7 +33,6 @@ import ( logsapi "k8s.io/component-base/logs/api/v1" "k8s.io/component-base/metrics" "k8s.io/klog/v2" - "k8s.io/utils/integer" netutil "k8s.io/utils/net" _ "k8s.io/kubernetes/pkg/features" @@ -299,7 +298,7 @@ func ServiceIPRange(passedServiceClusterIPRange net.IPNet) (net.IPNet, net.IP, e serviceClusterIPRange = kubeoptions.DefaultServiceIPCIDR } - size := integer.Int64Min(netutil.RangeSize(&serviceClusterIPRange), 1<<16) + size := min(netutil.RangeSize(&serviceClusterIPRange), 1<<16) if size < 8 { return net.IPNet{}, net.IP{}, fmt.Errorf("the service cluster IP range must be at least %d IP addresses", 8) } diff --git a/pkg/kubelet/cm/cpumanager/cpu_assignment.go b/pkg/kubelet/cm/cpumanager/cpu_assignment.go index f0efd74e80e..46d96a5d26b 100644 --- a/pkg/kubelet/cm/cpumanager/cpu_assignment.go +++ b/pkg/kubelet/cm/cpumanager/cpu_assignment.go @@ -92,13 +92,6 @@ func standardDeviation(xs []int) float64 { return math.Round(s*1000) / 1000 } -func min(x, y int) int { - if x < y { - return x - } - return y -} - type numaOrSocketsFirstFuncs interface { takeFullFirstLevel() takeFullSecondLevel() diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 62083b62db4..4c44ccbbc79 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -41,7 +41,6 @@ import ( "k8s.io/client-go/informers" "k8s.io/mount-utils" - "k8s.io/utils/integer" netutils "k8s.io/utils/net" v1 "k8s.io/api/core/v1" @@ -757,7 +756,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration, return nil, err } klet.containerGC = containerGC - klet.containerDeletor = newPodContainerDeletor(klet.containerRuntime, integer.IntMax(containerGCPolicy.MaxPerPodContainer, minDeadContainerInPod)) + klet.containerDeletor = newPodContainerDeletor(klet.containerRuntime, max(containerGCPolicy.MaxPerPodContainer, minDeadContainerInPod)) // setup imageManager imageManager, err := images.NewImageGCManager(klet.containerRuntime, klet.StatsProvider, kubeDeps.Recorder, nodeRef, imageGCPolicy, kubeDeps.TracerProvider) diff --git a/pkg/scheduler/framework/types.go b/pkg/scheduler/framework/types.go index 696ad9b41ac..85cd960ac8a 100644 --- a/pkg/scheduler/framework/types.go +++ b/pkg/scheduler/framework/types.go @@ -815,13 +815,6 @@ func (n *NodeInfo) update(pod *v1.Pod, sign int64) { n.Generation = nextGeneration() } -func max(a, b int64) int64 { - if a >= b { - return a - } - return b -} - func calculateResource(pod *v1.Pod) (Resource, int64, int64) { var non0InitCPU, non0InitMem int64 var non0CPU, non0Mem int64 diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/util.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/util.go index 7943a93dcab..63a23800f02 100644 --- 
a/staging/src/k8s.io/apiserver/pkg/storage/cacher/util.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/util.go @@ -44,17 +44,3 @@ func hasPathPrefix(s, pathPrefix string) bool { } return false } - -func max(a, b int) int { - if a > b { - return a - } - return b -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go index b675bb5453c..2cb6db75436 100644 --- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go +++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go @@ -792,11 +792,11 @@ func (qs *queueSet) findDispatchQueueToBoundLocked() (*queue, *request) { queue := qs.queues[qs.robinIndex] oldestWaiting, _ := queue.requestsWaiting.Peek() if oldestWaiting != nil { - sMin = ssMin(sMin, queue.nextDispatchR) - sMax = ssMax(sMax, queue.nextDispatchR) + sMin = min(sMin, queue.nextDispatchR) + sMax = max(sMax, queue.nextDispatchR) estimatedWorkInProgress := fqrequest.SeatsTimesDuration(float64(queue.seatsInUse), qs.estimatedServiceDuration) - dsMin = ssMin(dsMin, queue.nextDispatchR-estimatedWorkInProgress) - dsMax = ssMax(dsMax, queue.nextDispatchR-estimatedWorkInProgress) + dsMin = min(dsMin, queue.nextDispatchR-estimatedWorkInProgress) + dsMax = max(dsMax, queue.nextDispatchR-estimatedWorkInProgress) currentVirtualFinish := queue.nextDispatchR + oldestWaiting.totalWork() klog.V(11).InfoS("Considering queue to dispatch", "queueSet", qs.qCfg.Name, "queue", qs.robinIndex, "finishR", currentVirtualFinish) if currentVirtualFinish < minVirtualFinish { @@ -848,20 +848,6 @@ func (qs *queueSet) findDispatchQueueToBoundLocked() (*queue, *request) { return minQueue, oldestReqFromMinQueue } -func ssMin(a, b fqrequest.SeatSeconds) fqrequest.SeatSeconds { - if a > b { - return b - } - return a -} - -func ssMax(a, b fqrequest.SeatSeconds) fqrequest.SeatSeconds { - if a < b { - return b - } - return a -} - // finishRequestAndDispatchAsMuchAsPossible is a convenience method // which calls finishRequest for a given request and then dispatches // as many requests as possible. 
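Why the dedicated ssMin/ssMax helpers above can simply be deleted rather than rewritten: the min and max builtins added in Go 1.21 are generic over any ordered type, so they cover not only int, int32, int64 and time.Duration but also defined types such as fqrequest.SeatSeconds. A minimal sketch of that behaviour, assuming a Go 1.21+ toolchain (seatSeconds below is a simplified stand-in for the real SeatSeconds type, not the upstream definition):

package main

import (
	"fmt"
	"time"
)

// seatSeconds is a simplified stand-in for fqrequest.SeatSeconds: a defined
// type with an ordered underlying type, which is all the builtins require.
type seatSeconds uint64

func main() {
	// One generic pair of builtins covers every call site touched by this
	// patch, without the int(...) conversions the integer helpers forced.
	fmt.Println(max(0, int32(3)))                    // int32, prints 3
	fmt.Println(min(2*time.Second, time.Second))     // time.Duration, prints 1s
	fmt.Println(min(seatSeconds(7), seatSeconds(4))) // defined type, prints 4
}
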
diff --git a/staging/src/k8s.io/client-go/util/flowcontrol/backoff.go b/staging/src/k8s.io/client-go/util/flowcontrol/backoff.go index 3ef88dbdb89..82e4c4c4089 100644 --- a/staging/src/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/staging/src/k8s.io/client-go/util/flowcontrol/backoff.go @@ -23,7 +23,6 @@ import ( "k8s.io/utils/clock" testingclock "k8s.io/utils/clock/testing" - "k8s.io/utils/integer" ) type backoffEntry struct { @@ -100,7 +99,7 @@ func (p *Backoff) Next(id string, eventTime time.Time) { } else { delay := entry.backoff * 2 // exponential delay += p.jitter(entry.backoff) // add some jitter to the delay - entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration))) + entry.backoff = min(delay, p.maxDuration) } entry.lastUpdate = p.Clock.Now() } diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/get/sorter.go b/staging/src/k8s.io/kubectl/pkg/cmd/get/sorter.go index 9f9e2b80424..7fe2437e42d 100644 --- a/staging/src/k8s.io/kubectl/pkg/cmd/get/sorter.go +++ b/staging/src/k8s.io/kubectl/pkg/cmd/get/sorter.go @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/cli-runtime/pkg/printers" "k8s.io/client-go/util/jsonpath" - "k8s.io/utils/integer" "github.com/fvbommel/sortorder" ) @@ -206,7 +205,7 @@ func isLess(i, j reflect.Value) (bool, error) { return true, nil case reflect.Array, reflect.Slice: // note: the length of i and j may be different - for idx := 0; idx < integer.IntMin(i.Len(), j.Len()); idx++ { + for idx := 0; idx < min(i.Len(), j.Len()); idx++ { less, err := isLess(i.Index(idx), j.Index(idx)) if err != nil || !less { return less, err diff --git a/staging/src/k8s.io/kubectl/pkg/util/podutils/podutils.go b/staging/src/k8s.io/kubectl/pkg/util/podutils/podutils.go index e9cfdeba335..642a6d47a7a 100644 --- a/staging/src/k8s.io/kubectl/pkg/util/podutils/podutils.go +++ b/staging/src/k8s.io/kubectl/pkg/util/podutils/podutils.go @@ -21,7 +21,6 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/integer" ) // IsPodAvailable returns true if a pod is available; false otherwise. @@ -194,7 +193,7 @@ func podReadyTime(pod *corev1.Pod) *metav1.Time { func maxContainerRestarts(pod *corev1.Pod) int { maxRestarts := 0 for _, c := range pod.Status.ContainerStatuses { - maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount)) + maxRestarts = max(maxRestarts, int(c.RestartCount)) } return maxRestarts }
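The slow-start loops rewritten above in daemon_controller.go, replica_set.go and stateful_set_control.go keep the behaviour the daemon controller comment describes: start with a small batch, double it after every fully successful batch, and never exceed the work that remains, so a failing workload cannot spam the API server. A rough sketch of how the batch sizes grow under that pattern, assuming Go 1.21+ for the built-in min (an illustrative helper, not the upstream slowStartBatch):

package main

import "fmt"

// slowStartBatches mirrors the doubling pattern used by slowStartBatch in the
// controllers: the first batch is capped by initialBatchSize, each later batch
// doubles, and the final batch is capped by the remaining work.
func slowStartBatches(count, initialBatchSize int) []int {
	var sizes []int
	remaining := count
	for batchSize := min(remaining, initialBatchSize); batchSize > 0; batchSize = min(2*batchSize, remaining) {
		sizes = append(sizes, batchSize)
		remaining -= batchSize
	}
	return sizes
}

func main() {
	// Creating 13 pods with an initial batch of 1 issues batches of
	// 1, 2, 4 and then 6, since only 6 pods remain for the last round.
	fmt.Println(slowStartBatches(13, 1)) // [1 2 4 6]
}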