Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-09 03:57:41 +00:00
Use built-in max and min funcs instead of the k8s.io/utils/integer funcs
This commit is contained in:
parent 17823e00d1
commit eb8f3f194f
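For context, a minimal standalone sketch (not part of the diff below) of what the change relies on: Go 1.21 introduced built-in generic min and max, which cover the typed helpers from k8s.io/utils/integer (IntMin, IntMax, Int32Min, Int32Max, Int64Min) and work on any ordered type without conversions.

package main

import "fmt"

func main() {
	// The built-ins are generic over ordered types and preserve the operand type,
	// so the typed wrappers and int <-> int32/int64 conversions become unnecessary.
	fmt.Println(max(3, 7))               // 7 (int)
	fmt.Println(min(int32(5), int32(2))) // 2 (int32)
	fmt.Println(min(1.5, 2.5))           // 1.5 (float64)
}
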
@@ -52,7 +52,6 @@ import (
 hashutil "k8s.io/kubernetes/pkg/util/hash"
 taintutils "k8s.io/kubernetes/pkg/util/taints"
 "k8s.io/utils/clock"
-"k8s.io/utils/integer"

 "k8s.io/klog/v2"
 )

@@ -940,7 +939,7 @@ func podReadyTime(pod *v1.Pod) *metav1.Time {
 func maxContainerRestarts(pod *v1.Pod) int {
 maxRestarts := 0
 for _, c := range pod.Status.ContainerStatuses {
-maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount))
+maxRestarts = max(maxRestarts, int(c.RestartCount))
 }
 return maxRestarts
 }

@@ -51,7 +51,6 @@ import (
 podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/daemon/util"
-"k8s.io/utils/integer"
 )

 const (

@@ -999,8 +998,8 @@ func (dsc *DaemonSetsController) syncNodes(ctx context.Context, ds *apps.DaemonS
 // prevented from spamming the API service with the pod create requests
 // after one of its pods fails. Conveniently, this also prevents the
 // event spam that those failures would generate.
-batchSize := integer.IntMin(createDiff, controller.SlowStartInitialBatchSize)
-for pos := 0; createDiff > pos; batchSize, pos = integer.IntMin(2*batchSize, createDiff-(pos+batchSize)), pos+batchSize {
+batchSize := min(createDiff, controller.SlowStartInitialBatchSize)
+for pos := 0; createDiff > pos; batchSize, pos = min(2*batchSize, createDiff-(pos+batchSize)), pos+batchSize {
 errorCount := len(errCh)
 createWait.Add(batchSize)
 for i := pos; i < pos+batchSize; i++ {

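As a side note, the loop above follows the slow-start pattern described in the comments: start with a small batch and double it each round, so a systematically failing DaemonSet only costs a few requests. A rough, self-contained sketch of that pattern (illustrative names, not the controller code):

package main

import "fmt"

// slowStart runs do for total items, starting with a small batch and doubling
// the batch size after every fully successful round.
func slowStart(total, initialBatch int, do func(i int) error) int {
	done := 0
	for batch := min(total, initialBatch); batch > 0; batch = min(2*batch, total-done) {
		for i := 0; i < batch; i++ {
			if err := do(done + i); err != nil {
				return done // bail out early; a fuller implementation tracks per-batch errors
			}
		}
		done += batch
	}
	return done
}

func main() {
	created := slowStart(20, 1, func(i int) error {
		fmt.Println("creating pod", i)
		return nil
	})
	fmt.Println("created", created)
}
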
@@ -25,7 +25,6 @@ import (
 "k8s.io/klog/v2"
 "k8s.io/kubernetes/pkg/controller"
 deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
-"k8s.io/utils/integer"
 )

 // rolloutRolling implements the logic for rolling a new replica set.

@@ -174,7 +173,7 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(ctx context.Context, ol
 continue
 }

-scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(*(targetRS.Spec.Replicas)-targetRS.Status.AvailableReplicas)))
+scaledDownCount := min(maxCleanupCount-totalScaledDown, *(targetRS.Spec.Replicas)-targetRS.Status.AvailableReplicas)
 newReplicasCount := *(targetRS.Spec.Replicas) - scaledDownCount
 if newReplicasCount > *(targetRS.Spec.Replicas) {
 return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)

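Worth noting (illustrative example, values assumed): because the built-in min is generic, int32 call sites like the one above no longer need the int()/int32() round trips that integer.IntMin forced.

package main

import "fmt"

func main() {
	var maxCleanupCount, totalScaledDown int32 = 5, 2
	var specReplicas, availableReplicas int32 = 10, 7

	// Before (conceptually): int32(integer.IntMin(int(a), int(b)))
	// After: min(a, b) stays int32, no conversions needed.
	scaledDownCount := min(maxCleanupCount-totalScaledDown, specReplicas-availableReplicas)
	fmt.Println(scaledDownCount) // 3
}
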
@@ -219,7 +218,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(ctx cont
 continue
 }
 // Scale down.
-scaleDownCount := int32(integer.IntMin(int(*(targetRS.Spec.Replicas)), int(totalScaleDownCount-totalScaledDown)))
+scaleDownCount := min(*(targetRS.Spec.Replicas), totalScaleDownCount-totalScaledDown)
 newReplicasCount := *(targetRS.Spec.Replicas) - scaleDownCount
 if newReplicasCount > *(targetRS.Spec.Replicas) {
 return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)

@@ -479,12 +479,12 @@ func GetProportion(logger klog.Logger, rs *apps.ReplicaSet, d apps.Deployment, d
 // Use the minimum between the replica set fraction and the maximum allowed replicas
 // when scaling up. This way we ensure we will not scale up more than the allowed
 // replicas we can add.
-return integer.Int32Min(rsFraction, allowed)
+return min(rsFraction, allowed)
 }
 // Use the maximum between the replica set fraction and the maximum allowed replicas
 // when scaling down. This way we ensure we will not scale down more than the allowed
 // replicas we can remove.
-return integer.Int32Max(rsFraction, allowed)
+return max(rsFraction, allowed)
 }

 // getReplicaSetFraction estimates the fraction of replicas a replica set can have in

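The comments above describe a clamp: min bounds how much can be added, max bounds how much can be removed (both values being negative when scaling down). A tiny illustrative sketch of that bounding logic, with assumed names and values:

package main

import "fmt"

// bound limits a proposed replica change by an allowed budget: positive
// changes are capped with min, negative ones with max.
func bound(fraction, allowed int32) int32 {
	if fraction > 0 {
		return min(fraction, allowed) // never add more than allowed
	}
	return max(fraction, allowed) // never remove more than allowed
}

func main() {
	fmt.Println(bound(5, 3))   // 3: wanted +5, budget allows +3
	fmt.Println(bound(-5, -3)) // -3: wanted -5, budget allows -3
}
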
@@ -799,7 +799,7 @@ func NewRSNewReplicas(deployment *apps.Deployment, allRSs []*apps.ReplicaSet, ne
 // Scale up.
 scaleUpCount := maxTotalPods - currentPodCount
 // Do not exceed the number of desired replicas.
-scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(*(deployment.Spec.Replicas)-*(newRS.Spec.Replicas))))
+scaleUpCount = min(scaleUpCount, *(deployment.Spec.Replicas)-*(newRS.Spec.Replicas))
 return *(newRS.Spec.Replicas) + scaleUpCount, nil
 case apps.RecreateDeploymentStrategyType:
 return *(deployment.Spec.Replicas), nil

@@ -60,7 +60,6 @@ import (
 podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/controller/replicaset/metrics"
-"k8s.io/utils/integer"
 )

 const (

@@ -768,7 +767,7 @@ func (rsc *ReplicaSetController) claimPods(ctx context.Context, rs *apps.Replica
 func slowStartBatch(count int, initialBatchSize int, fn func() error) (int, error) {
 remaining := count
 successes := 0
-for batchSize := integer.IntMin(remaining, initialBatchSize); batchSize > 0; batchSize = integer.IntMin(2*batchSize, remaining) {
+for batchSize := min(remaining, initialBatchSize); batchSize > 0; batchSize = min(2*batchSize, remaining) {
 errCh := make(chan error, batchSize)
 var wg sync.WaitGroup
 wg.Add(batchSize)

@@ -31,7 +31,6 @@ import (
 "k8s.io/klog/v2"
 "k8s.io/kubernetes/pkg/controller/history"
 "k8s.io/kubernetes/pkg/features"
-"k8s.io/utils/integer"
 )

 // Realistic value for maximum in-flight requests when processing in parallel mode.

@@ -281,7 +280,7 @@ func (ssc *defaultStatefulSetControl) getStatefulSetRevisions(
 func slowStartBatch(initialBatchSize int, remaining int, fn func(int) (bool, error)) (int, error) {
 successes := 0
 j := 0
-for batchSize := integer.IntMin(remaining, initialBatchSize); batchSize > 0; batchSize = integer.IntMin(integer.IntMin(2*batchSize, remaining), MaxBatchSize) {
+for batchSize := min(remaining, initialBatchSize); batchSize > 0; batchSize = min(min(2*batchSize, remaining), MaxBatchSize) {
 errCh := make(chan error, batchSize)
 var wg sync.WaitGroup
 wg.Add(batchSize)

@@ -33,7 +33,6 @@ import (
 logsapi "k8s.io/component-base/logs/api/v1"
 "k8s.io/component-base/metrics"
 "k8s.io/klog/v2"
-"k8s.io/utils/integer"
 netutil "k8s.io/utils/net"

 _ "k8s.io/kubernetes/pkg/features"

@@ -299,7 +298,7 @@ func ServiceIPRange(passedServiceClusterIPRange net.IPNet) (net.IPNet, net.IP, e
 serviceClusterIPRange = kubeoptions.DefaultServiceIPCIDR
 }

-size := integer.Int64Min(netutil.RangeSize(&serviceClusterIPRange), 1<<16)
+size := min(netutil.RangeSize(&serviceClusterIPRange), 1<<16)
 if size < 8 {
 return net.IPNet{}, net.IP{}, fmt.Errorf("the service cluster IP range must be at least %d IP addresses", 8)
 }

@@ -92,13 +92,6 @@ func standardDeviation(xs []int) float64 {
 return math.Round(s*1000) / 1000
 }

-func min(x, y int) int {
-if x < y {
-return x
-}
-return y
-}
-
 type numaOrSocketsFirstFuncs interface {
 takeFullFirstLevel()
 takeFullSecondLevel()

@@ -41,7 +41,6 @@ import (
 "k8s.io/client-go/informers"

 "k8s.io/mount-utils"
-"k8s.io/utils/integer"
 netutils "k8s.io/utils/net"

 v1 "k8s.io/api/core/v1"

@@ -757,7 +756,7 @@ func NewMainKubelet(kubeCfg *kubeletconfiginternal.KubeletConfiguration,
 return nil, err
 }
 klet.containerGC = containerGC
-klet.containerDeletor = newPodContainerDeletor(klet.containerRuntime, integer.IntMax(containerGCPolicy.MaxPerPodContainer, minDeadContainerInPod))
+klet.containerDeletor = newPodContainerDeletor(klet.containerRuntime, max(containerGCPolicy.MaxPerPodContainer, minDeadContainerInPod))

 // setup imageManager
 imageManager, err := images.NewImageGCManager(klet.containerRuntime, klet.StatsProvider, kubeDeps.Recorder, nodeRef, imageGCPolicy, kubeDeps.TracerProvider)

@@ -815,13 +815,6 @@ func (n *NodeInfo) update(pod *v1.Pod, sign int64) {
 n.Generation = nextGeneration()
 }

-func max(a, b int64) int64 {
-if a >= b {
-return a
-}
-return b
-}
-
 func calculateResource(pod *v1.Pod) (Resource, int64, int64) {
 var non0InitCPU, non0InitMem int64
 var non0CPU, non0Mem int64

@@ -44,17 +44,3 @@ func hasPathPrefix(s, pathPrefix string) bool {
 }
 return false
 }
-
-func max(a, b int) int {
-if a > b {
-return a
-}
-return b
-}
-
-func min(a, b int) int {
-if a < b {
-return a
-}
-return b
-}

@@ -792,11 +792,11 @@ func (qs *queueSet) findDispatchQueueToBoundLocked() (*queue, *request) {
 queue := qs.queues[qs.robinIndex]
 oldestWaiting, _ := queue.requestsWaiting.Peek()
 if oldestWaiting != nil {
-sMin = ssMin(sMin, queue.nextDispatchR)
-sMax = ssMax(sMax, queue.nextDispatchR)
+sMin = min(sMin, queue.nextDispatchR)
+sMax = max(sMax, queue.nextDispatchR)
 estimatedWorkInProgress := fqrequest.SeatsTimesDuration(float64(queue.seatsInUse), qs.estimatedServiceDuration)
-dsMin = ssMin(dsMin, queue.nextDispatchR-estimatedWorkInProgress)
-dsMax = ssMax(dsMax, queue.nextDispatchR-estimatedWorkInProgress)
+dsMin = min(dsMin, queue.nextDispatchR-estimatedWorkInProgress)
+dsMax = max(dsMax, queue.nextDispatchR-estimatedWorkInProgress)
 currentVirtualFinish := queue.nextDispatchR + oldestWaiting.totalWork()
 klog.V(11).InfoS("Considering queue to dispatch", "queueSet", qs.qCfg.Name, "queue", qs.robinIndex, "finishR", currentVirtualFinish)
 if currentVirtualFinish < minVirtualFinish {

@@ -848,20 +848,6 @@ func (qs *queueSet) findDispatchQueueToBoundLocked() (*queue, *request) {
 return minQueue, oldestReqFromMinQueue
 }

-func ssMin(a, b fqrequest.SeatSeconds) fqrequest.SeatSeconds {
-if a > b {
-return b
-}
-return a
-}
-
-func ssMax(a, b fqrequest.SeatSeconds) fqrequest.SeatSeconds {
-if a < b {
-return b
-}
-return a
-}
-
 // finishRequestAndDispatchAsMuchAsPossible is a convenience method
 // which calls finishRequest for a given request and then dispatches
 // as many requests as possible. This is all of what needs to be done

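The two hunks above also show that the built-ins work for defined numeric types such as fqrequest.SeatSeconds, which is why the hand-written ssMin/ssMax helpers can go away. A minimal sketch with a stand-in type (not the real SeatSeconds definition):

package main

import "fmt"

// SeatSeconds is a stand-in defined numeric type for illustration.
type SeatSeconds uint64

func main() {
	a, b := SeatSeconds(3), SeatSeconds(9)
	// min and max accept any ordered type and return that same type.
	fmt.Println(min(a, b), max(a, b)) // 3 9
}
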
@@ -23,7 +23,6 @@ import (

 "k8s.io/utils/clock"
 testingclock "k8s.io/utils/clock/testing"
-"k8s.io/utils/integer"
 )

 type backoffEntry struct {

@@ -100,7 +99,7 @@ func (p *Backoff) Next(id string, eventTime time.Time) {
 } else {
 delay := entry.backoff * 2 // exponential
 delay += p.jitter(entry.backoff) // add some jitter to the delay
-entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration)))
+entry.backoff = min(delay, p.maxDuration)
 }
 entry.lastUpdate = p.Clock.Now()
 }

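The hunk above keeps the exponential-backoff-with-jitter logic and only swaps the capping call; since time.Duration is itself an ordered type, the int64 conversions disappear. A rough sketch of that capped backoff step (illustrative helper, not the Backoff type's API):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextBackoff doubles the current delay, adds some jitter, and caps the result.
func nextBackoff(current, maxDuration time.Duration) time.Duration {
	delay := current * 2                                  // exponential
	delay += time.Duration(rand.Int63n(int64(current)+1)) // add some jitter
	return min(delay, maxDuration)                        // never exceed the cap
}

func main() {
	d := time.Second
	for i := 0; i < 5; i++ {
		d = nextBackoff(d, 30*time.Second)
		fmt.Println(d)
	}
}
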
@@ -32,7 +32,6 @@ import (
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/cli-runtime/pkg/printers"
 "k8s.io/client-go/util/jsonpath"
-"k8s.io/utils/integer"

 "github.com/fvbommel/sortorder"
 )

@@ -206,7 +205,7 @@ func isLess(i, j reflect.Value) (bool, error) {
 return true, nil
 case reflect.Array, reflect.Slice:
 // note: the length of i and j may be different
-for idx := 0; idx < integer.IntMin(i.Len(), j.Len()); idx++ {
+for idx := 0; idx < min(i.Len(), j.Len()); idx++ {
 less, err := isLess(i.Index(idx), j.Index(idx))
 if err != nil || !less {
 return less, err

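The bound min(i.Len(), j.Len()) above matters because the two slices may have different lengths; only the common prefix is walked element by element. A small assumed example of the same idea for plain int slices (not the isLess implementation):

package main

import "fmt"

// lessIntSlices compares two int slices element by element over their common
// prefix; if the prefixes are equal, the shorter slice sorts first.
func lessIntSlices(a, b []int) bool {
	for idx := 0; idx < min(len(a), len(b)); idx++ {
		if a[idx] != b[idx] {
			return a[idx] < b[idx]
		}
	}
	return len(a) < len(b)
}

func main() {
	fmt.Println(lessIntSlices([]int{1, 2}, []int{1, 2, 3})) // true
	fmt.Println(lessIntSlices([]int{1, 3}, []int{1, 2, 3})) // false
}
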
@@ -21,7 +21,6 @@ import (

 corev1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-"k8s.io/utils/integer"
 )

 // IsPodAvailable returns true if a pod is available; false otherwise.

@@ -194,7 +193,7 @@ func podReadyTime(pod *corev1.Pod) *metav1.Time {
 func maxContainerRestarts(pod *corev1.Pod) int {
 maxRestarts := 0
 for _, c := range pod.Status.ContainerStatuses {
-maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount))
+maxRestarts = max(maxRestarts, int(c.RestartCount))
 }
 return maxRestarts
 }