Merge pull request #117624 from skitt/intstr-fromint32-apps

Apps: use new intstr functions
Kubernetes Prow Robot committed on 2023-05-01 11:18:16 -07:00 (via GitHub)
commit 73bd83cfa7
10 changed files with 58 additions and 58 deletions
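The change is mechanical: each intstr.FromInt(n) call becomes intstr.FromInt32(n). The motivation is that FromInt accepts a platform-sized int and converts it to int32 inside the constructor, where an out-of-range value would be truncated silently, while FromInt32 accepts an int32 directly, so any narrowing has to happen visibly at the call site. A minimal standalone sketch of the two constructors (assuming only k8s.io/apimachinery on the module path):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// FromInt takes an int and converts it to int32 internally; on 64-bit
	// platforms a large value would be truncated inside the call.
	older := intstr.FromInt(1)

	// FromInt32 takes an int32 directly, so the compiler checks the width
	// at the call site; untyped constants such as 1 still work unchanged.
	one := intstr.FromInt32(1)

	fmt.Println(older.IntValue(), one.IntValue()) // 1 1
}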

View File

@@ -124,7 +124,7 @@ func newDaemonSet(name string) *apps.DaemonSet {
}
func newRollingUpdateStrategy() *apps.DaemonSetUpdateStrategy {
- one := intstr.FromInt(1)
+ one := intstr.FromInt32(1)
return &apps.DaemonSetUpdateStrategy{
Type: apps.RollingUpdateDaemonSetStrategyType,
RollingUpdate: &apps.RollingUpdateDaemonSet{MaxUnavailable: &one},
@@ -3217,7 +3217,7 @@ func getQueuedKeys(queue workqueue.RateLimitingInterface) []string {
func TestSurgeDealsWithExistingPods(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ds := newDaemonSet("foo")
- ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
+ ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
manager, podControl, _, err := newTestController(ctx, ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
@@ -3234,7 +3234,7 @@ func TestSurgeDealsWithExistingPods(t *testing.T) {
func TestSurgePreservesReadyOldPods(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ds := newDaemonSet("foo")
- ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
+ ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
manager, podControl, _, err := newTestController(ctx, ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
@@ -3274,7 +3274,7 @@ func TestSurgePreservesReadyOldPods(t *testing.T) {
func TestSurgeCreatesNewPodWhenAtMaxSurgeAndOldPodDeleted(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ds := newDaemonSet("foo")
- ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
+ ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
manager, podControl, _, err := newTestController(ctx, ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
@@ -3321,7 +3321,7 @@ func TestSurgeCreatesNewPodWhenAtMaxSurgeAndOldPodDeleted(t *testing.T) {
func TestSurgeDeletesUnreadyOldPods(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ds := newDaemonSet("foo")
- ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
+ ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
manager, podControl, _, err := newTestController(ctx, ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
@@ -3362,7 +3362,7 @@ func TestSurgePreservesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ds := newDaemonSet("foo")
ds.Spec.MinReadySeconds = 15
- ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
+ ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
manager, podControl, _, err := newTestController(ctx, ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)
@@ -3407,7 +3407,7 @@ func TestSurgeDeletesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ds := newDaemonSet("foo")
ds.Spec.MinReadySeconds = 15
- ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
+ ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
manager, podControl, _, err := newTestController(ctx, ds)
if err != nil {
t.Fatalf("error creating DaemonSets controller: %v", err)

View File

@@ -358,7 +358,7 @@ func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
}
func newUpdateSurge(value intstr.IntOrString) apps.DaemonSetUpdateStrategy {
- zero := intstr.FromInt(0)
+ zero := intstr.FromInt32(0)
return apps.DaemonSetUpdateStrategy{
Type: apps.RollingUpdateDaemonSetStrategyType,
RollingUpdate: &apps.RollingUpdateDaemonSet{
@@ -399,7 +399,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
},
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
- ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt(0))
+ ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt32(0))
return ds
}(),
nodeToPods: make(map[string][]*v1.Pod),
@@ -418,7 +418,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
},
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
- ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt(1))
+ ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt32(1))
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
@@ -446,7 +446,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
},
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
- ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt(0))
+ ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt32(0))
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {
@@ -471,7 +471,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
},
ds: func() *apps.DaemonSet {
ds := newDaemonSet("x")
- ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(0))
+ ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(0))
return ds
}(),
nodeToPods: func() map[string][]*v1.Pod {

View File

@@ -93,8 +93,8 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu
Strategy: apps.DeploymentStrategy{
Type: apps.RollingUpdateDeploymentStrategyType,
RollingUpdate: &apps.RollingUpdateDeployment{
- MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
- MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
+ MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt32(0); return &i }(),
+ MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt32(0); return &i }(),
},
},
Replicas: pointer.Int32(int32(replicas)),

View File

@@ -40,14 +40,14 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
{
// Should not scale up.
deploymentReplicas: 10,
- maxSurge: intstr.FromInt(0),
+ maxSurge: intstr.FromInt32(0),
oldReplicas: 10,
newReplicas: 0,
scaleExpected: false,
},
{
deploymentReplicas: 10,
- maxSurge: intstr.FromInt(2),
+ maxSurge: intstr.FromInt32(2),
oldReplicas: 10,
newReplicas: 0,
scaleExpected: true,
@@ -55,7 +55,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
},
{
deploymentReplicas: 10,
- maxSurge: intstr.FromInt(2),
+ maxSurge: intstr.FromInt32(2),
oldReplicas: 5,
newReplicas: 0,
scaleExpected: true,
@@ -63,7 +63,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
},
{
deploymentReplicas: 10,
- maxSurge: intstr.FromInt(2),
+ maxSurge: intstr.FromInt32(2),
oldReplicas: 10,
newReplicas: 2,
scaleExpected: false,
@@ -71,7 +71,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
{
// Should scale down.
deploymentReplicas: 10,
- maxSurge: intstr.FromInt(2),
+ maxSurge: intstr.FromInt32(2),
oldReplicas: 2,
newReplicas: 11,
scaleExpected: true,
@@ -85,7 +85,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
newRS := rs("foo-v2", test.newReplicas, nil, noTimestamp)
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
allRSs := []*apps.ReplicaSet{newRS, oldRS}
- maxUnavailable := intstr.FromInt(0)
+ maxUnavailable := intstr.FromInt32(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &test.maxSurge, &maxUnavailable, map[string]string{"foo": "bar"})
fake := fake.Clientset{}
controller := &DeploymentController{
@@ -134,7 +134,7 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
}{
{
deploymentReplicas: 10,
- maxUnavailable: intstr.FromInt(0),
+ maxUnavailable: intstr.FromInt32(0),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 10,
@@ -144,7 +144,7 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
},
{
deploymentReplicas: 10,
- maxUnavailable: intstr.FromInt(2),
+ maxUnavailable: intstr.FromInt32(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 10,
@@ -154,7 +154,7 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
},
{ // expect unhealthy replicas from old replica sets to be cleaned up
deploymentReplicas: 10,
- maxUnavailable: intstr.FromInt(2),
+ maxUnavailable: intstr.FromInt32(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 8,
@@ -164,7 +164,7 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
},
{ // expect 1 unhealthy replica from old replica sets to be cleaned up, and 1 ready pod to be scaled down
deploymentReplicas: 10,
- maxUnavailable: intstr.FromInt(2),
+ maxUnavailable: intstr.FromInt32(2),
oldReplicas: 10,
newReplicas: 0,
readyPodsFromOldRS: 9,
@@ -174,7 +174,7 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
},
{ // the unavailable pods from the newRS would not make us scale down old RSs in a further step
deploymentReplicas: 10,
- maxUnavailable: intstr.FromInt(2),
+ maxUnavailable: intstr.FromInt32(2),
oldReplicas: 8,
newReplicas: 2,
readyPodsFromOldRS: 8,
@@ -194,7 +194,7 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
oldRS.Status.AvailableReplicas = int32(test.readyPodsFromOldRS)
oldRSs := []*apps.ReplicaSet{oldRS}
allRSs := []*apps.ReplicaSet{oldRS, newRS}
- maxSurge := intstr.FromInt(0)
+ maxSurge := intstr.FromInt32(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, newSelector)
fakeClientset := fake.Clientset{}
controller := &DeploymentController{
@@ -261,8 +261,8 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
oldRS.Status.AvailableReplicas = int32(test.readyPods)
oldRSs := []*apps.ReplicaSet{oldRS}
- maxSurge := intstr.FromInt(2)
- maxUnavailable := intstr.FromInt(2)
+ maxSurge := intstr.FromInt32(2)
+ maxUnavailable := intstr.FromInt32(2)
deployment := newDeployment("foo", 10, nil, &maxSurge, &maxUnavailable, nil)
fakeClientset := fake.Clientset{}
@@ -294,7 +294,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
}{
{
deploymentReplicas: 10,
- maxUnavailable: intstr.FromInt(0),
+ maxUnavailable: intstr.FromInt32(0),
readyPods: 10,
oldReplicas: 10,
scaleExpected: true,
@@ -302,7 +302,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
},
{
deploymentReplicas: 10,
- maxUnavailable: intstr.FromInt(2),
+ maxUnavailable: intstr.FromInt32(2),
readyPods: 10,
oldReplicas: 10,
scaleExpected: true,
@@ -310,21 +310,21 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
},
{
deploymentReplicas: 10,
- maxUnavailable: intstr.FromInt(2),
+ maxUnavailable: intstr.FromInt32(2),
readyPods: 8,
oldReplicas: 10,
scaleExpected: false,
},
{
deploymentReplicas: 10,
- maxUnavailable: intstr.FromInt(2),
+ maxUnavailable: intstr.FromInt32(2),
readyPods: 10,
oldReplicas: 0,
scaleExpected: false,
},
{
deploymentReplicas: 10,
- maxUnavailable: intstr.FromInt(2),
+ maxUnavailable: intstr.FromInt32(2),
readyPods: 1,
oldReplicas: 10,
scaleExpected: false,
@@ -338,7 +338,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
oldRS.Status.AvailableReplicas = int32(test.readyPods)
allRSs := []*apps.ReplicaSet{oldRS}
oldRSs := []*apps.ReplicaSet{oldRS}
- maxSurge := intstr.FromInt(0)
+ maxSurge := intstr.FromInt32(0)
deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, map[string]string{"foo": "bar"})
fakeClientset := fake.Clientset{}
controller := &DeploymentController{

View File

@@ -849,11 +849,11 @@ func WaitForObservedDeployment(getDeploymentFunc func() (*apps.Deployment, error
// 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
// 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
- surge, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true)
+ surge, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt32(0)), int(desired), true)
if err != nil {
return 0, 0, err
}
- unavailable, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false)
+ unavailable, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt32(0)), int(desired), false)
if err != nil {
return 0, 0, err
}
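For readers unfamiliar with these helpers: ValueOrDefault substitutes the default only when the pointer is nil, and GetScaledValueFromIntOrPercent scales a percentage against the desired count, rounding up for surge and down for unavailability. A minimal sketch with hypothetical inputs (25% surge and 25% unavailability against 10 desired replicas):

package main

import (
	"fmt"

	intstrutil "k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Hypothetical inputs: maxSurge=25%, maxUnavailable=25%, desired=10.
	maxSurge := intstrutil.FromString("25%")
	maxUnavailable := intstrutil.FromString("25%")

	// Surge rounds up: 25% of 10 = 2.5 -> 3.
	surge, err := intstrutil.GetScaledValueFromIntOrPercent(
		intstrutil.ValueOrDefault(&maxSurge, intstrutil.FromInt32(0)), 10, true)
	if err != nil {
		panic(err)
	}

	// Unavailability rounds down: 25% of 10 = 2.5 -> 2.
	unavailable, err := intstrutil.GetScaledValueFromIntOrPercent(
		intstrutil.ValueOrDefault(&maxUnavailable, intstrutil.FromInt32(0)), 10, false)
	if err != nil {
		panic(err)
	}

	fmt.Println(surge, unavailable) // 3 2
}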

View File

@@ -975,22 +975,22 @@ func TestMaxUnavailable(t *testing.T) {
}{
{
name: "maxUnavailable less than replicas",
- deployment: deployment(10, intstr.FromInt(5)),
+ deployment: deployment(10, intstr.FromInt32(5)),
expected: int32(5),
},
{
name: "maxUnavailable equal replicas",
- deployment: deployment(10, intstr.FromInt(10)),
+ deployment: deployment(10, intstr.FromInt32(10)),
expected: int32(10),
},
{
name: "maxUnavailable greater than replicas",
- deployment: deployment(5, intstr.FromInt(10)),
+ deployment: deployment(5, intstr.FromInt32(10)),
expected: int32(5),
},
{
name: "maxUnavailable with replicas is 0",
- deployment: deployment(0, intstr.FromInt(10)),
+ deployment: deployment(0, intstr.FromInt32(10)),
expected: int32(0),
},
{
@@ -1277,22 +1277,22 @@ func TestMinAvailable(t *testing.T) {
}{
{
name: "replicas greater than maxUnavailable",
- deployment: deployment(10, intstr.FromInt(5)),
+ deployment: deployment(10, intstr.FromInt32(5)),
expected: 5,
},
{
name: "replicas equal maxUnavailable",
- deployment: deployment(10, intstr.FromInt(10)),
+ deployment: deployment(10, intstr.FromInt32(10)),
expected: 0,
},
{
name: "replicas less than maxUnavailable",
- deployment: deployment(5, intstr.FromInt(10)),
+ deployment: deployment(5, intstr.FromInt32(10)),
expected: 0,
},
{
name: "replicas is 0",
- deployment: deployment(0, intstr.FromInt(10)),
+ deployment: deployment(0, intstr.FromInt32(10)),
expected: 0,
},
{

View File

@@ -426,7 +426,7 @@ func add(t *testing.T, store cache.Store, obj interface{}) {
func TestNoSelector(t *testing.T) {
dc, ps := newFakeDisruptionController()
- pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
+ pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(3))
pdb.Spec.Selector = &metav1.LabelSelector{}
pod, _ := newPod(t, "yo-yo-yo")
@@ -445,7 +445,7 @@ func TestNoSelector(t *testing.T) {
func TestUnavailable(t *testing.T) {
dc, ps := newFakeDisruptionController()
- pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
+ pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(3))
ctx := context.TODO()
add(t, dc.pdbStore, pdb)
dc.sync(ctx, pdbName)
@@ -475,7 +475,7 @@ func TestUnavailable(t *testing.T) {
func TestIntegerMaxUnavailable(t *testing.T) {
dc, ps := newFakeDisruptionController()
- pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(1))
+ pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt32(1))
add(t, dc.pdbStore, pdb)
ctx := context.TODO()
dc.sync(ctx, pdbName)
@@ -496,7 +496,7 @@ func TestIntegerMaxUnavailable(t *testing.T) {
func TestIntegerMaxUnavailableWithScaling(t *testing.T) {
dc, ps := newFakeDisruptionController()
- pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(2))
+ pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt32(2))
add(t, dc.pdbStore, pdb)
rs, _ := newReplicaSet(t, 7)
@@ -672,7 +672,7 @@ func TestScaleResource(t *testing.T) {
return true, obj, nil
})
- pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(int(maxUnavailable)))
+ pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt32(maxUnavailable))
add(t, dc.pdbStore, pdb)
trueVal := true
@@ -1030,7 +1030,7 @@ func TestPDBNotExist(t *testing.T) {
func TestUpdateDisruptedPods(t *testing.T) {
dc, ps := newFakeDisruptionController()
dc.recheckQueue = workqueue.NewNamedDelayingQueue("pdb_queue")
- pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1))
+ pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(1))
currentTime := dc.clock.Now()
pdb.Status.DisruptedPods = map[string]metav1.Time{
"p1": {Time: currentTime}, // Should be removed, pod deletion started.
@@ -1272,7 +1272,7 @@ func TestUpdatePDBStatusRetries(t *testing.T) {
dc.getUpdater = func() updater { return dc.writePdbStatus }
ctx := context.TODO()
// Create a PDB and 3 pods that match it.
- pdb, pdbKey := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1))
+ pdb, pdbKey := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(1))
pdb, err := dc.coreClient.PolicyV1().PodDisruptionBudgets(pdb.Namespace).Create(ctx, pdb, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Failed to create PDB: %v", err)
@@ -1409,7 +1409,7 @@ func TestInvalidSelectors(t *testing.T) {
t.Run(tn, func(t *testing.T) {
dc, ps := newFakeDisruptionController()
- pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
+ pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(3))
pdb.Spec.Selector = tc.labelSelector
add(t, dc.pdbStore, pdb)

View File

@@ -1042,7 +1042,7 @@ func TestStatefulSetControlRollingUpdateWithMaxUnavailable(t *testing.T) {
// Setup the statefulSet controller
totalPods := 6
var partition int32 = 3
- var maxUnavailable = intstr.FromInt(2)
+ var maxUnavailable = intstr.FromInt32(2)
set := setupPodManagementPolicy(tc.policyType, newStatefulSet(totalPods))
set.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
@@ -1130,7 +1130,7 @@ func setupForInvariant(t *testing.T) (*apps.StatefulSet, *fakeObjectManager, Sta
set := newStatefulSet(totalPods)
// update all pods >=3(3,4,5)
var partition int32 = 3
- var maxUnavailable = intstr.FromInt(2)
+ var maxUnavailable = intstr.FromInt32(2)
set.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
Type: apps.RollingUpdateStatefulSetStrategyType,
RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy {

View File

@@ -608,7 +608,7 @@ func (ao ascendingOrdinal) Less(i, j int) bool {
// Note that API validation has already guaranteed the maxUnavailable field to be >= 1 if it is an integer
// or 0% < value <= 100% if it is a percentage, so we don't have to consider other cases.
func getStatefulSetMaxUnavailable(maxUnavailable *intstr.IntOrString, replicaCount int) (int, error) {
- maxUnavailableNum, err := intstr.GetScaledValueFromIntOrPercent(intstr.ValueOrDefault(maxUnavailable, intstr.FromInt(1)), replicaCount, false)
+ maxUnavailableNum, err := intstr.GetScaledValueFromIntOrPercent(intstr.ValueOrDefault(maxUnavailable, intstr.FromInt32(1)), replicaCount, false)
if err != nil {
return 0, err
}
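A short usage sketch of the same pair of helpers as used here: a nil maxUnavailable falls back to the default of 1, and a percentage scales against the replica count rounding down, which matches the expectations in the test table in the next file. Assumes only k8s.io/apimachinery:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// A nil field falls back to the default of one unavailable pod.
	var unset *intstr.IntOrString
	def, err := intstr.GetScaledValueFromIntOrPercent(
		intstr.ValueOrDefault(unset, intstr.FromInt32(1)), 10, false)
	if err != nil {
		panic(err)
	}

	// "10%" of 25 replicas rounds down to 2.
	tenPercent := intstr.FromString("10%")
	pct, err := intstr.GetScaledValueFromIntOrPercent(&tenPercent, 25, false)
	if err != nil {
		panic(err)
	}

	fmt.Println(def, pct) // 1 2
}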

View File

@@ -948,9 +948,9 @@ func TestGetStatefulSetMaxUnavailable(t *testing.T) {
}{
// it wouldn't hurt to also test 0 and 0%, even if they should have been forbidden by API validation.
{maxUnavailable: nil, replicaCount: 10, expectedMaxUnavailable: 1},
- {maxUnavailable: intOrStrP(intstr.FromInt(3)), replicaCount: 10, expectedMaxUnavailable: 3},
- {maxUnavailable: intOrStrP(intstr.FromInt(3)), replicaCount: 0, expectedMaxUnavailable: 3},
- {maxUnavailable: intOrStrP(intstr.FromInt(0)), replicaCount: 0, expectedMaxUnavailable: 1},
+ {maxUnavailable: intOrStrP(intstr.FromInt32(3)), replicaCount: 10, expectedMaxUnavailable: 3},
+ {maxUnavailable: intOrStrP(intstr.FromInt32(3)), replicaCount: 0, expectedMaxUnavailable: 3},
+ {maxUnavailable: intOrStrP(intstr.FromInt32(0)), replicaCount: 0, expectedMaxUnavailable: 1},
{maxUnavailable: intOrStrP(intstr.FromString("10%")), replicaCount: 25, expectedMaxUnavailable: 2},
{maxUnavailable: intOrStrP(intstr.FromString("100%")), replicaCount: 5, expectedMaxUnavailable: 5},
{maxUnavailable: intOrStrP(intstr.FromString("50%")), replicaCount: 5, expectedMaxUnavailable: 2},