Mirror of https://github.com/k3s-io/kubernetes.git

Merge pull request #117624 from skitt/intstr-fromint32-apps

Apps: use new intstr functions

Commit: 73bd83cfa7
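
The change itself is mechanical: each call to the older intstr.FromInt helper is replaced with the newer intstr.FromInt32, which takes an int32 directly instead of an int, so call sites that already hold an int32 (for example the maxUnavailable value in TestScaleResource below) can drop their int(...) conversion. A minimal standalone sketch of the before/after shapes, assuming only the standard k8s.io/apimachinery/pkg/util/intstr package; this snippet is illustrative and not part of the diff:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	var maxUnavailable int32 = 2 // a typical int32 value, as in the disruption tests

	before := intstr.FromInt(int(maxUnavailable)) // older helper: takes an int, so int32 callers convert
	after := intstr.FromInt32(maxUnavailable)     // newer helper: takes the int32 directly

	// Both produce an equivalent IntOrString holding the integer 2.
	fmt.Println(before.IntValue(), after.IntValue()) // 2 2
}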
@@ -124,7 +124,7 @@ func newDaemonSet(name string) *apps.DaemonSet {
 }
 
 func newRollingUpdateStrategy() *apps.DaemonSetUpdateStrategy {
-	one := intstr.FromInt(1)
+	one := intstr.FromInt32(1)
 	return &apps.DaemonSetUpdateStrategy{
 		Type: apps.RollingUpdateDaemonSetStrategyType,
 		RollingUpdate: &apps.RollingUpdateDaemonSet{MaxUnavailable: &one},
@@ -3217,7 +3217,7 @@ func getQueuedKeys(queue workqueue.RateLimitingInterface) []string {
 func TestSurgeDealsWithExistingPods(t *testing.T) {
 	_, ctx := ktesting.NewTestContext(t)
 	ds := newDaemonSet("foo")
-	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
+	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
 	manager, podControl, _, err := newTestController(ctx, ds)
 	if err != nil {
 		t.Fatalf("error creating DaemonSets controller: %v", err)
@@ -3234,7 +3234,7 @@ func TestSurgeDealsWithExistingPods(t *testing.T) {
 func TestSurgePreservesReadyOldPods(t *testing.T) {
 	_, ctx := ktesting.NewTestContext(t)
 	ds := newDaemonSet("foo")
-	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
+	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
 	manager, podControl, _, err := newTestController(ctx, ds)
 	if err != nil {
 		t.Fatalf("error creating DaemonSets controller: %v", err)
@@ -3274,7 +3274,7 @@ func TestSurgePreservesReadyOldPods(t *testing.T) {
 func TestSurgeCreatesNewPodWhenAtMaxSurgeAndOldPodDeleted(t *testing.T) {
 	_, ctx := ktesting.NewTestContext(t)
 	ds := newDaemonSet("foo")
-	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
+	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
 	manager, podControl, _, err := newTestController(ctx, ds)
 	if err != nil {
 		t.Fatalf("error creating DaemonSets controller: %v", err)
@@ -3321,7 +3321,7 @@ func TestSurgeCreatesNewPodWhenAtMaxSurgeAndOldPodDeleted(t *testing.T) {
 func TestSurgeDeletesUnreadyOldPods(t *testing.T) {
 	_, ctx := ktesting.NewTestContext(t)
 	ds := newDaemonSet("foo")
-	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
+	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
 	manager, podControl, _, err := newTestController(ctx, ds)
 	if err != nil {
 		t.Fatalf("error creating DaemonSets controller: %v", err)
@@ -3362,7 +3362,7 @@ func TestSurgePreservesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
 	_, ctx := ktesting.NewTestContext(t)
 	ds := newDaemonSet("foo")
 	ds.Spec.MinReadySeconds = 15
-	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
+	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
 	manager, podControl, _, err := newTestController(ctx, ds)
 	if err != nil {
 		t.Fatalf("error creating DaemonSets controller: %v", err)
@@ -3407,7 +3407,7 @@ func TestSurgeDeletesOldReadyWithUnsatisfiedMinReady(t *testing.T) {
 	_, ctx := ktesting.NewTestContext(t)
 	ds := newDaemonSet("foo")
 	ds.Spec.MinReadySeconds = 15
-	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(1))
+	ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(1))
 	manager, podControl, _, err := newTestController(ctx, ds)
 	if err != nil {
 		t.Fatalf("error creating DaemonSets controller: %v", err)

@@ -358,7 +358,7 @@ func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
 }
 
 func newUpdateSurge(value intstr.IntOrString) apps.DaemonSetUpdateStrategy {
-	zero := intstr.FromInt(0)
+	zero := intstr.FromInt32(0)
 	return apps.DaemonSetUpdateStrategy{
 		Type: apps.RollingUpdateDaemonSetStrategyType,
 		RollingUpdate: &apps.RollingUpdateDaemonSet{
@@ -399,7 +399,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
 			},
 			ds: func() *apps.DaemonSet {
 				ds := newDaemonSet("x")
-				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt(0))
+				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt32(0))
 				return ds
 			}(),
 			nodeToPods: make(map[string][]*v1.Pod),
@@ -418,7 +418,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
 			},
 			ds: func() *apps.DaemonSet {
 				ds := newDaemonSet("x")
-				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt(1))
+				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt32(1))
 				return ds
 			}(),
 			nodeToPods: func() map[string][]*v1.Pod {
@@ -446,7 +446,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
 			},
 			ds: func() *apps.DaemonSet {
 				ds := newDaemonSet("x")
-				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt(0))
+				ds.Spec.UpdateStrategy = newUpdateUnavailable(intstr.FromInt32(0))
 				return ds
 			}(),
 			nodeToPods: func() map[string][]*v1.Pod {
@@ -471,7 +471,7 @@ func TestGetUnavailableNumbers(t *testing.T) {
 			},
 			ds: func() *apps.DaemonSet {
 				ds := newDaemonSet("x")
-				ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(0))
+				ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(0))
 				return ds
 			}(),
 			nodeToPods: func() map[string][]*v1.Pod {

@@ -93,8 +93,8 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu
 			Strategy: apps.DeploymentStrategy{
 				Type: apps.RollingUpdateDeploymentStrategyType,
 				RollingUpdate: &apps.RollingUpdateDeployment{
-					MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
-					MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
+					MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt32(0); return &i }(),
+					MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt32(0); return &i }(),
 				},
 			},
 			Replicas: pointer.Int32(int32(replicas)),

@@ -40,14 +40,14 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
 		{
 			// Should not scale up.
 			deploymentReplicas: 10,
-			maxSurge: intstr.FromInt(0),
+			maxSurge: intstr.FromInt32(0),
 			oldReplicas: 10,
 			newReplicas: 0,
 			scaleExpected: false,
 		},
 		{
 			deploymentReplicas: 10,
-			maxSurge: intstr.FromInt(2),
+			maxSurge: intstr.FromInt32(2),
 			oldReplicas: 10,
 			newReplicas: 0,
 			scaleExpected: true,
@@ -55,7 +55,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
 		},
 		{
 			deploymentReplicas: 10,
-			maxSurge: intstr.FromInt(2),
+			maxSurge: intstr.FromInt32(2),
 			oldReplicas: 5,
 			newReplicas: 0,
 			scaleExpected: true,
@@ -63,7 +63,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
 		},
 		{
 			deploymentReplicas: 10,
-			maxSurge: intstr.FromInt(2),
+			maxSurge: intstr.FromInt32(2),
 			oldReplicas: 10,
 			newReplicas: 2,
 			scaleExpected: false,
@@ -71,7 +71,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
 		{
 			// Should scale down.
 			deploymentReplicas: 10,
-			maxSurge: intstr.FromInt(2),
+			maxSurge: intstr.FromInt32(2),
 			oldReplicas: 2,
 			newReplicas: 11,
 			scaleExpected: true,
@@ -85,7 +85,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
 		newRS := rs("foo-v2", test.newReplicas, nil, noTimestamp)
 		oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
 		allRSs := []*apps.ReplicaSet{newRS, oldRS}
-		maxUnavailable := intstr.FromInt(0)
+		maxUnavailable := intstr.FromInt32(0)
 		deployment := newDeployment("foo", test.deploymentReplicas, nil, &test.maxSurge, &maxUnavailable, map[string]string{"foo": "bar"})
 		fake := fake.Clientset{}
 		controller := &DeploymentController{
@@ -134,7 +134,7 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
 	}{
 		{
 			deploymentReplicas: 10,
-			maxUnavailable: intstr.FromInt(0),
+			maxUnavailable: intstr.FromInt32(0),
 			oldReplicas: 10,
 			newReplicas: 0,
 			readyPodsFromOldRS: 10,
@@ -144,7 +144,7 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
 		},
 		{
 			deploymentReplicas: 10,
-			maxUnavailable: intstr.FromInt(2),
+			maxUnavailable: intstr.FromInt32(2),
 			oldReplicas: 10,
 			newReplicas: 0,
 			readyPodsFromOldRS: 10,
@@ -154,7 +154,7 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
 		},
 		{ // expect unhealthy replicas from old replica sets been cleaned up
 			deploymentReplicas: 10,
-			maxUnavailable: intstr.FromInt(2),
+			maxUnavailable: intstr.FromInt32(2),
 			oldReplicas: 10,
 			newReplicas: 0,
 			readyPodsFromOldRS: 8,
@@ -164,7 +164,7 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
 		},
 		{ // expect 1 unhealthy replica from old replica sets been cleaned up, and 1 ready pod been scaled down
 			deploymentReplicas: 10,
-			maxUnavailable: intstr.FromInt(2),
+			maxUnavailable: intstr.FromInt32(2),
 			oldReplicas: 10,
 			newReplicas: 0,
 			readyPodsFromOldRS: 9,
@@ -174,7 +174,7 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
 		},
 		{ // the unavailable pods from the newRS would not make us scale down old RSs in a further step
 			deploymentReplicas: 10,
-			maxUnavailable: intstr.FromInt(2),
+			maxUnavailable: intstr.FromInt32(2),
 			oldReplicas: 8,
 			newReplicas: 2,
 			readyPodsFromOldRS: 8,
@@ -194,7 +194,7 @@ func TestDeploymentController_reconcileOldReplicaSets(t *testing.T) {
 		oldRS.Status.AvailableReplicas = int32(test.readyPodsFromOldRS)
 		oldRSs := []*apps.ReplicaSet{oldRS}
 		allRSs := []*apps.ReplicaSet{oldRS, newRS}
-		maxSurge := intstr.FromInt(0)
+		maxSurge := intstr.FromInt32(0)
 		deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, newSelector)
 		fakeClientset := fake.Clientset{}
 		controller := &DeploymentController{
@@ -261,8 +261,8 @@ func TestDeploymentController_cleanupUnhealthyReplicas(t *testing.T) {
 		oldRS := rs("foo-v2", test.oldReplicas, nil, noTimestamp)
 		oldRS.Status.AvailableReplicas = int32(test.readyPods)
 		oldRSs := []*apps.ReplicaSet{oldRS}
-		maxSurge := intstr.FromInt(2)
-		maxUnavailable := intstr.FromInt(2)
+		maxSurge := intstr.FromInt32(2)
+		maxUnavailable := intstr.FromInt32(2)
 		deployment := newDeployment("foo", 10, nil, &maxSurge, &maxUnavailable, nil)
 		fakeClientset := fake.Clientset{}
 
@@ -294,7 +294,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
 	}{
 		{
 			deploymentReplicas: 10,
-			maxUnavailable: intstr.FromInt(0),
+			maxUnavailable: intstr.FromInt32(0),
 			readyPods: 10,
 			oldReplicas: 10,
 			scaleExpected: true,
@@ -302,7 +302,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
 		},
 		{
 			deploymentReplicas: 10,
-			maxUnavailable: intstr.FromInt(2),
+			maxUnavailable: intstr.FromInt32(2),
 			readyPods: 10,
 			oldReplicas: 10,
 			scaleExpected: true,
@@ -310,21 +310,21 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
 		},
 		{
 			deploymentReplicas: 10,
-			maxUnavailable: intstr.FromInt(2),
+			maxUnavailable: intstr.FromInt32(2),
 			readyPods: 8,
 			oldReplicas: 10,
 			scaleExpected: false,
 		},
 		{
 			deploymentReplicas: 10,
-			maxUnavailable: intstr.FromInt(2),
+			maxUnavailable: intstr.FromInt32(2),
 			readyPods: 10,
 			oldReplicas: 0,
 			scaleExpected: false,
 		},
 		{
 			deploymentReplicas: 10,
-			maxUnavailable: intstr.FromInt(2),
+			maxUnavailable: intstr.FromInt32(2),
 			readyPods: 1,
 			oldReplicas: 10,
 			scaleExpected: false,
@@ -338,7 +338,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
 		oldRS.Status.AvailableReplicas = int32(test.readyPods)
 		allRSs := []*apps.ReplicaSet{oldRS}
 		oldRSs := []*apps.ReplicaSet{oldRS}
-		maxSurge := intstr.FromInt(0)
+		maxSurge := intstr.FromInt32(0)
 		deployment := newDeployment("foo", test.deploymentReplicas, nil, &maxSurge, &test.maxUnavailable, map[string]string{"foo": "bar"})
 		fakeClientset := fake.Clientset{}
 		controller := &DeploymentController{

@@ -849,11 +849,11 @@ func WaitForObservedDeployment(getDeploymentFunc func() (*apps.Deployment, error
 // 2 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1), then new(+1), then old(-1)
 // 1 desired, max unavailable 0%, surge 1% - should scale new(+1), then old(-1)
 func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired int32) (int32, int32, error) {
-	surge, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt(0)), int(desired), true)
+	surge, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxSurge, intstrutil.FromInt32(0)), int(desired), true)
 	if err != nil {
 		return 0, 0, err
 	}
-	unavailable, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt(0)), int(desired), false)
+	unavailable, err := intstrutil.GetScaledValueFromIntOrPercent(intstrutil.ValueOrDefault(maxUnavailable, intstrutil.FromInt32(0)), int(desired), false)
 	if err != nil {
 		return 0, 0, err
 	}

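For context on the two calls in ResolveFenceposts above: ValueOrDefault substitutes a zero IntOrString when the field is nil, and GetScaledValueFromIntOrPercent turns an integer or percentage into an absolute count against the desired replica count, rounding surge up and unavailability down; the remainder of the function (not shown in this hunk) then guards against both resolving to zero. A small standalone sketch, assuming only the standard apimachinery intstr package; it is not code from this PR:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	desired := 2
	maxSurge := intstr.FromString("1%")
	maxUnavailable := intstr.FromString("0%")

	// Surge rounds up: ceil(2 * 1%) = 1.
	surge, _ := intstr.GetScaledValueFromIntOrPercent(intstr.ValueOrDefault(&maxSurge, intstr.FromInt32(0)), desired, true)

	// Unavailability rounds down: floor(2 * 0%) = 0.
	unavailable, _ := intstr.GetScaledValueFromIntOrPercent(intstr.ValueOrDefault(&maxUnavailable, intstr.FromInt32(0)), desired, false)

	// A nil field falls back to the FromInt32(0) default before scaling.
	var unset *intstr.IntOrString
	fallback, _ := intstr.GetScaledValueFromIntOrPercent(intstr.ValueOrDefault(unset, intstr.FromInt32(0)), desired, true)

	fmt.Println(surge, unavailable, fallback) // 1 0 0
}
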
@@ -975,22 +975,22 @@ func TestMaxUnavailable(t *testing.T) {
 	}{
 		{
 			name: "maxUnavailable less than replicas",
-			deployment: deployment(10, intstr.FromInt(5)),
+			deployment: deployment(10, intstr.FromInt32(5)),
 			expected: int32(5),
 		},
 		{
 			name: "maxUnavailable equal replicas",
-			deployment: deployment(10, intstr.FromInt(10)),
+			deployment: deployment(10, intstr.FromInt32(10)),
 			expected: int32(10),
 		},
 		{
 			name: "maxUnavailable greater than replicas",
-			deployment: deployment(5, intstr.FromInt(10)),
+			deployment: deployment(5, intstr.FromInt32(10)),
 			expected: int32(5),
 		},
 		{
 			name: "maxUnavailable with replicas is 0",
-			deployment: deployment(0, intstr.FromInt(10)),
+			deployment: deployment(0, intstr.FromInt32(10)),
 			expected: int32(0),
 		},
 		{
@@ -1277,22 +1277,22 @@ func TestMinAvailable(t *testing.T) {
 	}{
 		{
 			name: "replicas greater than maxUnavailable",
-			deployment: deployment(10, intstr.FromInt(5)),
+			deployment: deployment(10, intstr.FromInt32(5)),
 			expected: 5,
 		},
 		{
 			name: "replicas equal maxUnavailable",
-			deployment: deployment(10, intstr.FromInt(10)),
+			deployment: deployment(10, intstr.FromInt32(10)),
 			expected: 0,
 		},
 		{
 			name: "replicas less than maxUnavailable",
-			deployment: deployment(5, intstr.FromInt(10)),
+			deployment: deployment(5, intstr.FromInt32(10)),
 			expected: 0,
 		},
 		{
 			name: "replicas is 0",
-			deployment: deployment(0, intstr.FromInt(10)),
+			deployment: deployment(0, intstr.FromInt32(10)),
 			expected: 0,
 		},
 		{

@@ -426,7 +426,7 @@ func add(t *testing.T, store cache.Store, obj interface{}) {
 func TestNoSelector(t *testing.T) {
 	dc, ps := newFakeDisruptionController()
 
-	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
+	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(3))
 	pdb.Spec.Selector = &metav1.LabelSelector{}
 	pod, _ := newPod(t, "yo-yo-yo")
 
@@ -445,7 +445,7 @@ func TestNoSelector(t *testing.T) {
 func TestUnavailable(t *testing.T) {
 	dc, ps := newFakeDisruptionController()
 
-	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
+	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(3))
 	ctx := context.TODO()
 	add(t, dc.pdbStore, pdb)
 	dc.sync(ctx, pdbName)
@@ -475,7 +475,7 @@ func TestUnavailable(t *testing.T) {
 func TestIntegerMaxUnavailable(t *testing.T) {
 	dc, ps := newFakeDisruptionController()
 
-	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(1))
+	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt32(1))
 	add(t, dc.pdbStore, pdb)
 	ctx := context.TODO()
 	dc.sync(ctx, pdbName)
@@ -496,7 +496,7 @@ func TestIntegerMaxUnavailable(t *testing.T) {
 func TestIntegerMaxUnavailableWithScaling(t *testing.T) {
 	dc, ps := newFakeDisruptionController()
 
-	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(2))
+	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt32(2))
 	add(t, dc.pdbStore, pdb)
 
 	rs, _ := newReplicaSet(t, 7)
@@ -672,7 +672,7 @@ func TestScaleResource(t *testing.T) {
 		return true, obj, nil
 	})
 
-	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt(int(maxUnavailable)))
+	pdb, pdbName := newMaxUnavailablePodDisruptionBudget(t, intstr.FromInt32(maxUnavailable))
 	add(t, dc.pdbStore, pdb)
 
 	trueVal := true
@@ -1030,7 +1030,7 @@ func TestPDBNotExist(t *testing.T) {
 func TestUpdateDisruptedPods(t *testing.T) {
 	dc, ps := newFakeDisruptionController()
 	dc.recheckQueue = workqueue.NewNamedDelayingQueue("pdb_queue")
-	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1))
+	pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(1))
 	currentTime := dc.clock.Now()
 	pdb.Status.DisruptedPods = map[string]metav1.Time{
 		"p1": {Time: currentTime}, // Should be removed, pod deletion started.
@@ -1272,7 +1272,7 @@ func TestUpdatePDBStatusRetries(t *testing.T) {
 	dc.getUpdater = func() updater { return dc.writePdbStatus }
 	ctx := context.TODO()
 	// Create a PDB and 3 pods that match it.
-	pdb, pdbKey := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(1))
+	pdb, pdbKey := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(1))
 	pdb, err := dc.coreClient.PolicyV1().PodDisruptionBudgets(pdb.Namespace).Create(ctx, pdb, metav1.CreateOptions{})
 	if err != nil {
 		t.Fatalf("Failed to create PDB: %v", err)
@@ -1409,7 +1409,7 @@ func TestInvalidSelectors(t *testing.T) {
 		t.Run(tn, func(t *testing.T) {
 			dc, ps := newFakeDisruptionController()
 
-			pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt(3))
+			pdb, pdbName := newMinAvailablePodDisruptionBudget(t, intstr.FromInt32(3))
 			pdb.Spec.Selector = tc.labelSelector
 
 			add(t, dc.pdbStore, pdb)

|
|||||||
// Setup the statefulSet controller
|
// Setup the statefulSet controller
|
||||||
totalPods := 6
|
totalPods := 6
|
||||||
var partition int32 = 3
|
var partition int32 = 3
|
||||||
var maxUnavailable = intstr.FromInt(2)
|
var maxUnavailable = intstr.FromInt32(2)
|
||||||
set := setupPodManagementPolicy(tc.policyType, newStatefulSet(totalPods))
|
set := setupPodManagementPolicy(tc.policyType, newStatefulSet(totalPods))
|
||||||
set.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
|
set.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
|
||||||
Type: apps.RollingUpdateStatefulSetStrategyType,
|
Type: apps.RollingUpdateStatefulSetStrategyType,
|
||||||
@ -1130,7 +1130,7 @@ func setupForInvariant(t *testing.T) (*apps.StatefulSet, *fakeObjectManager, Sta
|
|||||||
set := newStatefulSet(totalPods)
|
set := newStatefulSet(totalPods)
|
||||||
// update all pods >=3(3,4,5)
|
// update all pods >=3(3,4,5)
|
||||||
var partition int32 = 3
|
var partition int32 = 3
|
||||||
var maxUnavailable = intstr.FromInt(2)
|
var maxUnavailable = intstr.FromInt32(2)
|
||||||
set.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
|
set.Spec.UpdateStrategy = apps.StatefulSetUpdateStrategy{
|
||||||
Type: apps.RollingUpdateStatefulSetStrategyType,
|
Type: apps.RollingUpdateStatefulSetStrategyType,
|
||||||
RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy {
|
RollingUpdate: func() *apps.RollingUpdateStatefulSetStrategy {
|
||||||
|
@@ -608,7 +608,7 @@ func (ao ascendingOrdinal) Less(i, j int) bool {
 // Note that API validation has already guaranteed the maxUnavailable field to be >1 if it is an integer
 // or 0% < value <= 100% if it is a percentage, so we don't have to consider other cases.
 func getStatefulSetMaxUnavailable(maxUnavailable *intstr.IntOrString, replicaCount int) (int, error) {
-	maxUnavailableNum, err := intstr.GetScaledValueFromIntOrPercent(intstr.ValueOrDefault(maxUnavailable, intstr.FromInt(1)), replicaCount, false)
+	maxUnavailableNum, err := intstr.GetScaledValueFromIntOrPercent(intstr.ValueOrDefault(maxUnavailable, intstr.FromInt32(1)), replicaCount, false)
 	if err != nil {
 		return 0, err
 	}

@@ -948,9 +948,9 @@ func TestGetStatefulSetMaxUnavailable(t *testing.T) {
 	}{
 		// it wouldn't hurt to also test 0 and 0%, even if they should have been forbidden by API validation.
 		{maxUnavailable: nil, replicaCount: 10, expectedMaxUnavailable: 1},
-		{maxUnavailable: intOrStrP(intstr.FromInt(3)), replicaCount: 10, expectedMaxUnavailable: 3},
-		{maxUnavailable: intOrStrP(intstr.FromInt(3)), replicaCount: 0, expectedMaxUnavailable: 3},
-		{maxUnavailable: intOrStrP(intstr.FromInt(0)), replicaCount: 0, expectedMaxUnavailable: 1},
+		{maxUnavailable: intOrStrP(intstr.FromInt32(3)), replicaCount: 10, expectedMaxUnavailable: 3},
+		{maxUnavailable: intOrStrP(intstr.FromInt32(3)), replicaCount: 0, expectedMaxUnavailable: 3},
+		{maxUnavailable: intOrStrP(intstr.FromInt32(0)), replicaCount: 0, expectedMaxUnavailable: 1},
 		{maxUnavailable: intOrStrP(intstr.FromString("10%")), replicaCount: 25, expectedMaxUnavailable: 2},
 		{maxUnavailable: intOrStrP(intstr.FromString("100%")), replicaCount: 5, expectedMaxUnavailable: 5},
 		{maxUnavailable: intOrStrP(intstr.FromString("50%")), replicaCount: 5, expectedMaxUnavailable: 2},