diff --git a/pkg/controller/disruption/disruption.go b/pkg/controller/disruption/disruption.go
index c297e09db5d..9a9caa60843 100644
--- a/pkg/controller/disruption/disruption.go
+++ b/pkg/controller/disruption/disruption.go
@@ -564,7 +564,7 @@ func (dc *DisruptionController) updatePdbSpec(pdb *policy.PodDisruptionBudget, c
 	// pods are in a safe state when their first pods appear but this controller
 	// has not updated their status yet. This isn't the only race, but it's a
 	// common one that's easy to detect.
-	disruptionAllowed := currentHealthy >= desiredHealthy && expectedCount > 0
+	disruptionAllowed := currentHealthy-1 >= desiredHealthy && expectedCount > 0
 
 	if pdb.Status.CurrentHealthy == currentHealthy && pdb.Status.DesiredHealthy == desiredHealthy && pdb.Status.ExpectedPods == expectedCount && pdb.Status.PodDisruptionAllowed == disruptionAllowed {
 		return nil
diff --git a/pkg/controller/disruption/disruption_test.go b/pkg/controller/disruption/disruption_test.go
index 7446753b25e..cc59abd914b 100644
--- a/pkg/controller/disruption/disruption_test.go
+++ b/pkg/controller/disruption/disruption_test.go
@@ -266,14 +266,14 @@ func TestUnavailable(t *testing.T) {
 
 	// Add three pods, verifying that the counts go up at each step.
 	pods := []*api.Pod{}
-	for i := int32(0); i < 3; i++ {
+	for i := int32(0); i < 4; i++ {
 		ps.VerifyPdbStatus(t, pdbName, false, i, 3, i)
 		pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i))
 		pods = append(pods, pod)
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 	}
-	ps.VerifyPdbStatus(t, pdbName, true, 3, 3, 3)
+	ps.VerifyPdbStatus(t, pdbName, true, 4, 3, 4)
 
 	// Now set one pod as unavailable
 	pods[0].Status.Conditions = []api.PodCondition{}
@@ -281,7 +281,7 @@ func TestUnavailable(t *testing.T) {
 	dc.sync(pdbName)
 
 	// Verify expected update
-	ps.VerifyPdbStatus(t, pdbName, false, 2, 3, 3)
+	ps.VerifyPdbStatus(t, pdbName, false, 3, 3, 4)
 }
 
 // Create a pod with no controller, and verify that a PDB with a percentage
@@ -366,8 +366,8 @@ func TestReplicationController(t *testing.T) {
 
 	dc, ps := newFakeDisruptionController()
 
-	// 67% should round up to 3
-	pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("67%"))
+	// 34% should round up to 2
+	pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("34%"))
 	add(t, dc.pdbLister.Store, pdb)
 	rc, _ := newReplicationController(t, 3)
 	rc.Spec.Selector = labels
@@ -386,9 +386,9 @@ func TestReplicationController(t *testing.T) {
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 		if i < 2 {
-			ps.VerifyPdbStatus(t, pdbName, false, i+1, 3, 3)
+			ps.VerifyPdbStatus(t, pdbName, false, i+1, 2, 3)
 		} else {
-			ps.VerifyPdbStatus(t, pdbName, true, 3, 3, 3)
+			ps.VerifyPdbStatus(t, pdbName, true, 3, 2, 3)
 		}
 	}
 
@@ -411,9 +411,18 @@ func TestTwoControllers(t *testing.T) {
 	}
 
 	dc, ps := newFakeDisruptionController()
 
+	// These constants are related, but I avoid calculating the correct values in
+	// code. If you update a parameter here, recalculate the correct values for
+	// all of them. Further down in the test, we use these to control loops, and
+	// that level of logic is enough complexity for me.
+	const collectionSize int32 = 11   // How big each collection is
+	const minAvailable string = "28%" // minAvailable we'll specify
+	const minimumOne int32 = 4        // integer minimum with one controller
+	const minimumTwo int32 = 7        // integer minimum with two controllers
+
 	pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("28%"))
 	add(t, dc.pdbLister.Store, pdb)
-	rc, _ := newReplicationController(t, 11)
+	rc, _ := newReplicationController(t, collectionSize)
 	rc.Spec.Selector = rcLabels
 	add(t, dc.rcLister.Indexer, rc)
 	dc.sync(pdbName)
@@ -422,71 +431,74 @@ func TestTwoControllers(t *testing.T) {
 
 	pods := []*api.Pod{}
-	for i := int32(0); i < 11; i++ {
+	unavailablePods := collectionSize - minimumOne - 1
+	for i := int32(1); i <= collectionSize; i++ {
 		pod, _ := newPod(t, fmt.Sprintf("quux %d", i))
 		pods = append(pods, pod)
 		pod.Labels = rcLabels
-		if i < 7 {
+		if i <= unavailablePods {
 			pod.Status.Conditions = []api.PodCondition{}
 		}
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
-		if i < 7 {
-			ps.VerifyPdbStatus(t, pdbName, false, 0, 4, 11)
-		} else if i < 10 {
-			ps.VerifyPdbStatus(t, pdbName, false, i-6, 4, 11)
+		if i <= unavailablePods {
+			ps.VerifyPdbStatus(t, pdbName, false, 0, minimumOne, collectionSize)
+		} else if i-unavailablePods <= minimumOne {
+			ps.VerifyPdbStatus(t, pdbName, false, i-unavailablePods, minimumOne, collectionSize)
 		} else {
-			ps.VerifyPdbStatus(t, pdbName, true, 4, 4, 11)
+			ps.VerifyPdbStatus(t, pdbName, true, i-unavailablePods, minimumOne, collectionSize)
 		}
 	}
 
-	d, _ := newDeployment(t, 11)
+	d, _ := newDeployment(t, collectionSize)
 	d.Spec.Selector = newSel(dLabels)
 	add(t, dc.dLister.Indexer, d)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, 4, 4, 11)
+	ps.VerifyPdbStatus(t, pdbName, true, minimumOne+1, minimumOne, collectionSize)
 
-	rs, _ := newReplicaSet(t, 11)
+	rs, _ := newReplicaSet(t, collectionSize)
 	rs.Spec.Selector = newSel(dLabels)
 	rs.Labels = dLabels
 	add(t, dc.rsLister.Store, rs)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, 4, 4, 11)
+	ps.VerifyPdbStatus(t, pdbName, true, minimumOne+1, minimumOne, collectionSize)
 
-	for i := int32(0); i < 11; i++ {
+	// By the end of this loop, the number of ready pods should be N+2 (hence minimumTwo+2).
+	unavailablePods = 2*collectionSize - (minimumTwo + 2) - unavailablePods
+	for i := int32(1); i <= collectionSize; i++ {
 		pod, _ := newPod(t, fmt.Sprintf("quuux %d", i))
 		pods = append(pods, pod)
 		pod.Labels = dLabels
-		if i < 7 {
+		if i <= unavailablePods {
 			pod.Status.Conditions = []api.PodCondition{}
 		}
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
-		if i < 7 {
-			ps.VerifyPdbStatus(t, pdbName, false, 4, 7, 22)
-		} else if i < 9 {
-			ps.VerifyPdbStatus(t, pdbName, false, 4+i-6, 7, 22)
+		if i <= unavailablePods {
+			ps.VerifyPdbStatus(t, pdbName, false, minimumOne+1, minimumTwo, 2*collectionSize)
+		} else if i-unavailablePods <= minimumTwo-(minimumOne+1) {
+			ps.VerifyPdbStatus(t, pdbName, false, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize)
 		} else {
-			ps.VerifyPdbStatus(t, pdbName, true, 4+i-6, 7, 22)
+			ps.VerifyPdbStatus(t, pdbName, true, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize)
 		}
 	}
 
 	// Now we verify we can bring down 1 pod and a disruption is still permitted,
 	// but if we bring down two, it's not. Then we make the pod ready again and
 	// verify that a disruption is permitted again.
-	ps.VerifyPdbStatus(t, pdbName, true, 8, 7, 22)
-	pods[10].Status.Conditions = []api.PodCondition{}
-	update(t, dc.podLister.Indexer, pods[10])
+	ps.VerifyPdbStatus(t, pdbName, true, 2+minimumTwo, minimumTwo, 2*collectionSize)
+	pods[collectionSize-1].Status.Conditions = []api.PodCondition{}
+	update(t, dc.podLister.Indexer, pods[collectionSize-1])
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, 7, 7, 22)
+	ps.VerifyPdbStatus(t, pdbName, true, 1+minimumTwo, minimumTwo, 2*collectionSize)
 
-	pods[9].Status.Conditions = []api.PodCondition{}
-	update(t, dc.podLister.Indexer, pods[9])
+	pods[collectionSize-2].Status.Conditions = []api.PodCondition{}
+	update(t, dc.podLister.Indexer, pods[collectionSize-2])
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, false, 6, 7, 22)
+	ps.VerifyPdbStatus(t, pdbName, false, minimumTwo, minimumTwo, 2*collectionSize)
 
-	pods[10].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}
-	update(t, dc.podLister.Indexer, pods[10])
+	pods[collectionSize-1].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}
+	update(t, dc.podLister.Indexer, pods[collectionSize-1])
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, 7, 7, 22)
+	ps.VerifyPdbStatus(t, pdbName, true, 1+minimumTwo, minimumTwo, 2*collectionSize)
 }
diff --git a/test/e2e/disruption.go b/test/e2e/disruption.go
index e9ff04b2e22..b6e0599b603 100644
--- a/test/e2e/disruption.go
+++ b/test/e2e/disruption.go
@@ -61,7 +61,7 @@ var _ = framework.KubeDescribe("DisruptionController", func() {
 	It("should update PodDisruptionBudget status", func() {
 		createPodDisruptionBudgetOrDie(cs, ns, intstr.FromInt(2))
 
-		createPodsOrDie(cs, ns, 2)
+		createPodsOrDie(cs, ns, 3)
 
 		// Since disruptionAllowed starts out false, if we see it ever become true,
 		// that means the controller is working.
@@ -92,22 +92,22 @@ var _ = framework.KubeDescribe("DisruptionController", func() {
 		}, {
 			description:  "too few pods, absolute",
 			minAvailable: intstr.FromInt(2),
-			podCount:     1,
+			podCount:     2,
 			shouldDeny:   true,
 		}, {
 			description:  "enough pods, absolute",
 			minAvailable: intstr.FromInt(2),
-			podCount:     2,
+			podCount:     3,
 			shouldDeny:   false,
 		}, {
 			description:    "enough pods, replicaSet, percentage",
-			minAvailable:   intstr.FromString("100%"),
+			minAvailable:   intstr.FromString("90%"),
 			replicaSetSize: 10,
 			exclusive:      false,
 			shouldDeny:     false,
 		}, {
 			description:    "too few pods, replicaSet, percentage",
-			minAvailable:   intstr.FromString("100%"),
+			minAvailable:   intstr.FromString("90%"),
 			replicaSetSize: 10,
 			exclusive:      true,
 			shouldDeny:     true,
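For reference, here is a quick sanity check on the arithmetic these changes rely on. This is a minimal standalone sketch, not code from the controller or the tests; percentageMinimum and disruptionAllowed are hypothetical helpers written only to illustrate the rounding behaviour and the new eviction rule.

package main

import "fmt"

// percentageMinimum computes the integer minimum as a ceiling, matching the
// "round up" behaviour the test comments describe (hypothetical helper; the
// real controller resolves the IntOrString through its own utilities).
func percentageMinimum(percent, expectedCount int32) int32 {
	return (expectedCount*percent + 99) / 100
}

// disruptionAllowed restates the rule introduced in disruption.go above:
// evicting one pod must still leave at least desiredHealthy healthy pods,
// and the budget must expect at least one pod.
func disruptionAllowed(currentHealthy, desiredHealthy, expectedCount int32) bool {
	return currentHealthy-1 >= desiredHealthy && expectedCount > 0
}

func main() {
	fmt.Println(percentageMinimum(34, 3))  // 2: "34% should round up to 2" (TestReplicationController)
	fmt.Println(percentageMinimum(28, 11)) // 4: minimumOne in TestTwoControllers
	fmt.Println(percentageMinimum(28, 22)) // 7: minimumTwo in TestTwoControllers
	fmt.Println(percentageMinimum(90, 10)) // 9: the e2e replicaSet percentage cases

	fmt.Println(disruptionAllowed(5, 4, 11)) // true: one eviction still leaves the minimum of 4
	fmt.Println(disruptionAllowed(4, 4, 11)) // false: exactly at the minimum, no disruption allowed
}

Under the old rule (currentHealthy >= desiredHealthy), that last call would have returned true, which is why each test above now provisions one extra healthy pod or lowers the minAvailable percentage.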