Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-26 05:03:09 +00:00)
Merge pull request #33143 from mml/off_by_one_._._._the_story_of_an_engineer

Automatic merge from submit-queue

Only approve evictions when budgets would stay enforced after. Prior to this, we would approve an eviction as long as the current state of the pods matched the budget. The new version requires that, after the eviction, the pods would still match the budget. Also update the tests to match.
This commit is contained in: commit 31379c102d
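To make the off-by-one concrete, here is a minimal, self-contained sketch of the predicate before and after the change. The variable names (currentHealthy, desiredHealthy, expectedCount) come from the diff below; the wrapper functions and the example values are illustrative only, not the controller's actual API.

package main

import "fmt"

// Old behaviour: a disruption was allowed whenever the current state already
// satisfied the budget, ignoring what the state would be after the eviction.
func disruptionAllowedOld(currentHealthy, desiredHealthy, expectedCount int32) bool {
	return currentHealthy >= desiredHealthy && expectedCount > 0
}

// New behaviour: a disruption is allowed only if the budget would still hold
// after one pod is removed, i.e. with one fewer healthy pod.
func disruptionAllowedNew(currentHealthy, desiredHealthy, expectedCount int32) bool {
	return currentHealthy-1 >= desiredHealthy && expectedCount > 0
}

func main() {
	// With exactly as many healthy pods as the budget requires (3 of 3),
	// the old check approves an eviction that immediately violates the budget.
	fmt.Println(disruptionAllowedOld(3, 3, 3)) // true  (off by one)
	fmt.Println(disruptionAllowedNew(3, 3, 3)) // false
	fmt.Println(disruptionAllowedNew(4, 3, 3)) // true: one pod can go and 3 stay healthy
}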
@@ -564,7 +564,7 @@ func (dc *DisruptionController) updatePdbSpec(pdb *policy.PodDisruptionBudget, c
 	// pods are in a safe state when their first pods appear but this controller
 	// has not updated their status yet. This isn't the only race, but it's a
 	// common one that's easy to detect.
-	disruptionAllowed := currentHealthy >= desiredHealthy && expectedCount > 0
+	disruptionAllowed := currentHealthy-1 >= desiredHealthy && expectedCount > 0
 
 	if pdb.Status.CurrentHealthy == currentHealthy && pdb.Status.DesiredHealthy == desiredHealthy && pdb.Status.ExpectedPods == expectedCount && pdb.Status.PodDisruptionAllowed == disruptionAllowed {
 		return nil
@@ -266,14 +266,14 @@ func TestUnavailable(t *testing.T) {
 
 	// Add three pods, verifying that the counts go up at each step.
 	pods := []*api.Pod{}
-	for i := int32(0); i < 3; i++ {
+	for i := int32(0); i < 4; i++ {
 		ps.VerifyPdbStatus(t, pdbName, false, i, 3, i)
 		pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i))
 		pods = append(pods, pod)
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 	}
-	ps.VerifyPdbStatus(t, pdbName, true, 3, 3, 3)
+	ps.VerifyPdbStatus(t, pdbName, true, 4, 3, 4)
 
 	// Now set one pod as unavailable
 	pods[0].Status.Conditions = []api.PodCondition{}
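A short worked check of the new TestUnavailable expectations (assuming, as the call sites suggest, that VerifyPdbStatus takes disruptionAllowed, currentHealthy, desiredHealthy, expectedPods in that order): with desiredHealthy = 3, the stricter predicate needs 4 - 1 >= 3 healthy pods before a disruption is allowed, so the test now adds a fourth pod and expects (true, 4, 3, 4) instead of (true, 3, 3, 3). After one pod is marked unready in the next hunk, 3 - 1 >= 3 is false, so the status falls back to (false, 3, 3, 4).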
@@ -281,7 +281,7 @@ func TestUnavailable(t *testing.T) {
 	dc.sync(pdbName)
 
 	// Verify expected update
-	ps.VerifyPdbStatus(t, pdbName, false, 2, 3, 3)
+	ps.VerifyPdbStatus(t, pdbName, false, 3, 3, 4)
 }
 
 // Create a pod with no controller, and verify that a PDB with a percentage
@@ -366,8 +366,8 @@ func TestReplicationController(t *testing.T) {
 
 	dc, ps := newFakeDisruptionController()
 
-	// 67% should round up to 3
-	pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("67%"))
+	// 34% should round up to 2
+	pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("34%"))
 	add(t, dc.pdbLister.Store, pdb)
 	rc, _ := newReplicationController(t, 3)
 	rc.Spec.Selector = labels
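The percentage change follows from the same arithmetic, assuming minAvailable percentages are rounded up as the comments indicate: against 3 replicas, 67% rounds up to ceil(0.67 * 3) = ceil(2.01) = 3, and under the stricter check a disruption would then require 4 healthy pods, which this test never creates; 34% rounds up to ceil(0.34 * 3) = ceil(1.02) = 2, so 3 healthy pods suffice (3 - 1 >= 2) to permit a disruption.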
@@ -386,9 +386,9 @@ func TestReplicationController(t *testing.T) {
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 		if i < 2 {
-			ps.VerifyPdbStatus(t, pdbName, false, i+1, 3, 3)
+			ps.VerifyPdbStatus(t, pdbName, false, i+1, 2, 3)
 		} else {
-			ps.VerifyPdbStatus(t, pdbName, true, 3, 3, 3)
+			ps.VerifyPdbStatus(t, pdbName, true, 3, 2, 3)
 		}
 	}
 
@@ -411,9 +411,18 @@ func TestTwoControllers(t *testing.T) {
 	}
 	dc, ps := newFakeDisruptionController()
 
+	// These constants are related, but I avoid calculating the correct values in
+	// code. If you update a parameter here, recalculate the correct values for
+	// all of them. Further down in the test, we use these to control loops, and
+	// that level of logic is enough complexity for me.
+	const collectionSize int32 = 11   // How big each collection is
+	const minAvailable string = "28%" // minAvailable we'll specify
+	const minimumOne int32 = 4        // integer minimum with one controller
+	const minimumTwo int32 = 7        // integer minimum with two controllers
+
 	pdb, pdbName := newPodDisruptionBudget(t, intstr.FromString("28%"))
 	add(t, dc.pdbLister.Store, pdb)
-	rc, _ := newReplicationController(t, 11)
+	rc, _ := newReplicationController(t, collectionSize)
 	rc.Spec.Selector = rcLabels
 	add(t, dc.rcLister.Indexer, rc)
 	dc.sync(pdbName)
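The new constants follow the same assumed round-up rule: minimumOne = ceil(0.28 * 11) = ceil(3.08) = 4 is the integer minimum while only the ReplicationController's 11 pods are selected, and minimumTwo = ceil(0.28 * 22) = ceil(6.16) = 7 once the second collection doubles the expected count to 2 * collectionSize = 22.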
@@ -422,71 +431,74 @@ func TestTwoControllers(t *testing.T) {
 
 	pods := []*api.Pod{}
 
-	for i := int32(0); i < 11; i++ {
+	unavailablePods := collectionSize - minimumOne - 1
+	for i := int32(1); i <= collectionSize; i++ {
 		pod, _ := newPod(t, fmt.Sprintf("quux %d", i))
 		pods = append(pods, pod)
 		pod.Labels = rcLabels
-		if i < 7 {
+		if i <= unavailablePods {
 			pod.Status.Conditions = []api.PodCondition{}
 		}
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
-		if i < 7 {
-			ps.VerifyPdbStatus(t, pdbName, false, 0, 4, 11)
-		} else if i < 10 {
-			ps.VerifyPdbStatus(t, pdbName, false, i-6, 4, 11)
+		if i <= unavailablePods {
+			ps.VerifyPdbStatus(t, pdbName, false, 0, minimumOne, collectionSize)
+		} else if i-unavailablePods <= minimumOne {
+			ps.VerifyPdbStatus(t, pdbName, false, i-unavailablePods, minimumOne, collectionSize)
 		} else {
-			ps.VerifyPdbStatus(t, pdbName, true, 4, 4, 11)
+			ps.VerifyPdbStatus(t, pdbName, true, i-unavailablePods, minimumOne, collectionSize)
 		}
 	}
 
-	d, _ := newDeployment(t, 11)
+	d, _ := newDeployment(t, collectionSize)
 	d.Spec.Selector = newSel(dLabels)
 	add(t, dc.dLister.Indexer, d)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, 4, 4, 11)
+	ps.VerifyPdbStatus(t, pdbName, true, minimumOne+1, minimumOne, collectionSize)
 
-	rs, _ := newReplicaSet(t, 11)
+	rs, _ := newReplicaSet(t, collectionSize)
 	rs.Spec.Selector = newSel(dLabels)
 	rs.Labels = dLabels
 	add(t, dc.rsLister.Store, rs)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, 4, 4, 11)
+	ps.VerifyPdbStatus(t, pdbName, true, minimumOne+1, minimumOne, collectionSize)
 
-	for i := int32(0); i < 11; i++ {
+	// By the end of this loop, the number of ready pods should be N+2 (hence minimumTwo+2).
+	unavailablePods = 2*collectionSize - (minimumTwo + 2) - unavailablePods
+	for i := int32(1); i <= collectionSize; i++ {
 		pod, _ := newPod(t, fmt.Sprintf("quuux %d", i))
 		pods = append(pods, pod)
 		pod.Labels = dLabels
-		if i < 7 {
+		if i <= unavailablePods {
 			pod.Status.Conditions = []api.PodCondition{}
 		}
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
-		if i < 7 {
-			ps.VerifyPdbStatus(t, pdbName, false, 4, 7, 22)
-		} else if i < 9 {
-			ps.VerifyPdbStatus(t, pdbName, false, 4+i-6, 7, 22)
+		if i <= unavailablePods {
+			ps.VerifyPdbStatus(t, pdbName, false, minimumOne+1, minimumTwo, 2*collectionSize)
+		} else if i-unavailablePods <= minimumTwo-(minimumOne+1) {
+			ps.VerifyPdbStatus(t, pdbName, false, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize)
 		} else {
-			ps.VerifyPdbStatus(t, pdbName, true, 4+i-6, 7, 22)
+			ps.VerifyPdbStatus(t, pdbName, true, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize)
 		}
 	}
 
 	// Now we verify we can bring down 1 pod and a disruption is still permitted,
 	// but if we bring down two, it's not. Then we make the pod ready again and
 	// verify that a disruption is permitted again.
-	ps.VerifyPdbStatus(t, pdbName, true, 8, 7, 22)
-	pods[10].Status.Conditions = []api.PodCondition{}
-	update(t, dc.podLister.Indexer, pods[10])
+	ps.VerifyPdbStatus(t, pdbName, true, 2+minimumTwo, minimumTwo, 2*collectionSize)
+	pods[collectionSize-1].Status.Conditions = []api.PodCondition{}
+	update(t, dc.podLister.Indexer, pods[collectionSize-1])
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, 7, 7, 22)
+	ps.VerifyPdbStatus(t, pdbName, true, 1+minimumTwo, minimumTwo, 2*collectionSize)
 
-	pods[9].Status.Conditions = []api.PodCondition{}
-	update(t, dc.podLister.Indexer, pods[9])
+	pods[collectionSize-2].Status.Conditions = []api.PodCondition{}
+	update(t, dc.podLister.Indexer, pods[collectionSize-2])
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, false, 6, 7, 22)
+	ps.VerifyPdbStatus(t, pdbName, false, minimumTwo, minimumTwo, 2*collectionSize)
 
-	pods[10].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}
-	update(t, dc.podLister.Indexer, pods[10])
+	pods[collectionSize-1].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}
+	update(t, dc.podLister.Indexer, pods[collectionSize-1])
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, true, 7, 7, 22)
+	ps.VerifyPdbStatus(t, pdbName, true, 1+minimumTwo, minimumTwo, 2*collectionSize)
 }
@@ -61,7 +61,7 @@ var _ = framework.KubeDescribe("DisruptionController", func() {
 	It("should update PodDisruptionBudget status", func() {
 		createPodDisruptionBudgetOrDie(cs, ns, intstr.FromInt(2))
 
-		createPodsOrDie(cs, ns, 2)
+		createPodsOrDie(cs, ns, 3)
 
 		// Since disruptionAllowed starts out false, if we see it ever become true,
 		// that means the controller is working.
@@ -92,22 +92,22 @@ var _ = framework.KubeDescribe("DisruptionController", func() {
 		}, {
 			description:  "too few pods, absolute",
 			minAvailable: intstr.FromInt(2),
-			podCount:     1,
+			podCount:     2,
 			shouldDeny:   true,
 		}, {
 			description:  "enough pods, absolute",
 			minAvailable: intstr.FromInt(2),
-			podCount:     2,
+			podCount:     3,
 			shouldDeny:   false,
 		}, {
 			description:    "enough pods, replicaSet, percentage",
-			minAvailable:   intstr.FromString("100%"),
+			minAvailable:   intstr.FromString("90%"),
 			replicaSetSize: 10,
 			exclusive:      false,
 			shouldDeny:     false,
 		}, {
 			description:    "too few pods, replicaSet, percentage",
-			minAvailable:   intstr.FromString("100%"),
+			minAvailable:   intstr.FromString("90%"),
 			replicaSetSize: 10,
 			exclusive:      true,
 			shouldDeny:     true,
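The e2e table changes track the stricter rule as well: with a minAvailable of 2, evicting from 2 pods would leave only 1 (2 - 1 < 2), so the deny case now uses podCount 2 and the allow case needs podCount 3 (3 - 1 >= 2). For the replicaSet cases, 100% of 10 replicas would require 11 healthy pods before any eviction could be approved, so the tests switch to 90%, which (assuming round-up) gives ceil(0.9 * 10) = 9 and leaves exactly one pod's worth of headroom.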