Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 11:50:44 +00:00)
Merge pull request #36196 from mwielgus/disruptedpods
Automatic merge from submit-queue

Add DisruptedPod list to PodDisruptionBudgetStatus

To ensure that PodDisruptionBudget handling is race-free, a list of pods that were planned to be disrupted is added to the status. The API server, when evicting a pod, adds it to this list. The disruption controller skips the pods on that list when calculating the number of healthy pods. Entries are removed from the list either when the pods are gone or when it turns out they were not actually disrupted.

Commit: a10975d05d
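In outline, the fix is a two-sided handshake between the API server's eviction handler and the disruption controller. The sketch below is a simplified illustration of that handshake, not the actual Kubernetes code (the real changes follow in the diff); the names `evict`, `countHealthy`, and `deletionTimeout` are hypothetical stand-ins.

```go
package sketch

import "time"

// deletionTimeout mirrors the controller's DeletionTimeout: an entry older
// than this is assumed to have never been deleted and is dropped.
const deletionTimeout = 2 * time.Minute

// evict models the API-server side: before an eviction is allowed, the pod
// is recorded in the status map, so the controller cannot keep counting it
// as healthy while the deletion is in flight.
func evict(disruptedPods map[string]time.Time, podName string) {
	disruptedPods[podName] = time.Now()
}

// countHealthy models the controller side: pods with a recent eviction mark
// are skipped even if they still look Ready.
func countHealthy(readyPods []string, disruptedPods map[string]time.Time, now time.Time) int {
	healthy := 0
	for _, name := range readyPods {
		if t, ok := disruptedPods[name]; ok && t.Add(deletionTimeout).After(now) {
			continue // eviction approved, deletion not yet observed
		}
		healthy++
	}
	return healthy
}
```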
@@ -33879,7 +33879,8 @@
    "disruptionsAllowed",
    "currentHealthy",
    "desiredHealthy",
-   "expectedPods"
+   "expectedPods",
+   "disruptedPods"
   ],
   "properties": {
    "currentHealthy": {
@@ -33892,6 +33893,13 @@
     "type": "integer",
     "format": "int32"
    },
+   "disruptedPods": {
+    "description": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by the PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there, it will be removed from the list automatically by the PodDisruptionBudget controller after some time. If everything goes smoothly, this map should be empty most of the time. A large number of entries in the map may indicate problems with pod deletions.",
+    "type": "object",
+    "additionalProperties": {
+     "$ref": "#/definitions/unversioned.Time"
+    }
+   },
    "disruptionsAllowed": {
     "description": "Number of pod disruptions that are currently allowed.",
     "type": "integer",
@@ -1239,7 +1239,8 @@
    "disruptionsAllowed",
    "currentHealthy",
    "desiredHealthy",
-   "expectedPods"
+   "expectedPods",
+   "disruptedPods"
   ],
   "properties": {
    "disruptionsAllowed": {
@@ -1261,6 +1262,10 @@
     "type": "integer",
     "format": "int32",
     "description": "total number of pods counted by this disruption budget"
    },
+   "disruptedPods": {
+    "type": "object",
+    "description": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by the PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there, it will be removed from the list automatically by the PodDisruptionBudget controller after some time. If everything goes smoothly, this map should be empty most of the time. A large number of entries in the map may indicate problems with pod deletions."
+   }
   }
  },
@@ -841,6 +841,13 @@ span.icon > [class^="icon-"], span.icon > [class*=" icon-"] { cursor: default; }
 <td class="tableblock halign-left valign-top"><p class="tableblock">integer (int32)</p></td>
 <td class="tableblock halign-left valign-top"></td>
 </tr>
+<tr>
+<td class="tableblock halign-left valign-top"><p class="tableblock">disruptedPods</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by the PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn’t occur and a pod is still there, it will be removed from the list automatically by the PodDisruptionBudget controller after some time. If everything goes smoothly, this map should be empty most of the time. A large number of entries in the map may indicate problems with pod deletions.</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
+<td class="tableblock halign-left valign-top"><p class="tableblock">object</p></td>
+<td class="tableblock halign-left valign-top"></td>
+</tr>
 </tbody>
 </table>

@@ -1388,7 +1395,7 @@ Examples:<br>
 </div>
 <div id="footer">
 <div id="footer-text">
-Last updated 2016-11-04 22:52:42 UTC
+Last updated 2016-11-06 12:57:24 UTC
 </div>
 </div>
 </body>
File diff suppressed because it is too large
@@ -51,6 +51,19 @@ type PodDisruptionBudgetStatus struct {

 	// total number of pods counted by this disruption budget
 	ExpectedPods int32 `json:"expectedPods"`
+
+	// DisruptedPods contains information about pods whose eviction was
+	// processed by the API server eviction subresource handler but has not
+	// yet been observed by the PodDisruptionBudget controller.
+	// A pod will be in this map from the time when the API server processed the
+	// eviction request to the time when the pod is seen by the PDB controller
+	// as having been marked for deletion (or after a timeout). The key in the map is the name of the pod
+	// and the value is the time when the API server processed the eviction request. If
+	// the deletion didn't occur and a pod is still there, it will be removed from
+	// the list automatically by the PodDisruptionBudget controller after some time.
+	// If everything goes smoothly, this map should be empty most of the time.
+	// A large number of entries in the map may indicate problems with pod deletions.
+	DisruptedPods map[string]unversioned.Time `json:"disruptedPods" protobuf:"bytes,5,rep,name=disruptedPods"`
 }

 // +genclient=true
@@ -34,6 +34,7 @@ go_library(
         "//pkg/util/intstr:go_default_library",
         "//pkg/watch/versioned:go_default_library",
         "//vendor:github.com/gogo/protobuf/proto",
+        "//vendor:github.com/gogo/protobuf/sortkeys",
         "//vendor:github.com/ugorji/go/codec",
     ],
 )
@@ -42,6 +42,7 @@ import k8s_io_kubernetes_pkg_api_v1 "k8s.io/kubernetes/pkg/api/v1"

 import strings "strings"
 import reflect "reflect"
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"

 import io "io"
@@ -262,6 +263,28 @@ func (m *PodDisruptionBudgetStatus) MarshalTo(data []byte) (int, error) {
 	data[i] = 0x20
 	i++
 	i = encodeVarintGenerated(data, i, uint64(m.ExpectedPods))
+	if len(m.DisruptedPods) > 0 {
+		for k := range m.DisruptedPods {
+			data[i] = 0x2a
+			i++
+			v := m.DisruptedPods[k]
+			msgSize := (&v).Size()
+			mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + msgSize + sovGenerated(uint64(msgSize))
+			i = encodeVarintGenerated(data, i, uint64(mapSize))
+			data[i] = 0xa
+			i++
+			i = encodeVarintGenerated(data, i, uint64(len(k)))
+			i += copy(data[i:], k)
+			data[i] = 0x12
+			i++
+			i = encodeVarintGenerated(data, i, uint64((&v).Size()))
+			n9, err := (&v).MarshalTo(data[i:])
+			if err != nil {
+				return 0, err
+			}
+			i += n9
+		}
+	}
 	return i, nil
 }
@@ -349,6 +372,15 @@ func (m *PodDisruptionBudgetStatus) Size() (n int) {
 	n += 1 + sovGenerated(uint64(m.CurrentHealthy))
 	n += 1 + sovGenerated(uint64(m.DesiredHealthy))
 	n += 1 + sovGenerated(uint64(m.ExpectedPods))
+	if len(m.DisruptedPods) > 0 {
+		for k, v := range m.DisruptedPods {
+			_ = k
+			_ = v
+			l = v.Size()
+			mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l))
+			n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize))
+		}
+	}
 	return n
 }
@@ -414,11 +446,22 @@ func (this *PodDisruptionBudgetStatus) String() string {
 	if this == nil {
 		return "nil"
 	}
+	keysForDisruptedPods := make([]string, 0, len(this.DisruptedPods))
+	for k := range this.DisruptedPods {
+		keysForDisruptedPods = append(keysForDisruptedPods, k)
+	}
+	github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods)
+	mapStringForDisruptedPods := "map[string]k8s_io_kubernetes_pkg_api_unversioned.Time{"
+	for _, k := range keysForDisruptedPods {
+		mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k])
+	}
+	mapStringForDisruptedPods += "}"
 	s := strings.Join([]string{`&PodDisruptionBudgetStatus{`,
 		`PodDisruptionsAllowed:` + fmt.Sprintf("%v", this.PodDisruptionsAllowed) + `,`,
 		`CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`,
 		`DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`,
 		`ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`,
+		`DisruptedPods:` + mapStringForDisruptedPods + `,`,
 		`}`,
 	}, "")
 	return s
@@ -1013,6 +1056,122 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(data []byte) error {
 					break
 				}
 			}
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				msglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postIndex := iNdEx + msglen
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			var keykey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				keykey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var stringLenmapkey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				stringLenmapkey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLenmapkey := int(stringLenmapkey)
+			if intStringLenmapkey < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postStringIndexmapkey := iNdEx + intStringLenmapkey
+			if postStringIndexmapkey > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapkey := string(data[iNdEx:postStringIndexmapkey])
+			iNdEx = postStringIndexmapkey
+			var valuekey uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				valuekey |= (uint64(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			var mapmsglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowGenerated
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := data[iNdEx]
+				iNdEx++
+				mapmsglen |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if mapmsglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			postmsgIndex := iNdEx + mapmsglen
+			if mapmsglen < 0 {
+				return ErrInvalidLengthGenerated
+			}
+			if postmsgIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			mapvalue := &k8s_io_kubernetes_pkg_api_unversioned.Time{}
+			if err := mapvalue.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {
+				return err
+			}
+			iNdEx = postmsgIndex
+			if m.DisruptedPods == nil {
+				m.DisruptedPods = make(map[string]k8s_io_kubernetes_pkg_api_unversioned.Time)
+			}
+			m.DisruptedPods[mapkey] = *mapvalue
+			iNdEx = postIndex
 		default:
 			iNdEx = preIndex
 			skippy, err := skipGenerated(data[iNdEx:])
@@ -1140,46 +1299,51 @@ var (
 )

 var fileDescriptorGenerated = []byte{
-	// 655 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x93, 0x4d, 0x4f, 0xdb, 0x4c,
-	// (remaining bytes of the old gzipped descriptor omitted)
+	// 728 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x94, 0xcf, 0x6e, 0xd3, 0x4e,
+	// (remaining bytes of the new gzipped descriptor omitted)
 }
@@ -86,5 +86,18 @@ message PodDisruptionBudgetStatus {

   // total number of pods counted by this disruption budget
   optional int32 expectedPods = 4;
+
+  // DisruptedPods contains information about pods whose eviction was
+  // processed by the API server eviction subresource handler but has not
+  // yet been observed by the PodDisruptionBudget controller.
+  // A pod will be in this map from the time when the API server processed the
+  // eviction request to the time when the pod is seen by the PDB controller
+  // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod
+  // and the value is the time when the API server processed the eviction request. If
+  // the deletion didn't occur and a pod is still there, it will be removed from
+  // the list automatically by the PodDisruptionBudget controller after some time.
+  // If everything goes smoothly, this map should be empty most of the time.
+  // A large number of entries in the map may indicate problems with pod deletions.
+  map<string, k8s.io.kubernetes.pkg.api.unversioned.Time> disruptedPods = 5;
 }
File diff suppressed because it is too large
@@ -49,6 +49,19 @@ type PodDisruptionBudgetStatus struct {

 	// total number of pods counted by this disruption budget
 	ExpectedPods int32 `json:"expectedPods" protobuf:"varint,4,opt,name=expectedPods"`
+
+	// DisruptedPods contains information about pods whose eviction was
+	// processed by the API server eviction subresource handler but has not
+	// yet been observed by the PodDisruptionBudget controller.
+	// A pod will be in this map from the time when the API server processed the
+	// eviction request to the time when the pod is seen by the PDB controller
+	// as having been marked for deletion (or after a timeout). The key in the map is the name of the pod
+	// and the value is the time when the API server processed the eviction request. If
+	// the deletion didn't occur and a pod is still there, it will be removed from
+	// the list automatically by the PodDisruptionBudget controller after some time.
+	// If everything goes smoothly, this map should be empty most of the time.
+	// A large number of entries in the map may indicate problems with pod deletions.
+	DisruptedPods map[string]unversioned.Time `json:"disruptedPods" protobuf:"bytes,5,rep,name=disruptedPods"`
 }

 // +genclient=true
@@ -71,6 +71,7 @@ var map_PodDisruptionBudgetStatus = map[string]string{
 	"currentHealthy": "current number of healthy pods",
 	"desiredHealthy": "minimum desired number of healthy pods",
 	"expectedPods":   "total number of pods counted by this disruption budget",
+	"disruptedPods":  "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by the PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there, it will be removed from the list automatically by the PodDisruptionBudget controller after some time. If everything goes smoothly, this map should be empty most of the time. A large number of entries in the map may indicate problems with pod deletions.",
 }

 func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string {
@@ -158,6 +158,7 @@ func autoConvert_v1beta1_PodDisruptionBudgetStatus_To_policy_PodDisruptionBudget
 	out.CurrentHealthy = in.CurrentHealthy
 	out.DesiredHealthy = in.DesiredHealthy
 	out.ExpectedPods = in.ExpectedPods
+	out.DisruptedPods = *(*map[string]unversioned.Time)(unsafe.Pointer(&in.DisruptedPods))
 	return nil
 }

@@ -170,6 +171,7 @@ func autoConvert_policy_PodDisruptionBudgetStatus_To_v1beta1_PodDisruptionBudget
 	out.CurrentHealthy = in.CurrentHealthy
 	out.DesiredHealthy = in.DesiredHealthy
 	out.ExpectedPods = in.ExpectedPods
+	out.DisruptedPods = *(*map[string]unversioned.Time)(unsafe.Pointer(&in.DisruptedPods))
 	return nil
 }
@@ -76,7 +76,9 @@ func DeepCopy_v1beta1_PodDisruptionBudget(in interface{}, out interface{}, c *co
 		if err := DeepCopy_v1beta1_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, c); err != nil {
 			return err
 		}
-		out.Status = in.Status
+		if err := DeepCopy_v1beta1_PodDisruptionBudgetStatus(&in.Status, &out.Status, c); err != nil {
+			return err
+		}
 		return nil
 	}
 }

@@ -128,6 +130,15 @@ func DeepCopy_v1beta1_PodDisruptionBudgetStatus(in interface{}, out interface{},
 		out.CurrentHealthy = in.CurrentHealthy
 		out.DesiredHealthy = in.DesiredHealthy
 		out.ExpectedPods = in.ExpectedPods
+		if in.DisruptedPods != nil {
+			in, out := &in.DisruptedPods, &out.DisruptedPods
+			*out = make(map[string]unversioned.Time)
+			for key, val := range *in {
+				(*out)[key] = val.DeepCopy()
+			}
+		} else {
+			out.DisruptedPods = nil
+		}
 		return nil
 	}
 }
@@ -76,7 +76,9 @@ func DeepCopy_policy_PodDisruptionBudget(in interface{}, out interface{}, c *con
 		if err := DeepCopy_policy_PodDisruptionBudgetSpec(&in.Spec, &out.Spec, c); err != nil {
 			return err
 		}
-		out.Status = in.Status
+		if err := DeepCopy_policy_PodDisruptionBudgetStatus(&in.Status, &out.Status, c); err != nil {
+			return err
+		}
 		return nil
 	}
 }

@@ -128,6 +130,15 @@ func DeepCopy_policy_PodDisruptionBudgetStatus(in interface{}, out interface{},
 		out.CurrentHealthy = in.CurrentHealthy
 		out.DesiredHealthy = in.DesiredHealthy
 		out.ExpectedPods = in.ExpectedPods
+		if in.DisruptedPods != nil {
+			in, out := &in.DisruptedPods, &out.DisruptedPods
+			*out = make(map[string]unversioned.Time)
+			for key, val := range *in {
+				(*out)[key] = val.DeepCopy()
+			}
+		} else {
+			out.DisruptedPods = nil
+		}
 		return nil
 	}
 }
@@ -52,5 +52,6 @@ go_test(
         "//pkg/controller:go_default_library",
         "//pkg/util/intstr:go_default_library",
         "//pkg/util/uuid:go_default_library",
+        "//pkg/util/workqueue:go_default_library",
     ],
 )
@@ -18,6 +18,7 @@ package disruption

 import (
 	"fmt"
+	"reflect"
 	"time"

 	"k8s.io/kubernetes/pkg/api"

@@ -43,6 +44,17 @@ import (

 const statusUpdateRetries = 2

+// DeletionTimeout sets the maximum time from the moment a pod is added to DisruptedPods in PDB.Status
+// to the time when the pod is expected to be seen by the PDB controller as having been marked for deletion.
+// If the pod was not marked for deletion during that time, it is assumed that it won't be deleted at
+// all and the corresponding entry can be removed from pdb.Status.DisruptedPods. It is assumed that
+// pod/pdb apiserver-to-controller latency is relatively small (like 1-2 sec), so the value below should
+// be more than enough.
+// If the controller is running on a different node, it is important that the two nodes have synced
+// clocks (via NTP, for example). Otherwise the PodDisruptionBudget controller may not provide enough
+// protection against unwanted pod disruptions.
+const DeletionTimeout = 2 * 60 * time.Second
+
 type updater func(*policy.PodDisruptionBudget) error

 type DisruptionController struct {
@@ -68,7 +80,8 @@ type DisruptionController struct {
 	dLister cache.StoreToDeploymentLister

 	// PodDisruptionBudget keys that need to be synced.
-	queue workqueue.RateLimitingInterface
+	queue        workqueue.RateLimitingInterface
+	recheckQueue workqueue.DelayingInterface

 	broadcaster record.EventBroadcaster
 	recorder    record.EventRecorder
@@ -92,6 +105,7 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i
 		kubeClient:    kubeClient,
 		podController: podInformer.GetController(),
 		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "disruption"),
+		recheckQueue:  workqueue.NewNamedDelayingQueue("disruption-recheck"),
 		broadcaster:   record.NewBroadcaster(),
 	}
 	dc.recorder = dc.broadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
@@ -270,6 +284,8 @@ func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
 	go dc.rsController.Run(stopCh)
 	go dc.dController.Run(stopCh)
 	go wait.Until(dc.worker, time.Second, stopCh)
+	go wait.Until(dc.recheckWorker, time.Second, stopCh)

 	<-stopCh
 	glog.V(0).Infof("Shutting down disruption controller")
 }
@@ -355,6 +371,15 @@ func (dc *DisruptionController) enqueuePdb(pdb *policy.PodDisruptionBudget) {
 	dc.queue.Add(key)
 }

+func (dc *DisruptionController) enqueuePdbForRecheck(pdb *policy.PodDisruptionBudget, delay time.Duration) {
+	key, err := controller.KeyFunc(pdb)
+	if err != nil {
+		glog.Errorf("Couldn't get key for PodDisruptionBudget object %+v: %v", pdb, err)
+		return
+	}
+	dc.recheckQueue.AddAfter(key, delay)
+}
+
 func (dc *DisruptionController) getPdbForPod(pod *api.Pod) *policy.PodDisruptionBudget {
 	// GetPodPodDisruptionBudgets returns an error only if no
 	// PodDisruptionBudgets are found. We don't return that as an error to the
@@ -417,6 +442,21 @@ func (dc *DisruptionController) processNextWorkItem() bool {
 	return true
 }

+func (dc *DisruptionController) recheckWorker() {
+	for dc.processNextRecheckWorkItem() {
+	}
+}
+
+func (dc *DisruptionController) processNextRecheckWorkItem() bool {
+	dKey, quit := dc.recheckQueue.Get()
+	if quit {
+		return false
+	}
+	defer dc.recheckQueue.Done(dKey)
+	dc.queue.AddRateLimited(dKey)
+	return true
+}
+
 func (dc *DisruptionController) sync(key string) error {
 	startTime := time.Now()
 	defer func() {
@@ -452,9 +492,17 @@ func (dc *DisruptionController) trySync(pdb *policy.PodDisruptionBudget) error {
 		return err
 	}

-	currentHealthy := countHealthyPods(pods)
-	err = dc.updatePdbSpec(pdb, currentHealthy, desiredHealthy, expectedCount)
+	currentTime := time.Now()
+	disruptedPods, recheckTime := buildDisruptedPodMap(pods, pdb, currentTime)
+	currentHealthy := countHealthyPods(pods, disruptedPods, currentTime)
+	err = dc.updatePdbStatus(pdb, currentHealthy, desiredHealthy, expectedCount, disruptedPods)
+
+	if err == nil && recheckTime != nil {
+		// There is always at most one PDB waiting with a particular name in the queue,
+		// and each PDB in the queue is associated with the lowest timestamp
+		// that was supplied when a PDB with that name was added.
+		dc.enqueuePdbForRecheck(pdb, recheckTime.Sub(currentTime))
+	}
 	return err
 }
@@ -527,20 +575,60 @@ func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBud
 	return
 }

-func countHealthyPods(pods []*api.Pod) (currentHealthy int32) {
+func countHealthyPods(pods []*api.Pod, disruptedPods map[string]unversioned.Time, currentTime time.Time) (currentHealthy int32) {
 Pod:
 	for _, pod := range pods {
-		for _, c := range pod.Status.Conditions {
-			if c.Type == api.PodReady && c.Status == api.ConditionTrue {
-				currentHealthy++
-				continue Pod
-			}
-		}
+		// Pod is being deleted.
+		if pod.DeletionTimestamp != nil {
+			continue
+		}
+		// Pod is expected to be deleted soon.
+		if disruptionTime, found := disruptedPods[pod.Name]; found && disruptionTime.Time.Add(DeletionTimeout).After(currentTime) {
+			continue
+		}
+		if api.IsPodReady(pod) {
+			currentHealthy++
+			continue Pod
+		}
 	}

 	return
 }

+// buildDisruptedPodMap builds a new DisruptedPods map, dropping entries that refer to pods
+// which no longer exist, are already being deleted, or were never actually deleted.
+// It also returns the time when this check should be repeated, if any.
+func buildDisruptedPodMap(pods []*api.Pod, pdb *policy.PodDisruptionBudget, currentTime time.Time) (map[string]unversioned.Time, *time.Time) {
+	disruptedPods := pdb.Status.DisruptedPods
+	result := make(map[string]unversioned.Time)
+	var recheckTime *time.Time
+
+	if disruptedPods == nil || len(disruptedPods) == 0 {
+		return result, recheckTime
+	}
+	for _, pod := range pods {
+		if pod.DeletionTimestamp != nil {
+			// Already being deleted.
+			continue
+		}
+		disruptionTime, found := disruptedPods[pod.Name]
+		if !found {
+			// Pod not on the list.
+			continue
+		}
+		expectedDeletion := disruptionTime.Time.Add(DeletionTimeout)
+		if expectedDeletion.Before(currentTime) {
+			glog.V(1).Infof("Pod %s/%s was expected to be deleted at %s but it wasn't, updating pdb %s/%s",
+				pod.Namespace, pod.Name, disruptionTime.String(), pdb.Namespace, pdb.Name)
+		} else {
+			if recheckTime == nil || expectedDeletion.Before(*recheckTime) {
+				recheckTime = &expectedDeletion
+			}
+			result[pod.Name] = disruptionTime
+		}
+	}
+	return result, recheckTime
+}
+
 // failSafe is an attempt to at least update the PodDisruptionsAllowed field to
 // 0 if everything else has failed. This is one place we
 // implement the "fail open" part of the design since if we manage to update
@@ -557,7 +645,9 @@ func (dc *DisruptionController) failSafe(pdb *policy.PodDisruptionBudget) error
 	return dc.getUpdater()(&newPdb)
 }

-func (dc *DisruptionController) updatePdbSpec(pdb *policy.PodDisruptionBudget, currentHealthy, desiredHealthy, expectedCount int32) error {
+func (dc *DisruptionController) updatePdbStatus(pdb *policy.PodDisruptionBudget, currentHealthy, desiredHealthy, expectedCount int32,
+	disruptedPods map[string]unversioned.Time) error {
+
 	// We require expectedCount to be > 0 so that PDBs which currently match no
 	// pods are in a safe state when their first pods appear but this controller
 	// has not updated their status yet. This isn't the only race, but it's a

@@ -567,7 +657,11 @@ func (dc *DisruptionController) updatePdbSpec(pdb *policy.PodDisruptionBudget, c
 		disruptionsAllowed = 0
 	}

-	if pdb.Status.CurrentHealthy == currentHealthy && pdb.Status.DesiredHealthy == desiredHealthy && pdb.Status.ExpectedPods == expectedCount && pdb.Status.PodDisruptionsAllowed == disruptionsAllowed {
+	if pdb.Status.CurrentHealthy == currentHealthy &&
+		pdb.Status.DesiredHealthy == desiredHealthy &&
+		pdb.Status.ExpectedPods == expectedCount &&
+		pdb.Status.PodDisruptionsAllowed == disruptionsAllowed &&
+		reflect.DeepEqual(pdb.Status.DisruptedPods, disruptedPods) {
 		return nil
 	}

@@ -582,6 +676,7 @@ func (dc *DisruptionController) updatePdbSpec(pdb *policy.PodDisruptionBudget, c
 		DesiredHealthy:        desiredHealthy,
 		ExpectedPods:          expectedCount,
 		PodDisruptionsAllowed: disruptionsAllowed,
+		DisruptedPods:         disruptedPods,
 	}

 	return dc.getUpdater()(&newPdb)
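To make the timeout bookkeeping above concrete, here is a small, self-contained sketch (with assumed names, not project code) of the pruning rule used by buildDisruptedPodMap: an entry survives only while disruptionTime + DeletionTimeout is still in the future, and the earliest surviving deadline becomes the delay for the recheck queue.

```go
package main

import (
	"fmt"
	"time"
)

const deletionTimeout = 2 * time.Minute

func main() {
	now := time.Now()
	entries := map[string]time.Time{
		"fresh":   now.Add(-30 * time.Second), // still within the timeout: kept
		"expired": now.Add(-5 * time.Minute),  // past the timeout: dropped
	}
	var recheck *time.Time
	kept := map[string]time.Time{}
	for name, t := range entries {
		deadline := t.Add(deletionTimeout)
		if deadline.Before(now) {
			continue // assume the pod will never be deleted; drop the entry
		}
		if recheck == nil || deadline.Before(*recheck) {
			recheck = &deadline
		}
		kept[name] = t
	}
	// Prints: kept=1, recheck in 1m30s
	fmt.Printf("kept=%d, recheck in %v\n", len(kept), recheck.Sub(now).Round(time.Second))
}
```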
@@ -21,6 +21,7 @@ import (
 	"reflect"
 	"runtime/debug"
 	"testing"
+	"time"

 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"

@@ -32,6 +33,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/util/uuid"
+	"k8s.io/kubernetes/pkg/util/workqueue"
 )

 type pdbStates map[string]policy.PodDisruptionBudget

@@ -54,12 +56,14 @@ func (ps *pdbStates) Get(key string) policy.PodDisruptionBudget {
 	return (*ps)[key]
 }

-func (ps *pdbStates) VerifyPdbStatus(t *testing.T, key string, disruptionsAllowed, currentHealthy, desiredHealthy, expectedPods int32) {
+func (ps *pdbStates) VerifyPdbStatus(t *testing.T, key string, disruptionsAllowed, currentHealthy, desiredHealthy, expectedPods int32,
+	disruptedPodMap map[string]unversioned.Time) {
 	expectedStatus := policy.PodDisruptionBudgetStatus{
 		PodDisruptionsAllowed: disruptionsAllowed,
 		CurrentHealthy:        currentHealthy,
 		DesiredHealthy:        desiredHealthy,
 		ExpectedPods:          expectedPods,
+		DisruptedPods:         disruptedPodMap,
 	}
 	actualStatus := ps.Get(key).Status
 	if !reflect.DeepEqual(actualStatus, expectedStatus) {

@@ -251,11 +255,11 @@ func TestNoSelector(t *testing.T) {

 	add(t, dc.pdbLister.Store, pdb)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0)
+	ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0, map[string]unversioned.Time{})

 	add(t, dc.podLister.Indexer, pod)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0)
+	ps.VerifyPdbStatus(t, pdbName, 0, 0, 3, 0, map[string]unversioned.Time{})
 }

 // Verify that available/expected counts go up as we add pods, then verify that

@@ -270,13 +274,13 @@ func TestUnavailable(t *testing.T) {
 	// Add three pods, verifying that the counts go up at each step.
 	pods := []*api.Pod{}
 	for i := int32(0); i < 4; i++ {
-		ps.VerifyPdbStatus(t, pdbName, 0, i, 3, i)
+		ps.VerifyPdbStatus(t, pdbName, 0, i, 3, i, map[string]unversioned.Time{})
 		pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i))
 		pods = append(pods, pod)
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 	}
-	ps.VerifyPdbStatus(t, pdbName, 1, 4, 3, 4)
+	ps.VerifyPdbStatus(t, pdbName, 1, 4, 3, 4, map[string]unversioned.Time{})

 	// Now set one pod as unavailable
 	pods[0].Status.Conditions = []api.PodCondition{}

@@ -284,7 +288,7 @@
 	dc.sync(pdbName)

 	// Verify expected update
-	ps.VerifyPdbStatus(t, pdbName, 0, 3, 3, 4)
+	ps.VerifyPdbStatus(t, pdbName, 0, 3, 3, 4, map[string]unversioned.Time{})
 }

 // Create a pod with no controller, and verify that a PDB with a percentage

@@ -318,7 +322,7 @@ func TestReplicaSet(t *testing.T) {
 	pod, _ := newPod(t, "pod")
 	add(t, dc.podLister.Indexer, pod)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 10)
+	ps.VerifyPdbStatus(t, pdbName, 0, 1, 2, 10, map[string]unversioned.Time{})
 }

 // Verify that multiple controllers don't allow the PDB to be set true.

@@ -376,9 +380,10 @@ func TestReplicationController(t *testing.T) {
 	rc.Spec.Selector = labels
 	add(t, dc.rcLister.Indexer, rc)
 	dc.sync(pdbName)

 	// It starts out at 0 expected because, with no pods, the PDB doesn't know
 	// about the RC. This is a known bug. TODO(mml): file issue
-	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0)
+	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]unversioned.Time{})

 	pods := []*api.Pod{}

@@ -389,9 +394,9 @@
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 		if i < 2 {
-			ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3)
+			ps.VerifyPdbStatus(t, pdbName, 0, i+1, 2, 3, map[string]unversioned.Time{})
 		} else {
-			ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3)
+			ps.VerifyPdbStatus(t, pdbName, 1, 3, 2, 3, map[string]unversioned.Time{})
 		}
 	}

@@ -430,7 +435,7 @@ func TestTwoControllers(t *testing.T) {
 	add(t, dc.rcLister.Indexer, rc)
 	dc.sync(pdbName)

-	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0)
+	ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]unversioned.Time{})

 	pods := []*api.Pod{}

@@ -445,11 +450,11 @@
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 		if i <= unavailablePods {
-			ps.VerifyPdbStatus(t, pdbName, 0, 0, minimumOne, collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, 0, 0, minimumOne, collectionSize, map[string]unversioned.Time{})
 		} else if i-unavailablePods <= minimumOne {
-			ps.VerifyPdbStatus(t, pdbName, 0, i-unavailablePods, minimumOne, collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, 0, i-unavailablePods, minimumOne, collectionSize, map[string]unversioned.Time{})
 		} else {
-			ps.VerifyPdbStatus(t, pdbName, 1, i-unavailablePods, minimumOne, collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, 1, i-unavailablePods, minimumOne, collectionSize, map[string]unversioned.Time{})
 		}
 	}

@@ -457,14 +462,14 @@
 	d.Spec.Selector = newSel(dLabels)
 	add(t, dc.dLister.Indexer, d)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize, map[string]unversioned.Time{})

 	rs, _ := newReplicaSet(t, collectionSize)
 	rs.Spec.Selector = newSel(dLabels)
 	rs.Labels = dLabels
 	add(t, dc.rsLister.Indexer, rs)
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 1, minimumOne+1, minimumOne, collectionSize, map[string]unversioned.Time{})

 	// By the end of this loop, the number of ready pods should be N+2 (hence minimumTwo+2).
 	unavailablePods = 2*collectionSize - (minimumTwo + 2) - unavailablePods

@@ -478,33 +483,33 @@
 		add(t, dc.podLister.Indexer, pod)
 		dc.sync(pdbName)
 		if i <= unavailablePods {
-			ps.VerifyPdbStatus(t, pdbName, 0, minimumOne+1, minimumTwo, 2*collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, 0, minimumOne+1, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})
 		} else if i-unavailablePods <= minimumTwo-(minimumOne+1) {
-			ps.VerifyPdbStatus(t, pdbName, 0, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize)
+			ps.VerifyPdbStatus(t, pdbName, 0, (minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize, map[string]unversioned.Time{})
 		} else {
 			ps.VerifyPdbStatus(t, pdbName, i-unavailablePods-(minimumTwo-(minimumOne+1)),
-				(minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize)
+				(minimumOne+1)+(i-unavailablePods), minimumTwo, 2*collectionSize, map[string]unversioned.Time{})
 		}
 	}

 	// Now we verify we can bring down 1 pod and a disruption is still permitted,
 	// but if we bring down two, it's not. Then we make the pod ready again and
 	// verify that a disruption is permitted again.
-	ps.VerifyPdbStatus(t, pdbName, 2, 2+minimumTwo, minimumTwo, 2*collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 2, 2+minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})
 	pods[collectionSize-1].Status.Conditions = []api.PodCondition{}
 	update(t, dc.podLister.Indexer, pods[collectionSize-1])
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})

 	pods[collectionSize-2].Status.Conditions = []api.PodCondition{}
 	update(t, dc.podLister.Indexer, pods[collectionSize-2])
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, 0, minimumTwo, minimumTwo, 2*collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 0, minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})

 	pods[collectionSize-1].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}
 	update(t, dc.podLister.Indexer, pods[collectionSize-1])
 	dc.sync(pdbName)
-	ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize)
+	ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})
 }

 // Test pdb doesn't exist

@@ -516,3 +521,30 @@ func TestPDBNotExist(t *testing.T) {
 		t.Errorf("Unexpected error: %v, expect nil", err)
 	}
 }
+
+func TestUpdateDisruptedPods(t *testing.T) {
+	dc, ps := newFakeDisruptionController()
+	dc.recheckQueue = workqueue.NewNamedDelayingQueue("pdb-queue")
+	pdb, pdbName := newPodDisruptionBudget(t, intstr.FromInt(1))
+	currentTime := time.Now()
+	pdb.Status.DisruptedPods = map[string]unversioned.Time{
+		"p1":       {Time: currentTime},                       // Should be removed, pod deletion started.
+		"p2":       {Time: currentTime.Add(-5 * time.Minute)}, // Should be removed, expired.
+		"p3":       {Time: currentTime},                       // Should remain, pod untouched.
+		"notthere": {Time: currentTime},                       // Should be removed, pod deleted.
+	}
+	add(t, dc.pdbLister.Store, pdb)
+
+	pod1, _ := newPod(t, "p1")
+	pod1.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
+	pod2, _ := newPod(t, "p2")
+	pod3, _ := newPod(t, "p3")
+
+	add(t, dc.podLister.Indexer, pod1)
+	add(t, dc.podLister.Indexer, pod2)
+	add(t, dc.podLister.Indexer, pod3)
+
+	dc.sync(pdbName)
+
+	ps.VerifyPdbStatus(t, pdbName, 0, 1, 1, 3, map[string]unversioned.Time{"p3": {Time: currentTime}})
+}
@@ -5240,11 +5240,25 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
 						Format: "int32",
 					},
 				},
+				"disruptedPods": {
+					SchemaProps: spec.SchemaProps{
+						Description: "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by the PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there, it will be removed from the list automatically by the PodDisruptionBudget controller after some time. If everything goes smoothly, this map should be empty most of the time. A large number of entries in the map may indicate problems with pod deletions.",
+						Type:        []string{"object"},
+						AdditionalProperties: &spec.SchemaOrBool{
+							Schema: &spec.Schema{
+								SchemaProps: spec.SchemaProps{
+									Ref: spec.MustCreateRef("#/definitions/unversioned.Time"),
+								},
+							},
+						},
+					},
+				},
 			},
-			Required: []string{"disruptionsAllowed", "currentHealthy", "desiredHealthy", "expectedPods"},
+			Required: []string{"disruptionsAllowed", "currentHealthy", "desiredHealthy", "expectedPods", "disruptedPods"},
 		},
 	},
-	Dependencies: []string{},
+	Dependencies: []string{
+		"unversioned.Time"},
 },
 "rbac.ClusterRole": {
 	Schema: spec.Schema{

@@ -16973,11 +16987,25 @@ var OpenAPIDefinitions *common.OpenAPIDefinitions = &common.OpenAPIDefinitions{
 						Format: "int32",
 					},
 				},
+				"disruptedPods": {
+					SchemaProps: spec.SchemaProps{
+						Description: "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by the PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there, it will be removed from the list automatically by the PodDisruptionBudget controller after some time. If everything goes smoothly, this map should be empty most of the time. A large number of entries in the map may indicate problems with pod deletions.",
+						Type:        []string{"object"},
+						AdditionalProperties: &spec.SchemaOrBool{
+							Schema: &spec.Schema{
+								SchemaProps: spec.SchemaProps{
+									Ref: spec.MustCreateRef("#/definitions/unversioned.Time"),
+								},
+							},
+						},
+					},
+				},
 			},
-			Required: []string{"disruptionsAllowed", "currentHealthy", "desiredHealthy", "expectedPods"},
+			Required: []string{"disruptionsAllowed", "currentHealthy", "desiredHealthy", "expectedPods", "disruptedPods"},
 		},
 	},
-	Dependencies: []string{},
+	Dependencies: []string{
+		"unversioned.Time"},
 },
 "v1beta1.PodSecurityPolicy": {
 	Schema: spec.Schema{
@@ -18,6 +18,7 @@ package etcd

 import (
 	"fmt"
+	"time"

 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/rest"

@@ -29,6 +30,16 @@ import (
 	"k8s.io/kubernetes/pkg/runtime"
 )

+const (
+	// MaxDisruptedPodSize is the max size of PodDisruptionBudgetStatus.DisruptedPods. The API server eviction
+	// subresource handler will refuse to evict pods covered by the corresponding PDB
+	// if the size of the map exceeds this value. It means a large number of
+	// evictions have been approved by the API server but not noticed by the PDB controller yet.
+	// This situation should self-correct because the PDB controller removes
+	// entries from the map automatically after the PDB DeletionTimeout regardless.
+	MaxDisruptedPodSize = 2000
+)
+
 func newEvictionStorage(store *registry.Store, podDisruptionBudgetClient policyclient.PodDisruptionBudgetsGetter) *EvictionREST {
 	return &EvictionREST{store: store, podDisruptionBudgetClient: podDisruptionBudgetClient}
 }

@@ -72,7 +83,7 @@ func (r *EvictionREST) Create(ctx api.Context, obj runtime.Object) (runtime.Obje

 	// If it was false already, or if it becomes false during the course of our retries,
 	// raise an error marked as a 429.
-	ok, err := r.checkAndDecrement(pod.Namespace, pdb)
+	ok, err := r.checkAndDecrement(pod.Namespace, pod.Name, pdb)
 	if err != nil {
 		return nil, err
 	}

@@ -104,14 +115,25 @@
 	return &unversioned.Status{Status: unversioned.StatusSuccess}, nil
 }

-func (r *EvictionREST) checkAndDecrement(namespace string, pdb policy.PodDisruptionBudget) (ok bool, err error) {
+func (r *EvictionREST) checkAndDecrement(namespace string, podName string, pdb policy.PodDisruptionBudget) (ok bool, err error) {
 	if pdb.Status.PodDisruptionsAllowed < 0 {
 		return false, fmt.Errorf("pdb disruptions allowed is negative")
 	}
+	if len(pdb.Status.DisruptedPods) > MaxDisruptedPodSize {
+		return false, fmt.Errorf("DisruptedPods map too big - too many evictions not confirmed by PDB controller")
+	}
 	if pdb.Status.PodDisruptionsAllowed == 0 {
 		return false, nil
 	}
 	pdb.Status.PodDisruptionsAllowed--
+	if pdb.Status.DisruptedPods == nil {
+		pdb.Status.DisruptedPods = make(map[string]unversioned.Time)
+	}
+	// The eviction handler needs to inform the PDB controller that it is about to delete a pod,
+	// so the controller should not consider it as available in calculations when updating PodDisruptionsAllowed.
+	// If the pod is not deleted within a reasonable time limit, the PDB controller will assume that it won't
+	// be deleted at all and remove it from the DisruptedPods map.
+	pdb.Status.DisruptedPods[podName] = unversioned.Time{Time: time.Now()}
 	if _, err := r.podDisruptionBudgetClient.PodDisruptionBudgets(namespace).UpdateStatus(&pdb); err != nil {
 		return false, err
 	}
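For context on how this handler is reached: a client evicts a pod by POSTing an Eviction object to the pod's eviction subresource. The sketch below is illustrative, not part of this change; it assumes an unauthenticated local proxy (e.g. kubectl proxy on 127.0.0.1:8001) and the policy/v1beta1 API of this era. On success the pod is recorded in pdb.Status.DisruptedPods, while a PDB rejection surfaces as HTTP 429.

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Eviction request for pod "mypod" in namespace "default" (names are examples).
	body := []byte(`{
		"apiVersion": "policy/v1beta1",
		"kind": "Eviction",
		"metadata": {"name": "mypod", "namespace": "default"}
	}`)
	url := "http://127.0.0.1:8001/api/v1/namespaces/default/pods/mypod/eviction"
	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// A 2xx status means the eviction was admitted (and the pod was added to
	// pdb.Status.DisruptedPods); 429 means the PDB currently forbids it.
	fmt.Println("status:", resp.Status)
}
```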