Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 10:51:29 +00:00
fix metrics for placeholder slice
There is always a placeholder slice. The ServicePortCache logic assumed one EndpointSlice per Endpoint, but when there are multiple empty Endpoints we use just one placeholder slice, not multiple placeholder slices.
This commit is contained in:
parent 0d9689a55d
commit baecb1981e
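To make the accounting change concrete, here is a minimal, self-contained Go sketch of the calculation described above. It only models the arithmetic and does not use the real ServicePortCache types; the ceiling-division branch of numDesiredSlices is an assumption, since the hunk below stops at "return 1". A service whose port groups are all empty should count as one desired placeholder slice, not one per port group.

package main

import "fmt"

// numDesiredSlices mirrors the helper in the hunks below; the final
// ceiling-division branch is an assumption (the diff context ends earlier).
func numDesiredSlices(numEndpoints, maxEndpointsPerSlice int) int {
	if numEndpoints == 0 {
		return 0
	}
	if numEndpoints <= maxEndpointsPerSlice {
		return 1
	}
	return (numEndpoints + maxEndpointsPerSlice - 1) / maxEndpointsPerSlice
}

func main() {
	// One service with two port groups, both currently without endpoints.
	endpointsPerPortMap := []int{0, 0}

	desiredSlices := 0
	for _, n := range endpointsPerPortMap {
		desiredSlices += numDesiredSlices(n, 100)
	}
	// There is always exactly one placeholder slice per service,
	// no matter how many empty port groups it has.
	if desiredSlices == 0 {
		desiredSlices = 1
	}
	fmt.Println(desiredSlices) // prints 1; before this fix each empty group counted as 1, giving 2
}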
@@ -91,6 +91,10 @@ func (spc *ServicePortCache) totals(maxEndpointsPerSlice int) (int, int, int) {
 		actualSlices += eInfo.Slices
 		desiredSlices += numDesiredSlices(eInfo.Endpoints, maxEndpointsPerSlice)
 	}
+	// there is always a placeholder slice
+	if desiredSlices == 0 {
+		desiredSlices = 1
+	}
 	return actualSlices, desiredSlices, endpoints
 }

@@ -148,6 +152,9 @@ func (c *Cache) updateMetrics() {
 // numDesiredSlices calculates the number of EndpointSlices that would exist
 // with ideal endpoint distribution.
 func numDesiredSlices(numEndpoints, maxEndpointsPerSlice int) int {
+	if numEndpoints == 0 {
+		return 0
+	}
 	if numEndpoints <= maxEndpointsPerSlice {
 		return 1
 	}
@@ -60,6 +60,23 @@ func TestNumEndpointsAndSlices(t *testing.T) {
 	expectNumEndpointsAndSlices(t, c, 4, 4, 160)
 }

+func TestPlaceHolderSlice(t *testing.T) {
+	c := NewCache(int32(100))
+
+	p80 := int32(80)
+	p443 := int32(443)
+
+	pmKey80443 := endpointutil.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}, {Port: &p443}})
+	pmKey80 := endpointutil.NewPortMapKey([]discovery.EndpointPort{{Port: &p80}})
+
+	sp := NewServicePortCache()
+	sp.Set(pmKey80, EfficiencyInfo{Endpoints: 0, Slices: 1})
+	sp.Set(pmKey80443, EfficiencyInfo{Endpoints: 0, Slices: 1})
+
+	c.UpdateServicePortCache(types.NamespacedName{Namespace: "ns1", Name: "svc1"}, sp)
+	expectNumEndpointsAndSlices(t, c, 1, 2, 0)
+}
+
 func expectNumEndpointsAndSlices(t *testing.T, c *Cache, desired int, actual int, numEndpoints int) {
 	t.Helper()
 	if c.numSlicesDesired != desired {
@@ -223,11 +223,11 @@ func (r *reconciler) reconcileByAddressType(service *corev1.Service, pods []*cor
 			slicesToDelete = slicesToDelete[:0]
 		} else {
 			slicesToCreate = append(slicesToCreate, placeholderSlice)
-			spMetrics.Set(endpointutil.NewPortMapKey(placeholderSlice.Ports), metrics.EfficiencyInfo{
-				Endpoints: 0,
-				Slices:    1,
-			})
 		}
+		spMetrics.Set(endpointutil.NewPortMapKey(placeholderSlice.Ports), metrics.EfficiencyInfo{
+			Endpoints: 0,
+			Slices:    1,
+		})
 	}

 	metrics.EndpointsAddedPerSync.WithLabelValues().Observe(float64(totalAdded))
@@ -481,7 +481,7 @@ func TestReconcile1EndpointSlice(t *testing.T) {
 		{
 			desc:     "Existing placeholder that's the same",
 			existing: newEndpointSlice(&svc, &endpointMeta{Ports: []discovery.EndpointPort{}, AddressType: discovery.AddressTypeIPv4}),
-			wantMetrics: expectedMetrics{desiredSlices: 0, actualSlices: 0, desiredEndpoints: 0, addedPerSync: 0, removedPerSync: 0, numCreated: 0, numUpdated: 0, numDeleted: 0, slicesChangedPerSync: 0},
+			wantMetrics: expectedMetrics{desiredSlices: 1, actualSlices: 1, desiredEndpoints: 0, addedPerSync: 0, removedPerSync: 0, numCreated: 0, numUpdated: 0, numDeleted: 0, slicesChangedPerSync: 0},
 		},
 		{
 			desc: "Existing placeholder that's different",
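For the two reconciler hunks above, the following minimal stand-alone sketch (an assumed simplification, not the reconciler's actual code; all names here are placeholders) illustrates why the "Existing placeholder that's the same" case now expects desiredSlices: 1 and actualSlices: 1: the placeholder's EfficiencyInfo is recorded whether or not the placeholder slice had to be recreated.

package main

import "fmt"

// efficiencyInfo stands in for metrics.EfficiencyInfo in this sketch.
type efficiencyInfo struct {
	Endpoints int
	Slices    int
}

func main() {
	byPortMapKey := map[string]efficiencyInfo{}

	// The existing slice already is the placeholder, so nothing is created.
	placeholderUnchanged := true
	if !placeholderUnchanged {
		// the reconciler would append a new placeholder slice here
	}
	// After this commit the metric entry is recorded on both paths,
	// not only when a new placeholder slice is created.
	byPortMapKey["no ports"] = efficiencyInfo{Endpoints: 0, Slices: 1}

	actualSlices, desiredSlices := 0, 0
	for _, e := range byPortMapKey {
		// empty port groups no longer contribute to desiredSlices
		actualSlices += e.Slices
	}
	// there is always a placeholder slice
	if desiredSlices == 0 {
		desiredSlices = 1
	}
	fmt.Println(desiredSlices, actualSlices) // 1 1, matching the updated test expectation
}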