Reworking kube-proxy to only compute endpointChanges on apply.
Computing EndpointChanges is a relatively expensive operation for kube-proxy when EndpointSlices are used. Previously this was computed on every EndpointSlice update, which became quite inefficient at high levels of scale, where multiple EndpointSlice update events can be triggered before a single syncProxyRules call. Profiling showed that computing changes on each update could consume ~80% of total kube-proxy CPU utilization at scale; this change reduced that to as little as 3%. The difference is minimal when there is a 1:1 relationship between EndpointSlice updates and proxier syncs; the benefit comes primarily when many EndpointSlice updates occur between proxier sync loops.
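The optimization follows a "record now, compute on apply" pattern: informer events only record the latest pending state per service, and the expensive merge into an endpoints map happens once per proxier sync. Below is a minimal sketch of that idea. The method names updatePending and checkoutChanges come from the diff, but the types, the string-keyed maps, and the placeholder comparison are simplifications for illustration, not the actual kube-proxy implementation.

package main

import (
	"fmt"
	"sync"
)

// sliceCache accumulates pending endpoint updates keyed by service.
// This is a simplified stand-in for kube-proxy's EndpointSliceCache.
type sliceCache struct {
	mu      sync.Mutex
	applied map[string][]string // service -> endpoints currently in effect
	pending map[string][]string // service -> endpoints waiting to be applied
}

func newSliceCache() *sliceCache {
	return &sliceCache{
		applied: map[string][]string{},
		pending: map[string][]string{},
	}
}

// updatePending is cheap: it only records the latest state for the service.
func (c *sliceCache) updatePending(service string, endpoints []string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.pending[service] = endpoints
}

// checkoutChanges does the expensive work once, at sync time, and returns
// only the services whose endpoints actually changed since the last apply.
func (c *sliceCache) checkoutChanges() map[string][]string {
	c.mu.Lock()
	defer c.mu.Unlock()
	changed := map[string][]string{}
	for svc, eps := range c.pending {
		// Placeholder comparison; the real cache compares structured slice info.
		if fmt.Sprint(c.applied[svc]) != fmt.Sprint(eps) {
			changed[svc] = eps
			c.applied[svc] = eps
		}
		delete(c.pending, svc)
	}
	return changed
}

func main() {
	cache := newSliceCache()
	// Many updates can arrive between syncs; only the last recorded state matters.
	cache.updatePending("default/svc1", []string{"10.0.1.0"})
	cache.updatePending("default/svc1", []string{"10.0.1.0", "10.0.1.1"})
	fmt.Println(cache.checkoutChanges()) // map[default/svc1:[10.0.1.0 10.0.1.1]]
	fmt.Println(cache.checkoutChanges()) // map[] (nothing pending)
}

However many updates arrive between syncs, the change computation runs only once per checkout, which is the behavior the new tests exercise.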
@@ -153,10 +153,10 @@ func TestEndpointsMapFromESC(t *testing.T) {
 			esCache := NewEndpointSliceCache(tc.hostname, nil, nil, nil)
 
 			for _, endpointSlice := range tc.endpointSlices {
-				esCache.Update(endpointSlice)
+				esCache.updatePending(endpointSlice, false)
 			}
 
-			compareEndpointsMapsStr(t, esCache.EndpointsMap(tc.namespacedName), tc.expectedMap)
+			compareEndpointsMapsStr(t, esCache.getEndpointsMap(tc.namespacedName, esCache.trackerByServiceMap[tc.namespacedName].pending), tc.expectedMap)
 		})
 	}
 }
@@ -185,17 +185,153 @@ func TestEndpointInfoByServicePort(t *testing.T) {
 	}
 
 	for name, tc := range testCases {
-		esCache := NewEndpointSliceCache(tc.hostname, nil, nil, nil)
 		t.Run(name, func(t *testing.T) {
-			for _, endpointSlice := range tc.endpointSlices {
-				esCache.Update(endpointSlice)
-			}
+			esCache := NewEndpointSliceCache(tc.hostname, nil, nil, nil)
+
+			for _, endpointSlice := range tc.endpointSlices {
+				esCache.updatePending(endpointSlice, false)
+			}
 
-			got := esCache.endpointInfoByServicePort(tc.namespacedName)
-			if !reflect.DeepEqual(got, tc.expectedMap) {
-				t.Errorf("[%s] endpointInfoByServicePort does not match. Want: %+v, Got: %+v", name, tc.expectedMap, got)
-			}
+			got := esCache.endpointInfoByServicePort(tc.namespacedName, esCache.trackerByServiceMap[tc.namespacedName].pending)
+			if !reflect.DeepEqual(got, tc.expectedMap) {
+				t.Errorf("endpointInfoByServicePort does not match. Want: %+v, Got: %+v", tc.expectedMap, got)
+			}
 		})
 	}
 }
+
+func TestEsInfoChanged(t *testing.T) {
+	p80 := int32(80)
+	p443 := int32(443)
+	tcpProto := v1.ProtocolTCP
+	port80 := discovery.EndpointPort{Port: &p80, Name: utilpointer.StringPtr("http"), Protocol: &tcpProto}
+	port443 := discovery.EndpointPort{Port: &p443, Name: utilpointer.StringPtr("https"), Protocol: &tcpProto}
+	endpoint1 := discovery.Endpoint{Addresses: []string{"10.0.1.0"}}
+	endpoint2 := discovery.Endpoint{Addresses: []string{"10.0.1.1"}}
+
+	objMeta := metav1.ObjectMeta{
+		Name:      "foo",
+		Namespace: "bar",
+		Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
+	}
+
+	testCases := map[string]struct {
+		cache         *EndpointSliceCache
+		initialSlice  *discovery.EndpointSlice
+		updatedSlice  *discovery.EndpointSlice
+		expectChanged bool
+	}{
+		"identical slices, ports only": {
+			cache: NewEndpointSliceCache("", nil, nil, nil),
+			initialSlice: &discovery.EndpointSlice{
+				ObjectMeta: objMeta,
+				Ports:      []discovery.EndpointPort{port80},
+			},
+			updatedSlice: &discovery.EndpointSlice{
+				ObjectMeta: objMeta,
+				Ports:      []discovery.EndpointPort{port80},
+			},
+			expectChanged: false,
+		},
+		"identical slices, ports out of order": {
+			cache: NewEndpointSliceCache("", nil, nil, nil),
+			initialSlice: &discovery.EndpointSlice{
+				ObjectMeta: objMeta,
+				Ports:      []discovery.EndpointPort{port443, port80},
+			},
+			updatedSlice: &discovery.EndpointSlice{
+				ObjectMeta: objMeta,
+				Ports:      []discovery.EndpointPort{port80, port443},
+			},
+			expectChanged: false,
+		},
+		"port removed": {
+			cache: NewEndpointSliceCache("", nil, nil, nil),
+			initialSlice: &discovery.EndpointSlice{
+				ObjectMeta: objMeta,
+				Ports:      []discovery.EndpointPort{port443, port80},
+			},
+			updatedSlice: &discovery.EndpointSlice{
+				ObjectMeta: objMeta,
+				Ports:      []discovery.EndpointPort{port443},
+			},
+			expectChanged: true,
+		},
+		"port added": {
+			cache: NewEndpointSliceCache("", nil, nil, nil),
+			initialSlice: &discovery.EndpointSlice{
+				ObjectMeta: objMeta,
+				Ports:      []discovery.EndpointPort{port443},
+			},
+			updatedSlice: &discovery.EndpointSlice{
+				ObjectMeta: objMeta,
+				Ports:      []discovery.EndpointPort{port443, port80},
+			},
+			expectChanged: true,
+		},
+		"identical with endpoints": {
+			cache: NewEndpointSliceCache("", nil, nil, nil),
+			initialSlice: &discovery.EndpointSlice{
+				ObjectMeta: objMeta,
+				Ports:      []discovery.EndpointPort{port443},
+				Endpoints:  []discovery.Endpoint{endpoint1, endpoint2},
+			},
+			updatedSlice: &discovery.EndpointSlice{
+				ObjectMeta: objMeta,
+				Ports:      []discovery.EndpointPort{port443},
+				Endpoints:  []discovery.Endpoint{endpoint1, endpoint2},
+			},
+			expectChanged: false,
+		},
+		"identical with endpoints out of order": {
+			cache: NewEndpointSliceCache("", nil, nil, nil),
+			initialSlice: &discovery.EndpointSlice{
+				ObjectMeta: objMeta,
+				Ports:      []discovery.EndpointPort{port443},
+				Endpoints:  []discovery.Endpoint{endpoint1, endpoint2},
+			},
+			updatedSlice: &discovery.EndpointSlice{
+				ObjectMeta: objMeta,
+				Ports:      []discovery.EndpointPort{port443},
+				Endpoints:  []discovery.Endpoint{endpoint2, endpoint1},
+			},
+			expectChanged: false,
+		},
+		"identical with endpoint added": {
+			cache: NewEndpointSliceCache("", nil, nil, nil),
+			initialSlice: &discovery.EndpointSlice{
+				ObjectMeta: objMeta,
+				Ports:      []discovery.EndpointPort{port443},
+				Endpoints:  []discovery.Endpoint{endpoint1},
+			},
+			updatedSlice: &discovery.EndpointSlice{
+				ObjectMeta: objMeta,
+				Ports:      []discovery.EndpointPort{port443},
+				Endpoints:  []discovery.Endpoint{endpoint2, endpoint1},
+			},
+			expectChanged: true,
+		},
+	}
+
+	for name, tc := range testCases {
+		t.Run(name, func(t *testing.T) {
+			if tc.initialSlice != nil {
+				tc.cache.updatePending(tc.initialSlice, false)
+				tc.cache.checkoutChanges()
+			}
+
+			serviceKey, sliceKey, err := endpointSliceCacheKeys(tc.updatedSlice)
+			if err != nil {
+				t.Fatalf("Expected no error calling endpointSliceCacheKeys(): %v", err)
+			}
+
+			esInfo := newEndpointSliceInfo(tc.updatedSlice, false)
+			changed := tc.cache.esInfoChanged(serviceKey, sliceKey, esInfo)
+
+			if tc.expectChanged != changed {
+				t.Errorf("Expected esInfoChanged() to return %t, got %t", tc.expectChanged, changed)
+			}
+		})
+	}
+}
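The "out of order" cases in TestEsInfoChanged expect no change, which implies the cached slice info is normalized so that port and endpoint ordering does not count as a difference. The sketch below illustrates one way to get that property with a sort-then-compare check. The sliceInfo type and infoChanged function are hypothetical stand-ins for this example, not the actual esInfoChanged or newEndpointSliceInfo code.

package main

import (
	"fmt"
	"reflect"
	"sort"
)

// sliceInfo is a toy analogue of the cached per-slice info: just the parts
// the tests above compare (ports and endpoint addresses).
type sliceInfo struct {
	Ports     []string // e.g. "http/80"
	Addresses []string // e.g. "10.0.1.0"
}

// normalized returns a copy with ports and addresses sorted, so two infos
// that differ only in ordering compare as equal.
func (s sliceInfo) normalized() sliceInfo {
	out := sliceInfo{
		Ports:     append([]string(nil), s.Ports...),
		Addresses: append([]string(nil), s.Addresses...),
	}
	sort.Strings(out.Ports)
	sort.Strings(out.Addresses)
	return out
}

// infoChanged reports whether the normalized contents differ.
func infoChanged(oldInfo, newInfo sliceInfo) bool {
	return !reflect.DeepEqual(oldInfo.normalized(), newInfo.normalized())
}

func main() {
	a := sliceInfo{Ports: []string{"https/443", "http/80"}, Addresses: []string{"10.0.1.0", "10.0.1.1"}}
	b := sliceInfo{Ports: []string{"http/80", "https/443"}, Addresses: []string{"10.0.1.1", "10.0.1.0"}}
	fmt.Println(infoChanged(a, b)) // false: same content, different order
	c := sliceInfo{Ports: []string{"http/80"}, Addresses: []string{"10.0.1.0"}}
	fmt.Println(infoChanged(a, c)) // true: a port and an address were removed
}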