Mirror of https://github.com/k3s-io/kubernetes.git

move the ignore logic higher up to the reconciler
Signed-off-by: Sanskar Jaiswal <jaiswalsanskar078@gmail.com>

commit 4314e58ae5
parent 7d8048dd59

@@ -364,6 +364,9 @@ func (c *Controller) syncService(key string) error {
 		return err
 	}
 
+	// Drop EndpointSlices that have been marked for deletion to prevent the controller from getting stuck.
+	endpointSlices = dropEndpointSlicesPendingDeletion(endpointSlices)
+
 	if c.endpointSliceTracker.StaleSlices(service, endpointSlices) {
 		return endpointsliceutil.NewStaleInformerCache("EndpointSlice informer cache is out of date")
 	}

@@ -557,3 +560,14 @@ func trackSync(err error) {
 	}
 	endpointslicemetrics.EndpointSliceSyncs.WithLabelValues(metricLabel).Inc()
 }
+
+func dropEndpointSlicesPendingDeletion(endpointSlices []*discovery.EndpointSlice) []*discovery.EndpointSlice {
+	n := 0
+	for _, endpointSlice := range endpointSlices {
+		if endpointSlice.DeletionTimestamp == nil {
+			endpointSlices[n] = endpointSlice
+			n++
+		}
+	}
+	return endpointSlices[:n]
+}

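A note on the helper: dropEndpointSlicesPendingDeletion uses the common Go in-place filter idiom, compacting the kept elements to the front of the slice and returning the shortened prefix, so no new backing array is allocated. The trade-off is that the input slice is mutated, which the unit test further down has to work around. A minimal standalone sketch of the same idiom (the names and values are illustrative, not part of the patch):

package main

import "fmt"

// compact keeps non-negative values, filtering in place the same way
// dropEndpointSlicesPendingDeletion does: it reuses the input's backing array.
func compact(xs []int) []int {
	n := 0
	for _, x := range xs {
		if x >= 0 {
			xs[n] = x
			n++
		}
	}
	return xs[:n]
}

func main() {
	in := []int{-1, 2, 3}
	out := compact(in)
	fmt.Println(out) // [2 3]
	// The input was overwritten in place; its tail still holds a stale value.
	fmt.Println(in) // [2 3 3]
}

This is why Test_dropEndpointSlicesPendingDeletion below captures epSlice2 and epSlice3 before calling the function instead of re-reading endpointSlices afterwards.
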
@@ -241,6 +241,52 @@ func TestSyncServicePodSelection(t *testing.T) {
 	assert.EqualValues(t, endpoint.TargetRef, &v1.ObjectReference{Kind: "Pod", Namespace: ns, Name: pod1.Name})
 }
 
+func TestSyncServiceEndpointSlicePendingDeletion(t *testing.T) {
+	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	ns := metav1.NamespaceDefault
+	serviceName := "testing-1"
+	service := createService(t, esController, ns, serviceName)
+	err := esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
+	assert.Nil(t, err, "Expected no error syncing service")
+
+	gvk := schema.GroupVersionKind{Version: "v1", Kind: "Service"}
+	ownerRef := metav1.NewControllerRef(service, gvk)
+
+	deletedTs := metav1.Now()
+	endpointSlice := &discovery.EndpointSlice{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            "epSlice-1",
+			Namespace:       ns,
+			OwnerReferences: []metav1.OwnerReference{*ownerRef},
+			Labels: map[string]string{
+				discovery.LabelServiceName: serviceName,
+				discovery.LabelManagedBy:   controllerName,
+			},
+			DeletionTimestamp: &deletedTs,
+		},
+		AddressType: discovery.AddressTypeIPv4,
+	}
+	err = esController.endpointSliceStore.Add(endpointSlice)
+	if err != nil {
+		t.Fatalf("Expected no error adding EndpointSlice: %v", err)
+	}
+	_, err = client.DiscoveryV1().EndpointSlices(ns).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
+	if err != nil {
+		t.Fatalf("Expected no error creating EndpointSlice: %v", err)
+	}
+
+	numActionsBefore := len(client.Actions())
+	err = esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
+	assert.Nil(t, err, "Expected no error syncing service")
+
+	// The EndpointSlice marked for deletion should be ignored by the controller, so the
+	// sync should produce no more than one client action (an update to the non-terminating
+	// EndpointSlice removing the trigger time annotation).
+	if len(client.Actions()) != numActionsBefore+1 {
+		t.Errorf("Expected 1 more action, got %d", len(client.Actions())-numActionsBefore)
+	}
+}
+
 // Ensure SyncService correctly selects and labels EndpointSlices.
 func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) {
 	client, esController := newController([]string{"node-1"}, time.Duration(0))

@@ -1808,3 +1854,60 @@ func (cmc *cacheMutationCheck) Check(t *testing.T) {
 		}
 	}
 }
+
+func Test_dropEndpointSlicesPendingDeletion(t *testing.T) {
+	now := metav1.Now()
+	endpointSlices := []*discovery.EndpointSlice{
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:              "epSlice1",
+				DeletionTimestamp: &now,
+			},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "epSlice2",
+			},
+			AddressType: discovery.AddressTypeIPv4,
+			Endpoints: []discovery.Endpoint{
+				{
+					Addresses: []string{"172.18.0.2"},
+				},
+			},
+		},
+		{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "epSlice3",
+			},
+			AddressType: discovery.AddressTypeIPv6,
+			Endpoints: []discovery.Endpoint{
+				{
+					Addresses: []string{"3001:0da8:75a3:0000:0000:8a2e:0370:7334"},
+				},
+			},
+		},
+	}
+
+	epSlice2 := endpointSlices[1]
+	epSlice3 := endpointSlices[2]
+
+	result := dropEndpointSlicesPendingDeletion(endpointSlices)
+
+	assert.Len(t, result, 2)
+	for _, epSlice := range result {
+		if epSlice.Name == "epSlice1" {
+			t.Errorf("Expected EndpointSlice marked for deletion to be dropped.")
+		}
+	}
+
+	// We deliberately avoid comparing against endpointSlices here, because
+	// `dropEndpointSlicesPendingDeletion` mutates the slice it receives, which
+	// would make such a comparison fragile. Checking the captured pointers
+	// directly lets us be sure the result contains exactly what we expect.
+	if !reflect.DeepEqual(epSlice2, result[0]) {
+		t.Errorf("EndpointSlice was unexpectedly mutated. Expected: %+v, Mutated: %+v", epSlice2, result[0])
+	}
+	if !reflect.DeepEqual(epSlice3, result[1]) {
+		t.Errorf("EndpointSlice was unexpectedly mutated. Expected: %+v, Mutated: %+v", epSlice3, result[1])
+	}
+}

@@ -100,9 +100,6 @@ func (est *EndpointSliceTracker) StaleSlices(service *v1.Service, endpointSlices
 		providedSlices[endpointSlice.UID] = endpointSlice.Generation
 		g, ok := gfs[endpointSlice.UID]
 		if ok && (g == deletionExpected || g > endpointSlice.Generation) {
-			if endpointSlice.DeletionTimestamp != nil {
-				continue
-			}
 			return true
 		}
 	}

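With the filter moved into the reconciler, StaleSlices no longer special-cases terminating slices and can flag staleness purely from generation bookkeeping. A toy model of the loop shown above, assuming (from the surrounding code) that deletionExpected is a sentinel generation recorded when a deletion has been requested but not yet observed; all names and values here are illustrative:

package main

import "fmt"

// deletionExpected mirrors the tracker's sentinel for "delete requested but
// not yet observed"; the concrete value is only for illustration.
const deletionExpected = int64(-1)

// stale models the hunk above: a provided slice is stale if the tracker
// expects it to be gone, or has recorded a newer generation than the
// informer cache is serving.
func stale(tracked, provided map[string]int64) bool {
	for uid, gen := range provided {
		if g, ok := tracked[uid]; ok && (g == deletionExpected || g > gen) {
			return true
		}
	}
	return false
}

func main() {
	tracked := map[string]int64{"slice-a": 3}
	fmt.Println(stale(tracked, map[string]int64{"slice-a": 2})) // true: cache lags the tracker
	fmt.Println(stale(tracked, map[string]int64{"slice-a": 3})) // false: cache is up to date
	fmt.Println(stale(map[string]int64{"slice-a": deletionExpected},
		map[string]int64{"slice-a": 3})) // true: deletion expected but the slice is still served
}

Before this commit the loop skipped slices with a non-nil DeletionTimestamp; moving that check into syncService keeps the tracker focused on generations, which is what the removed test case below reflects.
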
@@ -212,18 +212,6 @@ func TestEndpointSliceTrackerStaleSlices(t *testing.T) {
 		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
 		slicesParam:  []*discovery.EndpointSlice{},
 		expectNewer:  true,
-	}, {
-		name: "slice in params is has non nil deletion timestamp",
-		tracker: &EndpointSliceTracker{
-			generationsByService: map[types.NamespacedName]GenerationsBySlice{
-				{Name: "svc1", Namespace: "ns1"}: {
-					epSlice1.UID: epSlice1.Generation,
-				},
-			},
-		},
-		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
-		slicesParam:  []*discovery.EndpointSlice{epTerminatingSlice},
-		expectNewer:  false,
-	}}
 	}}
 
 	for _, tc := range testCases {