Ensure the EndpointSlice controller does not create EndpointSlices for Services that are being deleted.

This avoids the controller conflicting with garbage collection as it cleans up a deleted Service's EndpointSlices.
parent c5941e283f
commit 3f593710a7
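The fix hinges on metadata.deletionTimestamp: once a delete is issued, the API server sets that field and the Service stays visible until its finalizers are removed, so any EndpointSlices created in that window race with the garbage collector's cascading delete. A minimal sketch of that signal (illustrative, not from this commit; the pendingDeletion helper is hypothetical):

// Sketch only: shows how pending deletion is visible on a Service object.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// pendingDeletion is a hypothetical helper: a non-nil deletionTimestamp means
// the object has been deleted and is only waiting on finalizers.
func pendingDeletion(svc *v1.Service) bool {
	return svc.DeletionTimestamp != nil
}

func main() {
	now := metav1.Now()
	svc := &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "demo", DeletionTimestamp: &now}}
	fmt.Println(pendingDeletion(svc)) // true: creating dependents now would race the GC
}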
@@ -102,6 +102,25 @@ func TestSyncServiceNoSelector(t *testing.T) {
 	assert.Len(t, client.Actions(), 0)
 }
 
+// Ensure SyncService for service with pending deletion results in no action
+func TestSyncServicePendingDeletion(t *testing.T) {
+	ns := metav1.NamespaceDefault
+	serviceName := "testing-1"
+	deletionTimestamp := metav1.Now()
+	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	esController.serviceStore.Add(&v1.Service{
+		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns, DeletionTimestamp: &deletionTimestamp},
+		Spec: v1.ServiceSpec{
+			Selector: map[string]string{"foo": "bar"},
+			Ports:    []v1.ServicePort{{TargetPort: intstr.FromInt(80)}},
+		},
+	})
+
+	err := esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
+	assert.Nil(t, err)
+	assert.Len(t, client.Actions(), 0)
+}
+
 // Ensure SyncService for service with selector but no pods results in placeholder EndpointSlice
 func TestSyncServiceWithSelector(t *testing.T) {
 	ns := metav1.NamespaceDefault
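The controller-side guard this test exercises is not part of this excerpt; the test only asserts that no client actions are recorded. A hedged sketch of what syncService presumably does (the early return and its placement are assumptions; SplitMetaNamespaceKey is the standard client-go key helper):

package example

import (
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
)

// syncServiceGuard is a hypothetical sketch: resolve the Service from the
// cache key and skip all work once deletion is pending, so no client actions
// are recorded — which is exactly what the test above asserts.
func syncServiceGuard(serviceLister corelisters.ServiceLister, key string) error {
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
	}
	service, err := serviceLister.Services(namespace).Get(name)
	if err != nil {
		return err // the real controller also handles not-found cleanup here
	}
	if service.DeletionTimestamp != nil {
		return nil // pending deletion: leave EndpointSlices to the garbage collector
	}
	// ... reconcile EndpointSlices for the Service ...
	return nil
}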
@@ -206,18 +206,23 @@ func (r *reconciler) finalize(
 		}
 	}
 
-	for _, endpointSlice := range slicesToCreate {
-		addTriggerTimeAnnotation(endpointSlice, triggerTime)
-		createdSlice, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
-		if err != nil {
-			// If the namespace is terminating, creates will continue to fail. Simply drop the item.
-			if errors.HasStatusCause(err, corev1.NamespaceTerminatingCause) {
-				return nil
-			}
-			errs = append(errs, fmt.Errorf("Error creating EndpointSlice for Service %s/%s: %v", service.Namespace, service.Name, err))
-		} else {
-			r.endpointSliceTracker.Update(createdSlice)
-			metrics.EndpointSliceChanges.WithLabelValues("create").Inc()
-		}
-	}
+	// Don't create new EndpointSlices if the Service is pending deletion. This
+	// is to avoid a potential race condition with the garbage collector where
+	// it tries to delete EndpointSlices as this controller replaces them.
+	if service.DeletionTimestamp == nil {
+		for _, endpointSlice := range slicesToCreate {
+			addTriggerTimeAnnotation(endpointSlice, triggerTime)
+			createdSlice, err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Create(context.TODO(), endpointSlice, metav1.CreateOptions{})
+			if err != nil {
+				// If the namespace is terminating, creates will continue to fail. Simply drop the item.
+				if errors.HasStatusCause(err, corev1.NamespaceTerminatingCause) {
+					return nil
+				}
+				errs = append(errs, fmt.Errorf("Error creating EndpointSlice for Service %s/%s: %v", service.Namespace, service.Name, err))
+			} else {
+				r.endpointSliceTracker.Update(createdSlice)
+				metrics.EndpointSliceChanges.WithLabelValues("create").Inc()
+			}
+		}
+	}
 
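Note that only creates sit inside the new guard; updates and deletes still run when deletionTimestamp is set, so existing slices stay consistent until the garbage collector removes them. For context on the error handling kept inside the guard, errors.HasStatusCause is the helper from k8s.io/apimachinery/pkg/api/errors that checks whether an API error carries a given status cause. A minimal standalone sketch of the pattern (the dropIfTerminating wrapper is illustrative):

package example

import (
	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

// dropIfTerminating reports whether a failed create can simply be dropped
// because the target namespace is being torn down; retrying is futile since
// every subsequent create would fail the same way.
func dropIfTerminating(err error) bool {
	return apierrors.HasStatusCause(err, corev1.NamespaceTerminatingCause)
}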
@@ -742,6 +742,164 @@ func TestReconcileEndpointSlicesMetrics(t *testing.T) {
 	expectMetrics(t, expectedMetrics{desiredSlices: 1, actualSlices: 1, desiredEndpoints: 10, addedPerSync: 20, removedPerSync: 10, numCreated: 1, numUpdated: 1, numDeleted: 0})
 }
 
+// When a Service has a non-nil deletionTimestamp we want to avoid creating any
+// new EndpointSlices but continue to allow updates and deletes through. This
+// test uses 3 EndpointSlices, 1 "to-create", 1 "to-update", and 1 "to-delete".
+// Each test case exercises different combinations of calls to finalize with
+// those resources.
+func TestReconcilerFinalizeSvcDeletionTimestamp(t *testing.T) {
+	now := metav1.Now()
+
+	testCases := []struct {
+		name               string
+		deletionTimestamp  *metav1.Time
+		attemptCreate      bool
+		attemptUpdate      bool
+		attemptDelete      bool
+		expectCreatedSlice bool
+		expectUpdatedSlice bool
+		expectDeletedSlice bool
+	}{{
+		name:               "Attempt create and update, nil deletion timestamp",
+		deletionTimestamp:  nil,
+		attemptCreate:      true,
+		attemptUpdate:      true,
+		expectCreatedSlice: true,
+		expectUpdatedSlice: true,
+		expectDeletedSlice: true,
+	}, {
+		name:               "Attempt create and update, deletion timestamp set",
+		deletionTimestamp:  &now,
+		attemptCreate:      true,
+		attemptUpdate:      true,
+		expectCreatedSlice: false,
+		expectUpdatedSlice: true,
+		expectDeletedSlice: true,
+	}, {
+		// Slice scheduled for creation is transitioned to update of Slice
+		// scheduled for deletion.
+		name:               "Attempt create, update, and delete, nil deletion timestamp, recycling in action",
+		deletionTimestamp:  nil,
+		attemptCreate:      true,
+		attemptUpdate:      true,
+		attemptDelete:      true,
+		expectCreatedSlice: false,
+		expectUpdatedSlice: true,
+		expectDeletedSlice: true,
+	}, {
+		// Slice scheduled for creation is transitioned to update of Slice
+		// scheduled for deletion.
+		name:               "Attempt create, update, and delete, deletion timestamp set, recycling in action",
+		deletionTimestamp:  &now,
+		attemptCreate:      true,
+		attemptUpdate:      true,
+		attemptDelete:      true,
+		expectCreatedSlice: false,
+		expectUpdatedSlice: true,
+		expectDeletedSlice: true,
+	}, {
+		// Update and delete continue to work when deletionTimestamp is set.
+		name:               "Attempt update delete, deletion timestamp set",
+		deletionTimestamp:  &now,
+		attemptCreate:      false,
+		attemptUpdate:      true,
+		attemptDelete:      true,
+		expectCreatedSlice: false,
+		expectUpdatedSlice: true,
+		expectDeletedSlice: false,
+	}}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			client := newClientset()
+			setupMetrics()
+			r := newReconciler(client, []*corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}, defaultMaxEndpointsPerSlice)
+
+			namespace := "test"
+			svc, endpointMeta := newServiceAndEndpointMeta("foo", namespace)
+			svc.DeletionTimestamp = tc.deletionTimestamp
+			esToCreate := &discovery.EndpointSlice{
+				ObjectMeta:  metav1.ObjectMeta{Name: "to-create"},
+				AddressType: endpointMeta.AddressType,
+				Ports:       endpointMeta.Ports,
+			}
+
+			// Add EndpointSlice that can be updated.
+			esToUpdate, err := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), &discovery.EndpointSlice{
+				ObjectMeta:  metav1.ObjectMeta{Name: "to-update"},
+				AddressType: endpointMeta.AddressType,
+				Ports:       endpointMeta.Ports,
+			}, metav1.CreateOptions{})
+			if err != nil {
+				t.Fatalf("Expected no error creating EndpointSlice during test setup, got %v", err)
+			}
+			// Add an endpoint so we can see if this has actually been updated by
+			// finalize func.
+			esToUpdate.Endpoints = []discovery.Endpoint{{Addresses: []string{"10.2.3.4"}}}
+
+			// Add EndpointSlice that can be deleted.
+			esToDelete, err := client.DiscoveryV1beta1().EndpointSlices(namespace).Create(context.TODO(), &discovery.EndpointSlice{
+				ObjectMeta:  metav1.ObjectMeta{Name: "to-delete"},
+				AddressType: endpointMeta.AddressType,
+				Ports:       endpointMeta.Ports,
+			}, metav1.CreateOptions{})
+			if err != nil {
+				t.Fatalf("Expected no error creating EndpointSlice during test setup, got %v", err)
+			}
+
+			slicesToCreate := []*discovery.EndpointSlice{}
+			if tc.attemptCreate {
+				slicesToCreate = append(slicesToCreate, esToCreate.DeepCopy())
+			}
+			slicesToUpdate := []*discovery.EndpointSlice{}
+			if tc.attemptUpdate {
+				slicesToUpdate = append(slicesToUpdate, esToUpdate.DeepCopy())
+			}
+			slicesToDelete := []*discovery.EndpointSlice{}
+			if tc.attemptDelete {
+				slicesToDelete = append(slicesToDelete, esToDelete.DeepCopy())
+			}
+
+			err = r.finalize(&svc, slicesToCreate, slicesToUpdate, slicesToDelete, time.Now())
+			if err != nil {
+				t.Errorf("Error calling r.finalize(): %v", err)
+			}
+
+			fetchedSlices := fetchEndpointSlices(t, client, namespace)
+
+			createdSliceFound := false
+			updatedSliceFound := false
+			deletedSliceFound := false
+			for _, epSlice := range fetchedSlices {
+				if epSlice.Name == esToCreate.Name {
+					createdSliceFound = true
+				}
+				if epSlice.Name == esToUpdate.Name {
+					updatedSliceFound = true
+					if tc.attemptUpdate && len(epSlice.Endpoints) != len(esToUpdate.Endpoints) {
+						t.Errorf("Expected EndpointSlice to be updated with %d endpoints, got %d endpoints", len(esToUpdate.Endpoints), len(epSlice.Endpoints))
+					}
+				}
+				if epSlice.Name == esToDelete.Name {
+					deletedSliceFound = true
+				}
+			}
+
+			if createdSliceFound != tc.expectCreatedSlice {
+				t.Errorf("Expected created EndpointSlice existence to be %t, got %t", tc.expectCreatedSlice, createdSliceFound)
+			}
+
+			if updatedSliceFound != tc.expectUpdatedSlice {
+				t.Errorf("Expected updated EndpointSlice existence to be %t, got %t", tc.expectUpdatedSlice, updatedSliceFound)
+			}
+
+			if deletedSliceFound != tc.expectDeletedSlice {
+				t.Errorf("Expected deleted EndpointSlice existence to be %t, got %t", tc.expectDeletedSlice, deletedSliceFound)
+			}
+		})
+	}
+}
+
 // Test Helpers
 
 func newReconciler(client *fake.Clientset, nodes []*corev1.Node, maxEndpointsPerSlice int32) *reconciler {
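The fetchEndpointSlices helper the test calls is defined elsewhere in this file and is not part of the diff. A hedged reconstruction, with the signature inferred from the call site above (the body is an assumption):

package example

import (
	"context"
	"testing"

	discovery "k8s.io/api/discovery/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// fetchEndpointSlices is a hypothetical reconstruction of the test helper:
// list all EndpointSlices in the namespace from the fake clientset and fail
// the test on any error.
func fetchEndpointSlices(t *testing.T, client *fake.Clientset, namespace string) []discovery.EndpointSlice {
	t.Helper()
	fetched, err := client.DiscoveryV1beta1().EndpointSlices(namespace).List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		t.Fatalf("Expected no error fetching EndpointSlices, got %v", err)
	}
	return fetched.Items
}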