From 6b8b2ff975bbac48900bc9e7aef9036b994400bc Mon Sep 17 00:00:00 2001 From: Rob Scott Date: Mon, 14 Oct 2019 17:52:32 -0700 Subject: [PATCH] Adding new label to indicate what is managing an EndpointSlice This adds a new Label to EndpointSlices that will ensure that multiple controllers or entities can manage subsets of EndpointSlices. This label provides a way to indicate the controller or entity responsible for managing an EndpointSlice. To provide a seamless upgrade from the alpha release of EndpointSlices that did not support this label, a temporary annotation has been added on Services to indicate that this label has been initially set on EndpointSlices. That annotation will be set automatically by the EndpointSlice controller with this commit once appropriate Labels have been added on the corresponding EndpointSlices. --- .../endpointslice/endpointslice_controller.go | 80 +++++- .../endpointslice_controller_test.go | 242 +++++++++++++++--- pkg/controller/endpointslice/utils.go | 5 +- pkg/controller/endpointslice/utils_test.go | 5 +- .../discovery/v1alpha1/well_known_labels.go | 6 + 5 files changed, 304 insertions(+), 34 deletions(-) diff --git a/pkg/controller/endpointslice/endpointslice_controller.go b/pkg/controller/endpointslice/endpointslice_controller.go index 62fc453ecfb..ca51ffb9c82 100644 --- a/pkg/controller/endpointslice/endpointslice_controller.go +++ b/pkg/controller/endpointslice/endpointslice_controller.go @@ -54,6 +54,20 @@ const ( // 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, // 10.2s, 20.4s, 41s, 82s maxRetries = 15 + // controllerName is a unique value used with LabelManagedBy to indicate + // the component managing an EndpointSlice. + controllerName = "endpointslice-controller.k8s.io" + // managedBySetupAnnotation is set on a Service to indicate that + // EndpointSlices for the Service have already been configured with + // LabelManagedBy.
If this annotation is not set, all related EndpointSlices + // will have LabelManagedBy set to reference this controller if the label + // is not already set. Once all EndpointSlices are labeled, the Controller + // will set this annotation on the Service. + managedBySetupAnnotation = "endpointslice.kubernetes.io/managed-by-setup" + // managedBySetupCompleteValue represents the value of the + // managedBySetupAnnotation that indicates that the setup process has been + // completed for a Service. + managedBySetupCompleteValue = "true" ) // NewController creates and initializes a new Controller @@ -286,7 +300,28 @@ func (c *Controller) syncService(key string) error { return err } - esLabelSelector := labels.Set(map[string]string{discovery.LabelServiceName: service.Name}).AsSelectorPreValidated() + // With the goal of different controllers being able to manage different + // subsets of EndpointSlices, LabelManagedBy has been added to indicate + // which controller or entity manages an EndpointSlice. As part of this + // v1.16->v1.17 change, EndpointSlices will initially be assumed to be + // managed by this controller unless a label is set to indicate otherwise. + // To ensure a seamless upgrade process, the managedBySetupAnnotation is + // used to indicate that LabelManagedBy has been set initially for related + // EndpointSlices. If it hasn't been set to the expected value here, we call + // ensureSetupManagedByAnnotation() to set up LabelManagedBy on each + // EndpointSlice. + // TODO(robscott): Remove this before v1.18. 
+ err = c.ensureSetupManagedByAnnotation(service) + if err != nil { + c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToSetEndpointSliceManagedByLabel", + "Error adding managed-by Label to Endpoint Slices for Service %s/%s: %v", service.Namespace, service.Name, err) + return err + } + + esLabelSelector := labels.Set(map[string]string{ + discovery.LabelServiceName: service.Name, + discovery.LabelManagedBy: controllerName, + }).AsSelectorPreValidated() endpointSlices, err := c.endpointSliceLister.EndpointSlices(service.Namespace).List(esLabelSelector) if err != nil { @@ -337,6 +372,49 @@ func (c *Controller) onServiceDelete(obj interface{}) { c.queue.Add(key) } +// ensureSetupManagedByAnnotation selects all EndpointSlices for a Service and +// ensures they have LabelManagedBy set appropriately. This ensures that only +// one controller or entity is trying to manage a given EndpointSlice. This +// function provides backwards compatibility with the initial alpha release of +// EndpointSlices that did not include these labels. +// TODO(robscott): Remove this in time for v1.18. 
+func (c *Controller) ensureSetupManagedByAnnotation(service *v1.Service) error { + if managedBySetup, ok := service.Annotations[managedBySetupAnnotation]; ok && managedBySetup == managedBySetupCompleteValue { + return nil + } + + esLabelSelector := labels.Set(map[string]string{discovery.LabelServiceName: service.Name}).AsSelectorPreValidated() + endpointSlices, err := c.endpointSliceLister.EndpointSlices(service.Namespace).List(esLabelSelector) + + if err != nil { + c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToListEndpointSlices", + "Error listing Endpoint Slices for Service %s/%s: %v", service.Namespace, service.Name, err) + return err + } + + for _, endpointSlice := range endpointSlices { + if _, ok := endpointSlice.Labels[discovery.LabelManagedBy]; !ok { + if endpointSlice.Labels == nil { + endpointSlice.Labels = make(map[string]string) + } + + endpointSlice.Labels[discovery.LabelManagedBy] = controllerName + _, err = c.client.DiscoveryV1alpha1().EndpointSlices(endpointSlice.Namespace).Update(endpointSlice) + if err != nil { + return err + } + } + } + + if service.Annotations == nil { + service.Annotations = make(map[string]string) + } + + service.Annotations[managedBySetupAnnotation] = managedBySetupCompleteValue + _, err = c.client.CoreV1().Services(service.Namespace).Update(service) + return err +} + func (c *Controller) addPod(obj interface{}) { pod := obj.(*v1.Pod) services, err := c.serviceSelectorCache.GetPodServiceMemberships(c.serviceLister, pod) diff --git a/pkg/controller/endpointslice/endpointslice_controller_test.go b/pkg/controller/endpointslice/endpointslice_controller_test.go index 752d2e6df81..be65c299848 100644 --- a/pkg/controller/endpointslice/endpointslice_controller_test.go +++ b/pkg/controller/endpointslice/endpointslice_controller_test.go @@ -18,6 +18,7 @@ package endpointslice import ( "fmt" + "reflect" "testing" "time" @@ -28,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/informers" 
"k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/pkg/controller" endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint" @@ -100,7 +102,7 @@ func TestSyncServiceWithSelector(t *testing.T) { ns := metav1.NamespaceDefault serviceName := "testing-1" client, esController := newController([]string{"node-1"}) - standardSyncService(t, esController, ns, serviceName) + standardSyncService(t, esController, ns, serviceName, "true") expectActions(t, client.Actions(), 1, "create", "endpointslices") sliceList, err := client.DiscoveryV1alpha1().EndpointSlices(ns).List(metav1.ListOptions{}) @@ -166,7 +168,7 @@ func TestSyncServicePodSelection(t *testing.T) { pod2.Labels["foo"] = "boo" esController.podStore.Add(pod2) - standardSyncService(t, esController, ns, "testing-1") + standardSyncService(t, esController, ns, "testing-1", "true") expectActions(t, client.Actions(), 1, "create", "endpointslices") // an endpoint slice should be created, it should only reference pod1 (not pod2) @@ -180,40 +182,89 @@ func TestSyncServicePodSelection(t *testing.T) { assert.EqualValues(t, endpoint.TargetRef, &v1.ObjectReference{Kind: "Pod", Namespace: ns, Name: pod1.Name}) } -// Ensure SyncService correctly selects EndpointSlices. -func TestSyncServiceEndpointSliceSelection(t *testing.T) { +// Ensure SyncService correctly selects and labels EndpointSlices. 
+func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) { client, esController := newController([]string{"node-1"}) ns := metav1.NamespaceDefault serviceName := "testing-1" - // 3 slices, 2 with matching labels for our service + // 5 slices, 3 with matching labels for our service endpointSlices := []*discovery.EndpointSlice{{ - ObjectMeta: metav1.ObjectMeta{Name: "matching-1", Namespace: ns, Labels: map[string]string{discovery.LabelServiceName: serviceName}}, + ObjectMeta: metav1.ObjectMeta{ + Name: "matching-1", + Namespace: ns, + Labels: map[string]string{ + discovery.LabelServiceName: serviceName, + discovery.LabelManagedBy: controllerName, + }, + }, }, { - ObjectMeta: metav1.ObjectMeta{Name: "matching-2", Namespace: ns, Labels: map[string]string{discovery.LabelServiceName: serviceName}}, + ObjectMeta: metav1.ObjectMeta{ + Name: "matching-2", + Namespace: ns, + Labels: map[string]string{ + discovery.LabelServiceName: serviceName, + discovery.LabelManagedBy: controllerName, + }, + }, }, { - ObjectMeta: metav1.ObjectMeta{Name: "not-matching-1", Namespace: ns, Labels: map[string]string{discovery.LabelServiceName: "something-else"}}, + ObjectMeta: metav1.ObjectMeta{ + Name: "partially-matching-1", + Namespace: ns, + Labels: map[string]string{ + discovery.LabelServiceName: serviceName, + }, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "not-matching-1", + Namespace: ns, + Labels: map[string]string{ + discovery.LabelServiceName: "something-else", + discovery.LabelManagedBy: controllerName, + }, + }, + }, { + ObjectMeta: metav1.ObjectMeta{ + Name: "not-matching-2", + Namespace: ns, + Labels: map[string]string{ + discovery.LabelServiceName: serviceName, + discovery.LabelManagedBy: "something-else", + }, + }, }} // need to add them to both store and fake clientset for _, endpointSlice := range endpointSlices { - addErr := esController.endpointSliceStore.Add(endpointSlice) - assert.Nil(t, addErr, "Expected no error adding EndpointSlice") - _, err := 
client.DiscoveryV1alpha1().EndpointSlices(ns).Create(endpointSlice) - assert.Nil(t, err, "Expected no error creating EndpointSlice") + err := esController.endpointSliceStore.Add(endpointSlice) + if err != nil { + t.Fatalf("Expected no error adding EndpointSlice: %v", err) + } + _, err = client.DiscoveryV1alpha1().EndpointSlices(ns).Create(endpointSlice) + if err != nil { + t.Fatalf("Expected no error creating EndpointSlice: %v", err) + } } - numActionsBefore := len(client.Actions()) - standardSyncService(t, esController, ns, serviceName) + // +1 for extra action involved in Service creation before syncService call. + numActionsBefore := len(client.Actions()) + 1 + standardSyncService(t, esController, ns, serviceName, "false") - // should only have 2 additional actions - assert.Len(t, client.Actions(), numActionsBefore+2) + if len(client.Actions()) != numActionsBefore+5 { + t.Errorf("Expected 5 more actions, got %d", len(client.Actions())-numActionsBefore) + } - // only 2 slices should match, 1 of those should be deleted, 1 should be updated as a placeholder - assert.Equal(t, "update", client.Actions()[numActionsBefore].GetVerb()) - assert.Equal(t, client.Actions()[numActionsBefore].GetResource().Resource, "endpointslices") - assert.Equal(t, "delete", client.Actions()[numActionsBefore+1].GetVerb()) - assert.Equal(t, client.Actions()[numActionsBefore+1].GetResource().Resource, "endpointslices") + // endpointslice should have LabelManagedBy set as part of update. + expectAction(t, client.Actions(), numActionsBefore, "update", "endpointslices") + + // service should have managedBySetupAnnotation set as part of update.
+ expectAction(t, client.Actions(), numActionsBefore+1, "update", "services") + + // only 3 slices should match, 2 of those should be deleted, 1 should be updated as a placeholder + expectAction(t, client.Actions(), numActionsBefore+2, "update", "endpointslices") + expectAction(t, client.Actions(), numActionsBefore+3, "delete", "endpointslices") + expectAction(t, client.Actions(), numActionsBefore+4, "delete", "endpointslices") } // Ensure SyncService handles a variety of protocols and IPs appropriately. @@ -236,7 +287,7 @@ func TestSyncServiceFull(t *testing.T) { // create service with all protocols and multiple ports serviceCreateTime := time.Now() - esController.serviceStore.Add(&v1.Service{ + service := &v1.Service{ ObjectMeta: metav1.ObjectMeta{ Name: serviceName, Namespace: namespace, @@ -250,14 +301,16 @@ func TestSyncServiceFull(t *testing.T) { }, Selector: map[string]string{"foo": "bar"}, }, - }) + } + esController.serviceStore.Add(service) + _, err := esController.client.CoreV1().Services(namespace).Create(service) + assert.Nil(t, err, "Expected no error creating service") // run through full sync service loop - err := esController.syncService(fmt.Sprintf("%s/%s", namespace, serviceName)) + err = esController.syncService(fmt.Sprintf("%s/%s", namespace, serviceName)) assert.Nil(t, err) - // should only have 1 action - to create endpoint slice - assert.Len(t, client.Actions(), 1) + // last action should be to create endpoint slice expectActions(t, client.Actions(), 1, "create", "endpointslices") sliceList, err := client.DiscoveryV1alpha1().EndpointSlices(namespace).List(metav1.ListOptions{}) assert.Nil(t, err, "Expected no error fetching endpoint slices") @@ -294,23 +347,150 @@ func TestSyncServiceFull(t *testing.T) { }}, slice.Endpoints) } +func TestEnsureSetupManagedByAnnotation(t *testing.T) { + serviceName := "testing-1" + + testCases := map[string]struct { + serviceAnnotation string + startingSliceLabels map[string]string + expectedSliceLabels 
map[string]string + }{ + "already-labeled": { + serviceAnnotation: "foo", + startingSliceLabels: map[string]string{ + discovery.LabelServiceName: serviceName, + discovery.LabelManagedBy: controllerName, + }, + expectedSliceLabels: map[string]string{ + discovery.LabelServiceName: serviceName, + discovery.LabelManagedBy: controllerName, + }, + }, + "already-annotated": { + serviceAnnotation: managedBySetupCompleteValue, + startingSliceLabels: map[string]string{ + discovery.LabelServiceName: serviceName, + discovery.LabelManagedBy: "other-controller", + }, + expectedSliceLabels: map[string]string{ + discovery.LabelServiceName: serviceName, + discovery.LabelManagedBy: "other-controller", + }, + }, + "missing-and-extra-label": { + serviceAnnotation: "foo", + startingSliceLabels: map[string]string{ + discovery.LabelServiceName: serviceName, + "foo": "bar", + }, + expectedSliceLabels: map[string]string{ + discovery.LabelServiceName: serviceName, + discovery.LabelManagedBy: controllerName, + "foo": "bar", + }, + }, + "different-service": { + serviceAnnotation: "foo", + startingSliceLabels: map[string]string{ + discovery.LabelServiceName: "something-else", + }, + expectedSliceLabels: map[string]string{ + discovery.LabelServiceName: "something-else", + }, + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + client, esController := newController([]string{"node-1"}) + ns := metav1.NamespaceDefault + service := createService(t, esController, ns, serviceName, testCase.serviceAnnotation) + + endpointSlice := &discovery.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testing", + Namespace: ns, + Labels: testCase.startingSliceLabels, + }, + } + + err := esController.endpointSliceStore.Add(endpointSlice) + if err != nil { + t.Fatalf("Expected no error adding EndpointSlice: %v", err) + } + + _, err = client.DiscoveryV1alpha1().EndpointSlices(ns).Create(endpointSlice) + if err != nil { + t.Fatalf("Expected no error creating EndpointSlice: 
%v", err) + } + + esController.ensureSetupManagedByAnnotation(service) + + updatedService, err := client.CoreV1().Services(ns).Get(service.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Expected no error getting Service: %v", err) + } + + if updatedService.Annotations[managedBySetupAnnotation] != managedBySetupCompleteValue { + t.Errorf("Expected managedBySetupAnnotation: %+v, got: %+v", managedBySetupCompleteValue, updatedService.Annotations[managedBySetupAnnotation]) + } + + updatedSlice, err := client.DiscoveryV1alpha1().EndpointSlices(ns).Get(endpointSlice.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Expected no error getting EndpointSlice: %v", err) + } + + if !reflect.DeepEqual(updatedSlice.Labels, testCase.expectedSliceLabels) { + t.Errorf("Expected labels: %+v, got: %+v", testCase.expectedSliceLabels, updatedSlice.Labels) + } + }) + } +} + // Test helpers -func standardSyncService(t *testing.T, esController *endpointSliceController, namespace, serviceName string) { - esController.serviceStore.Add(&v1.Service{ +func standardSyncService(t *testing.T, esController *endpointSliceController, namespace, serviceName, managedBySetup string) { + t.Helper() + createService(t, esController, namespace, serviceName, managedBySetup) + + err := esController.syncService(fmt.Sprintf("%s/%s", namespace, serviceName)) + assert.Nil(t, err, "Expected no error syncing service") +} + +func createService(t *testing.T, esController *endpointSliceController, namespace, serviceName, managedBySetup string) *v1.Service { + t.Helper() + service := &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: serviceName, + Namespace: namespace, + CreationTimestamp: metav1.NewTime(time.Now()), + Annotations: map[string]string{managedBySetupAnnotation: managedBySetup}, + }, + Spec: v1.ServiceSpec{ + Ports: []v1.ServicePort{{TargetPort: intstr.FromInt(80)}}, + Selector: map[string]string{"foo": "bar"}, + }, - }) + } + esController.serviceStore.Add(service) + _, err := 
esController.client.CoreV1().Services(namespace).Create(service) + assert.Nil(t, err, "Expected no error creating service") + return service +} - err := esController.syncService(fmt.Sprintf("%s/%s", namespace, serviceName)) - assert.Nil(t, err, "Expected no error syncing service") +func expectAction(t *testing.T, actions []k8stesting.Action, index int, verb, resource string) { + t.Helper() + if len(actions) <= index { + t.Fatalf("Expected at least %d actions, got %d", index+1, len(actions)) + } + + action := actions[index] + if action.GetVerb() != verb { + t.Errorf("Expected action %d verb to be %s, got %s", index, verb, action.GetVerb()) + } + + if action.GetResource().Resource != resource { + t.Errorf("Expected action %d resource to be %s, got %s", index, resource, action.GetResource().Resource) + } } func strPtr(str string) *string { diff --git a/pkg/controller/endpointslice/utils.go b/pkg/controller/endpointslice/utils.go index aadfc6efdb1..bfc05eb53ac 100644 --- a/pkg/controller/endpointslice/utils.go +++ b/pkg/controller/endpointslice/utils.go @@ -162,7 +162,10 @@ func newEndpointSlice(service *corev1.Service, endpointMeta *endpointMeta) *disc ownerRef := metav1.NewControllerRef(service, gvk) return &discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ - Labels: map[string]string{discovery.LabelServiceName: service.Name}, + Labels: map[string]string{ + discovery.LabelServiceName: service.Name, + discovery.LabelManagedBy: controllerName, + }, GenerateName: getEndpointSlicePrefix(service.Name), OwnerReferences: []metav1.OwnerReference{*ownerRef}, Namespace: service.Namespace, diff --git a/pkg/controller/endpointslice/utils_test.go b/pkg/controller/endpointslice/utils_test.go index 610f83e8c18..e7aa82c088b 100644 --- a/pkg/controller/endpointslice/utils_test.go +++ b/pkg/controller/endpointslice/utils_test.go @@ -58,7 +58,10 @@ func TestNewEndpointSlice(t *testing.T) { expectedSlice := discovery.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ - Labels: 
map[string]string{discovery.LabelServiceName: service.Name}, + Labels: map[string]string{ + discovery.LabelServiceName: service.Name, + discovery.LabelManagedBy: controllerName, + }, GenerateName: fmt.Sprintf("%s-", service.Name), OwnerReferences: []metav1.OwnerReference{*ownerRef}, Namespace: service.Namespace, diff --git a/staging/src/k8s.io/api/discovery/v1alpha1/well_known_labels.go b/staging/src/k8s.io/api/discovery/v1alpha1/well_known_labels.go index 850cd205925..8f9c72f088e 100644 --- a/staging/src/k8s.io/api/discovery/v1alpha1/well_known_labels.go +++ b/staging/src/k8s.io/api/discovery/v1alpha1/well_known_labels.go @@ -19,4 +19,10 @@ package v1alpha1 const ( // LabelServiceName is used to indicate the name of a Kubernetes service. LabelServiceName = "kubernetes.io/service-name" + // LabelManagedBy is used to indicate the controller or entity that manages + // an EndpointSlice. This label aims to enable different EndpointSlice + // objects to be managed by different controllers or entities within the + // same cluster. It is highly recommended to configure this label for all + // EndpointSlices. + LabelManagedBy = "endpointslice.kubernetes.io/managed-by" )