Reverting managed-by-setup annotation

This ended up causing far more problems than it was worth, especially
given that it just attempted to provide backwards compatibility with
the alpha release.
This commit is contained in:
Rob Scott 2019-11-15 11:50:58 -08:00
parent f16bfb069a
commit 7085d692b7
No known key found for this signature in database
GPG Key ID: 05B37CFC2CDE8B85
3 changed files with 6 additions and 187 deletions

View File

@ -57,17 +57,6 @@ const (
// controllerName is a unique value used with LabelManagedBy to indicate
// the component managing an EndpointSlice.
controllerName = "endpointslice-controller.k8s.io"
// managedBySetupAnnotation is set on a Service to indicate that
// EndpointSlices for the Service have already been configured with
// LabelManagedBy. If this annotation is not set, all related EndpointSlices
// will have LabelManagedBy set to reference this controller if the label
// is not already set. Once all EndpointSlices are labeled, the Controller
// will set this annotation on the Service.
managedBySetupAnnotation = "endpointslice.kubernetes.io/managed-by-setup"
// managedBySetupCompleteValue represents the value of the
// managedBySetupAnnotation that indicates that the setup process has been
// completed for a Service.
managedBySetupCompleteValue = "true"
)
// NewController creates and initializes a new Controller
@ -300,24 +289,6 @@ func (c *Controller) syncService(key string) error {
return err
}
// With the goal of different controllers being able to manage different
// subsets of EndpointSlices, LabelManagedBy has been added to indicate
// which controller or entity manages an EndpointSlice. As part of this
// v1.16->v1.17 change, EndpointSlices will initially be assumed to be
// managed by this controller unless a label is set to indicate otherwise.
// To ensure a seamless upgrade process, the managedBySetupAnnotation is
// used to indicate that LabelManagedBy has been set initially for related
// EndpointSlices. If it hasn't been set to the expected value here, we call
// ensureSetupManagedByAnnotation() to set up LabelManagedBy on each
// EndpointSlice.
// TODO(robscott): Remove this before v1.18.
err = c.ensureSetupManagedByAnnotation(service)
if err != nil {
c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToSetEndpointSliceManagedByLabel",
"Error adding managed-by Label to Endpoint Slices for Service %s/%s: %v", service.Namespace, service.Name, err)
return err
}
esLabelSelector := labels.Set(map[string]string{
discovery.LabelServiceName: service.Name,
discovery.LabelManagedBy: controllerName,
@ -372,49 +343,6 @@ func (c *Controller) onServiceDelete(obj interface{}) {
c.queue.Add(key)
}
// ensureSetupManagedByAnnotation guarantees that every EndpointSlice belonging
// to the given Service carries LabelManagedBy, so that exactly one controller
// or entity manages each EndpointSlice. It exists purely for backwards
// compatibility with the initial alpha release of EndpointSlices, which did
// not set these labels. Once all slices are labeled, completion is recorded on
// the Service via managedBySetupAnnotation so the work is not repeated.
// TODO(robscott): Remove this in time for v1.18.
func (c *Controller) ensureSetupManagedByAnnotation(service *v1.Service) error {
	// Fast path: setup already completed for this Service.
	if val, ok := service.Annotations[managedBySetupAnnotation]; ok && val == managedBySetupCompleteValue {
		return nil
	}

	// Select every EndpointSlice associated with this Service, regardless of
	// whether LabelManagedBy has been set yet.
	selector := labels.Set(map[string]string{discovery.LabelServiceName: service.Name}).AsSelectorPreValidated()
	epSlices, err := c.endpointSliceLister.EndpointSlices(service.Namespace).List(selector)
	if err != nil {
		c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToListEndpointSlices",
			"Error listing Endpoint Slices for Service %s/%s: %v", service.Namespace, service.Name, err)
		return err
	}

	for _, epSlice := range epSlices {
		// Leave slices alone if something (possibly another controller)
		// already claimed them via LabelManagedBy.
		if _, managed := epSlice.Labels[discovery.LabelManagedBy]; managed {
			continue
		}
		if epSlice.Labels == nil {
			epSlice.Labels = make(map[string]string)
		}
		epSlice.Labels[discovery.LabelManagedBy] = controllerName
		if _, updateErr := c.client.DiscoveryV1beta1().EndpointSlices(epSlice.Namespace).Update(epSlice); updateErr != nil {
			return updateErr
		}
	}

	// All slices are labeled; mark setup as complete on the Service.
	if service.Annotations == nil {
		service.Annotations = make(map[string]string)
	}
	service.Annotations[managedBySetupAnnotation] = managedBySetupCompleteValue
	_, err = c.client.CoreV1().Services(service.Namespace).Update(service)
	return err
}
func (c *Controller) addPod(obj interface{}) {
pod := obj.(*v1.Pod)
services, err := c.serviceSelectorCache.GetPodServiceMemberships(c.serviceLister, pod)

View File

@ -18,7 +18,6 @@ package endpointslice
import (
"fmt"
"reflect"
"testing"
"time"
@ -256,20 +255,13 @@ func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) {
numActionsBefore := len(client.Actions()) + 1
standardSyncService(t, esController, ns, serviceName, "false")
if len(client.Actions()) != numActionsBefore+5 {
t.Errorf("Expected 5 more actions, got %d", len(client.Actions())-numActionsBefore)
if len(client.Actions()) != numActionsBefore+2 {
t.Errorf("Expected 2 more actions, got %d", len(client.Actions())-numActionsBefore)
}
// endpointslice should have LabelsManagedBy set as part of update.
// only 2 slices should match, 2 should be deleted, 1 should be updated as a placeholder
expectAction(t, client.Actions(), numActionsBefore, "update", "endpointslices")
// service should have managedBySetupAnnotation set as part of update.
expectAction(t, client.Actions(), numActionsBefore+1, "update", "services")
// only 3 slices should match, 2 of those should be deleted, 1 should be updated as a placeholder
expectAction(t, client.Actions(), numActionsBefore+2, "update", "endpointslices")
expectAction(t, client.Actions(), numActionsBefore+3, "delete", "endpointslices")
expectAction(t, client.Actions(), numActionsBefore+4, "delete", "endpointslices")
expectAction(t, client.Actions(), numActionsBefore+1, "delete", "endpointslices")
}
// Ensure SyncService handles a variety of protocols and IPs appropriately.
@ -345,106 +337,6 @@ func TestSyncServiceFull(t *testing.T) {
}}, slice.Endpoints)
}
// TestEnsureSetupManagedByAnnotation verifies that ensureSetupManagedByAnnotation
// sets LabelManagedBy on EndpointSlices only when appropriate (not already
// labeled, matching Service name) and always records completion on the Service
// via managedBySetupAnnotation.
func TestEnsureSetupManagedByAnnotation(t *testing.T) {
	serviceName := "testing-1"
	testCases := map[string]struct {
		serviceAnnotation   string // initial managedBySetupAnnotation value on the Service
		startingSliceLabels map[string]string
		expectedSliceLabels map[string]string
	}{
		// Slice already carries LabelManagedBy; it must not be overwritten.
		"already-labeled": {
			serviceAnnotation: "foo",
			startingSliceLabels: map[string]string{
				discovery.LabelServiceName: serviceName,
				discovery.LabelManagedBy:   controllerName,
			},
			expectedSliceLabels: map[string]string{
				discovery.LabelServiceName: serviceName,
				discovery.LabelManagedBy:   controllerName,
			},
		},
		// Service already annotated as complete; slices managed by another
		// controller must be left untouched.
		"already-annotated": {
			serviceAnnotation: managedBySetupCompleteValue,
			startingSliceLabels: map[string]string{
				discovery.LabelServiceName: serviceName,
				discovery.LabelManagedBy:   "other-controller",
			},
			expectedSliceLabels: map[string]string{
				discovery.LabelServiceName: serviceName,
				discovery.LabelManagedBy:   "other-controller",
			},
		},
		// Missing LabelManagedBy must be added; unrelated labels preserved.
		"missing-and-extra-label": {
			serviceAnnotation: "foo",
			startingSliceLabels: map[string]string{
				discovery.LabelServiceName: serviceName,
				"foo":                      "bar",
			},
			expectedSliceLabels: map[string]string{
				discovery.LabelServiceName: serviceName,
				discovery.LabelManagedBy:   controllerName,
				"foo":                      "bar",
			},
		},
		// Slice belongs to a different Service; it must not be labeled.
		"different-service": {
			serviceAnnotation: "foo",
			startingSliceLabels: map[string]string{
				discovery.LabelServiceName: "something-else",
			},
			expectedSliceLabels: map[string]string{
				discovery.LabelServiceName: "something-else",
			},
		},
	}
	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			client, esController := newController([]string{"node-1"})
			ns := metav1.NamespaceDefault
			service := createService(t, esController, ns, serviceName, testCase.serviceAnnotation)
			endpointSlice := &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "testing",
					Namespace: ns,
					Labels:    testCase.startingSliceLabels,
				},
			}
			err := esController.endpointSliceStore.Add(endpointSlice)
			if err != nil {
				t.Fatalf("Expected no error adding EndpointSlice: %v", err)
			}
			_, err = client.DiscoveryV1beta1().EndpointSlices(ns).Create(endpointSlice)
			if err != nil {
				t.Fatalf("Expected no error creating EndpointSlice: %v", err)
			}
			// Fix: the returned error was previously ignored; a failure here
			// would silently invalidate the assertions below.
			err = esController.ensureSetupManagedByAnnotation(service)
			if err != nil {
				t.Fatalf("Expected no error from ensureSetupManagedByAnnotation: %v", err)
			}
			updatedService, err := client.CoreV1().Services(ns).Get(service.Name, metav1.GetOptions{})
			if err != nil {
				t.Fatalf("Expected no error getting Service: %v", err)
			}
			if updatedService.Annotations[managedBySetupAnnotation] != managedBySetupCompleteValue {
				t.Errorf("Expected managedBySetupAnnotation: %+v, got: %+v", managedBySetupCompleteValue, updatedService.Annotations[managedBySetupAnnotation])
			}
			updatedSlice, err := client.DiscoveryV1beta1().EndpointSlices(ns).Get(endpointSlice.Name, metav1.GetOptions{})
			if err != nil {
				t.Fatalf("Expected no error getting EndpointSlice: %v", err)
			}
			// Fix: expected/actual were swapped in the format arguments,
			// producing a misleading failure message.
			if !reflect.DeepEqual(updatedSlice.Labels, testCase.expectedSliceLabels) {
				t.Errorf("Expected labels: %+v, got: %+v", testCase.expectedSliceLabels, updatedSlice.Labels)
			}
		})
	}
}
// Test helpers
func standardSyncService(t *testing.T, esController *endpointSliceController, namespace, serviceName, managedBySetup string) {
@ -462,7 +354,6 @@ func createService(t *testing.T, esController *endpointSliceController, namespac
Name: serviceName,
Namespace: namespace,
CreationTimestamp: metav1.NewTime(time.Now()),
Annotations: map[string]string{managedBySetupAnnotation: managedBySetup},
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{TargetPort: intstr.FromInt(80)}},

View File

@ -1017,13 +1017,13 @@ __EOF__
kubectl run testmetadata --image=nginx --replicas=2 --port=80 --expose --service-overrides='{ "metadata": { "annotations": { "zone-context": "home" } } } '
# Check result
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'testmetadata:'
kube::test::get_object_assert 'service testmetadata' "{{.metadata.annotations}}" "map\[endpointslice.kubernetes.io/managed-by-setup:true zone-context:home\]"
kube::test::get_object_assert 'service testmetadata' "{{.metadata.annotations}}" "map\[zone-context:home\]"
### Expose deployment as a new service
# Command
kubectl expose deployment testmetadata --port=1000 --target-port=80 --type=NodePort --name=exposemetadata --overrides='{ "metadata": { "annotations": { "zone-context": "work" } } } '
# Check result
kube::test::get_object_assert 'service exposemetadata' "{{.metadata.annotations}}" "map\[endpointslice.kubernetes.io/managed-by-setup:true zone-context:work\]"
kube::test::get_object_assert 'service exposemetadata' "{{.metadata.annotations}}" "map\[zone-context:work\]"
# Clean-Up
# Command