From 61a5e7498d0c188c74f5133e511799e358739337 Mon Sep 17 00:00:00 2001 From: Tim Hockin Date: Fri, 27 Nov 2020 17:19:28 -0800 Subject: [PATCH] Svc REST: De-layer Delete Gut the "outer" Delete() and move it to the inner AfterDelete(). --- api/openapi-spec/swagger.json | 125 ++++++++++++- pkg/registry/core/rest/storage_core.go | 7 +- pkg/registry/core/service/storage/rest.go | 49 +---- .../core/service/storage/rest_test.go | 20 +- pkg/registry/core/service/storage/storage.go | 47 ++++- .../core/service/storage/storage_test.go | 177 +++++++++++++----- 6 files changed, 326 insertions(+), 99 deletions(-) diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index b53b14f1e15..112d2456bf7 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -25973,6 +25973,127 @@ } }, "/api/v1/namespaces/{namespace}/services": { + "delete": { + "consumes": [ + "*/*" + ], + "description": "delete collection of Service", + "operationId": "deleteCoreV1CollectionNamespacedService", + "parameters": [ + { + "in": "body", + "name": "body", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" + } + }, + { + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "in": "query", + "name": "continue", + "type": "string", + "uniqueItems": true + }, + { + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "in": "query", + "name": "dryRun", + "type": "string", + "uniqueItems": true + }, + { + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "in": "query", + "name": "fieldSelector", + "type": "string", + "uniqueItems": true + }, + { + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. 
zero means delete immediately.", + "in": "query", + "name": "gracePeriodSeconds", + "type": "integer", + "uniqueItems": true + }, + { + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "in": "query", + "name": "labelSelector", + "type": "string", + "uniqueItems": true + }, + { + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "in": "query", + "name": "limit", + "type": "integer", + "uniqueItems": true + }, + { + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "in": "query", + "name": "orphanDependents", + "type": "boolean", + "uniqueItems": true + }, + { + "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "in": "query", + "name": "propagationPolicy", + "type": "string", + "uniqueItems": true + }, + { + "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersion", + "type": "string", + "uniqueItems": true + }, + { + "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersionMatch", + "type": "string", + "uniqueItems": true + }, + { + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "in": "query", + "name": "timeoutSeconds", + "type": "integer", + "uniqueItems": true + } + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "schemes": [ + "https" + ], + "tags": [ + "core_v1" + ], + "x-kubernetes-action": "deletecollection", + "x-kubernetes-group-version-kind": { + "group": "", + "kind": "Service", + "version": "v1" + } + }, "get": { "consumes": [ "*/*" @@ -26217,13 +26338,13 @@ "200": { "description": "OK", "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + "$ref": "#/definitions/io.k8s.api.core.v1.Service" } }, "202": { "description": "Accepted", "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + "$ref": "#/definitions/io.k8s.api.core.v1.Service" } }, "401": { diff --git a/pkg/registry/core/rest/storage_core.go b/pkg/registry/core/rest/storage_core.go index d2fa6a1d45f..61af3b7c0e8 100644 --- a/pkg/registry/core/rest/storage_core.go +++ b/pkg/registry/core/rest/storage_core.go @@ -261,7 +261,12 @@ func (c LegacyRESTStorageProvider) NewLegacyRESTStorage(restOptionsGetter generi serviceIPAllocators[secondaryServiceClusterIPAllocator.IPFamily()] = secondaryServiceClusterIPAllocator } - serviceRESTStorage, serviceStatusStorage, err := servicestore.NewGenericREST(restOptionsGetter, serviceClusterIPAllocator.IPFamily(), serviceIPAllocators, serviceNodePortAllocator) + serviceRESTStorage, serviceStatusStorage, err := servicestore.NewGenericREST( + restOptionsGetter, + serviceClusterIPAllocator.IPFamily(), + serviceIPAllocators, + serviceNodePortAllocator, + endpointsStorage) if err != nil { return LegacyRESTStorage{}, genericapiserver.APIGroupInfo{}, err } diff --git a/pkg/registry/core/service/storage/rest.go b/pkg/registry/core/service/storage/rest.go index 1f3d07dd805..06304661746 100644 --- a/pkg/registry/core/service/storage/rest.go +++ b/pkg/registry/core/service/storage/rest.go @@ -87,11 +87,6 @@ type ServiceStorage interface { rest.ResetFieldsStrategy } -type EndpointsStorage interface { - rest.Getter - rest.GracefulDeleter -} - // NewREST returns a wrapper around the underlying generic storage and performs // allocations and deallocations of various service related resources like ports. 
// TODO: all transactional behavior should be supported from within generic storage @@ -212,49 +207,7 @@ func (al *RESTAllocStuff) allocateCreate(service *api.Service, dryRun bool) (tra } func (rs *REST) Delete(ctx context.Context, id string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) { - // TODO: handle graceful - obj, _, err := rs.services.Delete(ctx, id, deleteValidation, options) - if err != nil { - return nil, false, err - } - - svc := obj.(*api.Service) - // (khenidak) double check that this is in fact the best place for this - - // delete strategy handles graceful delete only. It expects strategy - // to implement Graceful-Delete related interface. Hence we are not doing - // the below there. instead we are doing it locally. Until strategy.BeforeDelete works without - // having to implement graceful delete management - // set ClusterIPs based on ClusterIP - // because we depend on ClusterIPs and data might be saved without ClusterIPs .. - - if svc.Spec.ClusterIPs == nil && len(svc.Spec.ClusterIP) > 0 { - svc.Spec.ClusterIPs = []string{svc.Spec.ClusterIP} - } - - // Only perform the cleanup if this is a non-dryrun deletion - if !dryrun.IsDryRun(options.DryRun) { - // TODO: can leave dangling endpoints, and potentially return incorrect - // endpoints if a new service is created with the same name - _, _, err = rs.endpoints.Delete(ctx, id, rest.ValidateAllObjectFunc, &metav1.DeleteOptions{}) - if err != nil && !errors.IsNotFound(err) { - return nil, false, err - } - - rs.alloc.releaseAllocatedResources(svc) - } - - // TODO: this is duplicated from the generic storage, when this wrapper is fully removed we can drop this - details := &metav1.StatusDetails{ - Name: svc.Name, - UID: svc.UID, - } - if info, ok := genericapirequest.RequestInfoFrom(ctx); ok { - details.Group = info.APIGroup - details.Kind = info.Resource // legacy behavior - } - status := &metav1.Status{Status: metav1.StatusSuccess, Details: details} - return status, true, nil + return rs.services.Delete(ctx, id, deleteValidation, options) } func (al *RESTAllocStuff) releaseAllocatedResources(svc *api.Service) { diff --git a/pkg/registry/core/service/storage/rest_test.go b/pkg/registry/core/service/storage/rest_test.go index 8bd27fe8144..0e805bc3df2 100644 --- a/pkg/registry/core/service/storage/rest_test.go +++ b/pkg/registry/core/service/storage/rest_test.go @@ -144,9 +144,17 @@ func (s *serviceStorage) Update(ctx context.Context, name string, objInfo rest.U } func (s *serviceStorage) Delete(ctx context.Context, name string, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions) (runtime.Object, bool, error) { - ret := s.Services[name] + ret, del, err := s.inner.Delete(ctx, name, deleteValidation, options) + if err != nil { + return ret, del, err + } + + if dryrun.IsDryRun(options.DryRun) { + return ret.DeepCopyObject(), del, nil + } delete(s.Services, name) - return ret, false, nil + + return ret.DeepCopyObject(), del, err } func (s *serviceStorage) DeleteCollection(ctx context.Context, deleteValidation rest.ValidateObjectFunc, options *metav1.DeleteOptions, listOptions *metainternalversion.ListOptions) (runtime.Object, error) { @@ -264,7 +272,13 @@ func newInnerREST(t *testing.T, etcdStorage *storagebackend.ConfigForResource, i DeleteCollectionWorkers: 1, ResourcePrefix: "services", } - inner, _, err := NewGenericREST(restOptions, api.IPv4Protocol, ipAllocs, portAlloc) + endpoints, err := endpointstore.NewREST(generic.RESTOptions{ + 
StorageConfig:  etcdStorage,
+		Decorator:      generic.UndecoratedStorage,
+		ResourcePrefix: "endpoints",
+	})
+
+	inner, _, err := NewGenericREST(restOptions, api.IPv4Protocol, ipAllocs, portAlloc, endpoints)
 	if err != nil {
 		t.Fatalf("unexpected error from REST storage: %v", err)
 	}
diff --git a/pkg/registry/core/service/storage/storage.go b/pkg/registry/core/service/storage/storage.go
index 6b0e1a81309..6e18ad27d97 100644
--- a/pkg/registry/core/service/storage/storage.go
+++ b/pkg/registry/core/service/storage/storage.go
@@ -21,11 +21,14 @@ import (
 
+	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
 	"k8s.io/apiserver/pkg/registry/generic"
 	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
 	"k8s.io/apiserver/pkg/registry/rest"
 	"k8s.io/apiserver/pkg/util/dryrun"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/klog/v2"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/printers"
@@ -40,11 +43,17 @@ import (
 	netutil "k8s.io/utils/net"
 )
 
+type EndpointsStorage interface {
+	rest.Getter
+	rest.GracefulDeleter
+}
+
 type GenericREST struct {
 	*genericregistry.Store
 	primaryIPFamily   api.IPFamily
 	secondaryIPFamily api.IPFamily
 	alloc             RESTAllocStuff
+	endpoints         EndpointsStorage
 }
 
 // NewGenericREST returns a RESTStorage object that will work against services.
@@ -52,7 +61,8 @@ func NewGenericREST(
 	optsGetter generic.RESTOptionsGetter,
 	serviceIPFamily api.IPFamily,
 	ipAllocs map[api.IPFamily]ipallocator.Interface,
-	portAlloc portallocator.Interface) (*GenericREST, *StatusREST, error) {
+	portAlloc portallocator.Interface,
+	endpoints EndpointsStorage) (*GenericREST, *StatusREST, error) {
 
 	strategy, _ := svcreg.StrategyForServiceCIDRs(ipAllocs[serviceIPFamily].CIDR(), len(ipAllocs) > 1)
 
@@ -84,8 +94,15 @@ func NewGenericREST(
 	if len(ipAllocs) > 1 {
 		secondaryIPFamily = otherFamily(serviceIPFamily)
 	}
-	genericStore := &GenericREST{store, primaryIPFamily, secondaryIPFamily, makeAlloc(serviceIPFamily, ipAllocs, portAlloc)}
+	genericStore := &GenericREST{
+		Store:             store,
+		primaryIPFamily:   primaryIPFamily,
+		secondaryIPFamily: secondaryIPFamily,
+		alloc:             makeAlloc(serviceIPFamily, ipAllocs, portAlloc),
+		endpoints:         endpoints,
+	}
 	store.Decorator = genericStore.defaultOnRead
+	store.AfterDelete = genericStore.afterDelete
 	store.BeginCreate = genericStore.beginCreate
 	store.BeginUpdate = genericStore.beginUpdate
 
@@ -249,6 +266,32 @@ func (r *GenericREST) defaultOnReadService(service *api.Service) {
 	}
 }
 
+func (r *GenericREST) afterDelete(obj runtime.Object, options *metav1.DeleteOptions) {
+	svc := obj.(*api.Service)
+
+	// Normally this defaulting is done automatically, but the hook (Decorator)
+	// is called at the end of this process, and we want the fully-formed
+	// object.
+	r.defaultOnReadService(svc)
+
+	// Only perform the cleanup if this is a non-dryrun deletion.
+	if !dryrun.IsDryRun(options.DryRun) {
+		// It would be better if we had the caller context, but that changes
+		// this hook signature.
+		ctx := genericapirequest.WithNamespace(genericapirequest.NewContext(), svc.Namespace)
+		// TODO: This is clumsy. It was added for fear that the endpoints
+		// controller might lag, and we could end up reusing the service name
+		// with old endpoints. We should solve that better and remove this, or
+		// else we should do this for EndpointSlice, too.
+ _, _, err := r.endpoints.Delete(ctx, svc.Name, rest.ValidateAllObjectFunc, &metav1.DeleteOptions{}) + if err != nil && !errors.IsNotFound(err) { + klog.Errorf("delete service endpoints %s/%s failed: %v", svc.Name, svc.Namespace, err) + } + + r.alloc.releaseAllocatedResources(svc) + } +} + func (r *GenericREST) beginCreate(ctx context.Context, obj runtime.Object, options *metav1.CreateOptions) (genericregistry.FinishFunc, error) { svc := obj.(*api.Service) diff --git a/pkg/registry/core/service/storage/storage_test.go b/pkg/registry/core/service/storage/storage_test.go index 0d1ce05e1c3..88167e379b0 100644 --- a/pkg/registry/core/service/storage/storage_test.go +++ b/pkg/registry/core/service/storage/storage_test.go @@ -17,6 +17,7 @@ limitations under the License. package storage import ( + "context" "fmt" "net" "reflect" @@ -61,6 +62,16 @@ func makePortAllocator(ports machineryutilnet.PortRange) portallocator.Interface return al } +type fakeEndpoints struct{} + +func (fakeEndpoints) Delete(_ context.Context, _ string, _ rest.ValidateObjectFunc, _ *metav1.DeleteOptions) (runtime.Object, bool, error) { + return nil, false, nil +} + +func (fakeEndpoints) Get(_ context.Context, _ string, _ *metav1.GetOptions) (runtime.Object, error) { + return nil, nil +} + func ipIsAllocated(t *testing.T, alloc ipallocator.Interface, ipstr string) bool { t.Helper() ip := netutils.ParseIPSloppy(ipstr) @@ -105,7 +116,7 @@ func newStorage(t *testing.T, ipFamilies []api.IPFamily) (*GenericREST, *StatusR portAlloc := makePortAllocator(*(machineryutilnet.ParsePortRangeOrDie("30000-32767"))) - serviceStorage, statusStorage, err := NewGenericREST(restOptions, ipFamilies[0], ipAllocs, portAlloc) + serviceStorage, statusStorage, err := NewGenericREST(restOptions, ipFamilies[0], ipAllocs, portAlloc, fakeEndpoints{}) if err != nil { t.Fatalf("unexpected error from REST storage: %v", err) } @@ -640,15 +651,6 @@ func TestCreateIgnoresIPFamilyWithoutDualStack(t *testing.T) { } defer storage.Delete(ctx, tc.svc.Name, rest.ValidateAllObjectFunc, &metav1.DeleteOptions{}) createdSvc := createdObj.(*api.Service) - //FIXME: HACK!! Delete above calls "inner" which doesn't - // yet call the allocators - no release = alloc errors! - defer func() { - for _, al := range storage.alloc.serviceIPAllocatorsByFamily { - for _, ip := range createdSvc.Spec.ClusterIPs { - al.Release(netutils.ParseIPSloppy(ip)) - } - } - }() // The gate is off - these should always be empty. if want, got := fmtIPFamilyPolicy(nil), fmtIPFamilyPolicy(createdSvc.Spec.IPFamilyPolicy); want != got { @@ -4697,15 +4699,6 @@ func TestCreateInitIPFields(t *testing.T) { t.Fatalf("unexpected success creating service") } createdSvc := createdObj.(*api.Service) - //FIXME: HACK!! Delete above calls "inner" which doesn't - // yet call the allocators - no release = alloc errors! 
- defer func() { - for _, al := range storage.alloc.serviceIPAllocatorsByFamily { - for _, ip := range createdSvc.Spec.ClusterIPs { - al.Release(netutils.ParseIPSloppy(ip)) - } - } - }() if want, got := fmtIPFamilyPolicy(&itc.expectPolicy), fmtIPFamilyPolicy(createdSvc.Spec.IPFamilyPolicy); want != got { t.Errorf("wrong IPFamilyPolicy: want %s, got %s", want, got) @@ -4736,7 +4729,7 @@ func TestCreateInitIPFields(t *testing.T) { } } -func TestCreateReallocation(t *testing.T) { +func TestCreateDeleteReuse(t *testing.T) { testCases := []struct { name string svc *api.Service @@ -4761,37 +4754,69 @@ func TestCreateReallocation(t *testing.T) { defer storage.Store.DestroyFunc() ctx := genericapirequest.NewDefaultContext() + + // Create it createdObj, err := storage.Create(ctx, tc.svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}) if err != nil { t.Fatalf("unexpected error creating service: %v", err) } createdSvc := createdObj.(*api.Service) + // Ensure IPs and ports were allocated + for i, fam := range createdSvc.Spec.IPFamilies { + if !ipIsAllocated(t, storage.alloc.serviceIPAllocatorsByFamily[fam], createdSvc.Spec.ClusterIPs[i]) { + t.Errorf("expected IP to be allocated: %v", createdSvc.Spec.ClusterIPs[i]) + } + } + for _, p := range createdSvc.Spec.Ports { + if !portIsAllocated(t, storage.alloc.serviceNodePorts, p.NodePort) { + t.Errorf("expected port to be allocated: %v", p.NodePort) + } + } + + // Delete it _, _, err = storage.Delete(ctx, tc.svc.Name, rest.ValidateAllObjectFunc, &metav1.DeleteOptions{}) if err != nil { t.Fatalf("unexpected error creating service: %v", err) } - //FIXME: HACK!! Delete above calls "inner" which doesn't - // yet call the allocators - no release = test errors! - for _, al := range storage.alloc.serviceIPAllocatorsByFamily { - for _, ip := range createdSvc.Spec.ClusterIPs { - al.Release(netutils.ParseIPSloppy(ip)) + + // Ensure IPs and ports were deallocated + for i, fam := range createdSvc.Spec.IPFamilies { + if ipIsAllocated(t, storage.alloc.serviceIPAllocatorsByFamily[fam], createdSvc.Spec.ClusterIPs[i]) { + t.Errorf("expected IP to not be allocated: %v", createdSvc.Spec.ClusterIPs[i]) } } for _, p := range createdSvc.Spec.Ports { - storage.alloc.serviceNodePorts.Release(int(p.NodePort)) + if portIsAllocated(t, storage.alloc.serviceNodePorts, p.NodePort) { + t.Errorf("expected port to not be allocated: %v", p.NodePort) + } } // Force the same IPs and ports svc2 := tc.svc.DeepCopy() + svc2.Name += "2" svc2.Spec.ClusterIP = createdSvc.Spec.ClusterIP svc2.Spec.ClusterIPs = createdSvc.Spec.ClusterIPs svc2.Spec.Ports = createdSvc.Spec.Ports + // Create again _, err = storage.Create(ctx, svc2, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}) if err != nil { t.Fatalf("unexpected error creating service: %v", err) } + + // Ensure IPs and ports were allocated + for i, fam := range createdSvc.Spec.IPFamilies { + if !ipIsAllocated(t, storage.alloc.serviceIPAllocatorsByFamily[fam], createdSvc.Spec.ClusterIPs[i]) { + t.Errorf("expected IP to be allocated: %v", createdSvc.Spec.ClusterIPs[i]) + } + } + for _, p := range createdSvc.Spec.Ports { + if !portIsAllocated(t, storage.alloc.serviceNodePorts, p.NodePort) { + t.Errorf("expected port to be allocated: %v", p.NodePort) + } + } + }) } } @@ -5120,18 +5145,6 @@ func TestCreateInitNodePorts(t *testing.T) { t.Fatalf("unexpected success creating service") } createdSvc := createdObj.(*api.Service) - //FIXME: HACK!! Delete above calls "inner" which doesn't - // yet call the allocators - no release = alloc errors! 
- defer func() { - for _, al := range storage.alloc.serviceIPAllocatorsByFamily { - for _, ip := range createdSvc.Spec.ClusterIPs { - al.Release(netutils.ParseIPSloppy(ip)) - } - } - for _, p := range createdSvc.Spec.Ports { - storage.alloc.serviceNodePorts.Release(int(p.NodePort)) - } - }() // Produce a map of port index to nodeport value, excluding zero. ports := map[int]*api.ServicePort{} @@ -5550,23 +5563,23 @@ func TestCreateDryRun(t *testing.T) { // Ensure IPs were allocated if netutils.ParseIPSloppy(createdSvc.Spec.ClusterIP) == nil { - t.Errorf("expected valid clusterIP: %v", createdSvc.Spec.ClusterIP) + t.Errorf("expected valid clusterIP: %q", createdSvc.Spec.ClusterIP) } // Ensure the IP allocators are clean. if !tc.enableDualStack { if ipIsAllocated(t, storage.alloc.serviceIPAllocatorsByFamily[api.IPv4Protocol], createdSvc.Spec.ClusterIP) { - t.Errorf("expected IP to not be allocated: %v", createdSvc.Spec.ClusterIP) + t.Errorf("expected IP to not be allocated: %q", createdSvc.Spec.ClusterIP) } } else { for _, ip := range createdSvc.Spec.ClusterIPs { if netutils.ParseIPSloppy(ip) == nil { - t.Errorf("expected valid clusterIP: %v", createdSvc.Spec.ClusterIP) + t.Errorf("expected valid clusterIP: %q", createdSvc.Spec.ClusterIP) } } for i, fam := range createdSvc.Spec.IPFamilies { if ipIsAllocated(t, storage.alloc.serviceIPAllocatorsByFamily[fam], createdSvc.Spec.ClusterIPs[i]) { - t.Errorf("expected IP to not be allocated: %v", createdSvc.Spec.ClusterIPs[i]) + t.Errorf("expected IP to not be allocated: %q", createdSvc.Spec.ClusterIPs[i]) } } } @@ -5574,10 +5587,88 @@ func TestCreateDryRun(t *testing.T) { if tc.svc.Spec.Type != api.ServiceTypeClusterIP { for _, p := range createdSvc.Spec.Ports { if portIsAllocated(t, storage.alloc.serviceNodePorts, p.NodePort) { - t.Errorf("expected port to not be allocated: %v", p.NodePort) + t.Errorf("expected port to not be allocated: %d", p.NodePort) } } } }) } } + +// Prove that a dry-run delete doesn't actually deallocate IPs or ports. 
+func TestDeleteDryRun(t *testing.T) { + testCases := []struct { + name string + enableDualStack bool + svc *api.Service + }{{ + name: "gate:off", + enableDualStack: false, + svc: svctest.MakeService("foo", + svctest.SetTypeLoadBalancer, + svctest.SetExternalTrafficPolicy(api.ServiceExternalTrafficPolicyTypeLocal)), + }, { + name: "gate:on", + enableDualStack: true, + svc: svctest.MakeService("foo", + svctest.SetTypeLoadBalancer, + svctest.SetExternalTrafficPolicy(api.ServiceExternalTrafficPolicyTypeLocal)), + }} + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.IPv6DualStack, tc.enableDualStack)() + + families := []api.IPFamily{api.IPv4Protocol} + if tc.enableDualStack { + families = append(families, api.IPv6Protocol) + } + + storage, _, server := newStorage(t, families) + defer server.Terminate(t) + defer storage.Store.DestroyFunc() + + ctx := genericapirequest.NewDefaultContext() + createdObj, err := storage.Create(ctx, tc.svc, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}) + if err != nil { + t.Fatalf("unexpected error creating service: %v", err) + } + createdSvc := createdObj.(*api.Service) + + // Ensure IPs and ports were allocated + for i, fam := range createdSvc.Spec.IPFamilies { + if !ipIsAllocated(t, storage.alloc.serviceIPAllocatorsByFamily[fam], createdSvc.Spec.ClusterIPs[i]) { + t.Errorf("expected IP to be allocated: %q", createdSvc.Spec.ClusterIPs[i]) + } + } + for _, p := range createdSvc.Spec.Ports { + if !portIsAllocated(t, storage.alloc.serviceNodePorts, p.NodePort) { + t.Errorf("expected port to be allocated: %d", p.NodePort) + } + } + if !portIsAllocated(t, storage.alloc.serviceNodePorts, createdSvc.Spec.HealthCheckNodePort) { + t.Errorf("expected port to be allocated: %d", createdSvc.Spec.HealthCheckNodePort) + } + + _, _, err = storage.Delete(ctx, tc.svc.Name, rest.ValidateAllObjectFunc, &metav1.DeleteOptions{DryRun: []string{metav1.DryRunAll}}) + if err != nil { + t.Fatalf("unexpected error deleting service: %v", err) + } + + // Ensure they are still allocated. + for i, fam := range createdSvc.Spec.IPFamilies { + if !ipIsAllocated(t, storage.alloc.serviceIPAllocatorsByFamily[fam], createdSvc.Spec.ClusterIPs[i]) { + t.Errorf("expected IP to still be allocated: %q", createdSvc.Spec.ClusterIPs[i]) + } + } + for _, p := range createdSvc.Spec.Ports { + if !portIsAllocated(t, storage.alloc.serviceNodePorts, p.NodePort) { + t.Errorf("expected port to still be allocated: %d", p.NodePort) + } + } + if !portIsAllocated(t, storage.alloc.serviceNodePorts, createdSvc.Spec.HealthCheckNodePort) { + t.Errorf("expected port to still be allocated: %d", createdSvc.Spec.HealthCheckNodePort) + } + }) + } +}
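
The net effect of the change above: Service-specific cleanup (deleting the mirror Endpoints object and releasing ClusterIPs/NodePorts) now runs in an AfterDelete hook on the inner generic store instead of in the wrapper's Delete(), and the hook itself skips cleanup for dry-run deletes, which is what TestDeleteDryRun exercises. The sketch below is only an illustration of that hook shape; miniStore and deleteOptions are invented stand-ins, not the real genericregistry.Store or metav1.DeleteOptions, and only the control flow mirrors the patch.

// A minimal, runnable sketch of the AfterDelete-hook pattern, under the
// assumptions stated above.
package main

import "fmt"

type deleteOptions struct {
	dryRun bool
}

// miniStore plays the role of the generic registry store: it performs the
// actual deletion and then hands the deleted object's data to afterDelete.
type miniStore struct {
	objects     map[string]string // name -> allocated IP
	afterDelete func(deletedIP string, opts deleteOptions)
}

func (s *miniStore) Delete(name string, opts deleteOptions) error {
	ip, ok := s.objects[name]
	if !ok {
		return fmt.Errorf("not found: %s", name)
	}
	if !opts.dryRun {
		delete(s.objects, name)
	}
	// The hook runs for every delete; it decides what to skip, just as
	// afterDelete in storage.go checks dryrun.IsDryRun.
	s.afterDelete(ip, opts)
	return nil
}

func main() {
	allocated := map[string]bool{"10.0.0.1": true}

	store := &miniStore{objects: map[string]string{"svc-a": "10.0.0.1"}}
	store.afterDelete = func(deletedIP string, opts deleteOptions) {
		if opts.dryRun {
			return // dry run: keep the allocation, as TestDeleteDryRun expects
		}
		delete(allocated, deletedIP) // release the "ClusterIP"
	}

	_ = store.Delete("svc-a", deleteOptions{dryRun: true})
	fmt.Println("after dry-run delete, still allocated:", allocated)

	_ = store.Delete("svc-a", deleteOptions{dryRun: false})
	fmt.Println("after real delete, still allocated:", allocated)
}

Keeping the dry-run check inside the hook lets the store invoke AfterDelete unconditionally while each resource type decides what cleanup means for a dry run.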