mirror of https://github.com/k3s-io/kubernetes.git
sync Service API status rest storage

The Service API REST implementation is complex and has to install several hooks on the REST storage. The status store was created as a shallow copy of the REST store before those hooks were added, so it did not inherit them. The status store must carry the same hooks as the REST store to correctly handle allocation and deallocation of ClusterIPs and NodePorts.

Change-Id: I44be21468d36017f0ec41a8f912b8490f8f13f55
Signed-off-by: Antonio Ojea <aojea@google.com>
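Note: the root cause is plain Go value-copy semantics. The sketch below illustrates it with stand-in types (Store and its AfterDelete field are simplifications for illustration, not the actual Kubernetes registry types):

package main

import "fmt"

// Store is a stand-in for the generic registry store; the real type
// carries many more fields and hooks.
type Store struct {
	AfterDelete func()
}

func main() {
	store := &Store{}

	// A shallow copy taken before the hook is assigned holds a nil
	// AfterDelete and never observes the later assignment.
	statusStore := *store

	store.AfterDelete = func() { fmt.Println("release ClusterIP and NodePort") }

	fmt.Println(store.AfterDelete != nil)       // true
	fmt.Println(statusStore.AfterDelete != nil) // false: the hook was lost

	// The fix mirrors the hook onto the copy explicitly, as this commit
	// does with statusStore.AfterDelete = genericStore.afterDelete.
	statusStore.AfterDelete = store.AfterDelete
}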
parent d147da0304
commit 21e26486ac
@@ -127,6 +127,11 @@ func NewREST(
 	store.BeginCreate = genericStore.beginCreate
 	store.BeginUpdate = genericStore.beginUpdate
 
+	// users can patch the status to remove the finalizer,
+	// hence statusStore must participate on the AfterDelete
+	// hook to release the allocated resources
+	statusStore.AfterDelete = genericStore.afterDelete
+
 	return genericStore, &StatusREST{store: &statusStore}, &svcreg.ProxyREST{Redirector: genericStore, ProxyTransport: proxyTransport}, nil
 }
 
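For context on the comment above: the status subresource is reachable by clients, and clearing the finalizer through it is what completes deletion. A minimal client-go sketch of such a call follows; the namespace "default", the Service name "my-svc", and the kubeconfig loading are illustrative assumptions, not part of this commit:

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load the local kubeconfig (assumes running outside the cluster).
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// "finalizers": null removes the field; sending the patch to the
	// "status" subresource is the path that previously bypassed the
	// AfterDelete hook and leaked the ClusterIP and NodePort.
	patch := []byte(`{"metadata":{"finalizers":null}}`)
	if _, err := client.CoreV1().Services("default").Patch(
		context.TODO(), "my-svc",
		types.StrategicMergePatchType, patch,
		metav1.PatchOptions{}, "status",
	); err != nil {
		panic(err)
	}
}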
@@ -18,6 +18,8 @@ package network
 
 import (
 	"context"
+	"encoding/json"
+	"fmt"
 	"testing"
 	"time"
 
@@ -25,6 +27,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
 	"k8s.io/kubernetes/pkg/controlplane"
@@ -134,6 +137,113 @@ func TestServicesFinalizersRepairLoop(t *testing.T) {
 	t.Logf("Created service: %s", svcNodePort.Name)
 }
 
+func TestServicesFinalizersPatchStatus(t *testing.T) {
+	serviceCIDR := "10.0.0.0/16"
+	clusterIP := "10.0.0.21"
+	nodePort := 30443
+	_, ctx := ktesting.NewTestContext(t)
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
+		ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
+			opts.ServiceClusterIPRanges = serviceCIDR
+		},
+	})
+	defer tearDownFn()
+
+	for _, testcase := range []string{"spec", "status"} {
+		t.Run(testcase, func(t *testing.T) {
+			// Create a NodePort service with one finalizer
+			svcNodePort := v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "svc" + testcase,
+					Finalizers: []string{"foo.bar/some-finalizer"},
+				},
+				Spec: v1.ServiceSpec{
+					ClusterIP: clusterIP,
+					Ports: []v1.ServicePort{{
+						Port:       8443,
+						NodePort:   int32(nodePort),
+						TargetPort: intstr.FromInt32(8443),
+						Protocol:   v1.ProtocolTCP,
+					}},
+					Type: v1.ServiceTypeNodePort,
+				},
+			}
+
+			ns := framework.CreateNamespaceOrDie(client, "test-service-finalizers-"+testcase, t)
+			defer framework.DeleteNamespaceOrDie(client, ns, t)
+
+			// Create service
+			if _, err := client.CoreV1().Services(ns.Name).Create(ctx, &svcNodePort, metav1.CreateOptions{}); err != nil {
+				t.Fatalf("unexpected error creating service: %v", err)
+			}
+			t.Logf("Created service: %s", svcNodePort.Name)
+
+			// Check the service has been created correctly
+			svc, err := client.CoreV1().Services(ns.Name).Get(ctx, svcNodePort.Name, metav1.GetOptions{})
+			if err != nil || svc.Spec.ClusterIP != clusterIP {
+				t.Fatalf("created service is not correct: %v", err)
+			}
+			t.Logf("Service created successfully: %+v", svc)
+
+			// Delete service
+			if err := client.CoreV1().Services(ns.Name).Delete(ctx, svcNodePort.Name, metav1.DeleteOptions{}); err != nil {
+				t.Fatalf("unexpected error deleting service: %v", err)
+			}
+			t.Logf("Deleted service: %s", svcNodePort.Name)
+
+			// Check that the service was not deleted and the IP is still allocated
+			svc, err = client.CoreV1().Services(ns.Name).Get(ctx, svcNodePort.Name, metav1.GetOptions{})
+			if err != nil ||
+				svc.Spec.ClusterIP != clusterIP ||
+				int(svc.Spec.Ports[0].NodePort) != nodePort ||
+				svc.DeletionTimestamp == nil ||
+				len(svc.ObjectMeta.Finalizers) != 1 {
+				t.Fatalf("Service expected to be deleting and with the same values: %v", err)
+			}
+			t.Logf("Service after Delete: %+v", svc)
+
+			// Remove the finalizer
+			updated := svc.DeepCopy()
+			updated.ObjectMeta.Finalizers = []string{}
+			patchBytes, err := getPatchBytes(svc, updated)
+			if err != nil {
+				t.Fatalf("unexpected error getting patch bytes: %v", err)
+			}
+
+			if testcase == "spec" {
+				if _, err = client.CoreV1().Services(ns.Name).Patch(ctx, svcNodePort.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
+					t.Fatalf("unexpected error removing finalizer: %v", err)
+				}
+			} else {
+				if _, err = client.CoreV1().Services(ns.Name).Patch(ctx, svcNodePort.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status"); err != nil {
+					t.Fatalf("unexpected error removing finalizer: %v", err)
+				}
+			}
+			t.Logf("Removed service finalizer: %s", svcNodePort.Name)
+
+			// Check that the service was deleted
+			_, err = client.CoreV1().Services(ns.Name).Get(ctx, svcNodePort.Name, metav1.GetOptions{})
+			if err == nil {
+				t.Fatalf("service was not deleted: %v", err)
+			}
+
+			// Try to create the service again without the finalizer to check the ClusterIP and NodePort are deallocated
+			svc = svcNodePort.DeepCopy()
+			svc.Finalizers = []string{}
+			if _, err := client.CoreV1().Services(ns.Name).Create(ctx, svc, metav1.CreateOptions{}); err != nil {
+				t.Fatalf("unexpected error creating service: %v", err)
+			}
+			// Delete service
+			if err := client.CoreV1().Services(ns.Name).Delete(ctx, svc.Name, metav1.DeleteOptions{}); err != nil {
+				t.Fatalf("unexpected error deleting service: %v", err)
+			}
+		})
+	}
+}
+
 // Regression test for https://issues.k8s.io/115316
 func TestServiceCIDR28bits(t *testing.T) {
 	serviceCIDR := "10.0.0.0/28"
@@ -183,3 +293,22 @@ func TestServiceCIDR28bits(t *testing.T) {
 		t.Fatalf("Error creating test service: %v", err)
 	}
 }
+
+func getPatchBytes(oldSvc, newSvc *v1.Service) ([]byte, error) {
+	oldData, err := json.Marshal(oldSvc)
+	if err != nil {
+		return nil, fmt.Errorf("failed to Marshal oldData for svc %s/%s: %v", oldSvc.Namespace, oldSvc.Name, err)
+	}
+
+	newData, err := json.Marshal(newSvc)
+	if err != nil {
+		return nil, fmt.Errorf("failed to Marshal newData for svc %s/%s: %v", newSvc.Namespace, newSvc.Name, err)
+	}
+
+	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Service{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to CreateTwoWayMergePatch for svc %s/%s: %v", oldSvc.Namespace, oldSvc.Name, err)
+	}
+	return patchBytes, nil
+
+}
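As a usage note, the getPatchBytes helper above can be exercised standalone. The sketch below (assuming a Go module with the k8s.io dependencies available) diffs a Service carrying one finalizer against a copy with none and prints the resulting strategic merge patch:

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	oldSvc := &v1.Service{ObjectMeta: metav1.ObjectMeta{
		Name:       "svc",
		Finalizers: []string{"foo.bar/some-finalizer"},
	}}
	newSvc := oldSvc.DeepCopy()
	newSvc.Finalizers = []string{}

	oldData, err := json.Marshal(oldSvc)
	if err != nil {
		panic(err)
	}
	newData, err := json.Marshal(newSvc)
	if err != nil {
		panic(err)
	}

	// CreateTwoWayMergePatch emits only what changed between the two
	// objects, here the removal of the finalizer.
	patch, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Service{})
	if err != nil {
		panic(err)
	}
	fmt.Printf("patch: %s\n", patch)
}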