modify tests to use the networking v1beta1 API

This commit is contained in:
Antonio Ojea 2024-06-29 10:06:38 +00:00
parent dbfdd074d0
commit ed597316d6
8 changed files with 84 additions and 81 deletions

View File

@ -21,7 +21,7 @@ import (
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
@ -62,18 +62,18 @@ var _ = common.SIGDescribe(feature.ServiceCIDRs, func() {
ginkgo.It("should create Services and servce on different Service CIDRs", func(ctx context.Context) {
// create a new service CIDR
svcCIDR := &networkingv1alpha1.ServiceCIDR{
svcCIDR := &networkingv1beta1.ServiceCIDR{
ObjectMeta: metav1.ObjectMeta{
Name: "test-svc-cidr",
},
Spec: networkingv1alpha1.ServiceCIDRSpec{
Spec: networkingv1beta1.ServiceCIDRSpec{
CIDRs: []string{"10.196.196.0/24"},
},
}
_, err := cs.NetworkingV1alpha1().ServiceCIDRs().Create(context.TODO(), svcCIDR, metav1.CreateOptions{})
_, err := cs.NetworkingV1beta1().ServiceCIDRs().Create(context.TODO(), svcCIDR, metav1.CreateOptions{})
framework.ExpectNoError(err, "error creating ServiceCIDR")
if pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, e2eservice.RespondingTimeout, false, func(ctx context.Context) (bool, error) {
svcCIDR, err := cs.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, svcCIDR.Name, metav1.GetOptions{})
svcCIDR, err := cs.NetworkingV1beta1().ServiceCIDRs().Get(ctx, svcCIDR.Name, metav1.GetOptions{})
if err != nil {
return false, nil
}
@ -103,13 +103,13 @@ var _ = common.SIGDescribe(feature.ServiceCIDRs, func() {
})
func isReady(serviceCIDR *networkingv1alpha1.ServiceCIDR) bool {
func isReady(serviceCIDR *networkingv1beta1.ServiceCIDR) bool {
if serviceCIDR == nil {
return false
}
for _, condition := range serviceCIDR.Status.Conditions {
if condition.Type == string(networkingv1alpha1.ServiceCIDRConditionReady) {
if condition.Type == string(networkingv1beta1.ServiceCIDRConditionReady) {
return condition.Status == metav1.ConditionStatus(metav1.ConditionTrue)
}
}

View File

@ -67,6 +67,7 @@ var resetFieldsStatusData = map[schema.GroupVersionResource]string{
gvr("admissionregistration.k8s.io", "v1beta1", "validatingadmissionpolicies"): `{"status": {"conditions":[{"type":"Accepted","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`,
gvr("admissionregistration.k8s.io", "v1", "validatingadmissionpolicies"): `{"status": {"conditions":[{"type":"Accepted","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`,
gvr("networking.k8s.io", "v1alpha1", "servicecidrs"): `{"status": {"conditions":[{"type":"Accepted","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`,
gvr("networking.k8s.io", "v1beta1", "servicecidrs"): `{"status": {"conditions":[{"type":"Accepted","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`,
}
// resetFieldsStatusDefault conflicts with statusDefault
@ -140,6 +141,7 @@ var resetFieldsSpecData = map[schema.GroupVersionResource]string{
gvr("networking.k8s.io", "v1beta1", "ingresses"): `{"spec": {"backend": {"serviceName": "service2"}}}`,
gvr("networking.k8s.io", "v1", "ingresses"): `{"spec": {"defaultBackend": {"service": {"name": "service2"}}}}`,
gvr("networking.k8s.io", "v1alpha1", "servicecidrs"): `{}`,
gvr("networking.k8s.io", "v1beta1", "servicecidrs"): `{}`,
gvr("policy", "v1", "poddisruptionbudgets"): `{"spec": {"selector": {"matchLabels": {"anokkey2": "anokvalue"}}}}`,
gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"spec": {"selector": {"matchLabels": {"anokkey2": "anokvalue"}}}}`,
gvr("storage.k8s.io", "v1alpha1", "volumeattachments"): `{"metadata": {"name": "va3"}, "spec": {"nodeName": "localhost2"}}`,

View File

@ -202,17 +202,17 @@ func GetEtcdStorageDataForNamespace(namespace string) map[schema.GroupVersionRes
},
// --
// k8s.io/kubernetes/pkg/apis/networking/v1alpha1
gvr("networking.k8s.io", "v1alpha1", "ipaddresses"): {
Stub: `{"metadata": {"name": "192.168.1.2"}, "spec": {"parentRef": {"resource": "services","name": "test", "namespace": "ns"}}}`,
ExpectedEtcdPath: "/registry/ipaddresses/192.168.1.2",
// k8s.io/kubernetes/pkg/apis/networking/v1beta1
gvr("networking.k8s.io", "v1beta1", "ipaddresses"): {
Stub: `{"metadata": {"name": "192.168.1.3"}, "spec": {"parentRef": {"resource": "services","name": "test", "namespace": "ns"}}}`,
ExpectedEtcdPath: "/registry/ipaddresses/192.168.1.3",
},
// --
// k8s.io/kubernetes/pkg/apis/networking/v1alpha1
gvr("networking.k8s.io", "v1alpha1", "servicecidrs"): {
Stub: `{"metadata": {"name": "range1"}, "spec": {"cidrs": ["192.168.0.0/16","fd00:1::/120"]}}`,
ExpectedEtcdPath: "/registry/servicecidrs/range1",
// k8s.io/kubernetes/pkg/apis/networking/v1beta1
gvr("networking.k8s.io", "v1beta1", "servicecidrs"): {
Stub: `{"metadata": {"name": "range-b1"}, "spec": {"cidrs": ["192.168.0.0/16","fd00:1::/120"]}}`,
ExpectedEtcdPath: "/registry/servicecidrs/range-b1",
},
// --

View File

@ -74,7 +74,7 @@ func TestServiceAllocation(t *testing.T) {
s1 := kubeapiservertesting.StartTestServerOrDie(t,
apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=true",
"--runtime-config=networking.k8s.io/v1beta1=true",
"--service-cluster-ip-range=" + serviceCIDR,
"--advertise-address=10.0.0.2",
"--disable-admission-plugins=ServiceAccount",
@ -159,7 +159,7 @@ func TestServiceAllocIPAddressLargeCIDR(t *testing.T) {
s1 := kubeapiservertesting.StartTestServerOrDie(t,
apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=true",
"--runtime-config=networking.k8s.io/v1beta1=true",
"--service-cluster-ip-range=" + serviceCIDR,
"--advertise-address=2001:db8::10",
"--disable-admission-plugins=ServiceAccount",
@ -205,7 +205,7 @@ func TestServiceAllocIPAddressLargeCIDR(t *testing.T) {
if err != nil {
t.Error(err)
}
_, err = client.NetworkingV1alpha1().IPAddresses().Get(tCtx, svc.Spec.ClusterIP, metav1.GetOptions{})
_, err = client.NetworkingV1beta1().IPAddresses().Get(tCtx, svc.Spec.ClusterIP, metav1.GetOptions{})
if err != nil {
t.Error(err)
}
@ -218,7 +218,8 @@ func TestServiceAllocIPAddressLargeCIDR(t *testing.T) {
if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, lastSvc, metav1.CreateOptions{}); err != nil {
t.Errorf("unexpected error text: %v", err)
}
_, err = client.NetworkingV1alpha1().IPAddresses().Get(tCtx, lastSvc.Spec.ClusterIP, metav1.GetOptions{})
_, err = client.NetworkingV1beta1().IPAddresses().Get(context.TODO(), lastSvc.Spec.ClusterIP, metav1.GetOptions{})
if err != nil {
t.Error(err)
}
@ -231,7 +232,7 @@ func TestMigrateService(t *testing.T) {
s := kubeapiservertesting.StartTestServerOrDie(t,
apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=true",
"--runtime-config=networking.k8s.io/v1beta1=true",
"--service-cluster-ip-range=10.0.0.0/24",
"--advertise-address=10.1.1.1",
"--disable-admission-plugins=ServiceAccount",
@ -285,7 +286,7 @@ func TestMigrateService(t *testing.T) {
err = wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) {
// The repair loop must create the IP address associated
_, err = kubeclient.NetworkingV1alpha1().IPAddresses().Get(context.TODO(), svc.Spec.ClusterIP, metav1.GetOptions{})
_, err = kubeclient.NetworkingV1beta1().IPAddresses().Get(context.TODO(), svc.Spec.ClusterIP, metav1.GetOptions{})
if err != nil {
return false, nil
}
@ -319,7 +320,7 @@ func TestSkewedAllocatorsRollback(t *testing.T) {
// s1 uses IPAddress allocator
s1 := kubeapiservertesting.StartTestServerOrDie(t, apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=true",
"--runtime-config=networking.k8s.io/v1beta1=true",
"--service-cluster-ip-range=10.0.0.0/24",
"--disable-admission-plugins=ServiceAccount",
fmt.Sprintf("--feature-gates=%s=true,%s=true", features.MultiCIDRServiceAllocator, features.DisableAllocatorDualWrite)},
@ -338,7 +339,7 @@ func TestSkewedAllocatorsRollback(t *testing.T) {
t.Error(err)
continue
}
_, err = kubeclient1.NetworkingV1alpha1().IPAddresses().Get(context.TODO(), service.Spec.ClusterIP, metav1.GetOptions{})
_, err = kubeclient1.NetworkingV1beta1().IPAddresses().Get(context.TODO(), service.Spec.ClusterIP, metav1.GetOptions{})
if err != nil {
t.Error(err)
}
@ -347,7 +348,7 @@ func TestSkewedAllocatorsRollback(t *testing.T) {
// s2 uses bitmap allocator
s2 := kubeapiservertesting.StartTestServerOrDie(t, apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=false",
"--runtime-config=networking.k8s.io/v1beta1=false",
"--service-cluster-ip-range=10.0.0.0/24",
"--disable-admission-plugins=ServiceAccount",
fmt.Sprintf("--feature-gates=%s=false", features.MultiCIDRServiceAllocator)},
@ -368,7 +369,7 @@ func TestSkewedAllocatorsRollback(t *testing.T) {
err = wait.PollImmediate(1*time.Second, 10*time.Second, func() (bool, error) {
// The repair loop must create the IP address associated
_, err = kubeclient1.NetworkingV1alpha1().IPAddresses().Get(context.TODO(), service.Spec.ClusterIP, metav1.GetOptions{})
_, err = kubeclient1.NetworkingV1beta1().IPAddresses().Get(context.TODO(), service.Spec.ClusterIP, metav1.GetOptions{})
if err != nil {
return false, nil
}
@ -409,7 +410,7 @@ func TestSkewAllocatorsRollout(t *testing.T) {
// oldServer uses bitmap allocator
oldServer := kubeapiservertesting.StartTestServerOrDie(t, apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=false",
"--runtime-config=networking.k8s.io/v1beta1=false",
"--service-cluster-ip-range=10.0.0.0/16",
"--disable-admission-plugins=ServiceAccount",
fmt.Sprintf("--feature-gates=%s=false", features.MultiCIDRServiceAllocator)},
@ -423,7 +424,7 @@ func TestSkewAllocatorsRollout(t *testing.T) {
// s1 uses IPAddress allocator
newServer := kubeapiservertesting.StartTestServerOrDie(t, apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=true",
"--runtime-config=networking.k8s.io/v1beta1=true",
"--service-cluster-ip-range=10.0.0.0/16",
"--disable-admission-plugins=ServiceAccount",
fmt.Sprintf("--feature-gates=%s=true,%s=false", features.MultiCIDRServiceAllocator, features.DisableAllocatorDualWrite)},
@ -497,7 +498,7 @@ func TestSkewAllocatorsRollout(t *testing.T) {
// It takes some time for the repairip loop to create the corresponding IPAddress objects
// ClusterIPs are synchronized through the bitmap.
err = wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 10*time.Second, true, func(context.Context) (bool, error) {
ips, err := kubeclientNew.NetworkingV1alpha1().IPAddresses().List(context.Background(), metav1.ListOptions{})
ips, err := kubeclientNew.NetworkingV1beta1().IPAddresses().List(context.Background(), metav1.ListOptions{})
if err != nil {
return false, nil
}
@ -526,7 +527,7 @@ func TestSkewAllocatorsRollout(t *testing.T) {
ip := fmt.Sprintf("10.0.0.%d", i)
err = wait.PollUntilContextTimeout(context.Background(), 1*time.Second, 10*time.Second, true, func(context.Context) (bool, error) {
// The repair loop must create the IP address associated
_, err = kubeclientNew.NetworkingV1alpha1().IPAddresses().Get(context.Background(), ip, metav1.GetOptions{})
_, err = kubeclientNew.NetworkingV1beta1().IPAddresses().Get(context.Background(), ip, metav1.GetOptions{})
if err != nil {
return false, nil
}
@ -558,7 +559,7 @@ func TestFlagsIPAllocator(t *testing.T) {
// s1 uses IPAddress allocator
s1 := kubeapiservertesting.StartTestServerOrDie(t, apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=true",
"--runtime-config=networking.k8s.io/v1beta1=true",
"--service-cluster-ip-range=10.0.0.0/24",
fmt.Sprintf("--feature-gates=%s=true", features.MultiCIDRServiceAllocator)},
etcdOptions)
@ -576,7 +577,7 @@ func TestFlagsIPAllocator(t *testing.T) {
t.Error(err)
continue
}
_, err = kubeclient1.NetworkingV1alpha1().IPAddresses().Get(context.TODO(), service.Spec.ClusterIP, metav1.GetOptions{})
_, err = kubeclient1.NetworkingV1beta1().IPAddresses().Get(context.TODO(), service.Spec.ClusterIP, metav1.GetOptions{})
if err != nil {
t.Error(err)
}

View File

@ -49,7 +49,7 @@ func TestEnableDisableServiceCIDR(t *testing.T) {
apiServerOptions := kubeapiservertesting.NewDefaultTestServerOptions()
s1 := kubeapiservertesting.StartTestServerOrDie(t, apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=false",
"--runtime-config=networking.k8s.io/v1beta1=false",
"--service-cluster-ip-range=10.0.0.0/24",
"--disable-admission-plugins=ServiceAccount",
fmt.Sprintf("--feature-gates=%s=false", features.MultiCIDRServiceAllocator)},
@ -82,7 +82,7 @@ func TestEnableDisableServiceCIDR(t *testing.T) {
// apiserver with the feature enabled
s2 := kubeapiservertesting.StartTestServerOrDie(t, apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=true",
"--runtime-config=networking.k8s.io/v1beta1=true",
"--service-cluster-ip-range=10.0.0.0/24",
"--disable-admission-plugins=ServiceAccount",
fmt.Sprintf("--feature-gates=%s=true", features.MultiCIDRServiceAllocator)},
@ -113,7 +113,7 @@ func TestEnableDisableServiceCIDR(t *testing.T) {
// start an apiserver with the feature disabled
s3 := kubeapiservertesting.StartTestServerOrDie(t, apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=false",
"--runtime-config=networking.k8s.io/v1beta1=false",
"--service-cluster-ip-range=10.0.0.0/24",
"--disable-admission-plugins=ServiceAccount",
fmt.Sprintf("--feature-gates=%s=false", features.MultiCIDRServiceAllocator)},

View File

@ -23,7 +23,7 @@ import (
"time"
v1 "k8s.io/api/core/v1"
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
@ -57,7 +57,7 @@ func TestMigrateServiceCIDR(t *testing.T) {
s1 := kubeapiservertesting.StartTestServerOrDie(t,
apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=true",
"--runtime-config=networking.k8s.io/v1beta1=true",
"--service-cluster-ip-range=" + cidr1,
"--advertise-address=10.1.1.1",
"--disable-admission-plugins=ServiceAccount",
@ -77,15 +77,15 @@ func TestMigrateServiceCIDR(t *testing.T) {
// ServiceCIDR controller
go servicecidrs.NewController(
tCtx,
informers1.Networking().V1alpha1().ServiceCIDRs(),
informers1.Networking().V1alpha1().IPAddresses(),
informers1.Networking().V1beta1().ServiceCIDRs(),
informers1.Networking().V1beta1().IPAddresses(),
client1,
).Run(tCtx, 5)
informers1.Start(tCtx.Done())
// the default serviceCIDR should have a finalizer and ready condition set to true
if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
cidr, err := client1.NetworkingV1alpha1().ServiceCIDRs().Get(context.TODO(), defaultservicecidr.DefaultServiceCIDRName, metav1.GetOptions{})
cidr, err := client1.NetworkingV1beta1().ServiceCIDRs().Get(context.TODO(), defaultservicecidr.DefaultServiceCIDRName, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
@ -119,13 +119,13 @@ func TestMigrateServiceCIDR(t *testing.T) {
}
}
// Add a new service CIDR to be able to migrate the apiserver
if _, err := client1.NetworkingV1alpha1().ServiceCIDRs().Create(context.Background(), makeServiceCIDR("migration-cidr", cidr2, ""), metav1.CreateOptions{}); err != nil {
if _, err := client1.NetworkingV1beta1().ServiceCIDRs().Create(context.Background(), makeServiceCIDR("migration-cidr", cidr2, ""), metav1.CreateOptions{}); err != nil {
t.Fatalf("got unexpected error: %v", err)
}
// wait ServiceCIDR is ready
if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
cidr, err := client1.NetworkingV1alpha1().ServiceCIDRs().Get(context.TODO(), "migration-cidr", metav1.GetOptions{})
cidr, err := client1.NetworkingV1beta1().ServiceCIDRs().Get(context.TODO(), "migration-cidr", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
@ -135,18 +135,18 @@ func TestMigrateServiceCIDR(t *testing.T) {
}
// delete the default ServiceCIDR so is no longer used for allocating IPs
if err := client1.NetworkingV1alpha1().ServiceCIDRs().Delete(context.Background(), defaultservicecidr.DefaultServiceCIDRName, metav1.DeleteOptions{}); err != nil {
if err := client1.NetworkingV1beta1().ServiceCIDRs().Delete(context.Background(), defaultservicecidr.DefaultServiceCIDRName, metav1.DeleteOptions{}); err != nil {
t.Fatalf("got unexpected error: %v", err)
}
// the default serviceCIDR should be pending deletion with Ready condition set to false
if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
cidr, err := client1.NetworkingV1alpha1().ServiceCIDRs().Get(context.TODO(), defaultservicecidr.DefaultServiceCIDRName, metav1.GetOptions{})
cidr, err := client1.NetworkingV1beta1().ServiceCIDRs().Get(context.TODO(), defaultservicecidr.DefaultServiceCIDRName, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
for _, condition := range cidr.Status.Conditions {
if condition.Type == networkingv1alpha1.ServiceCIDRConditionReady {
if condition.Type == networkingv1beta1.ServiceCIDRConditionReady {
return condition.Status == metav1.ConditionFalse, nil
}
}
@ -191,7 +191,7 @@ func TestMigrateServiceCIDR(t *testing.T) {
s2 := kubeapiservertesting.StartTestServerOrDie(t,
apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=true",
"--runtime-config=networking.k8s.io/v1beta1=true",
"--service-cluster-ip-range=" + cidr2,
"--advertise-address=10.1.1.1",
"--disable-admission-plugins=ServiceAccount",
@ -215,8 +215,8 @@ func TestMigrateServiceCIDR(t *testing.T) {
informers2 := informers.NewSharedInformerFactory(client2, resyncPeriod)
go servicecidrs.NewController(
tCtx2,
informers2.Networking().V1alpha1().ServiceCIDRs(),
informers2.Networking().V1alpha1().IPAddresses(),
informers2.Networking().V1beta1().ServiceCIDRs(),
informers2.Networking().V1beta1().IPAddresses(),
client2,
).Run(tCtx2, 5)
informers2.Start(tCtx2.Done())
@ -229,7 +229,7 @@ func TestMigrateServiceCIDR(t *testing.T) {
// the default serviceCIDR should be the new one
if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
cidr, err := client2.NetworkingV1alpha1().ServiceCIDRs().Get(context.TODO(), defaultservicecidr.DefaultServiceCIDRName, metav1.GetOptions{})
cidr, err := client2.NetworkingV1beta1().ServiceCIDRs().Get(context.TODO(), defaultservicecidr.DefaultServiceCIDRName, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
@ -250,7 +250,7 @@ func TestMigrateServiceCIDR(t *testing.T) {
}
for _, condition := range cidr.Status.Conditions {
if condition.Type == networkingv1alpha1.ServiceCIDRConditionReady {
if condition.Type == networkingv1beta1.ServiceCIDRConditionReady {
t.Logf("Expected Condition %s to be %s", condition.Status, metav1.ConditionTrue)
return condition.Status == metav1.ConditionTrue, nil
}
@ -275,13 +275,13 @@ func TestMigrateServiceCIDR(t *testing.T) {
}
// The temporary ServiceCIDR can be deleted now since the Default ServiceCIDR will cover it
if err := client2.NetworkingV1alpha1().ServiceCIDRs().Delete(context.Background(), "migration-cidr", metav1.DeleteOptions{}); err != nil {
if err := client2.NetworkingV1beta1().ServiceCIDRs().Delete(context.Background(), "migration-cidr", metav1.DeleteOptions{}); err != nil {
t.Fatalf("got unexpected error: %v", err)
}
// wait ServiceCIDR no longer exist
if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
_, err := client2.NetworkingV1alpha1().ServiceCIDRs().Get(context.TODO(), "migration-cidr", metav1.GetOptions{})
_, err := client2.NetworkingV1beta1().ServiceCIDRs().Get(context.TODO(), "migration-cidr", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, nil
}

View File

@ -89,7 +89,7 @@ func TestServiceAllocPerformance(t *testing.T) {
s1 := kubeapiservertesting.StartTestServerOrDie(t,
apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=true",
"--runtime-config=networking.k8s.io/v1beta1=true",
"--service-cluster-ip-range=" + "10.0.0.0/12",
"--advertise-address=10.0.0.1",
"--disable-admission-plugins=ServiceAccount",
@ -108,7 +108,7 @@ func TestServiceAllocPerformance(t *testing.T) {
// 100 workers for 15k services
nworkers := 100
nservices := 15000
nservices := 150
jobs := make(chan int, nservices)
results := make(chan error, nservices)
t.Log("Starting workers to create ClusterIP Service")

View File

@ -25,7 +25,7 @@ import (
"time"
v1 "k8s.io/api/core/v1"
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
networkingv1beta1 "k8s.io/api/networking/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
@ -43,7 +43,7 @@ func TestServiceAllocNewServiceCIDR(t *testing.T) {
s := kubeapiservertesting.StartTestServerOrDie(t,
apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=true",
"--runtime-config=networking.k8s.io/v1beta1=true",
"--service-cluster-ip-range=192.168.0.0/29",
"--advertise-address=10.1.1.1",
"--disable-admission-plugins=ServiceAccount",
@ -63,8 +63,8 @@ func TestServiceAllocNewServiceCIDR(t *testing.T) {
informerFactory := informers.NewSharedInformerFactory(client, resyncPeriod)
go servicecidrs.NewController(
ctx,
informerFactory.Networking().V1alpha1().ServiceCIDRs(),
informerFactory.Networking().V1alpha1().IPAddresses(),
informerFactory.Networking().V1beta1().ServiceCIDRs(),
informerFactory.Networking().V1beta1().IPAddresses(),
client,
).Run(ctx, 5)
informerFactory.Start(ctx.Done())
@ -96,12 +96,12 @@ func TestServiceAllocNewServiceCIDR(t *testing.T) {
// Add a new service CIDR to be able to create new IPs.
cidr := makeServiceCIDR("test2", "10.168.0.0/24", "")
if _, err := client.NetworkingV1alpha1().ServiceCIDRs().Create(context.Background(), cidr, metav1.CreateOptions{}); err != nil {
if _, err := client.NetworkingV1beta1().ServiceCIDRs().Create(context.Background(), cidr, metav1.CreateOptions{}); err != nil {
t.Fatalf("got unexpected error: %v", err)
}
// wait ServiceCIDR is ready
if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
cidr, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(context.TODO(), cidr.Name, metav1.GetOptions{})
cidr, err := client.NetworkingV1beta1().ServiceCIDRs().Get(context.TODO(), cidr.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@ -140,7 +140,7 @@ func TestServiceCIDRDeletion(t *testing.T) {
s := kubeapiservertesting.StartTestServerOrDie(t,
apiServerOptions,
[]string{
"--runtime-config=networking.k8s.io/v1alpha1=true",
"--runtime-config=networking.k8s.io/v1beta1=true",
"--service-cluster-ip-range=" + cidr1,
"--advertise-address=172.16.1.1",
"--disable-admission-plugins=ServiceAccount",
@ -164,8 +164,8 @@ func TestServiceCIDRDeletion(t *testing.T) {
informerFactory := informers.NewSharedInformerFactory(client, resyncPeriod)
go servicecidrs.NewController(
ctx,
informerFactory.Networking().V1alpha1().ServiceCIDRs(),
informerFactory.Networking().V1alpha1().IPAddresses(),
informerFactory.Networking().V1beta1().ServiceCIDRs(),
informerFactory.Networking().V1beta1().IPAddresses(),
client,
).Run(ctx, 5)
informerFactory.Start(ctx.Done())
@ -178,13 +178,13 @@ func TestServiceCIDRDeletion(t *testing.T) {
}
}
// create a new ServiceCIDRs that overlaps the default one
_, err = client.NetworkingV1alpha1().ServiceCIDRs().Create(ctx, makeServiceCIDR("cidr1", cidr1, ""), metav1.CreateOptions{})
_, err = client.NetworkingV1beta1().ServiceCIDRs().Create(ctx, makeServiceCIDR("cidr1", cidr1, ""), metav1.CreateOptions{})
if err != nil {
t.Fatal((err))
}
// Wait until is ready.
if err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
cidr, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, "cidr1", metav1.GetOptions{})
cidr, err := client.NetworkingV1beta1().ServiceCIDRs().Get(ctx, "cidr1", metav1.GetOptions{})
if err != nil {
return false, nil
}
@ -193,13 +193,13 @@ func TestServiceCIDRDeletion(t *testing.T) {
t.Fatalf("cidr1 is not ready")
}
// we should be able to delete the ServiceCIDR despite it contains IP addresses as it overlaps with the default ServiceCIDR
err = client.NetworkingV1alpha1().ServiceCIDRs().Delete(ctx, "cidr1", metav1.DeleteOptions{})
err = client.NetworkingV1beta1().ServiceCIDRs().Delete(ctx, "cidr1", metav1.DeleteOptions{})
if err != nil {
t.Fatal((err))
}
if err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
_, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, "cidr1", metav1.GetOptions{})
_, err := client.NetworkingV1beta1().ServiceCIDRs().Get(ctx, "cidr1", metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
return true, nil
}
@ -209,14 +209,14 @@ func TestServiceCIDRDeletion(t *testing.T) {
}
// add a new ServiceCIDR with a new range
_, err = client.NetworkingV1alpha1().ServiceCIDRs().Create(ctx, makeServiceCIDR("cidr2", cidr2, ""), metav1.CreateOptions{})
_, err = client.NetworkingV1beta1().ServiceCIDRs().Create(ctx, makeServiceCIDR("cidr2", cidr2, ""), metav1.CreateOptions{})
if err != nil {
t.Fatal((err))
}
// wait the allocator process the new ServiceCIDR
// Wait until is ready.
if err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
cidr, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, "cidr2", metav1.GetOptions{})
cidr, err := client.NetworkingV1beta1().ServiceCIDRs().Get(ctx, "cidr2", metav1.GetOptions{})
if err != nil {
return false, nil
}
@ -235,13 +235,13 @@ func TestServiceCIDRDeletion(t *testing.T) {
}
// add a new ServiceCIDR that overlaps the existing one
_, err = client.NetworkingV1alpha1().ServiceCIDRs().Create(ctx, makeServiceCIDR("cidr3", cidr3, ""), metav1.CreateOptions{})
_, err = client.NetworkingV1beta1().ServiceCIDRs().Create(ctx, makeServiceCIDR("cidr3", cidr3, ""), metav1.CreateOptions{})
if err != nil {
t.Fatal((err))
}
// Wait until is ready.
if err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
cidr, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, "cidr3", metav1.GetOptions{})
cidr, err := client.NetworkingV1beta1().ServiceCIDRs().Get(ctx, "cidr3", metav1.GetOptions{})
if err != nil {
return false, nil
}
@ -250,13 +250,13 @@ func TestServiceCIDRDeletion(t *testing.T) {
t.Fatalf("cidr3 is not ready")
}
// we should be able to delete the ServiceCIDR2 despite it contains IP addresses as it is contained on ServiceCIDR3
err = client.NetworkingV1alpha1().ServiceCIDRs().Delete(ctx, "cidr2", metav1.DeleteOptions{})
err = client.NetworkingV1beta1().ServiceCIDRs().Delete(ctx, "cidr2", metav1.DeleteOptions{})
if err != nil {
t.Fatal((err))
}
if err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
_, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, "cidr2", metav1.GetOptions{})
_, err := client.NetworkingV1beta1().ServiceCIDRs().Get(ctx, "cidr2", metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
return true, nil
}
@ -266,18 +266,18 @@ func TestServiceCIDRDeletion(t *testing.T) {
}
// serviceCIDR3 will not be able to be deleted until the IPAddress is removed
err = client.NetworkingV1alpha1().ServiceCIDRs().Delete(ctx, "cidr3", metav1.DeleteOptions{})
err = client.NetworkingV1beta1().ServiceCIDRs().Delete(ctx, "cidr3", metav1.DeleteOptions{})
if err != nil {
t.Fatal((err))
}
// Wait until is not ready.
if err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
cidr, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, "cidr3", metav1.GetOptions{})
cidr, err := client.NetworkingV1beta1().ServiceCIDRs().Get(ctx, "cidr3", metav1.GetOptions{})
if err != nil {
return false, nil
}
for _, condition := range cidr.Status.Conditions {
if condition.Type == networkingv1alpha1.ServiceCIDRConditionReady {
if condition.Type == networkingv1beta1.ServiceCIDRConditionReady {
return condition.Status == metav1.ConditionStatus(metav1.ConditionFalse), nil
}
}
@ -293,7 +293,7 @@ func TestServiceCIDRDeletion(t *testing.T) {
// cidr3 must not exist
if err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
_, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, "cidr3", metav1.GetOptions{})
_, err := client.NetworkingV1beta1().ServiceCIDRs().Get(ctx, "cidr3", metav1.GetOptions{})
if err != nil && apierrors.IsNotFound(err) {
return true, nil
}
@ -303,12 +303,12 @@ func TestServiceCIDRDeletion(t *testing.T) {
}
}
func makeServiceCIDR(name, primary, secondary string) *networkingv1alpha1.ServiceCIDR {
serviceCIDR := &networkingv1alpha1.ServiceCIDR{
func makeServiceCIDR(name, primary, secondary string) *networkingv1beta1.ServiceCIDR {
serviceCIDR := &networkingv1beta1.ServiceCIDR{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: networkingv1alpha1.ServiceCIDRSpec{},
Spec: networkingv1beta1.ServiceCIDRSpec{},
}
serviceCIDR.Spec.CIDRs = append(serviceCIDR.Spec.CIDRs, primary)
if secondary != "" {
@ -332,13 +332,13 @@ func makeService(name string) *v1.Service {
}
// returns true of the ServiceCIDRConditionReady is true
func isServiceCIDRReady(serviceCIDR *networkingv1alpha1.ServiceCIDR) bool {
func isServiceCIDRReady(serviceCIDR *networkingv1beta1.ServiceCIDR) bool {
if serviceCIDR == nil {
return false
}
for _, condition := range serviceCIDR.Status.Conditions {
if condition.Type == networkingv1alpha1.ServiceCIDRConditionReady {
if condition.Type == networkingv1beta1.ServiceCIDRConditionReady {
return condition.Status == metav1.ConditionStatus(metav1.ConditionTrue)
}
}