diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go
index 7ea9abd11ed..17c916f894c 100644
--- a/test/e2e/network/ingress.go
+++ b/test/e2e/network/ingress.go
@@ -28,12 +28,16 @@ import (
 	compute "google.golang.org/api/compute/v1"
 	v1 "k8s.io/api/core/v1"
+	networkingv1beta1 "k8s.io/api/networking/v1beta1"
 	rbacv1 "k8s.io/api/rbac/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	types "k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/watch"
 	"k8s.io/apiserver/pkg/authentication/serviceaccount"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2eauth "k8s.io/kubernetes/test/e2e/framework/auth"
@@ -935,3 +939,213 @@ func detectNegAnnotation(f *framework.Framework, jig *e2eingress.TestJig, gceCon
 		framework.ExpectNoError(err)
 	}
 }
+
+var _ = SIGDescribe("Ingress API", func() {
+	f := framework.NewDefaultFramework("ingress")
+	/*
+		Release: v1.19
+		Testname: Ingress API
+		Description:
+		The networking.k8s.io API group MUST exist in the /apis discovery document.
+		The networking.k8s.io/v1beta1 API group/version MUST exist in the /apis/networking.k8s.io discovery document.
+		The ingresses resource MUST exist in the /apis/networking.k8s.io/v1beta1 discovery document.
+		The ingresses resource MUST support create, get, list, watch, update, patch, delete, and deletecollection.
+		The ingresses/status resource MUST support update and patch.
+	*/
+
+	ginkgo.It("should support creating Ingress API operations", func() {
+		// Setup
+		ns := f.Namespace.Name
+		ingVersion := "v1beta1"
+		ingClient := f.ClientSet.NetworkingV1beta1().Ingresses(ns)
+
+		prefixPathType := networkingv1beta1.PathTypePrefix
+
+		ingTemplate := &networkingv1beta1.Ingress{
+			ObjectMeta: metav1.ObjectMeta{GenerateName: "e2e-example-ing",
+				Labels: map[string]string{
+					"special-label": f.UniqueName,
+				}},
+			Spec: networkingv1beta1.IngressSpec{
+				Backend: &networkingv1beta1.IngressBackend{
+					ServiceName: "default-backend",
+					ServicePort: intstr.FromInt(8080),
+				},
+				Rules: []networkingv1beta1.IngressRule{
+					{
+						Host: "foo.bar.com",
+						IngressRuleValue: networkingv1beta1.IngressRuleValue{
+							HTTP: &networkingv1beta1.HTTPIngressRuleValue{
+								Paths: []networkingv1beta1.HTTPIngressPath{{
+									Path:     "/",
+									PathType: &prefixPathType,
+									Backend: networkingv1beta1.IngressBackend{
+										ServiceName: "test-backend",
+										ServicePort: intstr.FromInt(8080),
+									},
+								}},
+							},
+						},
+					},
+				},
+			},
+			Status: networkingv1beta1.IngressStatus{LoadBalancer: v1.LoadBalancerStatus{}},
+		}
+		// Discovery
+		ginkgo.By("getting /apis")
+		{
+			discoveryGroups, err := f.ClientSet.Discovery().ServerGroups()
+			framework.ExpectNoError(err)
+			found := false
+			for _, group := range discoveryGroups.Groups {
+				if group.Name == networkingv1beta1.GroupName {
+					for _, version := range group.Versions {
+						if version.Version == ingVersion {
+							found = true
+							break
+						}
+					}
+				}
+			}
+			framework.ExpectEqual(found, true, fmt.Sprintf("expected networking API group/version, got %#v", discoveryGroups.Groups))
+		}
+
+		ginkgo.By("getting /apis/networking.k8s.io")
+		{
+			group := &metav1.APIGroup{}
+			err := f.ClientSet.Discovery().RESTClient().Get().AbsPath("/apis/networking.k8s.io").Do(context.TODO()).Into(group)
+			framework.ExpectNoError(err)
+			found := false
+			for _, version := range group.Versions {
+				if version.Version == ingVersion {
+					found = true
+					break
+				}
+			}
+			framework.ExpectEqual(found, true, fmt.Sprintf("expected networking API version, got %#v", group.Versions))
+		}
+
+		ginkgo.By("getting /apis/networking.k8s.io/" + ingVersion)
+		{
+			resources, err := f.ClientSet.Discovery().ServerResourcesForGroupVersion(networkingv1beta1.SchemeGroupVersion.String())
+			framework.ExpectNoError(err)
+			foundIngress := false
+			for _, resource := range resources.APIResources {
+				switch resource.Name {
+				case "ingresses":
+					foundIngress = true
+				}
+			}
+			framework.ExpectEqual(foundIngress, true, fmt.Sprintf("expected ingresses, got %#v", resources.APIResources))
+		}
+
+		// Ingress resource create/read/update/watch verbs
+		ginkgo.By("creating")
+		_, err := ingClient.Create(context.TODO(), ingTemplate, metav1.CreateOptions{})
+		framework.ExpectNoError(err)
+		_, err = ingClient.Create(context.TODO(), ingTemplate, metav1.CreateOptions{})
+		framework.ExpectNoError(err)
+		createdIngress, err := ingClient.Create(context.TODO(), ingTemplate, metav1.CreateOptions{})
+		framework.ExpectNoError(err)
+
+		ginkgo.By("getting")
+		gottenIngress, err := ingClient.Get(context.TODO(), createdIngress.Name, metav1.GetOptions{})
+		framework.ExpectNoError(err)
+		framework.ExpectEqual(gottenIngress.UID, createdIngress.UID)
+
+		ginkgo.By("listing")
+		ings, err := ingClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
+		framework.ExpectNoError(err)
+		framework.ExpectEqual(len(ings.Items), 3, "filtered list should have 3 items")
+
+		ginkgo.By("watching")
+		framework.Logf("starting watch")
+		ingWatch, err := ingClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: ings.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
+		framework.ExpectNoError(err)
+
+		// Test cluster-wide list and watch
+		clusterIngClient := f.ClientSet.NetworkingV1beta1().Ingresses("")
+		ginkgo.By("cluster-wide listing")
+		clusterIngs, err := clusterIngClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
+		framework.ExpectNoError(err)
+		framework.ExpectEqual(len(clusterIngs.Items), 3, "filtered list should have 3 items")
+
+		ginkgo.By("cluster-wide watching")
+		framework.Logf("starting watch")
+		_, err = clusterIngClient.Watch(context.TODO(), metav1.ListOptions{ResourceVersion: ings.ResourceVersion, LabelSelector: "special-label=" + f.UniqueName})
+		framework.ExpectNoError(err)
+
+		ginkgo.By("patching")
+		patchedIngress, err := ingClient.Patch(context.TODO(), createdIngress.Name, types.MergePatchType, []byte(`{"metadata":{"annotations":{"patched":"true"}}}`), metav1.PatchOptions{})
+		framework.ExpectNoError(err)
+		framework.ExpectEqual(patchedIngress.Annotations["patched"], "true", "patched object should have the applied annotation")
+
+		ginkgo.By("updating")
+		ingToUpdate := patchedIngress.DeepCopy()
+		ingToUpdate.Annotations["updated"] = "true"
+		updatedIngress, err := ingClient.Update(context.TODO(), ingToUpdate, metav1.UpdateOptions{})
+		framework.ExpectNoError(err)
+		framework.ExpectEqual(updatedIngress.Annotations["updated"], "true", "updated object should have the applied annotation")
+
+		framework.Logf("waiting for watch events with expected annotations")
+		for sawAnnotations := false; !sawAnnotations; {
+			select {
+			case evt, ok := <-ingWatch.ResultChan():
+				framework.ExpectEqual(ok, true, "watch channel should not close")
+				framework.ExpectEqual(evt.Type, watch.Modified)
+				watchedIngress, isIngress := evt.Object.(*networkingv1beta1.Ingress)
+				framework.ExpectEqual(isIngress, true, fmt.Sprintf("expected Ingress, got %T", evt.Object))
+				if watchedIngress.Annotations["patched"] == "true" {
+					framework.Logf("saw patched and updated annotations")
+					sawAnnotations = true
+					ingWatch.Stop()
+				} else {
+					framework.Logf("missing expected annotations, waiting: %#v", watchedIngress.Annotations)
+				}
+			case <-time.After(wait.ForeverTestTimeout):
+				framework.Fail("timed out waiting for watch event")
+			}
+		}
+
+		// /status subresource operations
+		ginkgo.By("patching /status")
+		lbStatus := v1.LoadBalancerStatus{
+			Ingress: []v1.LoadBalancerIngress{{IP: "169.1.1.1"}},
+		}
+		lbStatusJSON, err := json.Marshal(lbStatus)
+		framework.ExpectNoError(err)
+		patchedStatus, err := ingClient.Patch(context.TODO(), createdIngress.Name, types.MergePatchType,
+			[]byte(`{"metadata":{"annotations":{"patchedstatus":"true"}},"status":{"loadBalancer":`+string(lbStatusJSON)+`}}`),
+			metav1.PatchOptions{}, "status")
+		framework.ExpectNoError(err)
+		framework.ExpectEqual(patchedStatus.Status.LoadBalancer, lbStatus, "patched object should have the applied loadBalancer status")
+		framework.ExpectEqual(patchedStatus.Annotations["patchedstatus"], "true", "patched object should have the applied annotation")
+
+		ginkgo.By("updating /status")
+		statusToUpdate := patchedStatus.DeepCopy()
+		statusToUpdate.Status.LoadBalancer = v1.LoadBalancerStatus{
+			Ingress: []v1.LoadBalancerIngress{{IP: "169.1.1.2"}},
+		}
+		updatedStatus, err := ingClient.UpdateStatus(context.TODO(), statusToUpdate, metav1.UpdateOptions{})
+		framework.ExpectNoError(err)
+		framework.ExpectEqual(updatedStatus.Status.LoadBalancer, statusToUpdate.Status.LoadBalancer, fmt.Sprintf("updated object expected to have updated loadbalancer status %#v, got %#v", statusToUpdate.Status.LoadBalancer, updatedStatus.Status.LoadBalancer))
+
+		// Ingress resource delete operations
+		ginkgo.By("deleting")
+		err = ingClient.Delete(context.TODO(), createdIngress.Name, metav1.DeleteOptions{})
+		framework.ExpectNoError(err)
+		_, err = ingClient.Get(context.TODO(), createdIngress.Name, metav1.GetOptions{})
+		framework.ExpectEqual(apierrors.IsNotFound(err), true, fmt.Sprintf("expected 404, got %#v", err))
+		ings, err = ingClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
+		framework.ExpectNoError(err)
+		framework.ExpectEqual(len(ings.Items), 2, "filtered list should have 2 items")
+
+		ginkgo.By("deleting a collection")
+		err = ingClient.DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
+		framework.ExpectNoError(err)
+		ings, err = ingClient.List(context.TODO(), metav1.ListOptions{LabelSelector: "special-label=" + f.UniqueName})
+		framework.ExpectNoError(err)
+		framework.ExpectEqual(len(ings.Items), 0, "filtered list should have 0 items")
+	})
+})