diff --git a/test/integration/apiserver/apiserver_test.go b/test/integration/apiserver/apiserver_test.go
index e360cd3342c..4e0e0205038 100644
--- a/test/integration/apiserver/apiserver_test.go
+++ b/test/integration/apiserver/apiserver_test.go
@@ -80,6 +80,7 @@ import (
 	"k8s.io/kubernetes/test/integration/etcd"
 	"k8s.io/kubernetes/test/integration/framework"
 	"k8s.io/kubernetes/test/utils/ktesting"
+	"k8s.io/utils/ptr"
 )
 
 func setup(t *testing.T, groupVersions ...schema.GroupVersion) (context.Context, clientset.Interface, *restclient.Config, framework.TearDownFunc) {
@@ -1677,6 +1678,203 @@ func TestGetSubresourcesAsTables(t *testing.T) {
 	}
 }
 
+// TestGetScaleSubresourceAsTableForAllBuiltins verifies that the /scale
+// subresource of every built-in scalable resource can be retrieved in
+// Table form (Accept: application/json;as=Table;g=meta.k8s.io;v=v1).
+func TestGetScaleSubresourceAsTableForAllBuiltins(t *testing.T) {
+	// KUBE_APISERVER_SERVE_REMOVED_APIS_FOR_ONE_RELEASE allows for APIs pending removal to not block tests
+	t.Setenv("KUBE_APISERVER_SERVE_REMOVED_APIS_FOR_ONE_RELEASE", "true")
+
+	// Enable all features for testing
+	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, "AllAlpha", true)
+	featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, "AllBeta", true)
+
+	testNamespace := "test-scale"
+	server := kubeapiservertesting.StartTestServerOrDie(t, nil, framework.DefaultTestServerFlags(), framework.SharedEtcd())
+	defer server.TearDownFn()
+
+	clientset := clientset.NewForConfigOrDie(server.ClientConfig)
+
+	ns := framework.CreateNamespaceOrDie(clientset, testNamespace, t)
+	defer framework.DeleteNamespaceOrDie(clientset, ns, t)
+
+	// One sample object per built-in resource that serves a /scale
+	// subresource, keyed by "<resource>.<group/version>".
+	scaleResources := map[string]runtime.Object{
+		"deployments.apps/v1": &apps.Deployment{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "dummy",
+			},
+			Spec: apps.DeploymentSpec{
+				Replicas: ptr.To[int32](1),
+				Selector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"app": "dummy",
+					},
+				},
+				Template: v1.PodTemplateSpec{
+					ObjectMeta: metav1.ObjectMeta{
+						Labels: map[string]string{
+							"app": "dummy",
+						},
+					},
+					Spec: v1.PodSpec{
+						Containers: []v1.Container{
+							{
+								Name:  "dummy-container",
+								Image: "nginx:latest",
+							},
+						},
+					},
+				},
+			},
+		},
+		"replicasets.apps/v1": &apps.ReplicaSet{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "dummy",
+			},
+			Spec: apps.ReplicaSetSpec{
+				Replicas: ptr.To[int32](1),
+				Selector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"app": "dummy",
+					},
+				},
+				Template: v1.PodTemplateSpec{
+					ObjectMeta: metav1.ObjectMeta{
+						Labels: map[string]string{
+							"app": "dummy",
+						},
+					},
+					Spec: v1.PodSpec{
+						Containers: []v1.Container{
+							{
+								Name:  "dummy-container",
+								Image: "nginx:latest",
+							},
+						},
+					},
+				},
+			},
+		},
+		"statefulsets.apps/v1": &apps.StatefulSet{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "dummy",
+			},
+			Spec: apps.StatefulSetSpec{
+				ServiceName: "dummy",
+				Replicas:    ptr.To[int32](1),
+				Selector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"app": "dummy",
+					},
+				},
+				Template: v1.PodTemplateSpec{
+					ObjectMeta: metav1.ObjectMeta{
+						Labels: map[string]string{
+							"app": "dummy",
+						},
+					},
+					Spec: v1.PodSpec{
+						Containers: []v1.Container{
+							{
+								Name:  "dummy-container",
+								Image: "nginx:latest",
+							},
+						},
+					},
+				},
+			},
+		},
+		"replicationcontrollers.v1": &v1.ReplicationController{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "dummy",
+			},
+			Spec: v1.ReplicationControllerSpec{
+				Replicas: ptr.To[int32](1),
+				Selector: map[string]string{
+					"app": "dummy",
+				},
+				Template: &v1.PodTemplateSpec{
+					ObjectMeta: metav1.ObjectMeta{
+						Labels: map[string]string{
+							"app": "dummy",
+						},
+					},
+					Spec: v1.PodSpec{
+						Containers: []v1.Container{
+							{
+								Name:  "dummy-container",
+								Image: "nginx:latest",
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	_, serverResources, err := clientset.Discovery().ServerGroupsAndResources()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	for _, resourceList := range serverResources {
+		for _, resource := range resourceList.APIResources {
+			if !strings.Contains(resource.Name, "/scale") {
+				continue
+			}
+			resourceName := strings.Split(resource.Name, "/")[0]
+			// we can safely skip error here, since the group version is coming from the server
+			groupVersion, _ := schema.ParseGroupVersion(resourceList.GroupVersion)
+
+			t.Run(resourceName, func(t *testing.T) {
+				_, ctx := ktesting.NewTestContext(t)
+				scaleResourceObjName := fmt.Sprintf("%s.%s", resourceName, resourceList.GroupVersion)
+				obj, ok := scaleResources[scaleResourceObjName]
+				if !ok {
+					t.Fatalf("sample resource for %s not found in scaleResources", scaleResourceObjName)
+				}
+
+				cfg := dynamic.ConfigFor(server.ClientConfig)
+				if len(groupVersion.Group) == 0 {
+					// the legacy core group is served under /api; named groups under /apis
+					cfg.APIPath = "/api"
+				} else {
+					cfg.APIPath = "/apis"
+				}
+				cfg.GroupVersion = &groupVersion
+				client, err := restclient.RESTClientFor(cfg)
+				if err != nil {
+					t.Fatalf("failed to create RESTClient: %v", err)
+				}
+
+				err = client.Post().
+					NamespaceIfScoped(testNamespace, resource.Namespaced).Resource(resourceName).
+					VersionedParams(&metav1.CreateOptions{}, metav1.ParameterCodec).
+					Body(obj).Do(ctx).Error()
+				if err != nil {
+					t.Fatalf("failed to create object: %v", err)
+				}
+
+				metaAccessor, err := meta.Accessor(obj)
+				if err != nil {
+					t.Fatalf("failed to get metadata accessor: %v", err)
+				}
+				res := client.Get().
+					NamespaceIfScoped(testNamespace, resource.Namespaced).Resource(resourceName).
+					SetHeader("Accept", "application/json;as=Table;g=meta.k8s.io;v=v1").
+					Name(metaAccessor.GetName()).
+					SubResource("scale").
+					Do(ctx)
+				resObj, err := res.Get()
+				if err != nil {
+					t.Fatalf("failed to retrieve object from response: %v", err)
+				}
+				actualKind := resObj.GetObjectKind().GroupVersionKind().Kind
+				if actualKind != "Table" {
+					t.Fatalf("Expected Kind 'Table', got '%v'", actualKind)
+				}
+			})
+		}
+	}
+}
+
 func TestTransform(t *testing.T) {
 	testNamespace := "test-transform"
 	tearDown, config, _, err := fixtures.StartDefaultServer(t)