diff --git a/test/integration/dra/OWNERS b/test/integration/dra/OWNERS
new file mode 100644
index 00000000000..ac91dd47ea5
--- /dev/null
+++ b/test/integration/dra/OWNERS
@@ -0,0 +1,12 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+  - johnbelamaric
+  - klueska
+  - pohly
+reviewers:
+  - pohly
+  - bart0sh
+labels:
+  - sig/node
+  - wg/device-management
diff --git a/test/integration/dra/dra_test.go b/test/integration/dra/dra_test.go
new file mode 100644
index 00000000000..9b57883295e
--- /dev/null
+++ b/test/integration/dra/dra_test.go
@@ -0,0 +1,222 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dra
+
+import (
+	"fmt"
+	"regexp"
+	"sort"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	v1 "k8s.io/api/core/v1"
+	resourcealphaapi "k8s.io/api/resource/v1alpha3"
+	resourceapi "k8s.io/api/resource/v1beta1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/component-base/featuregate"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
+	"k8s.io/kubernetes/pkg/features"
+	st "k8s.io/kubernetes/pkg/scheduler/testing"
+	"k8s.io/kubernetes/test/integration/framework"
+	"k8s.io/kubernetes/test/utils/ktesting"
+	"k8s.io/utils/ptr"
+)
+
+var (
+	// For more test data see pkg/scheduler/framework/plugin/dynamicresources/dynamicresources_test.go.
+
+	podName          = "my-pod"
+	namespace        = "default"
+	resourceName     = "my-resource"
+	className        = "my-resource-class"
+	claimName        = podName + "-" + resourceName
+	podWithClaimName = st.MakePod().Name(podName).Namespace(namespace).
+				Container("my-container").
+				PodResourceClaims(v1.PodResourceClaim{Name: resourceName, ResourceClaimName: &claimName}).
+				Obj()
+	claim = st.MakeResourceClaim().
+		Name(claimName).
+		Namespace(namespace).
+		Request(className).
+		Obj()
+)
+
+// createTestNamespace creates a namespace with a name that is derived from the
+// current test name:
+// - Non-alpha-numeric characters replaced by hyphen.
+// - Truncated in the middle to make it short enough for GenerateName.
+// - Hyphen plus random suffix added by the apiserver.
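+//
+// For example, the sub-test name "TestDRA/all/AdminAccess" (one of the
+// sub-tests below) would become the GenerateName prefix
+// "testdra-all-adminaccess-", to which the apiserver appends the random
+// suffix.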
+func createTestNamespace(tCtx ktesting.TContext) string {
+	tCtx.Helper()
+	name := regexp.MustCompile(`[^[:alnum:]_-]`).ReplaceAllString(tCtx.Name(), "-")
+	name = strings.ToLower(name)
+	if len(name) > 63 {
+		name = name[:30] + "--" + name[len(name)-30:]
+	}
+	ns := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{GenerateName: name + "-"}}
+	ns, err := tCtx.Client().CoreV1().Namespaces().Create(tCtx, ns, metav1.CreateOptions{})
+	tCtx.ExpectNoError(err, "create test namespace")
+	tCtx.CleanupCtx(func(tCtx ktesting.TContext) {
+		tCtx.ExpectNoError(tCtx.Client().CoreV1().Namespaces().Delete(tCtx, ns.Name, metav1.DeleteOptions{}), "delete test namespace")
+	})
+	return ns.Name
+}
+
+func TestDRA(t *testing.T) {
+	// Each sub-test brings up the API server in a certain
+	// configuration. These sub-tests must run sequentially because they
+	// change the global DefaultFeatureGate. For each configuration,
+	// multiple tests can run in parallel as long as they are careful
+	// about what they create.
+	for name, tc := range map[string]struct {
+		apis     map[schema.GroupVersion]bool
+		features map[featuregate.Feature]bool
+		f        func(tCtx ktesting.TContext)
+	}{
+		"default": {
+			f: func(tCtx ktesting.TContext) {
+				tCtx.Run("Pod", func(tCtx ktesting.TContext) { testPod(tCtx, false) })
+				tCtx.Run("APIDisabled", testAPIDisabled)
+			},
+		},
+		"core": {
+			apis: map[schema.GroupVersion]bool{
+				resourceapi.SchemeGroupVersion: true,
+			},
+			features: map[featuregate.Feature]bool{features.DynamicResourceAllocation: true},
+			f: func(tCtx ktesting.TContext) {
+				tCtx.Run("AdminAccess", func(tCtx ktesting.TContext) { testAdminAccess(tCtx, false) })
+				tCtx.Run("Pod", func(tCtx ktesting.TContext) { testPod(tCtx, true) })
+			},
+		},
+		"all": {
+			apis: map[schema.GroupVersion]bool{
+				resourceapi.SchemeGroupVersion:      true,
+				resourcealphaapi.SchemeGroupVersion: true,
+			},
+			features: map[featuregate.Feature]bool{
+				features.DynamicResourceAllocation: true,
+				// Additional DRA feature gates go here,
+				// in alphabetical order,
+				// as needed by tests for them.
+				features.DRAAdminAccess: true,
+			},
+			f: func(tCtx ktesting.TContext) {
+				tCtx.Run("AdminAccess", func(tCtx ktesting.TContext) { testAdminAccess(tCtx, true) })
+				tCtx.Run("Convert", testConvert)
+			},
+		},
+	} {
+		t.Run(name, func(t *testing.T) {
+			tCtx := ktesting.Init(t)
+			var entries []string
+			for key, value := range tc.features {
+				entries = append(entries, fmt.Sprintf("%s=%t", key, value))
+			}
+			for key, value := range tc.apis {
+				entries = append(entries, fmt.Sprintf("%s=%t", key, value))
+			}
+			sort.Strings(entries)
+			t.Logf("Config: %s", strings.Join(entries, ","))
+
+			for key, value := range tc.features {
+				featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, key, value)
+			}
+
+			etcdOptions := framework.SharedEtcd()
+			apiServerOptions := kubeapiservertesting.NewDefaultTestServerOptions()
+			apiServerFlags := framework.DefaultTestServerFlags()
+			// Default kube-apiserver behavior, must be requested explicitly for test server.
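+			// For example, with the "core" configuration above this
+			// produces the flag:
+			//   --runtime-config=api/alpha=false,api/beta=false,resource.k8s.io/v1beta1=true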
+			runtimeConfigs := []string{"api/alpha=false", "api/beta=false"}
+			for key, value := range tc.apis {
+				runtimeConfigs = append(runtimeConfigs, fmt.Sprintf("%s=%t", key, value))
+			}
+			apiServerFlags = append(apiServerFlags, "--runtime-config="+strings.Join(runtimeConfigs, ","))
+			server := kubeapiservertesting.StartTestServerOrDie(t, apiServerOptions, apiServerFlags, etcdOptions)
+			tCtx.Cleanup(server.TearDownFn)
+
+			tCtx = ktesting.WithRESTConfig(tCtx, server.ClientConfig)
+			tc.f(tCtx)
+		})
+	}
+}
+
+// testPod creates a pod with a resource claim reference and then checks
+// whether that field is or isn't getting dropped.
+func testPod(tCtx ktesting.TContext, draEnabled bool) {
+	tCtx.Parallel()
+	namespace := createTestNamespace(tCtx)
+	podWithClaimName := podWithClaimName.DeepCopy()
+	podWithClaimName.Namespace = namespace
+	pod, err := tCtx.Client().CoreV1().Pods(namespace).Create(tCtx, podWithClaimName, metav1.CreateOptions{})
+	tCtx.ExpectNoError(err, "create pod")
+	if draEnabled {
+		assert.NotEmpty(tCtx, pod.Spec.ResourceClaims, "should store resource claims in pod spec")
+	} else {
+		assert.Empty(tCtx, pod.Spec.ResourceClaims, "should drop resource claims from pod spec")
+	}
+}
+
+// testAPIDisabled checks that the resource.k8s.io API is disabled.
+func testAPIDisabled(tCtx ktesting.TContext) {
+	tCtx.Parallel()
+	_, err := tCtx.Client().ResourceV1beta1().ResourceClaims(claim.Namespace).Create(tCtx, claim, metav1.CreateOptions{})
+	if !apierrors.IsNotFound(err) {
+		tCtx.Fatalf("expected 'resource not found' error, got %v", err)
+	}
+}
+
+// testConvert creates a claim using one API version and reads it with another.
+func testConvert(tCtx ktesting.TContext) {
+	tCtx.Parallel()
+	namespace := createTestNamespace(tCtx)
+	claim := claim.DeepCopy()
+	claim.Namespace = namespace
+	claim, err := tCtx.Client().ResourceV1beta1().ResourceClaims(namespace).Create(tCtx, claim, metav1.CreateOptions{})
+	tCtx.ExpectNoError(err, "create claim")
+	claimAlpha, err := tCtx.Client().ResourceV1alpha3().ResourceClaims(namespace).Get(tCtx, claim.Name, metav1.GetOptions{})
+	tCtx.ExpectNoError(err, "get claim")
+	// We could check more fields, but there are unit tests which cover this better.
+	assert.Equal(tCtx, claim.Name, claimAlpha.Name, "claim name")
+}
+
+// testAdminAccess creates a claim with AdminAccess and then checks
+// whether that field is or isn't getting dropped.
+func testAdminAccess(tCtx ktesting.TContext, adminAccessEnabled bool) {
+	tCtx.Parallel()
+	namespace := createTestNamespace(tCtx)
+	claim := claim.DeepCopy()
+	claim.Namespace = namespace
+	claim.Spec.Devices.Requests[0].AdminAccess = ptr.To(true)
+	claim, err := tCtx.Client().ResourceV1beta1().ResourceClaims(namespace).Create(tCtx, claim, metav1.CreateOptions{})
+	tCtx.ExpectNoError(err, "create claim")
+	if adminAccessEnabled {
+		if !ptr.Deref(claim.Spec.Devices.Requests[0].AdminAccess, false) {
+			tCtx.Fatal("should store AdminAccess in ResourceClaim")
+		}
+	} else {
+		if claim.Spec.Devices.Requests[0].AdminAccess != nil {
+			tCtx.Fatal("should drop AdminAccess in ResourceClaim")
+		}
+	}
+}
diff --git a/test/integration/dra/main_test.go b/test/integration/dra/main_test.go
new file mode 100644
index 00000000000..8be01afe495
--- /dev/null
+++ b/test/integration/dra/main_test.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package dra
+
+import (
+	"testing"
+
+	"k8s.io/kubernetes/test/integration/framework"
+)
+
+func TestMain(m *testing.M) {
+	framework.EtcdMain(m.Run)
+}
diff --git a/test/utils/ktesting/clientcontext.go b/test/utils/ktesting/clientcontext.go
index d07cbccf14a..36da9793bdd 100644
--- a/test/utils/ktesting/clientcontext.go
+++ b/test/utils/ktesting/clientcontext.go
@@ -86,6 +86,10 @@ func (cCtx clientContext) ExpectNoError(err error, explain ...interface{}) {
 	expectNoError(cCtx, err, explain...)
 }
 
+func (cCtx clientContext) Run(name string, cb func(tCtx TContext)) bool {
+	return run(cCtx, name, cb)
+}
+
 func (cCtx clientContext) Logger() klog.Logger {
 	return klog.FromContext(cCtx)
 }
diff --git a/test/utils/ktesting/errorcontext.go b/test/utils/ktesting/errorcontext.go
index 318e8070a13..bb1214e4329 100644
--- a/test/utils/ktesting/errorcontext.go
+++ b/test/utils/ktesting/errorcontext.go
@@ -149,6 +149,10 @@ func (eCtx *errorContext) ExpectNoError(err error, explain ...interface{}) {
 	expectNoError(eCtx, err, explain...)
 }
 
+func (eCtx *errorContext) Run(name string, cb func(tCtx TContext)) bool {
+	return run(eCtx, name, cb)
+}
+
 func (eCtx *errorContext) Logger() klog.Logger {
 	return klog.FromContext(eCtx)
 }
diff --git a/test/utils/ktesting/tcontext.go b/test/utils/ktesting/tcontext.go
index 18bc387e057..67b94500a82 100644
--- a/test/utils/ktesting/tcontext.go
+++ b/test/utils/ktesting/tcontext.go
@@ -21,6 +21,7 @@ import (
 	"flag"
 	"fmt"
 	"strings"
+	"testing"
 	"time"
 
 	"github.com/onsi/gomega"
@@ -75,6 +76,22 @@ type TContext interface {
 	context.Context
 	TB
 
+	// Parallel signals that this test is to be run in parallel with (and
+	// only with) other parallel tests. In other words, it needs to be
+	// called in each test which is meant to run in parallel.
+	//
+	// Only supported in Go unit tests. When such a test is run multiple
+	// times due to use of -test.count or -test.cpu, multiple instances of
+	// a single test never run in parallel with each other.
+	Parallel()
+
+	// Run runs f as a subtest of t called name. It blocks until f returns or
+	// calls t.Parallel to become a parallel test.
+	//
+	// Only supported in Go unit tests or benchmarks. It fails the current
+	// test when called elsewhere.
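+	//
+	// A typical call looks like this (sketch, details elided):
+	//
+	//	tCtx.Run("sub", func(tCtx ktesting.TContext) {
+	//		tCtx.Parallel()
+	//		// ... test code using the sub-test's tCtx ...
+	//	})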
+	Run(name string, f func(tCtx TContext)) bool
+
 	// Cancel can be invoked to cancel the context before the test is completed.
 	// Tests which use the context to control goroutines and then wait for
 	// termination of those goroutines must call Cancel to avoid a deadlock.
@@ -165,6 +182,7 @@ type TContext interface {
 	// - CleanupCtx
 	// - Expect
 	// - ExpectNoError
+	// - Run
 	// - Logger
 	//
 	// Usually these methods would be stand-alone functions with a TContext
@@ -328,6 +346,9 @@ func InitCtx(ctx context.Context, tb TB, _ ...InitOption) TContext {
 //	})
 //
 // WithTB sets up cancellation for the sub-test.
+//
+// A simpler API is to use TContext.Run as replacement
+// for [testing.T.Run].
 func WithTB(parentCtx TContext, tb TB) TContext {
 	tCtx := InitCtx(parentCtx, tb)
 	tCtx = WithCancel(tCtx)
@@ -341,6 +362,27 @@ func WithTB(parentCtx TContext, tb TB) TContext {
 	return tCtx
 }
 
+// run implements the different Run methods. It is not exported
+// because tCtx.Run is more discoverable (same usage as in
+// normal Go tests).
+func run(tCtx TContext, name string, cb func(tCtx TContext)) bool {
+	tCtx.Helper()
+	switch tb := tCtx.TB().(type) {
+	case interface {
+		Run(string, func(t *testing.T)) bool
+	}:
+		return tb.Run(name, func(t *testing.T) { cb(WithTB(tCtx, t)) })
+	case interface {
+		Run(string, func(t *testing.B)) bool
+	}:
+		return tb.Run(name, func(b *testing.B) { cb(WithTB(tCtx, b)) })
+	default:
+		tCtx.Fatalf("Run not implemented, underlying %T does not support it", tCtx.TB())
+	}
+
+	return false
+}
+
 // WithContext constructs a new TContext with a different Context instance.
 // This can be used in callbacks which receive a Context, for example
 // from Gomega:
@@ -381,6 +423,12 @@ type testingTB struct {
 	TB
 }
 
+func (tCtx tContext) Parallel() {
+	if tb, ok := tCtx.TB().(interface{ Parallel() }); ok {
+		tb.Parallel()
+	}
+}
+
 func (tCtx tContext) Cancel(cause string) {
 	if tCtx.cancel != nil {
 		tCtx.cancel(cause)
@@ -424,6 +472,10 @@ func cleanupCtx(tCtx TContext, cb func(TContext)) {
 	})
 }
 
+func (tCtx tContext) Run(name string, cb func(tCtx TContext)) bool {
+	return run(tCtx, name, cb)
+}
+
 func (tCtx tContext) Logger() klog.Logger {
 	return klog.FromContext(tCtx)
 }
diff --git a/test/utils/ktesting/tcontext_test.go b/test/utils/ktesting/tcontext_test.go
index 116184d4e70..d158b9f5336 100644
--- a/test/utils/ktesting/tcontext_test.go
+++ b/test/utils/ktesting/tcontext_test.go
@@ -86,6 +86,23 @@ func TestCancelCtx(t *testing.T) {
 	tCtx.Cancel("test is complete")
 }
 
+func TestParallel(t *testing.T) {
+	var wg sync.WaitGroup
+	wg.Add(3)
+
+	tCtx := ktesting.Init(t)
+
+	// Each sub-test runs in parallel to the others and waits for the other two.
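+	// If Parallel had no effect, the first sub-test would block in
+	// wg.Wait forever: Run would not return until it completes, so the
+	// other two sub-tests could never start.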
+	test := func(tCtx ktesting.TContext) {
+		tCtx.Parallel()
+		wg.Done()
+		wg.Wait()
+	}
+	tCtx.Run("one", test)
+	tCtx.Run("two", test)
+	tCtx.Run("three", test)
+}
+
 func TestWithTB(t *testing.T) {
 	tCtx := ktesting.Init(t)
 
@@ -106,6 +123,33 @@ func TestWithTB(t *testing.T) {
 		assert.Equal(t, apiextensions, tCtx.APIExtensions(), "APIExtensions")
 
 		tCtx.Cancel("test is complete")
+		<-tCtx.Done()
+	})
+
+	if err := tCtx.Err(); err != nil {
+		t.Errorf("parent TContext should not have been cancelled: %v", err)
+	}
+}
+
+func TestRun(t *testing.T) {
+	tCtx := ktesting.Init(t)
+
+	cfg := new(rest.Config)
+	mapper := new(restmapper.DeferredDiscoveryRESTMapper)
+	client := clientset.New(nil)
+	dynamic := dynamic.New(nil)
+	apiextensions := apiextensions.New(nil)
+	tCtx = ktesting.WithClients(tCtx, cfg, mapper, client, dynamic, apiextensions)
+
+	tCtx.Run("sub", func(tCtx ktesting.TContext) {
+		assert.Equal(t, cfg, tCtx.RESTConfig(), "RESTConfig")
+		assert.Equal(t, mapper, tCtx.RESTMapper(), "RESTMapper")
+		assert.Equal(t, client, tCtx.Client(), "Client")
+		assert.Equal(t, dynamic, tCtx.Dynamic(), "Dynamic")
+		assert.Equal(t, apiextensions, tCtx.APIExtensions(), "APIExtensions")
+
+		tCtx.Cancel("test is complete")
+		<-tCtx.Done()
 	})
 
 	if err := tCtx.Err(); err != nil {
diff --git a/test/utils/ktesting/withcontext.go b/test/utils/ktesting/withcontext.go
index db4bddb160b..6aa9b2d456b 100644
--- a/test/utils/ktesting/withcontext.go
+++ b/test/utils/ktesting/withcontext.go
@@ -102,6 +102,10 @@ func (wCtx withContext) ExpectNoError(err error, explain ...interface{}) {
 	expectNoError(wCtx, err, explain...)
 }
 
+func (wCtx withContext) Run(name string, cb func(tCtx TContext)) bool {
+	return run(wCtx, name, cb)
+}
+
 func (wCtx withContext) Logger() klog.Logger {
 	return klog.FromContext(wCtx)
 }