diff --git a/test/e2e/dra/dra.go b/test/e2e/dra/dra.go
index b1367796897..68b11e1de16 100644
--- a/test/e2e/dra/dra.go
+++ b/test/e2e/dra/dra.go
@@ -32,6 +32,7 @@ import (
 	"github.com/onsi/gomega/gstruct"
 	"github.com/onsi/gomega/types"
 
+	admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
 	v1 "k8s.io/api/core/v1"
 	resourceapi "k8s.io/api/resource/v1alpha3"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -39,6 +40,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
+	applyv1 "k8s.io/client-go/applyconfigurations/core/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/dynamic-resource-allocation/controller"
 	"k8s.io/klog/v2"
@@ -56,6 +58,9 @@ const (
 	podStartTimeout = 5 * time.Minute
 )
 
+//go:embed test-driver/deploy/example/admin-access-policy.yaml
+var adminAccessPolicyYAML string
+
 // networkResources can be passed to NewDriver directly.
 func networkResources() app.Resources {
 	return app.Resources{}
@@ -966,6 +971,63 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
 		driver := NewDriver(f, nodes, networkResources)
 		b := newBuilder(f, driver)
 
+		ginkgo.It("support validating admission policy for admin access", func(ctx context.Context) {
+			// Create the VAPs, after making them unique to the current test.
+			adminAccessPolicyYAML := strings.ReplaceAll(adminAccessPolicyYAML, "dra.example.com", b.f.UniqueName)
+			driver.createFromYAML(ctx, []byte(adminAccessPolicyYAML), "")
+
+			// Wait for both VAPs to be processed. This ensures that there are no check errors in the status.
+			matchStatus := gomega.Equal(admissionregistrationv1.ValidatingAdmissionPolicyStatus{ObservedGeneration: 1, TypeChecking: &admissionregistrationv1.TypeChecking{}})
+			gomega.Eventually(ctx, framework.ListObjects(b.f.ClientSet.AdmissionregistrationV1().ValidatingAdmissionPolicies().List, metav1.ListOptions{})).Should(gomega.HaveField("Items", gomega.ContainElements(
+				gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
+					"ObjectMeta": gomega.HaveField("Name", "resourceclaim-policy."+b.f.UniqueName),
+					"Status":     matchStatus,
+				}),
+				gstruct.MatchFields(gstruct.IgnoreExtras, gstruct.Fields{
+					"ObjectMeta": gomega.HaveField("Name", "resourceclaimtemplate-policy."+b.f.UniqueName),
+					"Status":     matchStatus,
+				}),
+			)))
+
+			// Attempt to create claim and claim template with admin access. Must eventually fail once the VAPs are active.
+			claim := b.externalClaim()
+			claim.Spec.Devices.Requests[0].AdminAccess = true
+			_, claimTemplate := b.podInline()
+			claimTemplate.Spec.Spec.Devices.Requests[0].AdminAccess = true
+			matchVAPError := gomega.MatchError(gomega.ContainSubstring("admin access to devices not enabled" /* in namespace " + b.f.Namespace.Name */))
+			gomega.Eventually(ctx, func(ctx context.Context) error {
+				// First delete, in case it succeeded earlier.
+				if err := b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Delete(ctx, claim.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
+					return err
+				}
+				_, err := b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Create(ctx, claim, metav1.CreateOptions{})
+				return err
+			}).Should(matchVAPError)
+
+			gomega.Eventually(ctx, func(ctx context.Context) error {
+				// First delete, in case it succeeded earlier.
+				if err := b.f.ClientSet.ResourceV1alpha3().ResourceClaimTemplates(b.f.Namespace.Name).Delete(ctx, claimTemplate.Name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
+					return err
+				}
+				_, err := b.f.ClientSet.ResourceV1alpha3().ResourceClaimTemplates(b.f.Namespace.Name).Create(ctx, claimTemplate, metav1.CreateOptions{})
+				return err
+			}).Should(matchVAPError)
+
+			// After labeling the namespace, creation must eventually succeed.
+			_, err := b.f.ClientSet.CoreV1().Namespaces().Apply(ctx,
+				applyv1.Namespace(b.f.Namespace.Name).WithLabels(map[string]string{"admin-access." + b.f.UniqueName: "on"}),
+				metav1.ApplyOptions{FieldManager: b.f.UniqueName})
+			framework.ExpectNoError(err)
+			gomega.Eventually(ctx, func(ctx context.Context) error {
+				_, err := b.f.ClientSet.ResourceV1alpha3().ResourceClaims(b.f.Namespace.Name).Create(ctx, claim, metav1.CreateOptions{})
+				return err
+			}).Should(gomega.Succeed())
+			gomega.Eventually(ctx, func(ctx context.Context) error {
+				_, err := b.f.ClientSet.ResourceV1alpha3().ResourceClaimTemplates(b.f.Namespace.Name).Create(ctx, claimTemplate, metav1.CreateOptions{})
+				return err
+			}).Should(gomega.Succeed())
+		})
+
 		ginkgo.It("truncates the name of a generated resource claim", func(ctx context.Context) {
 			pod, template := b.podInline()
 			pod.Name = strings.Repeat("p", 63)
@@ -1126,19 +1188,19 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
 		mustCreate := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice) *resourceapi.ResourceSlice {
 			ginkgo.GinkgoHelper()
 			slice, err := clientSet.ResourceV1alpha3().ResourceSlices().Create(ctx, slice, metav1.CreateOptions{})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred(), fmt.Sprintf("CREATE: %s + %s", clientName, slice.Name))
+			framework.ExpectNoError(err, fmt.Sprintf("CREATE: %s + %s", clientName, slice.Name))
 			return slice
 		}
 		mustUpdate := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice) *resourceapi.ResourceSlice {
 			ginkgo.GinkgoHelper()
 			slice, err := clientSet.ResourceV1alpha3().ResourceSlices().Update(ctx, slice, metav1.UpdateOptions{})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred(), fmt.Sprintf("UPDATE: %s + %s", clientName, slice.Name))
+			framework.ExpectNoError(err, fmt.Sprintf("UPDATE: %s + %s", clientName, slice.Name))
 			return slice
 		}
 		mustDelete := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice) {
 			ginkgo.GinkgoHelper()
 			err := clientSet.ResourceV1alpha3().ResourceSlices().Delete(ctx, slice.Name, metav1.DeleteOptions{})
-			gomega.Expect(err).ToNot(gomega.HaveOccurred(), fmt.Sprintf("DELETE: %s + %s", clientName, slice.Name))
+			framework.ExpectNoError(err, fmt.Sprintf("DELETE: %s + %s", clientName, slice.Name))
 		}
 		mustCreateAndDelete := func(clientSet kubernetes.Interface, clientName string, slice *resourceapi.ResourceSlice) {
 			ginkgo.GinkgoHelper()
diff --git a/test/e2e/dra/test-driver/deploy/example/admin-access-policy.yaml b/test/e2e/dra/test-driver/deploy/example/admin-access-policy.yaml
new file mode 100644
index 00000000000..133dd2caf78
--- /dev/null
+++ b/test/e2e/dra/test-driver/deploy/example/admin-access-policy.yaml
@@ -0,0 +1,71 @@
+# This example shows how to use a validating admission policy (VAP)
+# to control who may use "admin access", a privileged mode which
+# grants access to devices that are currently in use, potentially
+# by some other user.
+#
+# The policies apply in any namespace which does not have the
+# "admin-access.dra.example.com" label. Other ways of making that
+# decision are also possible.
+#
+# Cluster administrators need to adapt at least the names and replace
+# "dra.example.com".
+
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingAdmissionPolicy
+metadata:
+  name: resourceclaim-policy.dra.example.com
+spec:
+  failurePolicy: Fail
+  matchConstraints:
+    resourceRules:
+    - apiGroups: ["resource.k8s.io"]
+      apiVersions: ["v1alpha3"]
+      operations: ["CREATE", "UPDATE"]
+      resources: ["resourceclaims"]
+  validations:
+  - expression: '! object.spec.devices.requests.exists(e, has(e.adminAccess) && e.adminAccess)'
+    reason: Forbidden
+    messageExpression: '"admin access to devices not enabled"' # in namespace " + object.metadata.namespace' - need to use __namespace__, but somehow that also doesn't work.
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingAdmissionPolicyBinding
+metadata:
+  name: resourceclaim-binding.dra.example.com
+spec:
+  policyName: resourceclaim-policy.dra.example.com
+  validationActions: [Deny]
+  matchResources:
+    namespaceSelector:
+      matchExpressions:
+      - key: admin-access.dra.example.com
+        operator: DoesNotExist
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingAdmissionPolicy
+metadata:
+  name: resourceclaimtemplate-policy.dra.example.com
+spec:
+  failurePolicy: Fail
+  matchConstraints:
+    resourceRules:
+    - apiGroups: ["resource.k8s.io"]
+      apiVersions: ["v1alpha3"]
+      operations: ["CREATE", "UPDATE"]
+      resources: ["resourceclaimtemplates"]
+  validations:
+  - expression: '! object.spec.spec.devices.requests.exists(e, has(e.adminAccess) && e.adminAccess)'
+    reason: Forbidden
+    messageExpression: '"admin access to devices not enabled"' # in namespace " + object.metadata.namespace' - need to use __namespace__, but somehow that also doesn't work.
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingAdmissionPolicyBinding
+metadata:
+  name: resourceclaimtemplate-binding.dra.example.com
+spec:
+  policyName: resourceclaimtemplate-policy.dra.example.com
+  validationActions: [Deny]
+  matchResources:
+    namespaceSelector:
+      matchExpressions:
+      - key: admin-access.dra.example.com
+        operator: DoesNotExist
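Usage note: the ValidatingAdmissionPolicyBindings above select namespaces where the "admin-access.dra.example.com" label key does not exist, so a cluster administrator opts a namespace in to admin access simply by adding that label. A minimal sketch, assuming the manifest is deployed with its default "dra.example.com" names; the namespace name is hypothetical, and any label value works because the selector only checks for the key's existence (the e2e test uses "on"):

    apiVersion: v1
    kind: Namespace
    metadata:
      name: dra-admin-demo  # hypothetical namespace name
      labels:
        # Presence of this key exempts the namespace from the Deny action;
        # the bindings only match namespaces where it does NOT exist.
        admin-access.dra.example.com: "on"

The same effect can be achieved on an existing namespace with kubectl: kubectl label namespace dra-admin-demo admin-access.dra.example.com=on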