Merge pull request #109946 from tallclair/ps-e2e

Restricted Pod E2E tests

Commit: 60481c944e
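Reading aid (not part of the diff): the change raises the Pod Security admission level enforced on the "Pods" e2e test namespace from baseline to restricted, and wraps each test's pod literal in the new e2epod.MustMixinRestrictedPodSecurity helper (added to the e2e framework's pod package further down) so the pods still admit under that level. The sketch below shows the resulting pattern in isolation; the package name and the restrictedPausePod helper are invented for illustration, while the e2epod alias, MustMixinRestrictedPodSecurity, and GetPauseImageName come from the hunks that follow.

    package example // hypothetical package, for illustration only

    import (
        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
        imageutils "k8s.io/kubernetes/test/utils/image"
    )

    // restrictedPausePod mirrors the pattern applied throughout the diff: build a
    // plain pod literal, then let the framework helper fill in the restricted
    // security context (RunAsNonRoot, seccomp profile, dropped capabilities, ...).
    func restrictedPausePod(name string) *v1.Pod {
        return e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
            ObjectMeta: metav1.ObjectMeta{Name: name},
            Spec: v1.PodSpec{
                Containers: []v1.Container{{
                    Name:  "pause",
                    Image: imageutils.GetPauseImageName(),
                }},
            },
        })
    }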
@@ -184,7 +184,7 @@ func expectNoErrorWithRetries(fn func() error, maxRetries int, explain ...interf
 
 var _ = SIGDescribe("Pods", func() {
     f := framework.NewDefaultFramework("pods")
-    f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
+    f.NamespacePodSecurityEnforceLevel = admissionapi.LevelRestricted
     var podClient *framework.PodClient
     var dc dynamic.Interface
 
@@ -200,7 +200,7 @@ var _ = SIGDescribe("Pods", func() {
     */
     framework.ConformanceIt("should get a host IP [NodeConformance]", func() {
         name := "pod-hostip-" + string(uuid.NewUUID())
-        testHostIP(podClient, &v1.Pod{
+        testHostIP(podClient, e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
             ObjectMeta: metav1.ObjectMeta{
                 Name: name,
             },
@@ -212,7 +212,7 @@ var _ = SIGDescribe("Pods", func() {
                     },
                 },
             },
-        })
+        }))
     })
 
     /*
@@ -224,7 +224,7 @@ var _ = SIGDescribe("Pods", func() {
         ginkgo.By("creating the pod")
         name := "pod-submit-remove-" + string(uuid.NewUUID())
         value := strconv.Itoa(time.Now().Nanosecond())
-        pod := &v1.Pod{
+        pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
             ObjectMeta: metav1.ObjectMeta{
                 Name: name,
                 Labels: map[string]string{
@@ -235,12 +235,12 @@ var _ = SIGDescribe("Pods", func() {
             Spec: v1.PodSpec{
                 Containers: []v1.Container{
                     {
-                        Name: "nginx",
-                        Image: imageutils.GetE2EImage(imageutils.Nginx),
+                        Name: "pause",
+                        Image: imageutils.GetPauseImageName(),
                     },
                 },
             },
-        }
+        })
 
         ginkgo.By("setting up watch")
         selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
@@ -342,7 +342,7 @@ var _ = SIGDescribe("Pods", func() {
         ginkgo.By("creating the pod")
         name := "pod-update-" + string(uuid.NewUUID())
         value := strconv.Itoa(time.Now().Nanosecond())
-        pod := &v1.Pod{
+        pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
             ObjectMeta: metav1.ObjectMeta{
                 Name: name,
                 Labels: map[string]string{
@@ -353,12 +353,12 @@ var _ = SIGDescribe("Pods", func() {
             Spec: v1.PodSpec{
                 Containers: []v1.Container{
                     {
-                        Name: "nginx",
-                        Image: imageutils.GetE2EImage(imageutils.Nginx),
+                        Name: "pause",
+                        Image: imageutils.GetPauseImageName(),
                     },
                 },
             },
-        }
+        })
 
         ginkgo.By("submitting the pod to kubernetes")
         pod = podClient.CreateSync(pod)
@@ -396,7 +396,7 @@ var _ = SIGDescribe("Pods", func() {
         ginkgo.By("creating the pod")
         name := "pod-update-activedeadlineseconds-" + string(uuid.NewUUID())
         value := strconv.Itoa(time.Now().Nanosecond())
-        pod := &v1.Pod{
+        pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
             ObjectMeta: metav1.ObjectMeta{
                 Name: name,
                 Labels: map[string]string{
@@ -407,18 +407,18 @@ var _ = SIGDescribe("Pods", func() {
             Spec: v1.PodSpec{
                 Containers: []v1.Container{
                     {
-                        Name: "nginx",
-                        Image: imageutils.GetE2EImage(imageutils.Nginx),
+                        Name: "pause",
+                        Image: imageutils.GetPauseImageName(),
                     },
                 },
             },
-        }
+        })
 
         ginkgo.By("submitting the pod to kubernetes")
         podClient.CreateSync(pod)
 
         ginkgo.By("verifying the pod is in kubernetes")
-        selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
+        selector := labels.SelectorFromSet(labels.Set{"time": value})
         options := metav1.ListOptions{LabelSelector: selector.String()}
         pods, err := podClient.List(context.TODO(), options)
         framework.ExpectNoError(err, "failed to query for pods")
@@ -442,7 +442,7 @@ var _ = SIGDescribe("Pods", func() {
         // Make a pod that will be a service.
         // This pod serves its hostname via HTTP.
         serverName := "server-envvars-" + string(uuid.NewUUID())
-        serverPod := &v1.Pod{
+        serverPod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
             ObjectMeta: metav1.ObjectMeta{
                 Name: serverName,
                 Labels: map[string]string{"name": serverName},
@@ -456,7 +456,7 @@ var _ = SIGDescribe("Pods", func() {
                     },
                 },
             },
-        }
+        })
         podClient.CreateSync(serverPod)
 
         // This service exposes port 8080 of the test pod as a service on port 8765
@@ -490,7 +490,7 @@ var _ = SIGDescribe("Pods", func() {
         // Make a client pod that verifies that it has the service environment variables.
         podName := "client-envvars-" + string(uuid.NewUUID())
         const containerName = "env3cont"
-        pod := &v1.Pod{
+        pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
             ObjectMeta: metav1.ObjectMeta{
                 Name: podName,
                 Labels: map[string]string{"name": podName},
@@ -505,7 +505,7 @@ var _ = SIGDescribe("Pods", func() {
                 },
                 RestartPolicy: v1.RestartPolicyNever,
             },
-        }
+        })
 
         // It's possible for the Pod to be created before the Kubelet is updated with the new
         // service. In that case, we just retry.
@@ -536,7 +536,7 @@ var _ = SIGDescribe("Pods", func() {
 
         ginkgo.By("creating the pod")
         name := "pod-exec-websocket-" + string(uuid.NewUUID())
-        pod := &v1.Pod{
+        pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
             ObjectMeta: metav1.ObjectMeta{
                 Name: name,
             },
@@ -549,7 +549,7 @@ var _ = SIGDescribe("Pods", func() {
                     },
                 },
             },
-        }
+        })
 
         ginkgo.By("submitting the pod to kubernetes")
         pod = podClient.CreateSync(pod)
@@ -618,7 +618,7 @@ var _ = SIGDescribe("Pods", func() {
 
         ginkgo.By("creating the pod")
         name := "pod-logs-websocket-" + string(uuid.NewUUID())
-        pod := &v1.Pod{
+        pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
             ObjectMeta: metav1.ObjectMeta{
                 Name: name,
             },
@@ -631,7 +631,7 @@ var _ = SIGDescribe("Pods", func() {
                     },
                 },
             },
-        }
+        })
 
         ginkgo.By("submitting the pod to kubernetes")
         podClient.CreateSync(pod)
@@ -673,7 +673,7 @@ var _ = SIGDescribe("Pods", func() {
     ginkgo.It("should have their auto-restart back-off timer reset on image update [Slow][NodeConformance]", func() {
         podName := "pod-back-off-image"
         containerName := "back-off"
-        pod := &v1.Pod{
+        pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
             ObjectMeta: metav1.ObjectMeta{
                 Name: podName,
                 Labels: map[string]string{"test": "back-off-image"},
@@ -687,7 +687,7 @@ var _ = SIGDescribe("Pods", func() {
                     },
                 },
             },
-        }
+        })
 
         delay1, delay2 := startPodAndGetBackOffs(podClient, pod, buildBackOffDuration)
 
@@ -714,7 +714,7 @@ var _ = SIGDescribe("Pods", func() {
     ginkgo.It("should cap back-off at MaxContainerBackOff [Slow][NodeConformance]", func() {
         podName := "back-off-cap"
         containerName := "back-off-cap"
-        pod := &v1.Pod{
+        pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
             ObjectMeta: metav1.ObjectMeta{
                 Name: podName,
                 Labels: map[string]string{"test": "liveness"},
@@ -728,7 +728,7 @@ var _ = SIGDescribe("Pods", func() {
                     },
                 },
             },
-        }
+        })
 
         podClient.CreateSync(pod)
         time.Sleep(2 * kubelet.MaxContainerBackOff) // it takes slightly more than 2*x to get to a back-off of x
@@ -770,7 +770,7 @@ var _ = SIGDescribe("Pods", func() {
         readinessGate1 := "k8s.io/test-condition1"
         readinessGate2 := "k8s.io/test-condition2"
         patchStatusFmt := `{"status":{"conditions":[{"type":%q, "status":%q}]}}`
-        pod := &v1.Pod{
+        pod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
             ObjectMeta: metav1.ObjectMeta{
                 Name: podName,
                 Labels: map[string]string{"test": "pod-readiness-gate"},
@@ -788,7 +788,7 @@ var _ = SIGDescribe("Pods", func() {
                     {ConditionType: v1.PodConditionType(readinessGate2)},
                 },
             },
-        }
+        })
 
         validatePodReadiness := func(expectReady bool) {
             err := wait.Poll(time.Second, time.Minute, func() (bool, error) {
@@ -847,20 +847,22 @@ var _ = SIGDescribe("Pods", func() {
         ginkgo.By("Create set of pods")
         // create a set of pods in test namespace
         for _, podTestName := range podTestNames {
-            _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), &v1.Pod{
-                ObjectMeta: metav1.ObjectMeta{
-                    Name: podTestName,
-                    Labels: map[string]string{
-                        "type": "Testing"},
-                },
-                Spec: v1.PodSpec{
-                    TerminationGracePeriodSeconds: &one,
-                    Containers: []v1.Container{{
-                        Image: imageutils.GetE2EImage(imageutils.Agnhost),
-                        Name: "token-test",
-                    }},
-                    RestartPolicy: v1.RestartPolicyNever,
-                }}, metav1.CreateOptions{})
+            _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(),
+                e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
+                    ObjectMeta: metav1.ObjectMeta{
+                        Name: podTestName,
+                        Labels: map[string]string{
+                            "type": "Testing",
+                        },
+                    },
+                    Spec: v1.PodSpec{
+                        TerminationGracePeriodSeconds: &one,
+                        Containers: []v1.Container{{
+                            Image: imageutils.GetE2EImage(imageutils.Agnhost),
+                            Name: "token-test",
+                        }},
+                        RestartPolicy: v1.RestartPolicyNever,
+                    }}), metav1.CreateOptions{})
             framework.ExpectNoError(err, "failed to create pod")
             framework.Logf("created %v", podTestName)
         }
@@ -907,7 +909,7 @@ var _ = SIGDescribe("Pods", func() {
         podsList, err := f.ClientSet.CoreV1().Pods("").List(context.TODO(), metav1.ListOptions{LabelSelector: testPodLabelsFlat})
         framework.ExpectNoError(err, "failed to list Pods")
 
-        testPod := v1.Pod{
+        testPod := e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
             ObjectMeta: metav1.ObjectMeta{
                 Name: testPodName,
                 Labels: testPodLabels,
@@ -921,9 +923,9 @@ var _ = SIGDescribe("Pods", func() {
                     },
                 },
             },
-        }
+        })
         ginkgo.By("creating a Pod with a static label")
-        _, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Create(context.TODO(), &testPod, metav1.CreateOptions{})
+        _, err = f.ClientSet.CoreV1().Pods(testNamespaceName).Create(context.TODO(), testPod, metav1.CreateOptions{})
         framework.ExpectNoError(err, "failed to create Pod %v in namespace %v", testPod.ObjectMeta.Name, testNamespaceName)
 
         ginkgo.By("watching for Pod to be ready")
@@ -18,9 +18,14 @@ package pod
 
 import (
     "flag"
+    "fmt"
+
+    "github.com/onsi/gomega"
 
     v1 "k8s.io/api/core/v1"
     imageutils "k8s.io/kubernetes/test/utils/image"
+    psaapi "k8s.io/pod-security-admission/api"
+    psapolicy "k8s.io/pod-security-admission/policy"
     "k8s.io/utils/pointer"
 )
 
@@ -115,10 +120,16 @@ func GetLinuxLabel() *v1.SELinuxOptions {
         Level: "s0:c0,c1"}
 }
 
-// GetRestrictedPodSecurityContext returns a minimal restricted pod security context.
+// DefaultNonRootUser is the default user ID used for running restricted (non-root) containers.
+const DefaultNonRootUser = 1000
+
+// GetRestrictedPodSecurityContext returns a restricted pod security context.
+// This includes setting RunAsUser for convenience, to pass the RunAsNonRoot check.
+// Tests that require a specific user ID should override this.
 func GetRestrictedPodSecurityContext() *v1.PodSecurityContext {
     return &v1.PodSecurityContext{
         RunAsNonRoot: pointer.BoolPtr(true),
+        RunAsUser: pointer.Int64(DefaultNonRootUser),
         SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault},
     }
 }
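Aside (not part of the diff): as the comment above notes, GetRestrictedPodSecurityContext sets RunAsUser to DefaultNonRootUser only as a convenience, and tests that need a specific user ID should override it. Because the mixin in the next hunk only fills fields that are still nil, a test can pre-set RunAsUser and still get the remaining restricted defaults. A minimal sketch, with an arbitrary example UID and an invented customUIDPod helper:

    package example // hypothetical package, for illustration only

    import (
        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
        imageutils "k8s.io/kubernetes/test/utils/image"
        "k8s.io/utils/pointer"
    )

    // customUIDPod pre-sets RunAsUser (example UID 2000) before mixing in the
    // remaining restricted defaults; fields that are already non-nil are kept.
    func customUIDPod(name string) *v1.Pod {
        return e2epod.MustMixinRestrictedPodSecurity(&v1.Pod{
            ObjectMeta: metav1.ObjectMeta{Name: name},
            Spec: v1.PodSpec{
                SecurityContext: &v1.PodSecurityContext{
                    RunAsUser: pointer.Int64(2000), // test-specific UID, preserved by the mixin
                },
                Containers: []v1.Container{{
                    Name:  "pause",
                    Image: imageutils.GetPauseImageName(),
                }},
            },
        })
    }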
@@ -130,3 +141,69 @@ func GetRestrictedContainerSecurityContext() *v1.SecurityContext {
         Capabilities: &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
     }
 }
+
+var psaEvaluator, _ = psapolicy.NewEvaluator(psapolicy.DefaultChecks())
+
+// MustMixinRestrictedPodSecurity makes the given pod compliant with the restricted pod security level.
+// If doing so would overwrite existing non-conformant configuration, a test failure is triggered.
+func MustMixinRestrictedPodSecurity(pod *v1.Pod) *v1.Pod {
+    err := MixinRestrictedPodSecurity(pod)
+    gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred())
+    return pod
+}
+
+// MixinRestrictedPodSecurity makes the given pod compliant with the restricted pod security level.
+// If doing so would overwrite existing non-conformant configuration, an error is returned.
+// Note that this sets a default RunAsUser. See GetRestrictedPodSecurityContext.
+// TODO(#105919): Handle PodOS for windows pods.
+func MixinRestrictedPodSecurity(pod *v1.Pod) error {
+    if pod.Spec.SecurityContext == nil {
+        pod.Spec.SecurityContext = GetRestrictedPodSecurityContext()
+    } else {
+        if pod.Spec.SecurityContext.RunAsNonRoot == nil {
+            pod.Spec.SecurityContext.RunAsNonRoot = pointer.BoolPtr(true)
+        }
+        if pod.Spec.SecurityContext.RunAsUser == nil {
+            pod.Spec.SecurityContext.RunAsUser = pointer.Int64Ptr(DefaultNonRootUser)
+        }
+        if pod.Spec.SecurityContext.SeccompProfile == nil {
+            pod.Spec.SecurityContext.SeccompProfile = &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}
+        }
+    }
+    for i := range pod.Spec.Containers {
+        mixinRestrictedContainerSecurityContext(&pod.Spec.Containers[i])
+    }
+    for i := range pod.Spec.InitContainers {
+        mixinRestrictedContainerSecurityContext(&pod.Spec.InitContainers[i])
+    }
+
+    // Validate the resulting pod against the restricted profile.
+    restricted := psaapi.LevelVersion{
+        Level: psaapi.LevelRestricted,
+        Version: psaapi.LatestVersion(),
+    }
+    if agg := psapolicy.AggregateCheckResults(psaEvaluator.EvaluatePod(restricted, &pod.ObjectMeta, &pod.Spec)); !agg.Allowed {
+        return fmt.Errorf("failed to make pod %s restricted: %s", pod.Name, agg.ForbiddenDetail())
+    }
+
+    return nil
+}
+
+// mixinRestrictedContainerSecurityContext adds the required container security context options to
+// be compliant with the restricted pod security level. Non-conformance checking is handled by the
+// caller.
+func mixinRestrictedContainerSecurityContext(container *v1.Container) {
+    if container.SecurityContext == nil {
+        container.SecurityContext = GetRestrictedContainerSecurityContext()
+    } else {
+        if container.SecurityContext.AllowPrivilegeEscalation == nil {
+            container.SecurityContext.AllowPrivilegeEscalation = pointer.Bool(false)
+        }
+        if container.SecurityContext.Capabilities == nil {
+            container.SecurityContext.Capabilities = &v1.Capabilities{}
+        }
+        if len(container.SecurityContext.Capabilities.Drop) == 0 {
+            container.SecurityContext.Capabilities.Drop = []v1.Capability{"ALL"}
+        }
+    }
+}
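Aside (not part of the diff): MustMixinRestrictedPodSecurity is intended for use inside Ginkgo specs, where the gomega assertion aborts the running test on failure, while MixinRestrictedPodSecurity returns an error that other callers can handle themselves. A minimal sketch of the error-handling path; the restrictedOrError wrapper is invented for illustration.

    package example // hypothetical package, for illustration only

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
        e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    )

    // restrictedOrError applies the restricted-profile defaults and surfaces the
    // validation error instead of failing a Ginkgo test.
    func restrictedOrError(pod *v1.Pod) (*v1.Pod, error) {
        if err := e2epod.MixinRestrictedPodSecurity(pod); err != nil {
            return nil, fmt.Errorf("pod %q cannot be made restricted: %w", pod.Name, err)
        }
        return pod, nil
    }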
test/e2e/framework/pod/utils_test.go (new file, 94 lines)
@@ -0,0 +1,94 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package pod
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+    v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/utils/pointer"
+)
+
+func TestMixinRestrictedPodSecurity(t *testing.T) {
+    restrictablePods := []v1.Pod{{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "default",
+        },
+        Spec: v1.PodSpec{
+            Containers: []v1.Container{{
+                Name: "pause",
+                Image: "pause",
+            }},
+        },
+    }, {
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "already_restricted",
+        },
+        Spec: v1.PodSpec{
+            SecurityContext: GetRestrictedPodSecurityContext(),
+            Containers: []v1.Container{{
+                Name: "pause",
+                Image: "pause",
+                SecurityContext: GetRestrictedContainerSecurityContext(),
+            }},
+        },
+    }, {
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "empty_securityContext",
+        },
+        Spec: v1.PodSpec{
+            SecurityContext: &v1.PodSecurityContext{},
+            Containers: []v1.Container{{
+                Name: "pause",
+                Image: "pause",
+                SecurityContext: &v1.SecurityContext{},
+            }},
+        },
+    }}
+
+    for _, pod := range restrictablePods {
+        t.Run(pod.Name, func(t *testing.T) {
+            p := pod // closure
+            assert.NoError(t, MixinRestrictedPodSecurity(&p))
+            assert.Equal(t, GetRestrictedPodSecurityContext(), p.Spec.SecurityContext,
+                "Mixed in PodSecurityContext should equal the from-scratch PodSecurityContext")
+            assert.Equal(t, GetRestrictedContainerSecurityContext(), p.Spec.Containers[0].SecurityContext,
+                "Mixed in SecurityContext should equal the from-scratch SecurityContext")
+        })
+    }
+
+    privilegedPod := v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "privileged",
+        },
+        Spec: v1.PodSpec{
+            Containers: []v1.Container{{
+                Name: "pause",
+                Image: "pause",
+                SecurityContext: &v1.SecurityContext{
+                    Privileged: pointer.Bool(true),
+                },
+            }},
+        },
+    }
+    t.Run("privileged", func(t *testing.T) {
+        assert.Error(t, MixinRestrictedPodSecurity(&privilegedPod))
+    })
+
+}