Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-31 07:20:13 +00:00
Merge pull request #124889 from soltysh/fix_podsecurity
Update PodSecurityLevel used during tests
This commit is contained in: 2592caa9a7
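The diff below repeats the same one-line rename across several e2e suites: the framework field that declares the Pod Security admission level for the per-test namespace is now NamespacePodSecurityLevel instead of NamespacePodSecurityEnforceLevel. As a minimal sketch of the updated pattern (the suite name, namespace base name, and chosen level here are illustrative only, and the fragment assumes the usual e2e imports: ginkgo, the e2e framework, and admissionapi):

var _ = SIGDescribe("Example suite", func() {
	f := framework.NewDefaultFramework("example")
	// Pod Security admission level applied to the test namespace
	// (this field was spelled NamespacePodSecurityEnforceLevel before this PR).
	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline

	ginkgo.It("should run against the baseline-enforced namespace", func(ctx context.Context) {
		// f.Namespace is the per-test namespace created by the framework.
		podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
		_ = podClient
	})
})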
@@ -102,7 +102,7 @@ func observerUpdate(w watch.Interface, expectedUpdate func(runtime.Object) bool)
 
 var _ = SIGDescribe("Generated clientset", func() {
 	f := framework.NewDefaultFramework("clientset")
-	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
+	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
 	ginkgo.It("should create pods, set the deletionTimestamp and deletionGracePeriodSeconds of the pod", func(ctx context.Context) {
 		podClient := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
 		ginkgo.By("constructing the pod")
@@ -215,7 +215,7 @@ func newTestingCronJob(name string, value string) *batchv1.CronJob {
 
 var _ = SIGDescribe("Generated clientset", func() {
 	f := framework.NewDefaultFramework("clientset")
-	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
+	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
 	ginkgo.It("should create v1 cronJobs, delete cronJobs, watch cronJobs", func(ctx context.Context) {
 		cronJobClient := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name)
@@ -549,7 +549,7 @@ func getSidecarPodWithHook(name string, image string, lifecycle *v1.Lifecycle) *
 
 var _ = SIGDescribe(feature.PodLifecycleSleepAction, func() {
 	f := framework.NewDefaultFramework("pod-lifecycle-sleep-action")
-	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
+	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
 	var podClient *e2epod.PodClient
 
 	validDuration := func(duration time.Duration, low, high int64) bool {
@@ -638,7 +638,7 @@ var _ = SIGDescribe("Security Context", func() {
 
 var _ = SIGDescribe("User Namespaces for Pod Security Standards [LinuxOnly]", func() {
 	f := framework.NewDefaultFramework("user-namespaces-pss-test")
-	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelRestricted
+	f.NamespacePodSecurityLevel = admissionapi.LevelRestricted
 
 	ginkgo.Context("with UserNamespacesSupport and UserNamespacesPodSecurityStandards enabled", func() {
 		f.It("should allow pod", feature.UserNamespacesPodSecurityStandards, func(ctx context.Context) {
@@ -35,7 +35,7 @@ import (
 
 var _ = common.SIGDescribe("Metrics", func() {
 	f := framework.NewDefaultFramework("metrics")
-	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
+	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 	var c, ec clientset.Interface
 	var grabber *e2emetrics.Grabber
 	ginkgo.BeforeEach(func(ctx context.Context) {
@@ -909,7 +909,7 @@ var _ = SIGDescribe(framework.WithNodeConformance(), "Containers Lifecycle", fun
 
 var _ = SIGDescribe(framework.WithSerial(), "Containers Lifecycle", func() {
 	f := framework.NewDefaultFramework("containers-lifecycle-test-serial")
-	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
+	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
 	ginkgo.It("should restart the containers in right order after the node reboot", func(ctx context.Context) {
 		init1 := "init-1"
@@ -1049,7 +1049,7 @@ var _ = SIGDescribe(framework.WithSerial(), "Containers Lifecycle", func() {
 
 var _ = SIGDescribe(nodefeature.SidecarContainers, "Containers Lifecycle", func() {
 	f := framework.NewDefaultFramework("containers-lifecycle-test")
-	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
+	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
 	ginkgo.When("using a Pod with restartPolicy=Never, three init container and two restartable init containers", ginkgo.Ordered, func() {
 
@@ -3123,7 +3123,7 @@ var _ = SIGDescribe(nodefeature.SidecarContainers, "Containers Lifecycle", func(
 
 var _ = SIGDescribe(nodefeature.SidecarContainers, framework.WithSerial(), "Containers Lifecycle", func() {
 	f := framework.NewDefaultFramework("containers-lifecycle-test-serial")
-	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
+	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
 	ginkgo.It("should restart the containers in right order after the node reboot", func(ctx context.Context) {
 		init1 := "init-1"
@@ -36,7 +36,7 @@ import (
 
 var _ = SIGDescribe("Memory Manager Metrics", framework.WithSerial(), feature.MemoryManager, func() {
 	f := framework.NewDefaultFramework("memorymanager-metrics")
-	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
+	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
 	ginkgo.Context("when querying /metrics", func() {
 		var testPod *v1.Pod
@@ -46,7 +46,7 @@ const KubeReservedMemory = 0.35
 
 var _ = SIGDescribe("OOMKiller for pod using more memory than node allocatable [LinuxOnly]", framework.WithSerial(), func() {
 	f := framework.NewDefaultFramework("nodeallocatable-oomkiller-test")
-	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
+	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
 	testCases := []testCase{
 		{
@@ -46,7 +46,7 @@ const (
 
 var _ = SIGDescribe("Swap", framework.WithNodeConformance(), "[LinuxOnly]", func() {
 	f := framework.NewDefaultFramework("swap-test")
-	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelBaseline
+	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
 
 	ginkgo.DescribeTable("with configuration", func(qosClass v1.PodQOSClass, memoryRequestEqualLimit bool) {
 		ginkgo.By(fmt.Sprintf("Creating a pod of QOS class %s. memoryRequestEqualLimit: %t", qosClass, memoryRequestEqualLimit))