diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index cdc32265d99..db39c114d07 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -38,6 +38,7 @@ import ( watch "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/util/retry" "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -364,7 +365,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { Containers MUST verify that the projected service account token can be read and has correct file mode set including ownership and permission. */ - f.It("should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { + f.It("should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) { e2eskipper.SkipIfNodeOSDistroIs("windows") var ( diff --git a/test/e2e/common/node/downwardapi.go b/test/e2e/common/node/downwardapi.go index fc3ef6edf93..2c199e827b5 100644 --- a/test/e2e/common/node/downwardapi.go +++ b/test/e2e/common/node/downwardapi.go @@ -24,6 +24,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enetwork "k8s.io/kubernetes/test/e2e/framework/network" e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" @@ -314,7 +315,7 @@ var _ = SIGDescribe("Downward API", func() { }) }) -var _ = SIGDescribe("Downward API", framework.WithSerial(), framework.WithDisruptive(), nodefeature.DownwardAPIHugePages, func() { +var _ = SIGDescribe("Downward API", framework.WithSerial(), framework.WithDisruptive(), nodefeature.DownwardAPIHugePages, feature.DownwardAPIHugePages, func() { f := framework.NewDefaultFramework("downward-api") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e/common/node/runtimeclass.go b/test/e2e/common/node/runtimeclass.go index 79f0dd68582..0341017c7e5 100644 --- a/test/e2e/common/node/runtimeclass.go +++ b/test/e2e/common/node/runtimeclass.go @@ -33,6 +33,7 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/kubernetes/pkg/kubelet/events" runtimeclasstest "k8s.io/kubernetes/pkg/kubelet/runtimeclass/testing" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2eevents "k8s.io/kubernetes/test/e2e/framework/events" e2eruntimeclass "k8s.io/kubernetes/test/e2e/framework/node/runtimeclass" @@ -60,7 +61,7 @@ var _ = SIGDescribe("RuntimeClass", func() { }) // The test CANNOT be made a Conformance as it depends on a container runtime to have a specific handler not being installed. - f.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler", nodefeature.RuntimeHandler, func(ctx context.Context) { + f.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler", nodefeature.RuntimeHandler, feature.RuntimeHandler, func(ctx context.Context) { handler := f.Namespace.Name + "-handler" rcName := createRuntimeClass(ctx, f, "unconfigured-handler", handler, nil) ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName) @@ -84,7 +85,7 @@ var _ = SIGDescribe("RuntimeClass", func() { // This test requires that the PreconfiguredRuntimeClassHandler has already been set up on nodes. 
// The test CANNOT be made a Conformance as it depends on a container runtime to have a specific handler installed and working. - f.It("should run a Pod requesting a RuntimeClass with a configured handler", nodefeature.RuntimeHandler, func(ctx context.Context) { + f.It("should run a Pod requesting a RuntimeClass with a configured handler", nodefeature.RuntimeHandler, feature.RuntimeHandler, func(ctx context.Context) { if err := e2eruntimeclass.NodeSupportsPreconfiguredRuntimeClassHandler(ctx, f); err != nil { e2eskipper.Skipf("Skipping test as node does not have E2E runtime class handler preconfigured in container runtime config: %v", err) } diff --git a/test/e2e/common/node/security_context.go b/test/e2e/common/node/security_context.go index 20663fc7cc4..9d0f021b808 100644 --- a/test/e2e/common/node/security_context.go +++ b/test/e2e/common/node/security_context.go @@ -541,7 +541,7 @@ var _ = SIGDescribe("Security Context", func() { } }) - f.It("should run the container as privileged when true [LinuxOnly]", nodefeature.HostAccess, func(ctx context.Context) { + f.It("should run the container as privileged when true [LinuxOnly]", nodefeature.HostAccess, feature.HostAccess, func(ctx context.Context) { podName := createAndWaitUserPod(ctx, true) logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { diff --git a/test/e2e/common/storage/configmap_volume.go b/test/e2e/common/storage/configmap_volume.go index 27b23a81118..c7912f4aa1e 100644 --- a/test/e2e/common/storage/configmap_volume.go +++ b/test/e2e/common/storage/configmap_volume.go @@ -27,6 +27,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" @@ -60,7 +61,7 @@ var _ = SIGDescribe("ConfigMap", func() { doConfigMapE2EWithoutMappings(ctx, f, false, 0, &defaultMode) }) - f.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { + f.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions. e2eskipper.SkipIfNodeOSDistroIs("windows") defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ @@ -76,7 +77,7 @@ var _ = SIGDescribe("ConfigMap", func() { doConfigMapE2EWithoutMappings(ctx, f, true, 0, nil) }) - f.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { + f.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. 
e2eskipper.SkipIfNodeOSDistroIs("windows") doConfigMapE2EWithoutMappings(ctx, f, true, 1001, nil) @@ -111,7 +112,7 @@ var _ = SIGDescribe("ConfigMap", func() { doConfigMapE2EWithMappings(ctx, f, true, 0, nil) }) - f.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { + f.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") doConfigMapE2EWithMappings(ctx, f, true, 1001, nil) diff --git a/test/e2e/common/storage/downwardapi_volume.go b/test/e2e/common/storage/downwardapi_volume.go index 99518d529b3..263e9d3d93e 100644 --- a/test/e2e/common/storage/downwardapi_volume.go +++ b/test/e2e/common/storage/downwardapi_volume.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" @@ -93,7 +94,7 @@ var _ = SIGDescribe("Downward API volume", func() { }) }) - f.It("should provide podname as non-root with fsgroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { + f.It("should provide podname as non-root with fsgroup [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") podName := "metadata-volume-" + string(uuid.NewUUID()) @@ -108,7 +109,7 @@ var _ = SIGDescribe("Downward API volume", func() { }) }) - f.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { + f.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions. e2eskipper.SkipIfNodeOSDistroIs("windows") podName := "metadata-volume-" + string(uuid.NewUUID()) diff --git a/test/e2e/common/storage/empty_dir.go b/test/e2e/common/storage/empty_dir.go index 74aaa44ded3..9aab64fbe5e 100644 --- a/test/e2e/common/storage/empty_dir.go +++ b/test/e2e/common/storage/empty_dir.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" @@ -49,7 +50,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() { f := framework.NewDefaultFramework("emptydir") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline - f.Context("when FSGroup is specified [LinuxOnly]", nodefeature.FSGroup, func() { + f.Context("when FSGroup is specified [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func() { ginkgo.BeforeEach(func() { // Windows does not support the FSGroup SecurityContext option. 
diff --git a/test/e2e/common/storage/projected_configmap.go b/test/e2e/common/storage/projected_configmap.go index f80e06eb452..5d438153e79 100644 --- a/test/e2e/common/storage/projected_configmap.go +++ b/test/e2e/common/storage/projected_configmap.go @@ -24,6 +24,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" @@ -60,7 +61,7 @@ var _ = SIGDescribe("Projected configMap", func() { doProjectedConfigMapE2EWithoutMappings(ctx, f, false, 0, &defaultMode) }) - f.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { + f.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions. e2eskipper.SkipIfNodeOSDistroIs("windows") defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */ @@ -76,7 +77,7 @@ var _ = SIGDescribe("Projected configMap", func() { doProjectedConfigMapE2EWithoutMappings(ctx, f, true, 0, nil) }) - f.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { + f.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") doProjectedConfigMapE2EWithoutMappings(ctx, f, true, 1001, nil) @@ -111,7 +112,7 @@ var _ = SIGDescribe("Projected configMap", func() { doProjectedConfigMapE2EWithMappings(ctx, f, true, 0, nil) }) - f.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { + f.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. e2eskipper.SkipIfNodeOSDistroIs("windows") doProjectedConfigMapE2EWithMappings(ctx, f, true, 1001, nil) diff --git a/test/e2e/common/storage/projected_downwardapi.go b/test/e2e/common/storage/projected_downwardapi.go index f9fe43e2381..222b5ce6a6c 100644 --- a/test/e2e/common/storage/projected_downwardapi.go +++ b/test/e2e/common/storage/projected_downwardapi.go @@ -24,6 +24,7 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output" @@ -93,7 +94,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() { }) }) - f.It("should provide podname as non-root with fsgroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) { + f.It("should provide podname as non-root with fsgroup [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) { // Windows does not support RunAsUser / FSGroup SecurityContext options. 
 		e2eskipper.SkipIfNodeOSDistroIs("windows")
 		podName := "metadata-volume-" + string(uuid.NewUUID())
@@ -108,7 +109,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
 		})
 	})
 
-	f.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) {
+	f.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) {
 		// Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
 		e2eskipper.SkipIfNodeOSDistroIs("windows")
 		podName := "metadata-volume-" + string(uuid.NewUUID())
diff --git a/test/e2e/feature/feature.go b/test/e2e/feature/feature.go
index dc29a62965d..abd7afd7a74 100644
--- a/test/e2e/feature/feature.go
+++ b/test/e2e/feature/feature.go
@@ -41,6 +41,10 @@ var (
 	// Marks tests that exercise the CBOR data format for serving or storage.
 	CBOR = framework.WithFeature(framework.ValidFeatures.Add("CBOR"))
 
+	// Owner: sig-node
+	// Marks tests that exercise checkpointing of containers
+	CheckpointContainer = framework.WithFeature(framework.ValidFeatures.Add("CheckpointContainer"))
+
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	CloudProvider = framework.WithFeature(framework.ValidFeatures.Add("CloudProvider"))
 
@@ -71,9 +75,21 @@ var (
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	CPUManager = framework.WithFeature(framework.ValidFeatures.Add("CPUManager"))
 
+	// OWNER: sig-node
+	// Testing critical pod admission
+	CriticalPod = framework.WithFeature(framework.ValidFeatures.Add("CriticalPod"))
+
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	CustomMetricsAutoscaling = framework.WithFeature(framework.ValidFeatures.Add("CustomMetricsAutoscaling"))
 
+	// OWNER: sig-node
+	// Testing device managers
+	DeviceManager = framework.WithFeature(framework.ValidFeatures.Add("DeviceManager"))
+
+	// OWNER: sig-node
+	// Testing device plugins
+	DevicePlugin = framework.WithFeature(framework.ValidFeatures.Add("DevicePlugin"))
+
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	Downgrade = framework.WithFeature(framework.ValidFeatures.Add("Downgrade"))
 
@@ -98,6 +114,10 @@ var (
 	// is enabled such that passing CDI device IDs through CRI fields is supported
 	DRAAdminAccess = framework.WithFeature(framework.ValidFeatures.Add("DRAAdminAccess"))
 
+	// OWNER: sig-node
+	// Testing downward API huge pages
+	DownwardAPIHugePages = framework.WithFeature(framework.ValidFeatures.Add("DownwardAPIHugePages"))
+
 	// owning-sig: sig-node
 	// kep: https://kep.k8s.io/4381
 	// test-infra jobs:
@@ -119,9 +139,20 @@ var (
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	ExperimentalResourceUsageTracking = framework.WithFeature(framework.ValidFeatures.Add("ExperimentalResourceUsageTracking"))
 
+	// OWNER: sig-node
+	// Testing eviction manager
+	Eviction = framework.WithFeature(framework.ValidFeatures.Add("Eviction"))
+
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	Flexvolumes = framework.WithFeature(framework.ValidFeatures.Add("Flexvolumes"))
 
+	// TODO: document the feature (owning SIG, when to use this feature for a test)
+	FSGroup = framework.WithFeature(framework.ValidFeatures.Add("FSGroup"))
+
+	// OWNER: sig-node
+	// Testing garbage collection of images/containers
+	GarbageCollect = framework.WithFeature(framework.ValidFeatures.Add("GarbageCollect"))
+
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	GKENodePool = framework.WithFeature(framework.ValidFeatures.Add("GKENodePool"))
 
@@ -140,6 +171,14 @@ var (
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	GPUUpgrade = framework.WithFeature(framework.ValidFeatures.Add("GPUUpgrade"))
 
+	// OWNER: sig-node
+	// Testing graceful node shutdown
+	GracefulNodeShutdown = framework.WithFeature(framework.ValidFeatures.Add("GracefulNodeShutdown"))
+
+	// OWNER: sig-node
+	// GracefulNodeShutdown based on pod priority
+	GracefulNodeShutdownBasedOnPodPriority = framework.WithFeature(framework.ValidFeatures.Add("GracefulNodeShutdownBasedOnPodPriority"))
+
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	HAMaster = framework.WithFeature(framework.ValidFeatures.Add("HAMaster"))
 
@@ -160,9 +199,19 @@ var (
 	// not focus on this feature anymore.
 	HonorPVReclaimPolicy = framework.WithFeature(framework.ValidFeatures.Add("HonorPVReclaimPolicy"))
 
+	// Owner: sig-node
+	HostAccess = framework.WithFeature(framework.ValidFeatures.Add("HostAccess"))
+
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	HugePages = framework.WithFeature(framework.ValidFeatures.Add("HugePages"))
 
+	// Owner: sig-node
+	ImageID = framework.WithFeature(framework.ValidFeatures.Add("ImageID"))
+
+	// Owner: sig-node
+	// ImageVolume is used for testing the image volume source feature (https://kep.k8s.io/4639).
+	ImageVolume = framework.WithFeature(framework.ValidFeatures.Add("ImageVolume"))
+
 	// Owner: sig-network
 	// Marks tests that require a conforming implementation of
 	// Ingress.networking.k8s.io to be present.
@@ -182,12 +231,20 @@ var (
 	// Marks tests that require kube-dns-autoscaler
 	KubeDNSAutoscaler = framework.WithFeature(framework.ValidFeatures.Add("KubeDNSAutoscaler"))
 
+	// Owner: sig-node
+	// Testing the kubelet config drop-in directory KEP
+	KubeletConfigDropInDir = framework.WithFeature(framework.ValidFeatures.Add("KubeletConfigDropInDir"))
+
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	KubeletCredentialProviders = framework.WithFeature(framework.ValidFeatures.Add("KubeletCredentialProviders"))
 
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	KubeletSecurity = framework.WithFeature(framework.ValidFeatures.Add("KubeletSecurity"))
 
+	// KubeletSeparateDiskGC (SIG-node, used for testing a separate image filesystem)
+	// The tests need separate disk settings on nodes and separate filesystems in storage.conf
+	KubeletSeparateDiskGC = framework.WithFeature(framework.ValidFeatures.Add("KubeletSeparateDiskGC"))
+
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	KubeProxyDaemonSetDowngrade = framework.WithFeature(framework.ValidFeatures.Add("KubeProxyDaemonSetDowngrade"))
 
@@ -204,6 +261,9 @@ var (
 	// Marks tests that require a cloud provider that implements LoadBalancer Services
 	LoadBalancer = framework.WithFeature(framework.ValidFeatures.Add("LoadBalancer"))
 
+	// Owner: sig-storage
+	LSCIQuotaMonitoring = framework.WithFeature(framework.ValidFeatures.Add("LSCIQuotaMonitoring"))
+
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	LocalStorageCapacityIsolationQuota = framework.WithFeature(framework.ValidFeatures.Add("LocalStorageCapacityIsolationQuota"))
 
@@ -235,9 +295,17 @@ var (
 	// NetworkPolicy.networking.k8s.io to be present.
 	NetworkPolicy = framework.WithFeature(framework.ValidFeatures.Add("NetworkPolicy"))
 
+	// Owner: sig-node
+	// Testing node allocatable validations
+	NodeAllocatable = framework.WithFeature(framework.ValidFeatures.Add("NodeAllocatable"))
+
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	NodeAuthenticator = framework.WithFeature(framework.ValidFeatures.Add("NodeAuthenticator"))
 
+	// Owner: sig-node
+	// Node Problem Detector e2e tests in-tree.
+	NodeProblemDetector = framework.WithFeature(framework.ValidFeatures.Add("NodeProblemDetector"))
+
 	// Owner: sig-auth
 	// Marks tests that require a conforming implementation of
 	// Node claims for serviceaccounts. Typically this means that the
@@ -253,6 +321,15 @@ var (
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	NodeOutOfServiceVolumeDetach = framework.WithFeature(framework.ValidFeatures.Add("NodeOutOfServiceVolumeDetach"))
 
+	// Owner: sig-node
+	// Tests aiming to verify oom_score functionality
+	OOMScoreAdj = framework.WithFeature(framework.ValidFeatures.Add("OOMScoreAdj"))
+
+	// Owner: sig-node
+	// Verify ProcMount feature.
+	// Used in combination with user namespaces
+	ProcMountType = framework.WithFeature(framework.ValidFeatures.Add("ProcMountType"))
+
 	// Owner: sig-network
 	// Marks a single test that tests cluster DNS performance with many services.
 	PerformanceDNS = framework.WithFeature(framework.ValidFeatures.Add("PerformanceDNS"))
@@ -295,6 +372,9 @@ var (
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	RecoverVolumeExpansionFailure = framework.WithFeature(framework.ValidFeatures.Add("RecoverVolumeExpansionFailure"))
 
+	// RecursiveReadOnlyMounts (SIG-node, used for testing recursive read-only mounts)
+	RecursiveReadOnlyMounts = framework.WithFeature(framework.ValidFeatures.Add("RecursiveReadOnlyMounts"))
+
 	// RelaxedEnvironmentVariableValidation used when we verify whether the pod can consume all printable ASCII characters as environment variable names,
 	// and whether the pod can consume configmap/secret that key starts with a number.
 	RelaxedEnvironmentVariableValidation = framework.WithFeature(framework.ValidFeatures.Add("RelaxedEnvironmentVariableValidation"))
@@ -303,12 +383,24 @@ var (
 	// Owner: sig-network
 	// Marks tests of KEP-4427 that require the `RelaxedDNSSearchValidation` feature gate
 	RelaxedDNSSearchValidation = framework.WithFeature(framework.ValidFeatures.Add("RelaxedDNSSearchValidation"))
+
+	// Owner: sig-node
+	// Device Management metrics
+	ResourceMetrics = framework.WithFeature(framework.ValidFeatures.Add("ResourceMetrics"))
+
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	Recreate = framework.WithFeature(framework.ValidFeatures.Add("Recreate"))
 
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	RegularResourceUsageTracking = framework.WithFeature(framework.ValidFeatures.Add("RegularResourceUsageTracking"))
 
+	// Owner: sig-node
+	// Resource health status for device plugins and DRA
+	ResourceHealthStatus = framework.WithFeature(framework.ValidFeatures.Add("ResourceHealthStatus"))
+
+	// Owner: sig-node
+	// Runtime Handler
+	RuntimeHandler = framework.WithFeature(framework.ValidFeatures.Add("RuntimeHandler"))
+
 	// Owner: sig-scheduling
 	// Marks tests of the asynchronous preemption (KEP-4832) that require the `SchedulerAsyncPreemption` feature gate.
 	SchedulerAsyncPreemption = framework.WithFeature(framework.ValidFeatures.Add("SchedulerAsyncPreemption"))
@@ -336,7 +428,8 @@ var (
 	// and the networking.k8s.io/v1alpha1 API.
 	ServiceCIDRs = framework.WithFeature(framework.ValidFeatures.Add("ServiceCIDRs"))
 
-	// TODO: document the feature (owning SIG, when to use this feature for a test)
+	// Owner: sig-node
+	// Sidecar containers (KEP-753)
 	SidecarContainers = framework.WithFeature(framework.ValidFeatures.Add("SidecarContainers"))
 
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
@@ -360,6 +453,10 @@ var (
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	StatefulSet = framework.WithFeature(framework.ValidFeatures.Add("StatefulSet"))
 
+	// Added to test the node swap feature
+	// This label should be used when testing KEP-2400 (Node Swap Support)
+	Swap = framework.WithFeature(framework.ValidFeatures.Add("NodeSwap"))
+
 	PodIndexLabel = framework.WithFeature(framework.ValidFeatures.Add("PodIndexLabel"))
 
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
@@ -379,6 +476,10 @@ var (
 	// (used for testing fine-grained SupplementalGroups control )
 	SupplementalGroupsPolicy = framework.WithFeature(framework.ValidFeatures.Add("SupplementalGroupsPolicy"))
 
+	// Owner: sig-node
+	// Marks tests that are testing system critical pods
+	SystemNodeCriticalPod = framework.WithFeature(framework.ValidFeatures.Add("SystemNodeCriticalPod"))
+
 	// Owner: sig-node
 	// Tests marked with this feature MUST run with the CRI Proxy configured so errors can be injected into the kubelet's CRI calls.
 	// This is useful for testing how the kubelet handles various error conditions in its CRI interactions.
diff --git a/test/e2e/node/node_problem_detector.go b/test/e2e/node/node_problem_detector.go
index aac165fe9ce..19046c2002d 100644
--- a/test/e2e/node/node_problem_detector.go
+++ b/test/e2e/node/node_problem_detector.go
@@ -28,6 +28,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
+	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@@ -43,7 +44,7 @@ import (
 
 // This test checks if node-problem-detector (NPD) runs fine without error on
 // the up to 10 nodes in the cluster. NPD's functionality is tested in e2e_node tests.
-var _ = SIGDescribe("NodeProblemDetector", nodefeature.NodeProblemDetector, func() {
+var _ = SIGDescribe("NodeProblemDetector", nodefeature.NodeProblemDetector, feature.NodeProblemDetector, func() {
 	const (
 		pollInterval = 1 * time.Second
 		pollTimeout  = 1 * time.Minute
diff --git a/test/e2e/nodefeature/nodefeature.go b/test/e2e/nodefeature/nodefeature.go
index 3b0d1bb287d..5519b19e4fe 100644
--- a/test/e2e/nodefeature/nodefeature.go
+++ b/test/e2e/nodefeature/nodefeature.go
@@ -22,6 +22,8 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 )
 
+// These node feature labels are deprecated.
+// They will be kept around for a short period so that test-infra can switch over to the WithFeature labels in test/e2e/feature.
 var (
 	// Please keep the list in alphabetical order.
 
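For reference, the migration pattern the deprecation note above refers to, and which this patch applies throughout (a recap of lines that already appear in this diff, not an additional change): a label is declared once in test/e2e/feature/feature.go via framework.WithFeature, for example

	// OWNER: sig-node
	// Testing eviction manager
	Eviction = framework.WithFeature(framework.ValidFeatures.Add("Eviction"))

and each affected spec then carries both the deprecated nodefeature label and the new feature label until test-infra has switched over, for example

	var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() { /* ... */ })
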
@@ -41,6 +43,7 @@ var ( DownwardAPIHugePages = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("DownwardAPIHugePages")) // TODO: document the feature (owning SIG, when to use this feature for a test) + // not used anywhere DynamicResourceAllocation = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("DynamicResourceAllocation")) // TODO: document the feature (owning SIG, when to use this feature for a test) diff --git a/test/e2e_node/checkpoint_container.go b/test/e2e_node/checkpoint_container.go index 40ab27ca060..257d9e56e12 100644 --- a/test/e2e_node/checkpoint_container.go +++ b/test/e2e_node/checkpoint_container.go @@ -33,6 +33,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -130,7 +131,7 @@ func getCheckpointContainerErrorMetric(ctx context.Context, f *framework.Framewo return 0, nil } -var _ = SIGDescribe("Checkpoint Container", nodefeature.CheckpointContainer, func() { +var _ = SIGDescribe("Checkpoint Container", nodefeature.CheckpointContainer, feature.CheckpointContainer, func() { f := framework.NewDefaultFramework("checkpoint-container-test") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline ginkgo.It("will checkpoint a container out of a pod", func(ctx context.Context) { diff --git a/test/e2e_node/container_lifecycle_test.go b/test/e2e_node/container_lifecycle_test.go index 251bf6f0e7a..fcbcba1aea4 100644 --- a/test/e2e_node/container_lifecycle_test.go +++ b/test/e2e_node/container_lifecycle_test.go @@ -29,6 +29,7 @@ import ( runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" admissionapi "k8s.io/pod-security-admission/api" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/nodefeature" @@ -1621,7 +1622,7 @@ var _ = SIGDescribe(framework.WithSerial(), "Containers Lifecycle", func() { }) }) -var _ = SIGDescribe(nodefeature.SidecarContainers, "Containers Lifecycle", func() { +var _ = SIGDescribe(nodefeature.SidecarContainers, feature.SidecarContainers, "Containers Lifecycle", func() { f := framework.NewDefaultFramework("containers-lifecycle-test") addAfterEachForCleaningUpPods(f) f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged @@ -5408,7 +5409,7 @@ var _ = SIGDescribe(nodefeature.SidecarContainers, "Containers Lifecycle", func( }) }) -var _ = SIGDescribe(nodefeature.SidecarContainers, framework.WithSerial(), "Containers Lifecycle", func() { +var _ = SIGDescribe(nodefeature.SidecarContainers, feature.SidecarContainers, framework.WithSerial(), "Containers Lifecycle", func() { f := framework.NewDefaultFramework("containers-lifecycle-test-serial") addAfterEachForCleaningUpPods(f) f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e_node/container_manager_test.go b/test/e2e_node/container_manager_test.go index 4aa89eff3d8..cce8e69ea15 100644 --- a/test/e2e_node/container_manager_test.go +++ b/test/e2e_node/container_manager_test.go @@ -34,6 +34,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/uuid" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/nodefeature" @@ -101,7 
+102,7 @@ func dumpRunningContainer(ctx context.Context) error { var _ = SIGDescribe("Container Manager Misc", framework.WithSerial(), func() { f := framework.NewDefaultFramework("kubelet-container-manager") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged - f.Describe("Validate OOM score adjustments", nodefeature.OOMScoreAdj, func() { + f.Describe("Validate OOM score adjustments", nodefeature.OOMScoreAdj, feature.OOMScoreAdj, func() { ginkgo.Context("once the node is setup", func() { ginkgo.It("container runtime's oom-score-adj should be -999", func(ctx context.Context) { runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile) diff --git a/test/e2e_node/cpu_manager_test.go b/test/e2e_node/cpu_manager_test.go index 97dc228208a..7152782552f 100644 --- a/test/e2e_node/cpu_manager_test.go +++ b/test/e2e_node/cpu_manager_test.go @@ -710,7 +710,7 @@ func runCPUManagerTests(f *framework.Framework) { runSMTAlignmentPositiveTests(ctx, f, smtLevel) }) - f.It("should not reuse CPUs of restartable init containers", nodefeature.SidecarContainers, func(ctx context.Context) { + f.It("should not reuse CPUs of restartable init containers", nodefeature.SidecarContainers, feature.SidecarContainers, func(ctx context.Context) { cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f) // Skip rest of the tests if CPU capacity < 3. diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go index 7c6149f2cc6..ef53228713b 100644 --- a/test/e2e_node/critical_pod_test.go +++ b/test/e2e_node/critical_pod_test.go @@ -27,6 +27,7 @@ import ( kubeapi "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/scheduling" kubelettypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/nodefeature" @@ -44,7 +45,7 @@ const ( bestEffortPodName = "best-effort" ) -var _ = SIGDescribe("CriticalPod", framework.WithSerial(), framework.WithDisruptive(), nodefeature.CriticalPod, func() { +var _ = SIGDescribe("CriticalPod", framework.WithSerial(), framework.WithDisruptive(), nodefeature.CriticalPod, feature.CriticalPod, func() { f := framework.NewDefaultFramework("critical-pod-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged ginkgo.Context("when we need to admit a critical pod", func() { diff --git a/test/e2e_node/device_manager_test.go b/test/e2e_node/device_manager_test.go index dc0f6ae7625..e96bc04bf0a 100644 --- a/test/e2e_node/device_manager_test.go +++ b/test/e2e_node/device_manager_test.go @@ -33,6 +33,7 @@ import ( "k8s.io/klog/v2" admissionapi "k8s.io/pod-security-admission/api" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -51,7 +52,7 @@ const ( ) // Serial because the test updates kubelet configuration. 
-var _ = SIGDescribe("Device Manager", framework.WithSerial(), nodefeature.DeviceManager, func() { +var _ = SIGDescribe("Device Manager", framework.WithSerial(), nodefeature.DeviceManager, feature.DeviceManager, func() { f := framework.NewDefaultFramework("devicemanager-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e_node/device_plugin_failures_pod_status_test.go b/test/e2e_node/device_plugin_failures_pod_status_test.go index ffbecad40c1..53ecfe07585 100644 --- a/test/e2e_node/device_plugin_failures_pod_status_test.go +++ b/test/e2e_node/device_plugin_failures_pod_status_test.go @@ -27,6 +27,7 @@ import ( v1 "k8s.io/api/core/v1" kubeletdevicepluginv1beta1 "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/test/e2e/feature" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" "k8s.io/kubernetes/test/e2e/nodefeature" @@ -40,7 +41,7 @@ import ( "k8s.io/kubernetes/test/e2e_node/testdeviceplugin" ) -var _ = SIGDescribe("Device Plugin Failures Pod Status", nodefeature.ResourceHealthStatus, func() { +var _ = SIGDescribe("Device Plugin Failures Pod Status", nodefeature.ResourceHealthStatus, feature.ResourceHealthStatus, func() { f := framework.NewDefaultFramework("device-plugin-failures") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e_node/device_plugin_test.go b/test/e2e_node/device_plugin_test.go index 2db84ce2b80..c82eaf34a07 100644 --- a/test/e2e_node/device_plugin_test.go +++ b/test/e2e_node/device_plugin_test.go @@ -49,6 +49,7 @@ import ( kubeletpodresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1" kubeletpodresourcesv1alpha1 "k8s.io/kubelet/pkg/apis/podresources/v1alpha1" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -63,7 +64,7 @@ var ( ) // Serial because the test restarts Kubelet -var _ = SIGDescribe("Device Plugin", nodefeature.DevicePlugin, framework.WithSerial(), func() { +var _ = SIGDescribe("Device Plugin", nodefeature.DevicePlugin, framework.WithSerial(), feature.DevicePlugin, func() { f := framework.NewDefaultFramework("device-plugin-errors") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged testDevicePlugin(f, kubeletdevicepluginv1beta1.DevicePluginPath) @@ -695,7 +696,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) { } }) - f.It("Can schedule a pod with a restartable init container", nodefeature.SidecarContainers, func(ctx context.Context) { + f.It("Can schedule a pod with a restartable init container", nodefeature.SidecarContainers, feature.SidecarContainers, func(ctx context.Context) { podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s" sleepOneSecond := "1s" rl := v1.ResourceList{v1.ResourceName(SampleDeviceResourceName): *resource.NewQuantity(1, resource.DecimalSI)} diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go index 15818e2949e..04a02d8f6d6 100644 --- a/test/e2e_node/eviction_test.go +++ b/test/e2e_node/eviction_test.go @@ -36,6 +36,7 @@ import ( evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod 
"k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -70,7 +71,7 @@ const ( // InodeEviction tests that the node responds to node disk pressure by evicting only responsible pods. // Node disk pressure is induced by consuming all inodes on the node. -var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { +var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() { f := framework.NewDefaultFramework("inode-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged expectedNodeCondition := v1.NodeDiskPressure @@ -105,7 +106,7 @@ var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial( // ImageGCNoEviction tests that the node does not evict pods when inodes are consumed by images // Disk pressure is induced by pulling large images -var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { +var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() { f := framework.NewDefaultFramework("image-gc-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged pressureTimeout := 10 * time.Minute @@ -136,7 +137,7 @@ var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSer // MemoryAllocatableEviction tests that the node responds to node memory pressure by evicting only responsible pods. // Node memory pressure is only encountered because we reserve the majority of the node's capacity via kube-reserved. -var _ = SIGDescribe("MemoryAllocatableEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { +var _ = SIGDescribe("MemoryAllocatableEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() { f := framework.NewDefaultFramework("memory-allocatable-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged expectedNodeCondition := v1.NodeMemoryPressure @@ -170,7 +171,7 @@ var _ = SIGDescribe("MemoryAllocatableEviction", framework.WithSlow(), framework // LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods // Disk pressure is induced by running pods which consume disk space. -var _ = SIGDescribe("LocalStorageEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { +var _ = SIGDescribe("LocalStorageEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() { f := framework.NewDefaultFramework("localstorage-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged pressureTimeout := 15 * time.Minute @@ -211,7 +212,7 @@ var _ = SIGDescribe("LocalStorageEviction", framework.WithSlow(), framework.With // LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods // Disk pressure is induced by running pods which consume disk space, which exceed the soft eviction threshold. // Note: This test's purpose is to test Soft Evictions. Local storage was chosen since it is the least costly to run. 
-var _ = SIGDescribe("LocalStorageSoftEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { +var _ = SIGDescribe("LocalStorageSoftEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() { f := framework.NewDefaultFramework("localstorage-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged pressureTimeout := 10 * time.Minute @@ -249,7 +250,7 @@ var _ = SIGDescribe("LocalStorageSoftEviction", framework.WithSlow(), framework. }) }) -var _ = SIGDescribe("LocalStorageSoftEvictionNotOverwriteTerminationGracePeriodSeconds", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { +var _ = SIGDescribe("LocalStorageSoftEvictionNotOverwriteTerminationGracePeriodSeconds", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() { f := framework.NewDefaultFramework("localstorage-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged pressureTimeout := 10 * time.Minute @@ -288,7 +289,7 @@ var _ = SIGDescribe("LocalStorageSoftEvictionNotOverwriteTerminationGracePeriodS }) // LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions -var _ = SIGDescribe("LocalStorageCapacityIsolationEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { +var _ = SIGDescribe("LocalStorageCapacityIsolationEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.LocalStorageCapacityIsolationQuota, nodefeature.Eviction, feature.Eviction, func() { f := framework.NewDefaultFramework("localstorage-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged evictionTestTimeout := 10 * time.Minute @@ -341,7 +342,7 @@ var _ = SIGDescribe("LocalStorageCapacityIsolationEviction", framework.WithSlow( // PriorityMemoryEvictionOrdering tests that the node responds to node memory pressure by evicting pods. // This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before // the higher priority pod. -var _ = SIGDescribe("PriorityMemoryEvictionOrdering", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { +var _ = SIGDescribe("PriorityMemoryEvictionOrdering", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() { f := framework.NewDefaultFramework("priority-memory-eviction-ordering-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged expectedNodeCondition := v1.NodeMemoryPressure @@ -401,7 +402,7 @@ var _ = SIGDescribe("PriorityMemoryEvictionOrdering", framework.WithSlow(), fram // PriorityLocalStorageEvictionOrdering tests that the node responds to node disk pressure by evicting pods. // This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before // the higher priority pod. 
-var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { +var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() { f := framework.NewDefaultFramework("priority-disk-eviction-ordering-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged expectedNodeCondition := v1.NodeDiskPressure @@ -464,7 +465,7 @@ var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering", framework.WithSlow() }) // PriorityPidEvictionOrdering tests that the node emits pid pressure in response to a fork bomb, and evicts pods by priority -var _ = SIGDescribe("PriorityPidEvictionOrdering", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { +var _ = SIGDescribe("PriorityPidEvictionOrdering", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() { f := framework.NewDefaultFramework("pidpressure-eviction-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged pressureTimeout := 10 * time.Minute diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go index 7f7dc46f1e2..4c37332af62 100644 --- a/test/e2e_node/garbage_collector_test.go +++ b/test/e2e_node/garbage_collector_test.go @@ -27,6 +27,7 @@ import ( internalapi "k8s.io/cri-api/pkg/apis" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" "k8s.io/kubelet/pkg/types" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/nodefeature" @@ -73,7 +74,7 @@ type testRun struct { // GarbageCollect tests that the Kubelet conforms to the Kubelet Garbage Collection Policy, found here: // http://kubernetes.io/docs/admin/garbage-collection/ -var _ = SIGDescribe("GarbageCollect", framework.WithSerial(), nodefeature.GarbageCollect, func() { +var _ = SIGDescribe("GarbageCollect", framework.WithSerial(), nodefeature.GarbageCollect, feature.GarbageCollect, func() { f := framework.NewDefaultFramework("garbage-collect-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged containerNamePrefix := "gc-test-container-" diff --git a/test/e2e_node/image_gc_test.go b/test/e2e_node/image_gc_test.go index 6c27dc92b20..a6e23388b16 100644 --- a/test/e2e_node/image_gc_test.go +++ b/test/e2e_node/image_gc_test.go @@ -26,8 +26,10 @@ import ( runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" kubefeatures "k8s.io/kubernetes/pkg/features" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" + "k8s.io/kubernetes/test/e2e/nodefeature" admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo/v2" @@ -41,7 +43,7 @@ const ( checkGCFreq time.Duration = 30 * time.Second ) -var _ = SIGDescribe("ImageGarbageCollect", framework.WithSerial(), framework.WithNodeFeature("GarbageCollect"), func() { +var _ = SIGDescribe("ImageGarbageCollect", framework.WithSerial(), nodefeature.GarbageCollect, feature.GarbageCollect, func() { f := framework.NewDefaultFramework("image-garbage-collect-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged var is internalapi.ImageManagerService diff --git a/test/e2e_node/image_id_test.go b/test/e2e_node/image_id_test.go 
index 94581b37538..d3be08220ff 100644 --- a/test/e2e_node/image_id_test.go +++ b/test/e2e_node/image_id_test.go @@ -22,16 +22,18 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/dump" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/nodefeature" + admissionapi "k8s.io/pod-security-admission/api" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" ) -var _ = SIGDescribe("ImageID", nodefeature.ImageID, func() { +var _ = SIGDescribe("ImageID", nodefeature.ImageID, feature.ImageID, func() { busyBoxImage := "registry.k8s.io/e2e-test-images/busybox@sha256:a9155b13325b2abef48e71de77bb8ac015412a566829f621d06bfae5c699b1b9" diff --git a/test/e2e_node/image_volume.go b/test/e2e_node/image_volume.go index 223080c7ebc..79e5431545c 100644 --- a/test/e2e_node/image_volume.go +++ b/test/e2e_node/image_volume.go @@ -30,6 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/kubelet/images" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -40,7 +41,7 @@ import ( // Run this single test locally using a running CRI-O instance by: // make test-e2e-node CONTAINER_RUNTIME_ENDPOINT="unix:///var/run/crio/crio.sock" TEST_ARGS='--ginkgo.focus="ImageVolume" --feature-gates=ImageVolume=true --service-feature-gates=ImageVolume=true --kubelet-flags="--cgroup-root=/ --runtime-cgroups=/system.slice/crio.service --kubelet-cgroups=/system.slice/kubelet.service --fail-swap-on=false"' -var _ = SIGDescribe("ImageVolume", nodefeature.ImageVolume, func() { +var _ = SIGDescribe("ImageVolume", nodefeature.ImageVolume, feature.ImageVolume, func() { f := framework.NewDefaultFramework("image-volume-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e_node/kubelet_config_dir_test.go b/test/e2e_node/kubelet_config_dir_test.go index 9e0e59ef117..48c121dd20f 100644 --- a/test/e2e_node/kubelet_config_dir_test.go +++ b/test/e2e_node/kubelet_config_dir_test.go @@ -26,11 +26,12 @@ import ( "github.com/onsi/gomega" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/nodefeature" ) -var _ = SIGDescribe("Kubelet Config", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.KubeletConfigDropInDir, func() { +var _ = SIGDescribe("Kubelet Config", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.KubeletConfigDropInDir, feature.KubeletConfigDropInDir, func() { f := framework.NewDefaultFramework("kubelet-config-drop-in-dir-test") ginkgo.Context("when merging drop-in configs", func() { var oldcfg *kubeletconfig.KubeletConfiguration diff --git a/test/e2e_node/mount_rro_linux_test.go b/test/e2e_node/mount_rro_linux_test.go index 6e9dc4c8672..5316cdc2a70 100644 --- a/test/e2e_node/mount_rro_linux_test.go +++ b/test/e2e_node/mount_rro_linux_test.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper 
"k8s.io/kubernetes/test/e2e/framework/skipper" @@ -36,7 +37,7 @@ import ( // Usage: // make test-e2e-node TEST_ARGS='--service-feature-gates=RecursiveReadOnlyMounts=true --kubelet-flags="--feature-gates=RecursiveReadOnlyMounts=true"' FOCUS="Mount recursive read-only" SKIP="" -var _ = SIGDescribe("Mount recursive read-only [LinuxOnly]", nodefeature.RecursiveReadOnlyMounts, func() { +var _ = SIGDescribe("Mount recursive read-only [LinuxOnly]", nodefeature.RecursiveReadOnlyMounts, feature.RecursiveReadOnlyMounts, func() { f := framework.NewDefaultFramework("mount-rro") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged ginkgo.Describe("Mount recursive read-only", func() { diff --git a/test/e2e_node/node_container_manager_test.go b/test/e2e_node/node_container_manager_test.go index 5f61e4a8676..21fff73cf68 100644 --- a/test/e2e_node/node_container_manager_test.go +++ b/test/e2e_node/node_container_manager_test.go @@ -36,6 +36,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/stats/pidlimit" admissionapi "k8s.io/pod-security-admission/api" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/nodefeature" e2enodekubelet "k8s.io/kubernetes/test/e2e_node/kubeletconfig" @@ -71,7 +72,7 @@ func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration, var _ = SIGDescribe("Node Container Manager", framework.WithSerial(), func() { f := framework.NewDefaultFramework("node-container-manager") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged - f.Describe("Validate Node Allocatable", nodefeature.NodeAllocatable, func() { + f.Describe("Validate Node Allocatable", nodefeature.NodeAllocatable, feature.NodeAllocatable, func() { ginkgo.It("sets up the node and runs the test", func(ctx context.Context) { framework.ExpectNoError(runTest(ctx, f)) }) diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go index 3c264af9d06..4fd50ff6b62 100644 --- a/test/e2e_node/node_problem_detector_linux.go +++ b/test/e2e_node/node_problem_detector_linux.go @@ -38,13 +38,14 @@ import ( admissionapi "k8s.io/pod-security-admission/api" "k8s.io/kubernetes/pkg/kubelet/util" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/nodefeature" testutils "k8s.io/kubernetes/test/utils" ) -var _ = SIGDescribe("NodeProblemDetector", nodefeature.NodeProblemDetector, framework.WithSerial(), func() { +var _ = SIGDescribe("NodeProblemDetector", nodefeature.NodeProblemDetector, feature.NodeProblemDetector, framework.WithSerial(), func() { const ( pollInterval = 1 * time.Second pollConsistent = 5 * time.Second diff --git a/test/e2e_node/node_shutdown_linux_test.go b/test/e2e_node/node_shutdown_linux_test.go index 60743d6c778..4e7d4a04ef2 100644 --- a/test/e2e_node/node_shutdown_linux_test.go +++ b/test/e2e_node/node_shutdown_linux_test.go @@ -40,6 +40,7 @@ import ( "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" "k8s.io/kubernetes/pkg/apis/scheduling" + "k8s.io/kubernetes/test/e2e/feature" "k8s.io/kubernetes/test/e2e/framework" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" @@ -57,7 +58,7 @@ import ( testutils "k8s.io/kubernetes/test/utils" ) -var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature.GracefulNodeShutdown, nodefeature.GracefulNodeShutdownBasedOnPodPriority, func() { +var _ = SIGDescribe("GracefulNodeShutdown", 
framework.WithSerial(), nodefeature.GracefulNodeShutdown, nodefeature.GracefulNodeShutdownBasedOnPodPriority, feature.GracefulNodeShutdown, feature.GracefulNodeShutdownBasedOnPodPriority, func() { f := framework.NewDefaultFramework("graceful-node-shutdown") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e_node/podresources_test.go b/test/e2e_node/podresources_test.go index 86e1573c2b5..f61ae71b4b9 100644 --- a/test/e2e_node/podresources_test.go +++ b/test/e2e_node/podresources_test.go @@ -904,7 +904,7 @@ var _ = SIGDescribe("POD Resources", framework.WithSerial(), feature.PodResource podresourcesGetAllocatableResourcesTests(ctx, cli, sd, onlineCPUs, reservedSystemCPUs) }) - framework.It("should return the expected responses", nodefeature.SidecarContainers, func(ctx context.Context) { + framework.It("should return the expected responses", nodefeature.SidecarContainers, feature.SidecarContainers, func(ctx context.Context) { onlineCPUs, err := getOnlineCPUs() framework.ExpectNoError(err, "getOnlineCPUs() failed err: %v", err) @@ -1008,7 +1008,7 @@ var _ = SIGDescribe("POD Resources", framework.WithSerial(), feature.PodResource podresourcesGetTests(ctx, f, cli, false) }) - framework.It("should return the expected responses", nodefeature.SidecarContainers, func(ctx context.Context) { + framework.It("should return the expected responses", nodefeature.SidecarContainers, feature.SidecarContainers, func(ctx context.Context) { onlineCPUs, err := getOnlineCPUs() framework.ExpectNoError(err, "getOnlineCPUs() failed err: %v", err) diff --git a/test/e2e_node/proc_mount_test.go b/test/e2e_node/proc_mount_test.go index 0eb043eb848..726dcfe1e73 100644 --- a/test/e2e_node/proc_mount_test.go +++ b/test/e2e_node/proc_mount_test.go @@ -46,7 +46,7 @@ var _ = SIGDescribe("DefaultProcMount [LinuxOnly]", framework.WithNodeConformanc }) }) -var _ = SIGDescribe("ProcMount [LinuxOnly]", nodefeature.ProcMountType, nodefeature.UserNamespacesSupport, feature.UserNamespacesSupport, func() { +var _ = SIGDescribe("ProcMount [LinuxOnly]", nodefeature.ProcMountType, feature.ProcMountType, nodefeature.UserNamespacesSupport, feature.UserNamespacesSupport, func() { f := framework.NewDefaultFramework("proc-mount-baseline-test") f.NamespacePodSecurityLevel = admissionapi.LevelBaseline @@ -77,7 +77,7 @@ var _ = SIGDescribe("ProcMount [LinuxOnly]", nodefeature.ProcMountType, nodefeat }) }) -var _ = SIGDescribe("ProcMount [LinuxOnly]", nodefeature.ProcMountType, nodefeature.UserNamespacesSupport, feature.UserNamespacesSupport, func() { +var _ = SIGDescribe("ProcMount [LinuxOnly]", nodefeature.ProcMountType, feature.ProcMountType, nodefeature.UserNamespacesSupport, feature.UserNamespacesSupport, func() { f := framework.NewDefaultFramework("proc-mount-privileged-test") f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged diff --git a/test/e2e_node/quota_lsci_test.go b/test/e2e_node/quota_lsci_test.go index 6d26f55a0f2..2f5611b5bcb 100644 --- a/test/e2e_node/quota_lsci_test.go +++ b/test/e2e_node/quota_lsci_test.go @@ -101,7 +101,7 @@ func runOneQuotaTest(f *framework.Framework, quotasRequested bool, userNamespace // pod that creates a file, deletes it, and writes data to it. If // quotas are used to monitor, it will detect this deleted-but-in-use // file; if du is used to monitor, it will not detect this. 
-var _ = SIGDescribe("LocalStorageCapacityIsolationFSQuotaMonitoring", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.LocalStorageCapacityIsolationQuota, nodefeature.LSCIQuotaMonitoring, nodefeature.UserNamespacesSupport, feature.UserNamespacesSupport, func() {
+var _ = SIGDescribe("LocalStorageCapacityIsolationFSQuotaMonitoring", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.LocalStorageCapacityIsolationQuota, nodefeature.LSCIQuotaMonitoring, feature.LSCIQuotaMonitoring, nodefeature.UserNamespacesSupport, feature.UserNamespacesSupport, func() {
 	f := framework.NewDefaultFramework("localstorage-quota-monitoring-test")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 	runOneQuotaTest(f, true, true)
diff --git a/test/e2e_node/resource_metrics_test.go b/test/e2e_node/resource_metrics_test.go
index 54a9c93c055..1f2a857f00b 100644
--- a/test/e2e_node/resource_metrics_test.go
+++ b/test/e2e_node/resource_metrics_test.go
@@ -23,6 +23,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 	e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
@@ -46,7 +47,7 @@ const (
 	maxStatsAge = time.Minute
 )
-var _ = SIGDescribe("ResourceMetricsAPI", nodefeature.ResourceMetrics, func() {
+var _ = SIGDescribe("ResourceMetricsAPI", nodefeature.ResourceMetrics, feature.ResourceMetrics, func() {
 	f := framework.NewDefaultFramework("resource-metrics")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 	ginkgo.Context("when querying /resource/metrics", func() {
diff --git a/test/e2e_node/security_context_test.go b/test/e2e_node/security_context_test.go
index fb14283fcf1..2bcc578e0cf 100644
--- a/test/e2e_node/security_context_test.go
+++ b/test/e2e_node/security_context_test.go
@@ -27,6 +27,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
+	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/nodefeature"
@@ -148,7 +149,7 @@ var _ = SIGDescribe("Security Context", func() {
 			nginxPid = strings.TrimSpace(output)
 		})
-		f.It("should show its pid in the host PID namespace", nodefeature.HostAccess, func(ctx context.Context) {
+		f.It("should show its pid in the host PID namespace", nodefeature.HostAccess, feature.HostAccess, func(ctx context.Context) {
 			busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID())
 			createAndWaitHostPidPod(ctx, busyboxPodName, true)
 			logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -168,7 +169,7 @@ var _ = SIGDescribe("Security Context", func() {
 			}
 		})
-		f.It("should not show its pid in the non-hostpid containers", nodefeature.HostAccess, func(ctx context.Context) {
+		f.It("should not show its pid in the non-hostpid containers", nodefeature.HostAccess, feature.HostAccess, func(ctx context.Context) {
 			busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID())
 			createAndWaitHostPidPod(ctx, busyboxPodName, false)
 			logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -224,7 +225,7 @@ var _ = SIGDescribe("Security Context", func() {
 			framework.Logf("Got host shared memory ID %q", hostSharedMemoryID)
 		})
-		f.It("should show the shared memory ID in the host IPC containers", nodefeature.HostAccess, func(ctx context.Context) {
+		f.It("should show the shared memory ID in the host IPC containers", nodefeature.HostAccess, feature.HostAccess, func(ctx context.Context) {
 			ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID())
 			createAndWaitHostIPCPod(ctx, ipcutilsPodName, true)
 			logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@@ -239,7 +240,7 @@ var _ = SIGDescribe("Security Context", func() {
 			}
 		})
-		f.It("should not show the shared memory ID in the non-hostIPC containers", nodefeature.HostAccess, func(ctx context.Context) {
+		f.It("should not show the shared memory ID in the non-hostIPC containers", nodefeature.HostAccess, feature.HostAccess, func(ctx context.Context) {
 			ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID())
 			createAndWaitHostIPCPod(ctx, ipcutilsPodName, false)
 			logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@@ -307,7 +308,7 @@ var _ = SIGDescribe("Security Context", func() {
 			framework.Logf("Opened a new tcp port %q", listeningPort)
 		})
-		f.It("should listen on same port in the host network containers", nodefeature.HostAccess, func(ctx context.Context) {
+		f.It("should listen on same port in the host network containers", nodefeature.HostAccess, feature.HostAccess, func(ctx context.Context) {
 			busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID())
 			createAndWaitHostNetworkPod(ctx, busyboxPodName, true)
 			logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -321,7 +322,7 @@ var _ = SIGDescribe("Security Context", func() {
 			}
 		})
-		f.It("shouldn't show the same port in the non-hostnetwork containers", nodefeature.HostAccess, func(ctx context.Context) {
+		f.It("shouldn't show the same port in the non-hostnetwork containers", nodefeature.HostAccess, feature.HostAccess, func(ctx context.Context) {
 			busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID())
 			createAndWaitHostNetworkPod(ctx, busyboxPodName, false)
 			logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
diff --git a/test/e2e_node/split_disk_test.go b/test/e2e_node/split_disk_test.go
index 5da03534fbd..658d4670591 100644
--- a/test/e2e_node/split_disk_test.go
+++ b/test/e2e_node/split_disk_test.go
@@ -25,6 +25,7 @@ import (
 	"time"
 	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/kubernetes/test/e2e/feature"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	v1 "k8s.io/api/core/v1"
@@ -43,7 +44,7 @@ import (
 	"github.com/onsi/gomega"
 )
-var _ = SIGDescribe("KubeletSeparateDiskGC", nodefeature.KubeletSeparateDiskGC, func() {
+var _ = SIGDescribe("KubeletSeparateDiskGC", nodefeature.KubeletSeparateDiskGC, feature.KubeletSeparateDiskGC, func() {
 	f := framework.NewDefaultFramework("split-disk-test")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 	pressureTimeout := 10 * time.Minute
diff --git a/test/e2e_node/swap_test.go b/test/e2e_node/swap_test.go
index 3c93e07a213..b64c1e019af 100644
--- a/test/e2e_node/swap_test.go
+++ b/test/e2e_node/swap_test.go
@@ -27,6 +27,7 @@ import (
 	"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
 	"k8s.io/kubernetes/pkg/kubelet/apis/config"
+	"k8s.io/kubernetes/test/e2e/feature"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/nodefeature"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -56,7 +57,7 @@ var (
 	noLimits *resource.Quantity = nil
 )
-var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, framework.WithSerial(), func() {
+var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, feature.Swap, framework.WithSerial(), func() {
 	f := framework.NewDefaultFramework("swap-qos")
 	addAfterEachForCleaningUpPods(f)
 	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
diff --git a/test/e2e_node/system_node_critical_test.go b/test/e2e_node/system_node_critical_test.go
index 8b5c130bec4..4d9bfe5d3b5 100644
--- a/test/e2e_node/system_node_critical_test.go
+++ b/test/e2e_node/system_node_critical_test.go
@@ -29,6 +29,7 @@ import (
 	kubeapi "k8s.io/kubernetes/pkg/apis/core"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
+	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/nodefeature"
 	admissionapi "k8s.io/pod-security-admission/api"
@@ -37,7 +38,7 @@ import (
 	"github.com/onsi/gomega"
 )
-var _ = SIGDescribe("SystemNodeCriticalPod", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.SystemNodeCriticalPod, nodefeature.Eviction, func() {
+var _ = SIGDescribe("SystemNodeCriticalPod", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.SystemNodeCriticalPod, feature.SystemNodeCriticalPod, nodefeature.Eviction, feature.Eviction, func() {
 	f := framework.NewDefaultFramework("system-node-critical-pod-test")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 	// this test only manipulates pods in kube-system