Merge pull request #129166 from kannon92/move-node-features-to-features

[KEP-3041]: deprecate nodefeature for feature labels
Kubernetes Prow Robot 2025-01-14 20:02:33 -08:00 committed by GitHub
commit 2d0a4f7556
38 changed files with 198 additions and 62 deletions
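
The pattern applied across these 38 files is the same everywhere: tests that so far carried only a deprecated nodefeature label now also carry the equivalent feature label registered in test/e2e/feature, so both selectors keep working while test-infra is switched over. A condensed, hypothetical sketch of the before/after (the suite and test names are invented for illustration; the imports and labels are the ones used throughout this diff):

import (
	"context"

	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/nodefeature"
)

var _ = SIGDescribe("Example suite", func() {
	f := framework.NewDefaultFramework("example")

	// Before: only the deprecated node-feature label.
	//   f.It("does something", nodefeature.FSGroup, func(ctx context.Context) { ... })
	// After: both the deprecated label and its replacement.
	f.It("does something", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) {
		// test body unchanged
	})
})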

View File

@@ -38,6 +38,7 @@ import (
 watch "k8s.io/apimachinery/pkg/watch"
 "k8s.io/client-go/util/retry"
 "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
+"k8s.io/kubernetes/test/e2e/feature"
 "k8s.io/kubernetes/test/e2e/framework"
 e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -364,7 +365,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 Containers MUST verify that the projected service account token can be
 read and has correct file mode set including ownership and permission.
 */
-f.It("should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) {
+f.It("should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) {
 e2eskipper.SkipIfNodeOSDistroIs("windows")
 var (

View File

@@ -24,6 +24,7 @@ import (
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
+"k8s.io/kubernetes/test/e2e/feature"
 "k8s.io/kubernetes/test/e2e/framework"
 e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
 e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
@@ -314,7 +315,7 @@ var _ = SIGDescribe("Downward API", func() {
 })
 })
-var _ = SIGDescribe("Downward API", framework.WithSerial(), framework.WithDisruptive(), nodefeature.DownwardAPIHugePages, func() {
+var _ = SIGDescribe("Downward API", framework.WithSerial(), framework.WithDisruptive(), nodefeature.DownwardAPIHugePages, feature.DownwardAPIHugePages, func() {
 f := framework.NewDefaultFramework("downward-api")
 f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

View File

@@ -33,6 +33,7 @@ import (
 "k8s.io/apimachinery/pkg/watch"
 "k8s.io/kubernetes/pkg/kubelet/events"
 runtimeclasstest "k8s.io/kubernetes/pkg/kubelet/runtimeclass/testing"
+"k8s.io/kubernetes/test/e2e/feature"
 "k8s.io/kubernetes/test/e2e/framework"
 e2eevents "k8s.io/kubernetes/test/e2e/framework/events"
 e2eruntimeclass "k8s.io/kubernetes/test/e2e/framework/node/runtimeclass"
@@ -60,7 +61,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
 })
 // The test CANNOT be made a Conformance as it depends on a container runtime to have a specific handler not being installed.
-f.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler", nodefeature.RuntimeHandler, func(ctx context.Context) {
+f.It("should reject a Pod requesting a RuntimeClass with an unconfigured handler", nodefeature.RuntimeHandler, feature.RuntimeHandler, func(ctx context.Context) {
 handler := f.Namespace.Name + "-handler"
 rcName := createRuntimeClass(ctx, f, "unconfigured-handler", handler, nil)
 ginkgo.DeferCleanup(deleteRuntimeClass, f, rcName)
@@ -84,7 +85,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
 // This test requires that the PreconfiguredRuntimeClassHandler has already been set up on nodes.
 // The test CANNOT be made a Conformance as it depends on a container runtime to have a specific handler installed and working.
-f.It("should run a Pod requesting a RuntimeClass with a configured handler", nodefeature.RuntimeHandler, func(ctx context.Context) {
+f.It("should run a Pod requesting a RuntimeClass with a configured handler", nodefeature.RuntimeHandler, feature.RuntimeHandler, func(ctx context.Context) {
 if err := e2eruntimeclass.NodeSupportsPreconfiguredRuntimeClassHandler(ctx, f); err != nil {
 e2eskipper.Skipf("Skipping test as node does not have E2E runtime class handler preconfigured in container runtime config: %v", err)
 }

View File

@@ -541,7 +541,7 @@ var _ = SIGDescribe("Security Context", func() {
 }
 })
-f.It("should run the container as privileged when true [LinuxOnly]", nodefeature.HostAccess, func(ctx context.Context) {
+f.It("should run the container as privileged when true [LinuxOnly]", nodefeature.HostAccess, feature.HostAccess, func(ctx context.Context) {
 podName := createAndWaitUserPod(ctx, true)
 logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName)
 if err != nil {

View File

@@ -27,6 +27,7 @@ import (
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
+"k8s.io/kubernetes/test/e2e/feature"
 "k8s.io/kubernetes/test/e2e/framework"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
@@ -60,7 +61,7 @@ var _ = SIGDescribe("ConfigMap", func() {
 doConfigMapE2EWithoutMappings(ctx, f, false, 0, &defaultMode)
 })
-f.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) {
+f.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) {
 // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
 e2eskipper.SkipIfNodeOSDistroIs("windows")
 defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
@@ -76,7 +77,7 @@ var _ = SIGDescribe("ConfigMap", func() {
 doConfigMapE2EWithoutMappings(ctx, f, true, 0, nil)
 })
-f.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) {
+f.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) {
 // Windows does not support RunAsUser / FSGroup SecurityContext options.
 e2eskipper.SkipIfNodeOSDistroIs("windows")
 doConfigMapE2EWithoutMappings(ctx, f, true, 1001, nil)
@@ -111,7 +112,7 @@ var _ = SIGDescribe("ConfigMap", func() {
 doConfigMapE2EWithMappings(ctx, f, true, 0, nil)
 })
-f.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) {
+f.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) {
 // Windows does not support RunAsUser / FSGroup SecurityContext options.
 e2eskipper.SkipIfNodeOSDistroIs("windows")
 doConfigMapE2EWithMappings(ctx, f, true, 1001, nil)

View File

@@ -25,6 +25,7 @@ import (
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
+"k8s.io/kubernetes/test/e2e/feature"
 "k8s.io/kubernetes/test/e2e/framework"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
@@ -93,7 +94,7 @@ var _ = SIGDescribe("Downward API volume", func() {
 })
 })
-f.It("should provide podname as non-root with fsgroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) {
+f.It("should provide podname as non-root with fsgroup [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) {
 // Windows does not support RunAsUser / FSGroup SecurityContext options.
 e2eskipper.SkipIfNodeOSDistroIs("windows")
 podName := "metadata-volume-" + string(uuid.NewUUID())
@@ -108,7 +109,7 @@ var _ = SIGDescribe("Downward API volume", func() {
 })
 })
-f.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) {
+f.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) {
 // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
 e2eskipper.SkipIfNodeOSDistroIs("windows")
 podName := "metadata-volume-" + string(uuid.NewUUID())

View File

@@ -28,6 +28,7 @@ import (
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
+"k8s.io/kubernetes/test/e2e/feature"
 "k8s.io/kubernetes/test/e2e/framework"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
@@ -49,7 +50,7 @@ var _ = SIGDescribe("EmptyDir volumes", func() {
 f := framework.NewDefaultFramework("emptydir")
 f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
-f.Context("when FSGroup is specified [LinuxOnly]", nodefeature.FSGroup, func() {
+f.Context("when FSGroup is specified [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func() {
 ginkgo.BeforeEach(func() {
 // Windows does not support the FSGroup SecurityContext option.

View File

@@ -24,6 +24,7 @@ import (
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
+"k8s.io/kubernetes/test/e2e/feature"
 "k8s.io/kubernetes/test/e2e/framework"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
@@ -60,7 +61,7 @@ var _ = SIGDescribe("Projected configMap", func() {
 doProjectedConfigMapE2EWithoutMappings(ctx, f, false, 0, &defaultMode)
 })
-f.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) {
+f.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) {
 // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
 e2eskipper.SkipIfNodeOSDistroIs("windows")
 defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
@@ -76,7 +77,7 @@ var _ = SIGDescribe("Projected configMap", func() {
 doProjectedConfigMapE2EWithoutMappings(ctx, f, true, 0, nil)
 })
-f.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) {
+f.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) {
 // Windows does not support RunAsUser / FSGroup SecurityContext options.
 e2eskipper.SkipIfNodeOSDistroIs("windows")
 doProjectedConfigMapE2EWithoutMappings(ctx, f, true, 1001, nil)
@@ -111,7 +112,7 @@ var _ = SIGDescribe("Projected configMap", func() {
 doProjectedConfigMapE2EWithMappings(ctx, f, true, 0, nil)
 })
-f.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) {
+f.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) {
 // Windows does not support RunAsUser / FSGroup SecurityContext options.
 e2eskipper.SkipIfNodeOSDistroIs("windows")
 doProjectedConfigMapE2EWithMappings(ctx, f, true, 1001, nil)

View File

@@ -24,6 +24,7 @@ import (
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
+"k8s.io/kubernetes/test/e2e/feature"
 "k8s.io/kubernetes/test/e2e/framework"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2epodoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
@@ -93,7 +94,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
 })
 })
-f.It("should provide podname as non-root with fsgroup [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) {
+f.It("should provide podname as non-root with fsgroup [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) {
 // Windows does not support RunAsUser / FSGroup SecurityContext options.
 e2eskipper.SkipIfNodeOSDistroIs("windows")
 podName := "metadata-volume-" + string(uuid.NewUUID())
@@ -108,7 +109,7 @@ var _ = SIGDescribe("Projected downwardAPI", func() {
 })
 })
-f.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly]", nodefeature.FSGroup, func(ctx context.Context) {
+f.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly]", nodefeature.FSGroup, feature.FSGroup, func(ctx context.Context) {
 // Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
 e2eskipper.SkipIfNodeOSDistroIs("windows")
 podName := "metadata-volume-" + string(uuid.NewUUID())

View File

@@ -41,6 +41,10 @@ var (
 // Marks tests that exercise the CBOR data format for serving or storage.
 CBOR = framework.WithFeature(framework.ValidFeatures.Add("CBOR"))
+// Owner: sig-node
+// Marks test that exercise checkpointing of containers
+CheckpointContainer = framework.WithFeature(framework.ValidFeatures.Add("CheckpointContainer"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 CloudProvider = framework.WithFeature(framework.ValidFeatures.Add("CloudProvider"))
@@ -71,9 +75,21 @@ var (
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 CPUManager = framework.WithFeature(framework.ValidFeatures.Add("CPUManager"))
+// OWNER: sig-node
+// Testing critical pod admission
+CriticalPod = framework.WithFeature(framework.ValidFeatures.Add("CriticalPod"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 CustomMetricsAutoscaling = framework.WithFeature(framework.ValidFeatures.Add("CustomMetricsAutoscaling"))
+// OWNER: sig-node
+// Testing device managers
+DeviceManager = framework.WithFeature(framework.ValidFeatures.Add("DeviceManager"))
+// OWNER: sig-node
+// Testing device plugins
+DevicePlugin = framework.WithFeature(framework.ValidFeatures.Add("DevicePlugin"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 Downgrade = framework.WithFeature(framework.ValidFeatures.Add("Downgrade"))
@@ -98,6 +114,10 @@ var (
 // is enabled such that passing CDI device IDs through CRI fields is supported
 DRAAdminAccess = framework.WithFeature(framework.ValidFeatures.Add("DRAAdminAccess"))
+// TODO: document the feature (owning SIG, when to use this feature for a test)
+// OWNER: sig-node
+// Testing downward API huge pages
+DownwardAPIHugePages = framework.WithFeature(framework.ValidFeatures.Add("DownwardAPIHugePages"))
 // owning-sig: sig-node
 // kep: https://kep.k8s.io/4381
 // test-infra jobs:
@@ -125,9 +145,20 @@ var (
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 ExperimentalResourceUsageTracking = framework.WithFeature(framework.ValidFeatures.Add("ExperimentalResourceUsageTracking"))
+// OWNER: sig-node
+// Testing eviction manager
+Eviction = framework.WithFeature(framework.ValidFeatures.Add("Eviction"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 Flexvolumes = framework.WithFeature(framework.ValidFeatures.Add("Flexvolumes"))
+// TODO: document the feature (owning SIG, when to use this feature for a test)
+FSGroup = framework.WithFeature(framework.ValidFeatures.Add("FSGroup"))
+// OWNER: sig-node
+// Testing garbage collection of images/containers
+GarbageCollect = framework.WithFeature(framework.ValidFeatures.Add("GarbageCollect"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 GKENodePool = framework.WithFeature(framework.ValidFeatures.Add("GKENodePool"))
@@ -146,6 +177,14 @@ var (
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 GPUUpgrade = framework.WithFeature(framework.ValidFeatures.Add("GPUUpgrade"))
+// OWNER: sig-node
+// Testing graceful node shutdown
+GracefulNodeShutdown = framework.WithFeature(framework.ValidFeatures.Add("GracefulNodeShutdown"))
+// OWNER: sig-node
+// GracefulNodeShutdown based on pod priority
+GracefulNodeShutdownBasedOnPodPriority = framework.WithFeature(framework.ValidFeatures.Add("GracefulNodeShutdownBasedOnPodPriority"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 HAMaster = framework.WithFeature(framework.ValidFeatures.Add("HAMaster"))
@@ -166,9 +205,19 @@ var (
 // not focus on this feature anymore.
 HonorPVReclaimPolicy = framework.WithFeature(framework.ValidFeatures.Add("HonorPVReclaimPolicy"))
+// owner: sig-node
+HostAccess = framework.WithFeature(framework.ValidFeatures.Add("HostAccess"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 HugePages = framework.WithFeature(framework.ValidFeatures.Add("HugePages"))
+// Owner: sig-node
+ImageID = framework.WithFeature(framework.ValidFeatures.Add("ImageID"))
+// Owner: sig-node
+// ImageVolume is used for testing the image volume source feature (https://kep.k8s.io/4639).
+ImageVolume = framework.WithFeature(framework.ValidFeatures.Add("ImageVolume"))
 // Owner: sig-network
 // Marks tests that require a conforming implementation of
 // Ingress.networking.k8s.io to be present.
@@ -188,12 +237,20 @@ var (
 // Marks tests that require kube-dns-autoscaler
 KubeDNSAutoscaler = framework.WithFeature(framework.ValidFeatures.Add("KubeDNSAutoscaler"))
+// Owner: sig-node
+// Testing kubelet drop in KEP
+KubeletConfigDropInDir = framework.WithFeature(framework.ValidFeatures.Add("KubeletConfigDropInDir"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 KubeletCredentialProviders = framework.WithFeature(framework.ValidFeatures.Add("KubeletCredentialProviders"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 KubeletSecurity = framework.WithFeature(framework.ValidFeatures.Add("KubeletSecurity"))
+// KubeletSeparateDiskGC (SIG-node, used for testing separate image filesystem <https://kep.k8s.io/4191>)
+// The tests need separate disk settings on nodes and separate filesystems in storage.conf
+KubeletSeparateDiskGC = framework.WithFeature(framework.ValidFeatures.Add("KubeletSeparateDiskGC"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 KubeProxyDaemonSetDowngrade = framework.WithFeature(framework.ValidFeatures.Add("KubeProxyDaemonSetDowngrade"))
@@ -210,6 +267,9 @@ var (
 // Marks tests that require a cloud provider that implements LoadBalancer Services
 LoadBalancer = framework.WithFeature(framework.ValidFeatures.Add("LoadBalancer"))
+// Owner: sig-storage
+LSCIQuotaMonitoring = framework.WithFeature(framework.ValidFeatures.Add("LSCIQuotaMonitoring"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 LocalStorageCapacityIsolationQuota = framework.WithFeature(framework.ValidFeatures.Add("LocalStorageCapacityIsolationQuota"))
@@ -244,9 +304,17 @@ var (
 // NetworkPolicy.networking.k8s.io to be present.
 NetworkPolicy = framework.WithFeature(framework.ValidFeatures.Add("NetworkPolicy"))
+// Owner: sig-node
+// Testing node allocatable validations
+NodeAllocatable = framework.WithFeature(framework.ValidFeatures.Add("NodeAllocatable"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 NodeAuthenticator = framework.WithFeature(framework.ValidFeatures.Add("NodeAuthenticator"))
+// Owner: sig-node
+// Node Problem Detect e2e tests in tree.
+NodeProblemDetector = framework.WithFeature(framework.ValidFeatures.Add("NodeProblemDetector"))
 // Owner: sig-auth
 // Marks tests that require a conforming implementation of
 // Node claims for serviceaccounts. Typically this means that the
@@ -262,6 +330,15 @@ var (
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 NodeOutOfServiceVolumeDetach = framework.WithFeature(framework.ValidFeatures.Add("NodeOutOfServiceVolumeDetach"))
+// Owner: sig-node
+// Tests aiming to verify oom_score functionality
+OOMScoreAdj = framework.WithFeature(framework.ValidFeatures.Add("OOMScoreAdj"))
+// Owner: sig-node
+// Verify ProcMount feature.
+// Used in combination with user namespaces
+ProcMountType = framework.WithFeature(framework.ValidFeatures.Add("ProcMountType"))
 // Owner: sig-network
 // Marks a single test that tests cluster DNS performance with many services.
 PerformanceDNS = framework.WithFeature(framework.ValidFeatures.Add("PerformanceDNS"))
@@ -304,6 +381,9 @@ var (
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 RecoverVolumeExpansionFailure = framework.WithFeature(framework.ValidFeatures.Add("RecoverVolumeExpansionFailure"))
+// RecursiveReadOnlyMounts (SIG-node, used for testing recursive read-only mounts <https://kep.k8s.io/3857>)
+RecursiveReadOnlyMounts = framework.WithFeature(framework.ValidFeatures.Add("RecursiveReadOnlyMounts"))
 // RelaxedEnvironmentVariableValidation used when we verify whether the pod can consume all printable ASCII characters as environment variable names,
 // and whether the pod can consume configmap/secret that key starts with a number.
 RelaxedEnvironmentVariableValidation = framework.WithFeature(framework.ValidFeatures.Add("RelaxedEnvironmentVariableValidation"))
@@ -312,12 +392,24 @@ var (
 // Marks tests of KEP-4427 that require the `RelaxedDNSSearchValidation` feature gate
 RelaxedDNSSearchValidation = framework.WithFeature(framework.ValidFeatures.Add("RelaxedDNSSearchValidation"))
+// Owner: sig-node
+// Device Management metrics
+ResourceMetrics = framework.WithFeature(framework.ValidFeatures.Add("ResourceMetrics"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 Recreate = framework.WithFeature(framework.ValidFeatures.Add("Recreate"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 RegularResourceUsageTracking = framework.WithFeature(framework.ValidFeatures.Add("RegularResourceUsageTracking"))
+// Owner: sig-node
+// resource health Status for device plugins and DRA <https://kep.k8s.io/4680>
+ResourceHealthStatus = framework.WithFeature(framework.ValidFeatures.Add("ResourceHealthStatus"))
+// Owner: sig-node
+// Runtime Handler
+RuntimeHandler = framework.WithFeature(framework.ValidFeatures.Add("RuntimeHandler"))
 // Owner: sig-scheduling
 // Marks tests of the asynchronous preemption (KEP-4832) that require the `SchedulerAsyncPreemption` feature gate.
 SchedulerAsyncPreemption = framework.WithFeature(framework.ValidFeatures.Add("SchedulerAsyncPreemption"))
@@ -345,7 +437,8 @@ var (
 // and the networking.k8s.io/v1alpha1 API.
 ServiceCIDRs = framework.WithFeature(framework.ValidFeatures.Add("ServiceCIDRs"))
-// TODO: document the feature (owning SIG, when to use this feature for a test)
+// Owner: sig-node
+// Sidecar KEP-753
 SidecarContainers = framework.WithFeature(framework.ValidFeatures.Add("SidecarContainers"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
@@ -369,6 +462,10 @@ var (
 // TODO: document the feature (owning SIG, when to use this feature for a test)
 StatefulSet = framework.WithFeature(framework.ValidFeatures.Add("StatefulSet"))
+// Added to test Swap Feature
+// This label should be used when testing KEP-2400 (Node Swap Support)
+Swap = framework.WithFeature(framework.ValidFeatures.Add("NodeSwap"))
 PodIndexLabel = framework.WithFeature(framework.ValidFeatures.Add("PodIndexLabel"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
@@ -388,6 +485,10 @@ var (
 // (used for testing fine-grained SupplementalGroups control <https://kep.k8s.io/3619>)
 SupplementalGroupsPolicy = framework.WithFeature(framework.ValidFeatures.Add("SupplementalGroupsPolicy"))
+// Owner: sig-node
+// Mark tests that are testing system critical pods
+SystemNodeCriticalPod = framework.WithFeature(framework.ValidFeatures.Add("SystemNodeCriticalPod"))
 // Owner: sig-node
 // Tests marked with this feature MUST run with the CRI Proxy configured so errors can be injected into the kubelet's CRI calls.
 // This is useful for testing how the kubelet handles various error conditions in its CRI interactions.
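
Each entry above follows the same shape: a short ownership comment, then a registration via framework.WithFeature(framework.ValidFeatures.Add(...)) whose result is passed as an extra argument to SIGDescribe, f.It, f.Describe, or f.Context in the test files changed below. A hypothetical sketch of adding one more entry in this style ("ExampleFeature" is an invented name, not part of this PR):

// Owner: sig-node
// Marks tests that exercise the hypothetical ExampleFeature behaviour.
ExampleFeature = framework.WithFeature(framework.ValidFeatures.Add("ExampleFeature"))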

View File

@@ -28,6 +28,7 @@ import (
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/fields"
+"k8s.io/kubernetes/test/e2e/feature"
 "k8s.io/kubernetes/test/e2e/framework"
 e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@@ -43,7 +44,7 @@ import (
 // This test checks if node-problem-detector (NPD) runs fine without error on
 // the up to 10 nodes in the cluster. NPD's functionality is tested in e2e_node tests.
-var _ = SIGDescribe("NodeProblemDetector", nodefeature.NodeProblemDetector, func() {
+var _ = SIGDescribe("NodeProblemDetector", nodefeature.NodeProblemDetector, feature.NodeProblemDetector, func() {
 const (
 pollInterval = 1 * time.Second
 pollTimeout = 1 * time.Minute

View File

@@ -22,6 +22,8 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 )
+// We are deprecating this.
+// These features will be kept around for a short period so we can switch over test-infra to use WithFeature.
 var (
 // Please keep the list in alphabetical order.
@@ -41,6 +43,7 @@ var (
 DownwardAPIHugePages = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("DownwardAPIHugePages"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
+// not used anywhere
 DynamicResourceAllocation = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("DynamicResourceAllocation"))
 // TODO: document the feature (owning SIG, when to use this feature for a test)
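
The deprecation note added above is why tests in this PR carry both labels: existing test-infra jobs still select tests via the old NodeFeature labels registered with framework.WithNodeFeature, so those definitions remain for a transition period while the WithFeature equivalents in test/e2e/feature take over. A minimal sketch of the two registrations side by side, assuming the "Eviction" feature used elsewhere in this diff (the first declaration lives in test/e2e/nodefeature, the second in test/e2e/feature):

// test/e2e/nodefeature: deprecated label, kept only until test-infra jobs are migrated.
Eviction = framework.WithNodeFeature(framework.ValidNodeFeatures.Add("Eviction"))

// test/e2e/feature: replacement label that the tests now carry as well.
Eviction = framework.WithFeature(framework.ValidFeatures.Add("Eviction"))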

View File

@@ -33,6 +33,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
 restclient "k8s.io/client-go/rest"
+"k8s.io/kubernetes/test/e2e/feature"
 "k8s.io/kubernetes/test/e2e/framework"
 e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -130,7 +131,7 @@ func getCheckpointContainerErrorMetric(ctx context.Context, f *framework.Framewo
 return 0, nil
 }
-var _ = SIGDescribe("Checkpoint Container", nodefeature.CheckpointContainer, func() {
+var _ = SIGDescribe("Checkpoint Container", nodefeature.CheckpointContainer, feature.CheckpointContainer, func() {
 f := framework.NewDefaultFramework("checkpoint-container-test")
 f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
 ginkgo.It("will checkpoint a container out of a pod", func(ctx context.Context) {

View File

@@ -29,6 +29,7 @@ import (
 runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
 admissionapi "k8s.io/pod-security-admission/api"
+"k8s.io/kubernetes/test/e2e/feature"
 "k8s.io/kubernetes/test/e2e/framework"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/e2e/nodefeature"
@@ -1621,7 +1622,7 @@ var _ = SIGDescribe(framework.WithSerial(), "Containers Lifecycle", func() {
 })
 })
-var _ = SIGDescribe(nodefeature.SidecarContainers, "Containers Lifecycle", func() {
+var _ = SIGDescribe(nodefeature.SidecarContainers, feature.SidecarContainers, "Containers Lifecycle", func() {
 f := framework.NewDefaultFramework("containers-lifecycle-test")
 addAfterEachForCleaningUpPods(f)
 f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
@@ -5408,7 +5409,7 @@ var _ = SIGDescribe(nodefeature.SidecarContainers, "Containers Lifecycle", func(
 })
 })
-var _ = SIGDescribe(nodefeature.SidecarContainers, framework.WithSerial(), "Containers Lifecycle", func() {
+var _ = SIGDescribe(nodefeature.SidecarContainers, feature.SidecarContainers, framework.WithSerial(), "Containers Lifecycle", func() {
 f := framework.NewDefaultFramework("containers-lifecycle-test-serial")
 addAfterEachForCleaningUpPods(f)
 f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

View File

@@ -34,6 +34,7 @@ import (
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/uuid"
 runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
+"k8s.io/kubernetes/test/e2e/feature"
 "k8s.io/kubernetes/test/e2e/framework"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/e2e/nodefeature"
@@ -101,7 +102,7 @@ func dumpRunningContainer(ctx context.Context) error {
 var _ = SIGDescribe("Container Manager Misc", framework.WithSerial(), func() {
 f := framework.NewDefaultFramework("kubelet-container-manager")
 f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
-f.Describe("Validate OOM score adjustments", nodefeature.OOMScoreAdj, func() {
+f.Describe("Validate OOM score adjustments", nodefeature.OOMScoreAdj, feature.OOMScoreAdj, func() {
 ginkgo.Context("once the node is setup", func() {
 ginkgo.It("container runtime's oom-score-adj should be -999", func(ctx context.Context) {
 runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)

View File

@@ -710,7 +710,7 @@ func runCPUManagerTests(f *framework.Framework) {
 runSMTAlignmentPositiveTests(ctx, f, smtLevel)
 })
-f.It("should not reuse CPUs of restartable init containers", nodefeature.SidecarContainers, func(ctx context.Context) {
+f.It("should not reuse CPUs of restartable init containers", nodefeature.SidecarContainers, feature.SidecarContainers, func(ctx context.Context) {
 cpuCap, cpuAlloc, _ = getLocalNodeCPUDetails(ctx, f)
 // Skip rest of the tests if CPU capacity < 3.

View File

@@ -27,6 +27,7 @@ import (
 kubeapi "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/pkg/apis/scheduling"
 kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
+"k8s.io/kubernetes/test/e2e/feature"
 "k8s.io/kubernetes/test/e2e/framework"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/e2e/nodefeature"
@@ -44,7 +45,7 @@ const (
 bestEffortPodName = "best-effort"
 )
-var _ = SIGDescribe("CriticalPod", framework.WithSerial(), framework.WithDisruptive(), nodefeature.CriticalPod, func() {
+var _ = SIGDescribe("CriticalPod", framework.WithSerial(), framework.WithDisruptive(), nodefeature.CriticalPod, feature.CriticalPod, func() {
 f := framework.NewDefaultFramework("critical-pod-test")
 f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 ginkgo.Context("when we need to admit a critical pod", func() {

View File

@@ -33,6 +33,7 @@ import (
 "k8s.io/klog/v2"
 admissionapi "k8s.io/pod-security-admission/api"
+"k8s.io/kubernetes/test/e2e/feature"
 "k8s.io/kubernetes/test/e2e/framework"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -51,7 +52,7 @@ const (
 )
 // Serial because the test updates kubelet configuration.
-var _ = SIGDescribe("Device Manager", framework.WithSerial(), nodefeature.DeviceManager, func() {
+var _ = SIGDescribe("Device Manager", framework.WithSerial(), nodefeature.DeviceManager, feature.DeviceManager, func() {
 f := framework.NewDefaultFramework("devicemanager-test")
 f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

View File

@@ -27,6 +27,7 @@ import (
 v1 "k8s.io/api/core/v1"
 kubeletdevicepluginv1beta1 "k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1"
 "k8s.io/kubernetes/pkg/features"
+"k8s.io/kubernetes/test/e2e/feature"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 "k8s.io/kubernetes/test/e2e/nodefeature"
@@ -40,7 +41,7 @@ import (
 "k8s.io/kubernetes/test/e2e_node/testdeviceplugin"
 )
-var _ = SIGDescribe("Device Plugin Failures Pod Status", nodefeature.ResourceHealthStatus, func() {
+var _ = SIGDescribe("Device Plugin Failures Pod Status", nodefeature.ResourceHealthStatus, feature.ResourceHealthStatus, func() {
 f := framework.NewDefaultFramework("device-plugin-failures")
 f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

View File

@@ -63,7 +63,7 @@ var (
 )
 // Serial because the test restarts Kubelet
-var _ = SIGDescribe("Device Plugin", nodefeature.DevicePlugin, framework.WithSerial(), func() {
+var _ = SIGDescribe("Device Plugin", nodefeature.DevicePlugin, framework.WithSerial(), feature.DevicePlugin, func() {
 f := framework.NewDefaultFramework("device-plugin-errors")
 f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 testDevicePlugin(f, kubeletdevicepluginv1beta1.DevicePluginPath)
@@ -694,7 +694,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 }
 })
-f.It("Can schedule a pod with a restartable init container", nodefeature.SidecarContainers, func(ctx context.Context) {
+f.It("Can schedule a pod with a restartable init container", nodefeature.SidecarContainers, feature.SidecarContainers, func(ctx context.Context) {
 podRECMD := "devs=$(ls /tmp/ | egrep '^Dev-[0-9]+$') && echo stub devices: $devs && sleep %s"
 sleepOneSecond := "1s"
 rl := v1.ResourceList{v1.ResourceName(SampleDeviceResourceName): *resource.NewQuantity(1, resource.DecimalSI)}

View File

@ -36,6 +36,7 @@ import (
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types" kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@ -70,7 +71,7 @@ const (
// InodeEviction tests that the node responds to node disk pressure by evicting only responsible pods. // InodeEviction tests that the node responds to node disk pressure by evicting only responsible pods.
// Node disk pressure is induced by consuming all inodes on the node. // Node disk pressure is induced by consuming all inodes on the node.
var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() {
f := framework.NewDefaultFramework("inode-eviction-test") f := framework.NewDefaultFramework("inode-eviction-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
expectedNodeCondition := v1.NodeDiskPressure expectedNodeCondition := v1.NodeDiskPressure
@ -105,7 +106,7 @@ var _ = SIGDescribe("InodeEviction", framework.WithSlow(), framework.WithSerial(
// ImageGCNoEviction tests that the node does not evict pods when inodes are consumed by images // ImageGCNoEviction tests that the node does not evict pods when inodes are consumed by images
// Disk pressure is induced by pulling large images // Disk pressure is induced by pulling large images
var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() {
f := framework.NewDefaultFramework("image-gc-eviction-test") f := framework.NewDefaultFramework("image-gc-eviction-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
pressureTimeout := 10 * time.Minute pressureTimeout := 10 * time.Minute
@ -136,7 +137,7 @@ var _ = SIGDescribe("ImageGCNoEviction", framework.WithSlow(), framework.WithSer
// MemoryAllocatableEviction tests that the node responds to node memory pressure by evicting only responsible pods. // MemoryAllocatableEviction tests that the node responds to node memory pressure by evicting only responsible pods.
// Node memory pressure is only encountered because we reserve the majority of the node's capacity via kube-reserved. // Node memory pressure is only encountered because we reserve the majority of the node's capacity via kube-reserved.
var _ = SIGDescribe("MemoryAllocatableEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { var _ = SIGDescribe("MemoryAllocatableEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() {
f := framework.NewDefaultFramework("memory-allocatable-eviction-test") f := framework.NewDefaultFramework("memory-allocatable-eviction-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
expectedNodeCondition := v1.NodeMemoryPressure expectedNodeCondition := v1.NodeMemoryPressure
@ -170,7 +171,7 @@ var _ = SIGDescribe("MemoryAllocatableEviction", framework.WithSlow(), framework
// LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods // LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
// Disk pressure is induced by running pods which consume disk space. // Disk pressure is induced by running pods which consume disk space.
var _ = SIGDescribe("LocalStorageEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { var _ = SIGDescribe("LocalStorageEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() {
f := framework.NewDefaultFramework("localstorage-eviction-test") f := framework.NewDefaultFramework("localstorage-eviction-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
pressureTimeout := 15 * time.Minute pressureTimeout := 15 * time.Minute
@ -211,7 +212,7 @@ var _ = SIGDescribe("LocalStorageEviction", framework.WithSlow(), framework.With
// LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods // LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
// Disk pressure is induced by running pods which consume disk space, which exceed the soft eviction threshold. // Disk pressure is induced by running pods which consume disk space, which exceed the soft eviction threshold.
// Note: This test's purpose is to test Soft Evictions. Local storage was chosen since it is the least costly to run. // Note: This test's purpose is to test Soft Evictions. Local storage was chosen since it is the least costly to run.
var _ = SIGDescribe("LocalStorageSoftEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { var _ = SIGDescribe("LocalStorageSoftEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() {
f := framework.NewDefaultFramework("localstorage-eviction-test") f := framework.NewDefaultFramework("localstorage-eviction-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
pressureTimeout := 10 * time.Minute pressureTimeout := 10 * time.Minute
@ -249,7 +250,7 @@ var _ = SIGDescribe("LocalStorageSoftEviction", framework.WithSlow(), framework.
}) })
}) })
var _ = SIGDescribe("LocalStorageSoftEvictionNotOverwriteTerminationGracePeriodSeconds", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { var _ = SIGDescribe("LocalStorageSoftEvictionNotOverwriteTerminationGracePeriodSeconds", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() {
f := framework.NewDefaultFramework("localstorage-eviction-test") f := framework.NewDefaultFramework("localstorage-eviction-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
pressureTimeout := 10 * time.Minute pressureTimeout := 10 * time.Minute
@ -288,7 +289,7 @@ var _ = SIGDescribe("LocalStorageSoftEvictionNotOverwriteTerminationGracePeriodS
}) })
// LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions // LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions
var _ = SIGDescribe("LocalStorageCapacityIsolationEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { var _ = SIGDescribe("LocalStorageCapacityIsolationEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.LocalStorageCapacityIsolationQuota, nodefeature.Eviction, feature.Eviction, func() {
f := framework.NewDefaultFramework("localstorage-eviction-test") f := framework.NewDefaultFramework("localstorage-eviction-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
evictionTestTimeout := 10 * time.Minute evictionTestTimeout := 10 * time.Minute
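A hedged sketch of the pod shape such a test builds: a container capped by an ephemeral-storage limit that its workload then exceeds, so the kubelet evicts it. The image, sizes, and names are illustrative.

    package evictionsketch

    import (
        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/resource"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func ephemeralStorageHogPod() *v1.Pod {
        return &v1.Pod{
            ObjectMeta: metav1.ObjectMeta{Name: "ephemeral-storage-hog"},
            Spec: v1.PodSpec{
                RestartPolicy: v1.RestartPolicyNever,
                Containers: []v1.Container{{
                    Name:  "hog",
                    Image: "busybox", // any image with a shell
                    // Write well past the declared limit so the eviction fires.
                    Command: []string{"sh", "-c", "dd if=/dev/zero of=/big bs=1M count=200 && sleep 3600"},
                    Resources: v1.ResourceRequirements{
                        Limits: v1.ResourceList{
                            v1.ResourceEphemeralStorage: resource.MustParse("100Mi"),
                        },
                    },
                }},
            },
        }
    }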
@ -341,7 +342,7 @@ var _ = SIGDescribe("LocalStorageCapacityIsolationEviction", framework.WithSlow(
// PriorityMemoryEvictionOrdering tests that the node responds to node memory pressure by evicting pods. // PriorityMemoryEvictionOrdering tests that the node responds to node memory pressure by evicting pods.
// It verifies that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before // It verifies that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
// the higher-priority pod. // the higher-priority pod.
var _ = SIGDescribe("PriorityMemoryEvictionOrdering", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { var _ = SIGDescribe("PriorityMemoryEvictionOrdering", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() {
f := framework.NewDefaultFramework("priority-memory-eviction-ordering-test") f := framework.NewDefaultFramework("priority-memory-eviction-ordering-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
expectedNodeCondition := v1.NodeMemoryPressure expectedNodeCondition := v1.NodeMemoryPressure
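A hedged sketch of the priority plumbing such an ordering test relies on: a low-value PriorityClass whose pods should be reclaimed first under memory pressure. The class name and value are illustrative.

    package evictionsketch

    import (
        schedulingv1 "k8s.io/api/scheduling/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    // Pods referencing this class via PriorityClassName are expected to be evicted
    // before pods of a higher-value class, and guaranteed pods not at all.
    var lowPriority = &schedulingv1.PriorityClass{
        ObjectMeta: metav1.ObjectMeta{Name: "eviction-ordering-low"},
        Value:      100,
    }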
@ -401,7 +402,7 @@ var _ = SIGDescribe("PriorityMemoryEvictionOrdering", framework.WithSlow(), fram
// PriorityLocalStorageEvictionOrdering tests that the node responds to node disk pressure by evicting pods. // PriorityLocalStorageEvictionOrdering tests that the node responds to node disk pressure by evicting pods.
// It verifies that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before // It verifies that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
// the higher-priority pod. // the higher-priority pod.
var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() {
f := framework.NewDefaultFramework("priority-disk-eviction-ordering-test") f := framework.NewDefaultFramework("priority-disk-eviction-ordering-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
expectedNodeCondition := v1.NodeDiskPressure expectedNodeCondition := v1.NodeDiskPressure
@ -464,7 +465,7 @@ var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering", framework.WithSlow()
}) })
// PriorityPidEvictionOrdering tests that the node emits pid pressure in response to a fork bomb, and evicts pods by priority // PriorityPidEvictionOrdering tests that the node emits pid pressure in response to a fork bomb, and evicts pods by priority
var _ = SIGDescribe("PriorityPidEvictionOrdering", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() { var _ = SIGDescribe("PriorityPidEvictionOrdering", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, feature.Eviction, func() {
f := framework.NewDefaultFramework("pidpressure-eviction-test") f := framework.NewDefaultFramework("pidpressure-eviction-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
pressureTimeout := 10 * time.Minute pressureTimeout := 10 * time.Minute
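A hedged sketch of how pid pressure is made reachable for a test like this: a hard eviction threshold on the pid.available signal, so a fork bomb in a low-priority pod trips it. The percentage is illustrative.

    package evictionsketch

    import kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"

    func withPIDPressureThreshold(cfg *kubeletconfig.KubeletConfiguration) {
        // Evict as soon as fewer than 10% of the node's PIDs remain available.
        cfg.EvictionHard = map[string]string{"pid.available": "10%"}
    }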

View File

@ -27,6 +27,7 @@ import (
internalapi "k8s.io/cri-api/pkg/apis" internalapi "k8s.io/cri-api/pkg/apis"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
"k8s.io/kubelet/pkg/types" "k8s.io/kubelet/pkg/types"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/nodefeature" "k8s.io/kubernetes/test/e2e/nodefeature"
@ -73,7 +74,7 @@ type testRun struct {
// GarbageCollect tests that the Kubelet conforms to the Kubelet Garbage Collection Policy, found here: // GarbageCollect tests that the Kubelet conforms to the Kubelet Garbage Collection Policy, found here:
// http://kubernetes.io/docs/admin/garbage-collection/ // http://kubernetes.io/docs/admin/garbage-collection/
var _ = SIGDescribe("GarbageCollect", framework.WithSerial(), nodefeature.GarbageCollect, func() { var _ = SIGDescribe("GarbageCollect", framework.WithSerial(), nodefeature.GarbageCollect, feature.GarbageCollect, func() {
f := framework.NewDefaultFramework("garbage-collect-test") f := framework.NewDefaultFramework("garbage-collect-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
containerNamePrefix := "gc-test-container-" containerNamePrefix := "gc-test-container-"
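A hedged sketch of the garbage-collection knobs that policy document describes; the percentages and minimum age here are illustrative, not what the test configures.

    package gcsketch

    import (
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
    )

    func withImageGC(cfg *kubeletconfig.KubeletConfiguration) {
        // Reclaim images once disk usage passes the high threshold, and keep
        // deleting until it drops back below the low one.
        cfg.ImageGCHighThresholdPercent = 85
        cfg.ImageGCLowThresholdPercent = 80
        cfg.ImageMinimumGCAge = metav1.Duration{Duration: 2 * time.Minute}
    }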

View File

@ -26,8 +26,10 @@ import (
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1" runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
kubefeatures "k8s.io/kubernetes/pkg/features" kubefeatures "k8s.io/kubernetes/pkg/features"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/nodefeature"
admissionapi "k8s.io/pod-security-admission/api" admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2"
@ -41,7 +43,7 @@ const (
checkGCFreq time.Duration = 30 * time.Second checkGCFreq time.Duration = 30 * time.Second
) )
var _ = SIGDescribe("ImageGarbageCollect", framework.WithSerial(), framework.WithNodeFeature("GarbageCollect"), func() { var _ = SIGDescribe("ImageGarbageCollect", framework.WithSerial(), nodefeature.GarbageCollect, feature.GarbageCollect, func() {
f := framework.NewDefaultFramework("image-garbage-collect-test") f := framework.NewDefaultFramework("image-garbage-collect-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
var is internalapi.ImageManagerService var is internalapi.ImageManagerService

View File

@ -22,16 +22,18 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/dump" "k8s.io/apimachinery/pkg/util/dump"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/nodefeature" "k8s.io/kubernetes/test/e2e/nodefeature"
admissionapi "k8s.io/pod-security-admission/api" admissionapi "k8s.io/pod-security-admission/api"
"github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega" "github.com/onsi/gomega"
) )
var _ = SIGDescribe("ImageID", nodefeature.ImageID, func() { var _ = SIGDescribe("ImageID", nodefeature.ImageID, feature.ImageID, func() {
busyBoxImage := "registry.k8s.io/e2e-test-images/busybox@sha256:a9155b13325b2abef48e71de77bb8ac015412a566829f621d06bfae5c699b1b9" busyBoxImage := "registry.k8s.io/e2e-test-images/busybox@sha256:a9155b13325b2abef48e71de77bb8ac015412a566829f621d06bfae5c699b1b9"

View File

@ -30,6 +30,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/kubelet/images" "k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@ -40,7 +41,7 @@ import (
// Run this single test locally using a running CRI-O instance by: // Run this single test locally using a running CRI-O instance by:
// make test-e2e-node CONTAINER_RUNTIME_ENDPOINT="unix:///var/run/crio/crio.sock" TEST_ARGS='--ginkgo.focus="ImageVolume" --feature-gates=ImageVolume=true --service-feature-gates=ImageVolume=true --kubelet-flags="--cgroup-root=/ --runtime-cgroups=/system.slice/crio.service --kubelet-cgroups=/system.slice/kubelet.service --fail-swap-on=false"' // make test-e2e-node CONTAINER_RUNTIME_ENDPOINT="unix:///var/run/crio/crio.sock" TEST_ARGS='--ginkgo.focus="ImageVolume" --feature-gates=ImageVolume=true --service-feature-gates=ImageVolume=true --kubelet-flags="--cgroup-root=/ --runtime-cgroups=/system.slice/crio.service --kubelet-cgroups=/system.slice/kubelet.service --fail-swap-on=false"'
var _ = SIGDescribe("ImageVolume", nodefeature.ImageVolume, func() { var _ = SIGDescribe("ImageVolume", nodefeature.ImageVolume, feature.ImageVolume, func() {
f := framework.NewDefaultFramework("image-volume-test") f := framework.NewDefaultFramework("image-volume-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
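A hedged sketch of the volume shape the ImageVolume tests exercise: a volume backed by an OCI image reference that the kubelet pulls and exposes read-only at the mount path. The image reference is illustrative.

    package volumesketch

    import v1 "k8s.io/api/core/v1"

    // An image-backed volume; containers mount it like any other volume.
    var imageBackedVolume = v1.Volume{
        Name: "artifact",
        VolumeSource: v1.VolumeSource{
            Image: &v1.ImageVolumeSource{
                Reference:  "registry.k8s.io/some-artifact:v1", // illustrative
                PullPolicy: v1.PullIfNotPresent,
            },
        },
    }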

View File

@ -26,11 +26,12 @@ import (
"github.com/onsi/gomega" "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/nodefeature" "k8s.io/kubernetes/test/e2e/nodefeature"
) )
var _ = SIGDescribe("Kubelet Config", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.KubeletConfigDropInDir, func() { var _ = SIGDescribe("Kubelet Config", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.KubeletConfigDropInDir, feature.KubeletConfigDropInDir, func() {
f := framework.NewDefaultFramework("kubelet-config-drop-in-dir-test") f := framework.NewDefaultFramework("kubelet-config-drop-in-dir-test")
ginkgo.Context("when merging drop-in configs", func() { ginkgo.Context("when merging drop-in configs", func() {
var oldcfg *kubeletconfig.KubeletConfiguration var oldcfg *kubeletconfig.KubeletConfiguration

View File

@ -26,6 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@ -36,7 +37,7 @@ import (
// Usage: // Usage:
// make test-e2e-node TEST_ARGS='--service-feature-gates=RecursiveReadOnlyMounts=true --kubelet-flags="--feature-gates=RecursiveReadOnlyMounts=true"' FOCUS="Mount recursive read-only" SKIP="" // make test-e2e-node TEST_ARGS='--service-feature-gates=RecursiveReadOnlyMounts=true --kubelet-flags="--feature-gates=RecursiveReadOnlyMounts=true"' FOCUS="Mount recursive read-only" SKIP=""
var _ = SIGDescribe("Mount recursive read-only [LinuxOnly]", nodefeature.RecursiveReadOnlyMounts, func() { var _ = SIGDescribe("Mount recursive read-only [LinuxOnly]", nodefeature.RecursiveReadOnlyMounts, feature.RecursiveReadOnlyMounts, func() {
f := framework.NewDefaultFramework("mount-rro") f := framework.NewDefaultFramework("mount-rro")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
ginkgo.Describe("Mount recursive read-only", func() { ginkgo.Describe("Mount recursive read-only", func() {

View File

@ -36,6 +36,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/stats/pidlimit" "k8s.io/kubernetes/pkg/kubelet/stats/pidlimit"
admissionapi "k8s.io/pod-security-admission/api" admissionapi "k8s.io/pod-security-admission/api"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/nodefeature" "k8s.io/kubernetes/test/e2e/nodefeature"
e2enodekubelet "k8s.io/kubernetes/test/e2e_node/kubeletconfig" e2enodekubelet "k8s.io/kubernetes/test/e2e_node/kubeletconfig"
@ -71,7 +72,7 @@ func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration,
var _ = SIGDescribe("Node Container Manager", framework.WithSerial(), func() { var _ = SIGDescribe("Node Container Manager", framework.WithSerial(), func() {
f := framework.NewDefaultFramework("node-container-manager") f := framework.NewDefaultFramework("node-container-manager")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
f.Describe("Validate Node Allocatable", nodefeature.NodeAllocatable, func() { f.Describe("Validate Node Allocatable", nodefeature.NodeAllocatable, feature.NodeAllocatable, func() {
ginkgo.It("sets up the node and runs the test", func(ctx context.Context) { ginkgo.It("sets up the node and runs the test", func(ctx context.Context) {
framework.ExpectNoError(runTest(ctx, f)) framework.ExpectNoError(runTest(ctx, f))
}) })
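For context, the relation this validation is about is roughly Allocatable = Capacity - KubeReserved - SystemReserved - HardEvictionThresholds. A hedged sketch of the reservation knobs involved; the amounts are illustrative.

    package allocatablesketch

    import kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"

    func withReservations(cfg *kubeletconfig.KubeletConfiguration) {
        cfg.KubeReserved = map[string]string{"cpu": "200m", "memory": "200Mi"}
        cfg.SystemReserved = map[string]string{"cpu": "200m", "memory": "200Mi"}
        // Enforce the pods cgroup so total pod usage cannot exceed Allocatable.
        cfg.EnforceNodeAllocatable = []string{"pods"}
    }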

View File

@ -38,13 +38,14 @@ import (
admissionapi "k8s.io/pod-security-admission/api" admissionapi "k8s.io/pod-security-admission/api"
"k8s.io/kubernetes/pkg/kubelet/util" "k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/nodefeature" "k8s.io/kubernetes/test/e2e/nodefeature"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
) )
var _ = SIGDescribe("NodeProblemDetector", nodefeature.NodeProblemDetector, framework.WithSerial(), func() { var _ = SIGDescribe("NodeProblemDetector", nodefeature.NodeProblemDetector, feature.NodeProblemDetector, framework.WithSerial(), func() {
const ( const (
pollInterval = 1 * time.Second pollInterval = 1 * time.Second
pollConsistent = 5 * time.Second pollConsistent = 5 * time.Second

View File

@ -40,6 +40,7 @@ import (
"github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega" "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/apis/scheduling" "k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@ -57,7 +58,7 @@ import (
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
) )
var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature.GracefulNodeShutdown, nodefeature.GracefulNodeShutdownBasedOnPodPriority, func() { var _ = SIGDescribe("GracefulNodeShutdown", framework.WithSerial(), nodefeature.GracefulNodeShutdown, nodefeature.GracefulNodeShutdownBasedOnPodPriority, feature.GracefulNodeShutdown, feature.GracefulNodeShutdownBasedOnPodPriority, func() {
f := framework.NewDefaultFramework("graceful-node-shutdown") f := framework.NewDefaultFramework("graceful-node-shutdown")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
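A hedged sketch of the two durations these shutdown tests revolve around: the total grace period the kubelet requests from the node, and the slice of it reserved for critical pods. Values are illustrative.

    package shutdownsketch

    import (
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
    )

    func withGracefulShutdown(cfg *kubeletconfig.KubeletConfiguration) {
        // Regular pods are terminated first; critical pods keep the remaining 10s.
        cfg.ShutdownGracePeriod = metav1.Duration{Duration: 30 * time.Second}
        cfg.ShutdownGracePeriodCriticalPods = metav1.Duration{Duration: 10 * time.Second}
    }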

View File

@ -904,7 +904,7 @@ var _ = SIGDescribe("POD Resources", framework.WithSerial(), feature.PodResource
podresourcesGetAllocatableResourcesTests(ctx, cli, sd, onlineCPUs, reservedSystemCPUs) podresourcesGetAllocatableResourcesTests(ctx, cli, sd, onlineCPUs, reservedSystemCPUs)
}) })
framework.It("should return the expected responses", nodefeature.SidecarContainers, func(ctx context.Context) { framework.It("should return the expected responses", nodefeature.SidecarContainers, feature.SidecarContainers, func(ctx context.Context) {
onlineCPUs, err := getOnlineCPUs() onlineCPUs, err := getOnlineCPUs()
framework.ExpectNoError(err, "getOnlineCPUs() failed err: %v", err) framework.ExpectNoError(err, "getOnlineCPUs() failed err: %v", err)
@ -1008,7 +1008,7 @@ var _ = SIGDescribe("POD Resources", framework.WithSerial(), feature.PodResource
podresourcesGetTests(ctx, f, cli, false) podresourcesGetTests(ctx, f, cli, false)
}) })
framework.It("should return the expected responses", nodefeature.SidecarContainers, func(ctx context.Context) { framework.It("should return the expected responses", nodefeature.SidecarContainers, feature.SidecarContainers, func(ctx context.Context) {
onlineCPUs, err := getOnlineCPUs() onlineCPUs, err := getOnlineCPUs()
framework.ExpectNoError(err, "getOnlineCPUs() failed err: %v", err) framework.ExpectNoError(err, "getOnlineCPUs() failed err: %v", err)

View File

@ -46,7 +46,7 @@ var _ = SIGDescribe("DefaultProcMount [LinuxOnly]", framework.WithNodeConformanc
}) })
}) })
var _ = SIGDescribe("ProcMount [LinuxOnly]", nodefeature.ProcMountType, nodefeature.UserNamespacesSupport, feature.UserNamespacesSupport, func() { var _ = SIGDescribe("ProcMount [LinuxOnly]", nodefeature.ProcMountType, feature.ProcMountType, nodefeature.UserNamespacesSupport, feature.UserNamespacesSupport, func() {
f := framework.NewDefaultFramework("proc-mount-baseline-test") f := framework.NewDefaultFramework("proc-mount-baseline-test")
f.NamespacePodSecurityLevel = admissionapi.LevelBaseline f.NamespacePodSecurityLevel = admissionapi.LevelBaseline
@ -77,7 +77,7 @@ var _ = SIGDescribe("ProcMount [LinuxOnly]", nodefeature.ProcMountType, nodefeat
}) })
}) })
var _ = SIGDescribe("ProcMount [LinuxOnly]", nodefeature.ProcMountType, nodefeature.UserNamespacesSupport, feature.UserNamespacesSupport, func() { var _ = SIGDescribe("ProcMount [LinuxOnly]", nodefeature.ProcMountType, feature.ProcMountType, nodefeature.UserNamespacesSupport, feature.UserNamespacesSupport, func() {
f := framework.NewDefaultFramework("proc-mount-privileged-test") f := framework.NewDefaultFramework("proc-mount-privileged-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

View File

@ -101,7 +101,7 @@ func runOneQuotaTest(f *framework.Framework, quotasRequested bool, userNamespace
// pod that creates a file, deletes it, and writes data to it. If // pod that creates a file, deletes it, and writes data to it. If
// quotas are used to monitor, it will detect this deleted-but-in-use // quotas are used to monitor, it will detect this deleted-but-in-use
// file; if du is used to monitor, it will not detect this. // file; if du is used to monitor, it will not detect this.
var _ = SIGDescribe("LocalStorageCapacityIsolationFSQuotaMonitoring", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.LocalStorageCapacityIsolationQuota, nodefeature.LSCIQuotaMonitoring, nodefeature.UserNamespacesSupport, feature.UserNamespacesSupport, func() { var _ = SIGDescribe("LocalStorageCapacityIsolationFSQuotaMonitoring", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.LocalStorageCapacityIsolationQuota, nodefeature.LSCIQuotaMonitoring, feature.LSCIQuotaMonitoring, nodefeature.UserNamespacesSupport, feature.UserNamespacesSupport, func() {
f := framework.NewDefaultFramework("localstorage-quota-monitoring-test") f := framework.NewDefaultFramework("localstorage-quota-monitoring-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
runOneQuotaTest(f, true, true) runOneQuotaTest(f, true, true)
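What "deleted-but-in-use" means here, as a runnable sketch: the file is unlinked, but the still-open descriptor keeps consuming space, which a filesystem quota attributes to the writer while a du walk no longer sees the file. Paths and sizes are illustrative.

    package main

    import (
        "log"
        "os"
    )

    func main() {
        f, err := os.Create("/tmp/scratch")
        if err != nil {
            log.Fatal(err)
        }
        // Unlink the file immediately; the open descriptor keeps its blocks alive.
        if err := os.Remove("/tmp/scratch"); err != nil {
            log.Fatal(err)
        }
        buf := make([]byte, 1<<20)
        for i := 0; i < 64; i++ {
            if _, err := f.Write(buf); err != nil {
                log.Fatal(err)
            }
        }
        // du now reports ~0 for this file; a quota still charges the 64MiB.
        select {} // keep the descriptor open so the usage remains attributable
    }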

View File

@ -23,6 +23,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl" e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
@ -46,7 +47,7 @@ const (
maxStatsAge = time.Minute maxStatsAge = time.Minute
) )
var _ = SIGDescribe("ResourceMetricsAPI", nodefeature.ResourceMetrics, func() { var _ = SIGDescribe("ResourceMetricsAPI", nodefeature.ResourceMetrics, feature.ResourceMetrics, func() {
f := framework.NewDefaultFramework("resource-metrics") f := framework.NewDefaultFramework("resource-metrics")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
ginkgo.Context("when querying /resource/metrics", func() { ginkgo.Context("when querying /resource/metrics", func() {

View File

@ -27,6 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/nodefeature" "k8s.io/kubernetes/test/e2e/nodefeature"
@ -148,7 +149,7 @@ var _ = SIGDescribe("Security Context", func() {
nginxPid = strings.TrimSpace(output) nginxPid = strings.TrimSpace(output)
}) })
f.It("should show its pid in the host PID namespace", nodefeature.HostAccess, func(ctx context.Context) { f.It("should show its pid in the host PID namespace", nodefeature.HostAccess, feature.HostAccess, func(ctx context.Context) {
busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID()) busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID())
createAndWaitHostPidPod(ctx, busyboxPodName, true) createAndWaitHostPidPod(ctx, busyboxPodName, true)
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@ -168,7 +169,7 @@ var _ = SIGDescribe("Security Context", func() {
} }
}) })
f.It("should not show its pid in the non-hostpid containers", nodefeature.HostAccess, func(ctx context.Context) { f.It("should not show its pid in the non-hostpid containers", nodefeature.HostAccess, feature.HostAccess, func(ctx context.Context) {
busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID()) busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID())
createAndWaitHostPidPod(ctx, busyboxPodName, false) createAndWaitHostPidPod(ctx, busyboxPodName, false)
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@ -224,7 +225,7 @@ var _ = SIGDescribe("Security Context", func() {
framework.Logf("Got host shared memory ID %q", hostSharedMemoryID) framework.Logf("Got host shared memory ID %q", hostSharedMemoryID)
}) })
f.It("should show the shared memory ID in the host IPC containers", nodefeature.HostAccess, func(ctx context.Context) { f.It("should show the shared memory ID in the host IPC containers", nodefeature.HostAccess, feature.HostAccess, func(ctx context.Context) {
ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID()) ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID())
createAndWaitHostIPCPod(ctx, ipcutilsPodName, true) createAndWaitHostIPCPod(ctx, ipcutilsPodName, true)
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName) logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@ -239,7 +240,7 @@ var _ = SIGDescribe("Security Context", func() {
} }
}) })
f.It("should not show the shared memory ID in the non-hostIPC containers", nodefeature.HostAccess, func(ctx context.Context) { f.It("should not show the shared memory ID in the non-hostIPC containers", nodefeature.HostAccess, feature.HostAccess, func(ctx context.Context) {
ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID()) ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID())
createAndWaitHostIPCPod(ctx, ipcutilsPodName, false) createAndWaitHostIPCPod(ctx, ipcutilsPodName, false)
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName) logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@ -307,7 +308,7 @@ var _ = SIGDescribe("Security Context", func() {
framework.Logf("Opened a new tcp port %q", listeningPort) framework.Logf("Opened a new tcp port %q", listeningPort)
}) })
f.It("should listen on same port in the host network containers", nodefeature.HostAccess, func(ctx context.Context) { f.It("should listen on same port in the host network containers", nodefeature.HostAccess, feature.HostAccess, func(ctx context.Context) {
busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID()) busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID())
createAndWaitHostNetworkPod(ctx, busyboxPodName, true) createAndWaitHostNetworkPod(ctx, busyboxPodName, true)
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@ -321,7 +322,7 @@ var _ = SIGDescribe("Security Context", func() {
} }
}) })
f.It("shouldn't show the same port in the non-hostnetwork containers", nodefeature.HostAccess, func(ctx context.Context) { f.It("shouldn't show the same port in the non-hostnetwork containers", nodefeature.HostAccess, feature.HostAccess, func(ctx context.Context) {
busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID()) busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID())
createAndWaitHostNetworkPod(ctx, busyboxPodName, false) createAndWaitHostNetworkPod(ctx, busyboxPodName, false)
logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) logs, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
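A hedged sketch of the pod-spec switches these host-access cases toggle; each one shares the corresponding node namespace with the pod's containers. The rest of the spec is omitted.

    package securitysketch

    import v1 "k8s.io/api/core/v1"

    func hostAccessSpec() v1.PodSpec {
        return v1.PodSpec{
            HostPID:     true, // see the node's processes (host PID namespace)
            HostIPC:     true, // see the node's shared-memory segments
            HostNetwork: true, // bind ports directly on the node's interfaces
        }
    }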

View File

@ -25,6 +25,7 @@ import (
"time" "time"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/e2e/feature"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
@ -43,7 +44,7 @@ import (
"github.com/onsi/gomega" "github.com/onsi/gomega"
) )
var _ = SIGDescribe("KubeletSeparateDiskGC", nodefeature.KubeletSeparateDiskGC, func() { var _ = SIGDescribe("KubeletSeparateDiskGC", nodefeature.KubeletSeparateDiskGC, feature.KubeletSeparateDiskGC, func() {
f := framework.NewDefaultFramework("split-disk-test") f := framework.NewDefaultFramework("split-disk-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
pressureTimeout := 10 * time.Minute pressureTimeout := 10 * time.Minute

View File

@ -27,6 +27,7 @@ import (
"k8s.io/kubernetes/pkg/apis/core/v1/helper/qos" "k8s.io/kubernetes/pkg/apis/core/v1/helper/qos"
"k8s.io/kubernetes/pkg/kubelet/apis/config" "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/e2e/feature"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"k8s.io/kubernetes/test/e2e/nodefeature" "k8s.io/kubernetes/test/e2e/nodefeature"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@ -56,7 +57,7 @@ var (
noLimits *resource.Quantity = nil noLimits *resource.Quantity = nil
) )
var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, framework.WithSerial(), func() { var _ = SIGDescribe("Swap", "[LinuxOnly]", nodefeature.Swap, feature.Swap, framework.WithSerial(), func() {
f := framework.NewDefaultFramework("swap-qos") f := framework.NewDefaultFramework("swap-qos")
addAfterEachForCleaningUpPods(f) addAfterEachForCleaningUpPods(f)
f.NamespacePodSecurityLevel = admissionapi.LevelBaseline f.NamespacePodSecurityLevel = admissionapi.LevelBaseline

View File

@ -29,6 +29,7 @@ import (
kubeapi "k8s.io/kubernetes/pkg/apis/core" kubeapi "k8s.io/kubernetes/pkg/apis/core"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api" evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
"k8s.io/kubernetes/test/e2e/feature"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/nodefeature" "k8s.io/kubernetes/test/e2e/nodefeature"
admissionapi "k8s.io/pod-security-admission/api" admissionapi "k8s.io/pod-security-admission/api"
@ -37,7 +38,7 @@ import (
"github.com/onsi/gomega" "github.com/onsi/gomega"
) )
var _ = SIGDescribe("SystemNodeCriticalPod", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.SystemNodeCriticalPod, nodefeature.Eviction, func() { var _ = SIGDescribe("SystemNodeCriticalPod", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.SystemNodeCriticalPod, feature.SystemNodeCriticalPod, nodefeature.Eviction, feature.Eviction, func() {
f := framework.NewDefaultFramework("system-node-critical-pod-test") f := framework.NewDefaultFramework("system-node-critical-pod-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
// this test only manipulates pods in kube-system // this test only manipulates pods in kube-system
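A hedged sketch of what makes a pod "system node critical" for this test: it lives in kube-system and names the built-in system-node-critical priority class. The container and image are illustrative.

    package prioritysketch

    import (
        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    var criticalPod = &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "node-critical", Namespace: "kube-system"},
        Spec: v1.PodSpec{
            PriorityClassName: "system-node-critical", // built-in node-critical class
            Containers: []v1.Container{{
                Name:  "pause",
                Image: "registry.k8s.io/pause:3.9", // illustrative
            }},
        },
    }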