Merge pull request #129105 from carlory/sig-scheduling

scheduling e2e tests: add feature-gate label when these tests depend on a feature gate
Kubernetes Prow Robot 2024-12-12 06:40:25 +00:00 committed by GitHub
commit c0862c3184
4 changed files with 4 additions and 8 deletions
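
For context: the e2e framework has two tagging mechanisms, and this PR touches both. framework.WithFeature attaches a free-form "[Feature:...]" label that CI job configurations filter on, while framework.WithFeatureGate ties a test to a gate declared in k8s.io/kubernetes/pkg/features, so tooling knows which gate must be enabled for the test to be meaningful. A minimal sketch of the difference, assuming only the framework APIs that appear in this diff (the surrounding scaffolding is illustrative, not the real test file):

	package scheduling

	import (
		"context"

		"k8s.io/kubernetes/pkg/features"
		"k8s.io/kubernetes/test/e2e/feature"
		"k8s.io/kubernetes/test/e2e/framework"
	)

	var _ = framework.Describe("feature-gate labels", func() {
		// Free-form label only: tags the test name for label filters,
		// but records nothing about which feature gate the test needs.
		framework.It("is only labelled", feature.SchedulerAsyncPreemption,
			func(ctx context.Context) { /* test body */ })

		// Label plus gate: the extra WithFeatureGate argument declares
		// the dependency on the SchedulerAsyncPreemption gate, which is
		// exactly what this PR adds to the preemption test below.
		framework.It("is labelled and gated", feature.SchedulerAsyncPreemption,
			framework.WithFeatureGate(features.SchedulerAsyncPreemption),
			func(ctx context.Context) { /* test body */ })
	})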


@@ -218,9 +218,6 @@ var (
 	// Marks tests that require a cloud provider that implements LoadBalancer Services
 	LoadBalancer = framework.WithFeature(framework.ValidFeatures.Add("LoadBalancer"))
-	// TODO: document the feature (owning SIG, when to use this feature for a test)
-	LocalStorageCapacityIsolation = framework.WithFeature(framework.ValidFeatures.Add("LocalStorageCapacityIsolation"))
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	LocalStorageCapacityIsolationQuota = framework.WithFeature(framework.ValidFeatures.Add("LocalStorageCapacityIsolationQuota"))


@@ -37,7 +37,6 @@ import (
 	utilversion "k8s.io/apimachinery/pkg/util/version"
 	clientset "k8s.io/client-go/kubernetes"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2eruntimeclass "k8s.io/kubernetes/test/e2e/framework/node/runtimeclass"
@ -125,7 +124,7 @@ var _ = SIGDescribe("SchedulerPredicates", framework.WithSerial(), func() {
// This test verifies we don't allow scheduling of pods in a way that sum of local ephemeral storage resource requests of pods is greater than machines capacity.
// It assumes that cluster add-on pods stay stable and cannot be run in parallel with any other test that touches Nodes or Pods.
// It is so because we need to have precise control on what's running in the cluster.
f.It("validates local ephemeral storage resource limits of pods that are allowed to run", feature.LocalStorageCapacityIsolation, func(ctx context.Context) {
f.It("validates local ephemeral storage resource limits of pods that are allowed to run", func(ctx context.Context) {
e2eskipper.SkipUnlessServerVersionGTE(localStorageVersion, f.ClientSet.Discovery())
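
Note that the label is removed here without a gate label replacing it: LocalStorageCapacityIsolation graduated to GA (the gate is enabled by default and locked), so a feature label no longer selects anything meaningful, and the server-version skip is the only guard the test still needs. A minimal sketch of that remaining guard; the constant is assumed to match the package-level localStorageVersion declared earlier in this file, and the exact version string is illustrative:

	// Assumed declaration (hypothetical value):
	var localStorageVersion = utilversion.MustParseSemantic("v1.8.0-beta.0")

	// Inside the test body: skip on servers that predate local
	// ephemeral-storage enforcement instead of relying on a label.
	e2eskipper.SkipUnlessServerVersionGTE(localStorageVersion, f.ClientSet.Discovery())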


@@ -43,6 +43,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/kubernetes/pkg/apis/scheduling"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@ -312,7 +313,7 @@ var _ = SIGDescribe("SchedulerPreemption", framework.WithSerial(), func() {
Description: When there are Pods with various priority classes running the preemption,
the scheduler must prioritize the Pods with the higher priority class.
*/
framework.It("validates various priority Pods preempt expectedly with the async preemption", feature.SchedulerAsyncPreemption, func(ctx context.Context) {
framework.It("validates various priority Pods preempt expectedly with the async preemption", feature.SchedulerAsyncPreemption, framework.WithFeatureGate(features.SchedulerAsyncPreemption), func(ctx context.Context) {
var podRes v1.ResourceList
// Create 10 pods per node that will eat up all the node's resources.
ginkgo.By("Create 10 low-priority pods on each node.")


@@ -36,7 +36,6 @@ import (
 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
 	kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
-	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@ -289,7 +288,7 @@ var _ = SIGDescribe("LocalStorageSoftEvictionNotOverwriteTerminationGracePeriodS
})
// LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions
var _ = SIGDescribe("LocalStorageCapacityIsolationEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.LocalStorageCapacityIsolation, nodefeature.Eviction, func() {
var _ = SIGDescribe("LocalStorageCapacityIsolationEviction", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), nodefeature.Eviction, func() {
f := framework.NewDefaultFramework("localstorage-eviction-test")
f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
evictionTestTimeout := 10 * time.Minute