mirror of https://github.com/k3s-io/kubernetes.git

commit 6a608c3cdb (parent 16da2955d0)

drop NodeSpecialFeature and NodeAlphaFeature from e2e-node
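All of the hunks below follow one pattern: raw ginkgo string tags such as "[NodeAlphaFeature:X]" and "[NodeSpecialFeature:X]" are dropped in favor of typed labels from k8s.io/kubernetes/test/e2e/feature. A minimal sketch of that pattern, assuming the framework helpers visible in the hunks; the "Example" name and the test body are hypothetical stand-ins:

package example

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"k8s.io/kubernetes/test/e2e/framework"
)

// Registered once in a central feature file (cf. the feature hunks below):
// Add records the canonical name, WithFeature wraps it as a typed label.
var Example = framework.WithFeature(framework.ValidFeatures.Add("Example"))

// The label is passed as an argument instead of being embedded in the
// description string. It renders as "[Feature:Example]" in the full test
// name (cf. the FOCUS regex change in the DRA hunk below), so existing
// -focus/-skip regexes keep working.
var _ = framework.SIGDescribe("node")("Example suite", framework.WithSerial(), Example, func() {
	ginkgo.It("runs", func(ctx context.Context) {
		// ...
	})
})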
@@ -52,7 +52,7 @@ var (
 	cmd = []string{"/bin/sh", "-c", "sleep 1d"}
 )

-var _ = SIGDescribe("Pod Level Resources", framework.WithSerial(), feature.PodLevelResources, "[NodeAlphaFeature:PodLevelResources]", func() {
+var _ = SIGDescribe("Pod Level Resources", framework.WithSerial(), feature.PodLevelResources, func() {
 	f := framework.NewDefaultFramework("pod-level-resources-tests")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

@@ -1210,7 +1210,7 @@ func doPodResizeErrorTests() {
 // Above tests are performed by doSheduletTests() and doPodResizeResourceQuotaTests()
 // in test/e2e/node/pod_resize.go

-var _ = SIGDescribe("Pod InPlace Resize Container", feature.InPlacePodVerticalScaling, "[NodeAlphaFeature:InPlacePodVerticalScaling]", func() {
+var _ = SIGDescribe("Pod InPlace Resize Container", feature.InPlacePodVerticalScaling, func() {
 	f := framework.NewDefaultFramework("pod-resize-tests")

 	ginkgo.BeforeEach(func(ctx context.Context) {

@@ -110,6 +110,12 @@ var (
 	// is enabled such that passing CDI device IDs through CRI fields is supported
 	DynamicResourceAllocation = framework.WithFeature(framework.ValidFeatures.Add("DynamicResourceAllocation"))

+	// owning-sig: sig-node
+	// kep: https://kep.k8s.io/4009
+	// DevicePluginCDIDevices tests the CDI feature which is GA.
+	// This label is used for https://testgrid.k8s.io/sig-node-cri-o#ci-crio-cdi-device-plugins
+	DevicePluginCDIDevices = framework.WithFeature(framework.ValidFeatures.Add("DevicePluginCDIDevices"))
+
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	EphemeralStorage = framework.WithFeature(framework.ValidFeatures.Add("EphemeralStorage"))

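The DevicePluginCDIDevices label registered here is consumed by the device plugin test further down in this commit, which switches from a string-tagged ginkgo.It plus a runtime feature-gate skip to f.It with the typed label. The testgrid lane linked in the comment presumably selects the test by the rendered [Feature:DevicePluginCDIDevices] tag in the test name.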
@@ -207,6 +213,9 @@ var (
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	LocalStorageCapacityIsolationQuota = framework.WithFeature(framework.ValidFeatures.Add("LocalStorageCapacityIsolationQuota"))

+	// owning-sig: sig-node
+	// Marks a disruptive test for lock contention
+	LockContention = framework.WithFeature(framework.ValidFeatures.Add("LockContention"))
 	// TODO: document the feature (owning SIG, when to use this feature for a test)
 	MasterUpgrade = framework.WithFeature(framework.ValidFeatures.Add("MasterUpgrade"))

@@ -168,7 +168,7 @@ var _ = SIGDescribe("Density", framework.WithSerial(), framework.WithSlow(), fun

 	for _, testArg := range dTests {
 		itArg := testArg
-		desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval)
+		desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark]", itArg.podsNr, itArg.interval)
 		ginkgo.It(desc, func(ctx context.Context) {
 			itArg.createMethod = "batch"
 			testInfo := getTestNodeInfo(f, itArg.getTestName(), desc)

@@ -206,7 +206,7 @@ var _ = SIGDescribe("Density", framework.WithSerial(), framework.WithSlow(), fun
 	for _, testArg := range dTests {
 		itArg := testArg
 		ginkgo.Context("", func() {
-			desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark][NodeSpecialFeature:Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
+			desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval (QPS %d) [Benchmark]", itArg.podsNr, itArg.interval, itArg.APIQPSLimit)
 			// The latency caused by API QPS limit takes a large portion (up to ~33%) of e2e latency.
 			// It makes the pod startup latency of Kubelet (creation throughput as well) under-estimated.
 			// Here we set API QPS limit from default 5 to 60 in order to test real Kubelet performance.

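Note that [Benchmark] stays behind as a literal substring of the description rather than becoming a typed label, so these tests are still selected by regex over the full test name. A trimmed, hypothetical sketch of the pattern; the struct and helper are stand-ins for the real density test scaffolding:

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/ginkgo/v2"
)

// Hypothetical stand-in for the density test table entries seen above.
type densityTestArgs struct {
	podsNr   int
	interval time.Duration
}

func registerDensityIt(itArg densityTestArgs) {
	// "[Benchmark]" is part of the plain string, so a run scoped with e.g.
	// FOCUS='\[Benchmark\]' still matches after this commit.
	desc := fmt.Sprintf("latency/resource should be within limit when create %d pods with %v interval [Benchmark]", itArg.podsNr, itArg.interval)
	ginkgo.It(desc, func(ctx context.Context) {
		// ...the actual measurement logic lives in the real test...
	})
}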
@@ -49,10 +49,10 @@ import (
 	kubeletpodresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
 	kubeletpodresourcesv1alpha1 "k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
-	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	e2etestfiles "k8s.io/kubernetes/test/e2e/framework/testfiles"
 	"k8s.io/kubernetes/test/e2e/nodefeature"
 )

@@ -283,8 +283,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 		gomega.Expect(v1ResourcesForOurPod.Containers[0].Devices[0].DeviceIds).To(gomega.HaveLen(1))
 	})

-	ginkgo.It("[NodeSpecialFeature:CDI] can make a CDI device accessible in a container", func(ctx context.Context) {
-		e2eskipper.SkipUnlessFeatureGateEnabled(features.DevicePluginCDIDevices)
+	f.It("can make a CDI device accessible in a container", feature.DevicePluginCDIDevices, func(ctx context.Context) {
 		// check if CDI_DEVICE env variable is set
 		// and only one correspondent device node /tmp/<CDI_DEVICE> is available inside a container
 		podObj := makeBusyboxPod(SampleDeviceResourceName, "[ $(ls /tmp/CDI-Dev-[1,2] | wc -l) -eq 1 -a -b /tmp/$CDI_DEVICE ]")

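For context on the unchanged assertion: the busybox command checks that exactly one of the /tmp/CDI-Dev-1 or /tmp/CDI-Dev-2 device nodes was materialized in the container (ls ... | wc -l equals 1) and that /tmp/$CDI_DEVICE is a block device (-b), i.e. the CDI annotation exposed exactly the device it advertised. The runtime feature-gate skip could be dropped because, per the feature comment added above, CDI device support is GA.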
@@ -18,7 +18,7 @@ limitations under the License.
 E2E Node test for DRA (Dynamic Resource Allocation)
 This test covers node-specific aspects of DRA
 The test can be run locally on Linux this way:
-  make test-e2e-node FOCUS='\[NodeAlphaFeature:DynamicResourceAllocation\]' SKIP='\[Flaky\]' PARALLELISM=1 \
+  make test-e2e-node FOCUS='\[Feature:DynamicResourceAllocation\]' SKIP='\[Flaky\]' PARALLELISM=1 \
    TEST_ARGS='--feature-gates="DynamicResourceAllocation=true" --service-feature-gates="DynamicResourceAllocation=true" --runtime-config=api/all=true'
 */

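FOCUS here is a regular expression matched against the full test name, which is why the pattern tracks the label change: with the typed feature.DynamicResourceAllocation label the suite name now renders with [Feature:DynamicResourceAllocation] instead of the old [NodeAlphaFeature:DynamicResourceAllocation] string tag.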
@@ -83,7 +83,7 @@ const (
 	retryTestTimeout = kubeletRetryPeriod + 30*time.Second
 )

-var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, "[NodeAlphaFeature:DynamicResourceAllocation]", func() {
+var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation, func() {
 	f := framework.NewDefaultFramework("dra-node")
 	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline

@@ -201,7 +201,7 @@ func getHugepagesTestPod(f *framework.Framework, limits v1.ResourceList, mounts
 }

 // Serial because the test updates kubelet configuration.
-var _ = SIGDescribe("HugePages", framework.WithSerial(), feature.HugePages, "[NodeSpecialFeature:HugePages]", func() {
+var _ = SIGDescribe("HugePages", framework.WithSerial(), feature.HugePages, func() {
 	f := framework.NewDefaultFramework("hugepages-test")
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

@@ -27,6 +27,7 @@ import (

 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
+	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 )

@@ -34,9 +35,9 @@ const contentionLockFile = "/var/run/kubelet.lock"

 // Kubelet Lock contention tests the lock contention feature.
 // Disruptive because the kubelet is restarted in the test.
-// NodeSpecialFeature:LockContention because we don't want the test to be picked up by any other
+// Feature:LockContention because we don't want the test to be picked up by any other
 // test suite, hence the unique name "LockContention".
-var _ = SIGDescribe("Lock contention", framework.WithSlow(), framework.WithDisruptive(), "[NodeSpecialFeature:LockContention]", func() {
+var _ = SIGDescribe("Lock contention", framework.WithSlow(), framework.WithDisruptive(), feature.LockContention, func() {

 	// Requires `--lock-file` & `--exit-on-lock-contention` flags to be set on the Kubelet.
 	ginkgo.It("Kubelet should stop when the test acquires the lock on lock file and restart once the lock is released", func(ctx context.Context) {