Merge pull request #99319 from wojtek-t/cleanup_describe_2

Mark remaining e2e_node tests with [sig-*] label
Kubernetes Prow Robot 2021-02-23 12:00:50 -08:00 committed by GitHub
commit 9d7d3eb820
33 changed files with 114 additions and 41 deletions
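
The mechanical change in every test file below is the same: the framework.KubeDescribe wrapper is replaced with a package-local SIGDescribe helper so each suite carries an explicit [sig-*] label. For reference, a minimal sketch of what such a helper looks like for the node tests — an assumption modeled on the gcp variant this PR adds verbatim further down, with [sig-node] inferred from the PR title rather than shown in this diff:

package e2enode

import "github.com/onsi/ginkgo"

// SIGDescribe prefixes the test description with the [sig-node] label so
// label filters and the test grid can attribute the suite to SIG Node.
func SIGDescribe(text string, body func()) bool {
	return ginkgo.Describe("[sig-node] "+text, body)
}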


@@ -123,7 +123,6 @@ go_test(
"e2e_node_suite_test.go",
"eviction_test.go",
"garbage_collector_test.go",
- "gke_environment_test.go",
"gpu_device_plugin_test.go",
"hugepages_test.go",
"image_id_test.go",
@@ -219,7 +218,6 @@ go_test(
"//test/e2e_node/services:go_default_library",
"//test/utils:go_default_library",
"//test/utils/image:go_default_library",
- "//vendor/github.com/blang/semver:go_default_library",
"//vendor/github.com/coreos/go-systemd/util:go_default_library",
"//vendor/github.com/davecgh/go-spew/spew:go_default_library",
"//vendor/github.com/google/go-cmp/cmp:go_default_library",
@@ -276,6 +274,7 @@ filegroup(
":package-srcs",
"//test/e2e_node/builder:all-srcs",
"//test/e2e_node/environment:all-srcs",
+ "//test/e2e_node/gcp:all-srcs",
"//test/e2e_node/perf/workloads:all-srcs",
"//test/e2e_node/perftype:all-srcs",
"//test/e2e_node/remote:all-srcs",


@@ -46,7 +46,7 @@ import (
"github.com/onsi/gomega"
)
- var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() {
+ var _ = SIGDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor]", func() {
if isAppArmorEnabled() {
ginkgo.BeforeEach(func() {
ginkgo.By("Loading AppArmor profiles for testing")


@@ -41,7 +41,7 @@ const (
rotationConsistentlyTimeout = 2 * time.Minute
)
- var _ = framework.KubeDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func() {
+ var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func() {
f := framework.NewDefaultFramework("container-log-rotation-test")
ginkgo.Context("when a container generates a lot of log", func() {
ginkgo.BeforeEach(func() {


@@ -73,7 +73,7 @@ func validateOOMScoreAdjSettingIsInRange(pid int, expectedMinOOMScoreAdj, expect
return nil
}
- var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
+ var _ = SIGDescribe("Container Manager Misc [Serial]", func() {
f := framework.NewDefaultFramework("kubelet-container-manager")
ginkgo.Describe("Validate OOM score adjustments [NodeFeature:OOMScoreAdj]", func() {
ginkgo.Context("once the node is setup", func() {


@@ -41,7 +41,7 @@ const (
bestEffortPodName = "best-effort"
)
- var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]", func() {
+ var _ = SIGDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]", func() {
f := framework.NewDefaultFramework("critical-pod-test")
ginkgo.Context("when we need to admit a critical pod", func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {


@@ -49,7 +49,7 @@ const (
kubeletAddr = "localhost:10255"
)
- var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
+ var _ = SIGDescribe("Density [Serial] [Slow]", func() {
const (
// The data collection time of resource collector and the standalone cadvisor
// is not synchronized, so resource collector may miss data or


@@ -63,7 +63,7 @@ var (
)
// Serial because the test restarts Kubelet
- var _ = framework.KubeDescribe("Device Plugin [Feature:DevicePluginProbe][NodeFeature:DevicePluginProbe][Serial]", func() {
+ var _ = SIGDescribe("Device Plugin [Feature:DevicePluginProbe][NodeFeature:DevicePluginProbe][Serial]", func() {
f := framework.NewDefaultFramework("device-plugin-errors")
testDevicePlugin(f, "/var/lib/kubelet/plugins_registry")
})


@@ -32,7 +32,7 @@ import (
"github.com/onsi/gomega"
)
- var _ = framework.KubeDescribe("Docker features [Feature:Docker][Legacy:Docker]", func() {
+ var _ = SIGDescribe("Docker features [Feature:Docker][Legacy:Docker]", func() {
f := framework.NewDefaultFramework("docker-feature-test")
ginkgo.BeforeEach(func() {


@@ -70,7 +70,7 @@ type nodeConfigTestCase struct {
}
// This test is marked [Disruptive] because the Kubelet restarts several times during this test.
- var _ = framework.KubeDescribe("[Feature:DynamicKubeletConfig][NodeFeature:DynamicKubeletConfig][Serial][Disruptive]", func() {
+ var _ = SIGDescribe("[Feature:DynamicKubeletConfig][NodeFeature:DynamicKubeletConfig][Serial][Disruptive]", func() {
f := framework.NewDefaultFramework("dynamic-kubelet-configuration-test")
var beforeNode *v1.Node
var beforeConfigMap *v1.ConfigMap


@@ -66,7 +66,7 @@ const (
// InodeEviction tests that the node responds to node disk pressure by evicting only responsible pods.
// Node disk pressure is induced by consuming all inodes on the node.
- var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
+ var _ = SIGDescribe("InodeEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("inode-eviction-test")
expectedNodeCondition := v1.NodeDiskPressure
expectedStarvedResource := resourceInodes
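
As background for the induction mechanism described in the comment above: inodes are exhausted by file count rather than data volume, so even zero-byte files consume them. A standalone, purely illustrative sketch of the idea (not the test's actual workload):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Each file consumes one inode even at zero bytes, so the number of
	// files, not the bytes written, is what exhausts the filesystem.
	if err := os.MkdirAll("/tmp/inodes", 0o755); err != nil {
		panic(err)
	}
	for i := 0; ; i++ {
		f, err := os.Create(fmt.Sprintf("/tmp/inodes/f-%d", i))
		if err != nil {
			// ENOSPC while free blocks remain typically means the
			// filesystem ran out of inodes.
			fmt.Println("stopped:", err)
			return
		}
		f.Close()
	}
}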
@@ -102,7 +102,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive][NodeF
// ImageGCNoEviction tests that the node does not evict pods when inodes are consumed by images
// Disk pressure is induced by pulling large images
- var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
+ var _ = SIGDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("image-gc-eviction-test")
pressureTimeout := 10 * time.Minute
expectedNodeCondition := v1.NodeDiskPressure
@@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive][N
// MemoryAllocatableEviction tests that the node responds to node memory pressure by evicting only responsible pods.
// Node memory pressure is only encountered because we reserve the majority of the node's capacity via kube-reserved.
- var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
+ var _ = SIGDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("memory-allocatable-eviction-test")
expectedNodeCondition := v1.NodeMemoryPressure
expectedStarvedResource := v1.ResourceMemory
@@ -165,7 +165,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru
// LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
// Disk pressure is induced by running pods which consume disk space.
- var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
+ var _ = SIGDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("localstorage-eviction-test")
pressureTimeout := 10 * time.Minute
expectedNodeCondition := v1.NodeDiskPressure
@@ -194,7 +194,7 @@ var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive
// LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
// Disk pressure is induced by running pods which consume disk space, which exceed the soft eviction threshold.
// Note: This test's purpose is to test Soft Evictions. Local storage was chosen since it is the least costly to run.
- var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
+ var _ = SIGDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("localstorage-eviction-test")
pressureTimeout := 10 * time.Minute
expectedNodeCondition := v1.NodeDiskPressure
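
For context on the soft-eviction variant: unlike hard eviction, a soft threshold must stay breached for a configured grace period before the kubelet evicts, and evicted pods get up to eviction-max-pod-grace-period seconds to terminate. A hedged sketch of how a test like this might wire that up through the tempSetCurrentKubeletConfig helper seen elsewhere in this diff — the field names come from the kubeletconfig API, and the values are illustrative, not the test's actual settings:

tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
	// Illustrative values only: trip nodefs.available early and evict
	// after the threshold has held for 30 seconds.
	initialConfig.EvictionSoft = map[string]string{"nodefs.available": "50%"}
	initialConfig.EvictionSoftGracePeriod = map[string]string{"nodefs.available": "30s"}
	initialConfig.EvictionMaxPodGracePeriod = 30
})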
@@ -230,7 +230,7 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup
})
// LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions
- var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:Eviction]", func() {
+ var _ = SIGDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("localstorage-eviction-test")
evictionTestTimeout := 10 * time.Minute
ginkgo.Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
@@ -282,7 +282,7 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se
// PriorityMemoryEvictionOrdering tests that the node responds to node memory pressure by evicting pods.
// This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
// the higher priority pod.
- var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
+ var _ = SIGDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("priority-memory-eviction-ordering-test")
expectedNodeCondition := v1.NodeMemoryPressure
expectedStarvedResource := v1.ResourceMemory
@@ -339,7 +339,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
// PriorityLocalStorageEvictionOrdering tests that the node responds to node disk pressure by evicting pods.
// This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
// the higher priority pod.
- var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
+ var _ = SIGDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("priority-disk-eviction-ordering-test")
expectedNodeCondition := v1.NodeDiskPressure
expectedStarvedResource := v1.ResourceEphemeralStorage
@@ -395,7 +395,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
})
// PriorityPidEvictionOrdering tests that the node emits pid pressure in response to a fork bomb, and evicts pods by priority
- var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
+ var _ = SIGDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
f := framework.NewDefaultFramework("pidpressure-eviction-test")
pressureTimeout := 2 * time.Minute
expectedNodeCondition := v1.NodePIDPressure


@@ -70,7 +70,7 @@ type testRun struct {
// GarbageCollect tests that the Kubelet conforms to the Kubelet Garbage Collection Policy, found here:
// http://kubernetes.io/docs/admin/garbage-collection/
- var _ = framework.KubeDescribe("GarbageCollect [Serial][NodeFeature:GarbageCollect]", func() {
+ var _ = SIGDescribe("GarbageCollect [Serial][NodeFeature:GarbageCollect]", func() {
f := framework.NewDefaultFramework("garbage-collect-test")
containerNamePrefix := "gc-test-container-"
podNamePrefix := "gc-test-pod-"

test/e2e_node/gcp/BUILD (new file, 36 lines)

@@ -0,0 +1,36 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["framework.go"],
importpath = "k8s.io/kubernetes/test/e2e_node/gcp",
visibility = ["//visibility:public"],
deps = ["//vendor/github.com/onsi/ginkgo:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["gke_environment_test.go"],
embed = [":go_default_library"],
deps = [
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/skipper:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/blang/semver:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
],
)

test/e2e_node/gcp/OWNERS (new file, 4 lines)

@@ -0,0 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners
labels:
- sig/cloud-provider


test/e2e_node/gcp/framework.go (new file, 24 lines; the file name follows from the srcs entry in the gcp BUILD file above)

@@ -0,0 +1,24 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gcp
import "github.com/onsi/ginkgo"
// SIGDescribe annotates the test with the SIG label.
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-cloud-provider-gcp] "+text, body)
}
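
Worth noting as a design choice: rather than reusing the node suite's helper, the new gcp package defines its own SIGDescribe that hard-codes the [sig-cloud-provider-gcp] prefix. Together with the sig/cloud-provider label in the OWNERS file above, this attributes the GKE environment checks to SIG Cloud Provider rather than SIG Node, which is why the test moves into its own package below.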


@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
- package e2enode
+ package gcp
import (
"bytes"
@@ -320,7 +320,7 @@ func checkDockerStorageDriver() error {
return fmt.Errorf("failed to find storage driver")
}
- var _ = framework.KubeDescribe("GKE system requirements [NodeConformance][Feature:GKEEnv][NodeFeature:GKEEnv]", func() {
+ var _ = SIGDescribe("GKE system requirements [NodeConformance][Feature:GKEEnv][NodeFeature:GKEEnv]", func() {
ginkgo.BeforeEach(func() {
e2eskipper.RunIfSystemSpecNameIs("gke")
})
@@ -437,3 +437,13 @@ func getKernelVersion() (*semver.Version, error) {
}
return &kernelVersion, nil
}
+ // runCommand runs the cmd and returns the combined stdout and stderr, or an
+ // error if the command failed.
+ func runCommand(cmd ...string) (string, error) {
+ output, err := exec.Command(cmd[0], cmd[1:]...).CombinedOutput()
+ if err != nil {
+ return "", fmt.Errorf("failed to run %q: %s (%s)", strings.Join(cmd, " "), err, output)
+ }
+ return string(output), nil
+ }
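
Since runCommand takes its argv as variadic strings, no shell is involved and no quoting is needed; a caller would invoke it as, for example, runCommand("docker", "info") and parse the returned output. Combining stdout and stderr into one buffer is deliberate: whatever the failing binary printed ends up embedded in the returned error.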


@@ -65,7 +65,7 @@ func NVIDIADevicePlugin() *v1.Pod {
}
// Serial because the test restarts Kubelet
- var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin][NodeFeature:GPUDevicePlugin][Serial] [Disruptive]", func() {
+ var _ = SIGDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugin][NodeFeature:GPUDevicePlugin][Serial] [Disruptive]", func() {
f := framework.NewDefaultFramework("device-plugin-gpus-errors")
ginkgo.Context("DevicePlugin", func() {


@@ -28,7 +28,7 @@ import (
"github.com/onsi/gomega"
)
- var _ = framework.KubeDescribe("ImageID [NodeFeature: ImageID]", func() {
+ var _ = SIGDescribe("ImageID [NodeFeature: ImageID]", func() {
busyBoxImage := "k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff"


@@ -35,7 +35,7 @@ const (
logContainerName = "logger"
)
- var _ = framework.KubeDescribe("ContainerLogPath [NodeConformance]", func() {
+ var _ = SIGDescribe("ContainerLogPath [NodeConformance]", func() {
f := framework.NewDefaultFramework("kubelet-container-log-path")
var podClient *framework.PodClient


@@ -30,7 +30,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
)
- var _ = framework.KubeDescribe("MirrorPodWithGracePeriod", func() {
+ var _ = SIGDescribe("MirrorPodWithGracePeriod", func() {
f := framework.NewDefaultFramework("mirror-pod-with-grace-period")
ginkgo.Context("when create a mirror pod ", func() {
var ns, podPath, staticPodName, mirrorPodName string


@@ -40,7 +40,7 @@ import (
"github.com/onsi/gomega"
)
- var _ = framework.KubeDescribe("MirrorPod", func() {
+ var _ = SIGDescribe("MirrorPod", func() {
f := framework.NewDefaultFramework("mirror-pod")
ginkgo.Context("when create a mirror pod ", func() {
var ns, podPath, staticPodName, mirrorPodName string


@@ -58,7 +58,7 @@ func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration)
initialConfig.SystemReservedCgroup = systemReservedCgroup
}
- var _ = framework.KubeDescribe("Node Container Manager [Serial]", func() {
+ var _ = SIGDescribe("Node Container Manager [Serial]", func() {
f := framework.NewDefaultFramework("node-container-manager")
ginkgo.Describe("Validate Node Allocatable [NodeFeature:NodeAllocatable]", func() {
ginkgo.It("sets up the node and runs the test", func() {


@@ -42,7 +42,7 @@ import (
testutils "k8s.io/kubernetes/test/utils"
)
- var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDetector] [Serial]", func() {
+ var _ = SIGDescribe("NodeProblemDetector [NodeFeature:NodeProblemDetector] [Serial]", func() {
const (
pollInterval = 1 * time.Second
pollConsistent = 5 * time.Second
@@ -68,7 +68,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
})
// Test system log monitor. We may add other tests if we have more problem daemons in the future.
- framework.KubeDescribe("SystemLogMonitor", func() {
+ ginkgo.Describe("SystemLogMonitor", func() {
const (
// Use test condition to avoid changing the real node condition in use.
// TODO(random-liu): Now node condition could be arbitrary string, consider whether we need to


@@ -37,7 +37,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
)
- var _ = framework.KubeDescribe("GracefulNodeShutdown [Serial] [NodeAlphaFeature:GracefulNodeShutdown]", func() {
+ var _ = SIGDescribe("GracefulNodeShutdown [Serial] [NodeAlphaFeature:GracefulNodeShutdown]", func() {
f := framework.NewDefaultFramework("graceful-node-shutdown")
ginkgo.Context("when gracefully shutting down", func() {


@@ -163,7 +163,7 @@ func makePodToVerifyCgroupRemoved(baseName string) *v1.Pod {
return pod
}
- var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
+ var _ = SIGDescribe("Kubelet Cgroup Manager", func() {
f := framework.NewDefaultFramework("kubelet-cgroup-manager")
ginkgo.Describe("QOS containers", func() {
ginkgo.Context("On enabling QOS cgroup hierarchy", func() {


@@ -90,7 +90,7 @@ func runOneQuotaTest(f *framework.Framework, quotasRequested bool) {
// pod that creates a file, deletes it, and writes data to it. If
// quotas are used to monitor, it will detect this deleted-but-in-use
// file; if du is used to monitor, it will not detect this.
- var _ = framework.KubeDescribe("LocalStorageCapacityIsolationQuotaMonitoring [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolationQuota][NodeFeature:LSCIQuotaMonitoring]", func() {
+ var _ = SIGDescribe("LocalStorageCapacityIsolationQuotaMonitoring [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolationQuota][NodeFeature:LSCIQuotaMonitoring]", func() {
f := framework.NewDefaultFramework("localstorage-quota-monitoring-test")
runOneQuotaTest(f, true)
runOneQuotaTest(f, false)
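
The deleted-but-in-use case called out in the comment above is easy to reproduce: a process that unlinks a file while keeping its descriptor open continues to consume blocks that du (which walks the directory tree) can no longer see, while filesystem quotas still charge them. A standalone illustrative sketch of that behavior, not taken from the test itself:

package main

import (
	"fmt"
	"os"
)

func main() {
	f, err := os.Create("/tmp/ghost")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Unlink the path: du no longer sees the file, but the open
	// descriptor keeps its blocks allocated until f is closed.
	if err := os.Remove("/tmp/ghost"); err != nil {
		panic(err)
	}

	// Data written now is invisible to du yet still charged by quotas.
	if _, err := f.Write(make([]byte, 64<<20)); err != nil {
		panic(err)
	}
	fmt.Println("wrote 64 MiB to a deleted file")
}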


@@ -40,7 +40,7 @@ const (
maxStatsAge = time.Minute
)
- var _ = framework.KubeDescribe("ResourceMetricsAPI [NodeFeature:ResourceMetrics]", func() {
+ var _ = SIGDescribe("ResourceMetricsAPI [NodeFeature:ResourceMetrics]", func() {
f := framework.NewDefaultFramework("resource-metrics")
ginkgo.Context("when querying /resource/metrics", func() {
ginkgo.BeforeEach(func() {


@@ -59,7 +59,7 @@ func waitForPods(f *framework.Framework, podCount int, timeout time.Duration) (r
return runningPods
}
- var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeature:ContainerRuntimeRestart]", func() {
+ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeature:ContainerRuntimeRestart]", func() {
const (
// Saturate the node. It's not necessary that all these pods enter
// Running/Ready, because we don't know the number of cores in the


@@ -32,7 +32,7 @@ import (
"github.com/onsi/ginkgo"
)
- var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
+ var _ = SIGDescribe("Container Runtime Conformance Test", func() {
f := framework.NewDefaultFramework("runtime-conformance")
ginkgo.Describe("container runtime conformance blackbox test", func() {


@@ -89,7 +89,7 @@ func makePodToVerifyCgroupSize(cgroupNames []string, expectedCPU string, expecte
return pod
}
- var _ = framework.KubeDescribe("Kubelet PodOverhead handling [LinuxOnly]", func() {
+ var _ = SIGDescribe("Kubelet PodOverhead handling [LinuxOnly]", func() {
f := framework.NewDefaultFramework("podoverhead-handling")
ginkgo.Describe("PodOverhead cgroup accounting", func() {
ginkgo.Context("On running pod with PodOverhead defined", func() {


@@ -34,7 +34,7 @@ import (
"github.com/onsi/ginkgo"
)
- var _ = framework.KubeDescribe("Security Context", func() {
+ var _ = SIGDescribe("Security Context", func() {
f := framework.NewDefaultFramework("security-context-test")
var podClient *framework.PodClient
ginkgo.BeforeEach(func() {


@@ -38,7 +38,7 @@ import (
"github.com/onsi/gomega/types"
)
- var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
+ var _ = SIGDescribe("Summary API [NodeConformance]", func() {
f := framework.NewDefaultFramework("summary-test")
ginkgo.Context("when querying /stats/summary", func() {
ginkgo.AfterEach(func() {


@@ -33,7 +33,7 @@ import (
"github.com/onsi/gomega"
)
- var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptive] [NodeFeature:SystemNodeCriticalPod]", func() {
+ var _ = SIGDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptive] [NodeFeature:SystemNodeCriticalPod]", func() {
f := framework.NewDefaultFramework("system-node-critical-pod-test")
// this test only manipulates pods in kube-system
f.SkipNamespaceCreation = true


@@ -31,7 +31,7 @@ import (
"github.com/onsi/ginkgo"
)
- var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
+ var _ = SIGDescribe("Kubelet Volume Manager", func() {
f := framework.NewDefaultFramework("kubelet-volume-manager")
ginkgo.Describe("Volume Manager", func() {
ginkgo.Context("On terminatation of pod with memory backed volume", func() {