From 90750c77c3d047b9e018e31ac24987db224aa9f2 Mon Sep 17 00:00:00 2001
From: Yu-Ju Hong
Date: Mon, 21 May 2018 17:24:29 -0700
Subject: [PATCH] test/e2e_node: Add NodeFeature tags to non-conformance tests

Serial tests are not considered for conformance tests.
---
 test/e2e_node/container_manager_test.go      |  2 +-
 test/e2e_node/critical_pod_test.go           |  2 +-
 test/e2e_node/eviction_test.go               | 14 +++++++-------
 test/e2e_node/garbage_collector_test.go      |  2 +-
 test/e2e_node/image_id_test.go               |  2 +-
 test/e2e_node/node_container_manager_test.go |  2 +-
 test/e2e_node/node_problem_detector_linux.go |  2 +-
 test/e2e_node/restart_test.go                |  2 +-
 test/e2e_node/security_context_test.go       | 14 +++++++-------
 9 files changed, 21 insertions(+), 21 deletions(-)
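Note (kept below the "---" fold, so `git am` ignores it): each bracketed tag is
a plain substring of the full Ginkgo spec text, so CI jobs select or exclude
these specs with the --ginkgo.focus / --ginkgo.skip regex flags. The sketch
below is a minimal, self-contained Go illustration of that matching; the skip
pattern and the sample spec strings are assumptions for illustration only, not
the regexes any particular job uses.

	package main

	import (
		"fmt"
		"regexp"
	)

	func main() {
		// Assumed skip pattern: exclude anything tagged [NodeFeature:<name>]
		// or [Serial], the way a conformance-style run would.
		skip := regexp.MustCompile(`\[NodeFeature:[^\]]+\]|\[Serial\]`)

		specs := []string{
			"GarbageCollect [Serial][NodeFeature:GarbageCollect]",
			"NodeProblemDetector [NodeFeature:NodeProblemDetector]",
			"Pods should be submitted and removed [Conformance]",
		}
		for _, s := range specs {
			// Only the untagged [Conformance] spec survives the skip filter.
			fmt.Printf("skip=%-5t %q\n", skip.MatchString(s), s)
		}
	}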
diff --git a/test/e2e_node/container_manager_test.go b/test/e2e_node/container_manager_test.go
index 71a011045e5..789a954fef8 100644
--- a/test/e2e_node/container_manager_test.go
+++ b/test/e2e_node/container_manager_test.go
@@ -75,7 +75,7 @@ func validateOOMScoreAdjSettingIsInRange(pid int, expectedMinOOMScoreAdj, expect
 
 var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 	f := framework.NewDefaultFramework("kubelet-container-manager")
-	Describe("Validate OOM score adjustments", func() {
+	Describe("Validate OOM score adjustments [NodeFeature:OOMScoreAdj]", func() {
 		Context("once the node is setup", func() {
 			It("container runtime's oom-score-adj should be -999", func() {
 				runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
diff --git a/test/e2e_node/critical_pod_test.go b/test/e2e_node/critical_pod_test.go
index 6f22dc2aae3..3ed2924d555 100644
--- a/test/e2e_node/critical_pod_test.go
+++ b/test/e2e_node/critical_pod_test.go
@@ -40,7 +40,7 @@ const (
 	bestEffortPodName = "best-effort"
 )
 
-var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]", func() {
 	f := framework.NewDefaultFramework("critical-pod-test")
 
 	Context("when we need to admit a critical pod", func() {
diff --git a/test/e2e_node/eviction_test.go b/test/e2e_node/eviction_test.go
index 08e5752fca9..3f038216692 100644
--- a/test/e2e_node/eviction_test.go
+++ b/test/e2e_node/eviction_test.go
@@ -94,7 +94,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive]", fun
 
 // ImageGCNoEviction tests that the node does not evict pods when inodes are consumed by images
 // Disk pressure is induced by pulling large images
-var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
 	f := framework.NewDefaultFramework("image-gc-eviction-test")
 	pressureTimeout := 10 * time.Minute
 	expectedNodeCondition := v1.NodeDiskPressure
@@ -123,7 +123,7 @@ var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive]",
 
 // MemoryAllocatableEviction tests that the node responds to node memory pressure by evicting only responsible pods.
 // Node memory pressure is only encountered because we reserve the majority of the node's capacity via kube-reserved.
-var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
 	f := framework.NewDefaultFramework("memory-allocatable-eviction-test")
 	expectedNodeCondition := v1.NodeMemoryPressure
 	pressureTimeout := 10 * time.Minute
@@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru
 
 // LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
 // Disk pressure is induced by running pods which consume disk space.
-var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
 	f := framework.NewDefaultFramework("localstorage-eviction-test")
 	pressureTimeout := 10 * time.Minute
 	expectedNodeCondition := v1.NodeDiskPressure
@@ -183,7 +183,7 @@ var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive
 // LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
 // Disk pressure is induced by running pods which consume disk space, which exceed the soft eviction threshold.
 // Note: This test's purpose is to test Soft Evictions. Local storage was chosen since it is the least costly to run.
-var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
 	f := framework.NewDefaultFramework("localstorage-eviction-test")
 	pressureTimeout := 10 * time.Minute
 	expectedNodeCondition := v1.NodeDiskPressure
@@ -218,7 +218,7 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup
 })
 
 // LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions
-var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:LocalStorageCapacityIsolation]", func() {
+var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:Eviction]", func() {
 	f := framework.NewDefaultFramework("localstorage-eviction-test")
 	evictionTestTimeout := 10 * time.Minute
 	Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
@@ -271,7 +271,7 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se
 // PriorityMemoryEvictionOrdering tests that the node responds to node memory pressure by evicting pods.
 // This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
 // the higher priority pod.
-var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
 	f := framework.NewDefaultFramework("priority-memory-eviction-ordering-test")
 	expectedNodeCondition := v1.NodeMemoryPressure
 	pressureTimeout := 10 * time.Minute
@@ -317,7 +317,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 // PriorityLocalStorageEvictionOrdering tests that the node responds to node disk pressure by evicting pods.
 // This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
 // the higher priority pod.
-var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
 	f := framework.NewDefaultFramework("priority-disk-eviction-ordering-test")
 	expectedNodeCondition := v1.NodeDiskPressure
 	pressureTimeout := 10 * time.Minute
diff --git a/test/e2e_node/garbage_collector_test.go b/test/e2e_node/garbage_collector_test.go
index d6a9244ad43..2ee6ce772db 100644
--- a/test/e2e_node/garbage_collector_test.go
+++ b/test/e2e_node/garbage_collector_test.go
@@ -71,7 +71,7 @@ type testRun struct {
 
 // GarbageCollect tests that the Kubelet conforms to the Kubelet Garbage Collection Policy, found here:
 // http://kubernetes.io/docs/admin/garbage-collection/
-var _ = framework.KubeDescribe("GarbageCollect [Serial]", func() {
+var _ = framework.KubeDescribe("GarbageCollect [Serial][NodeFeature:GarbageCollect]", func() {
 	f := framework.NewDefaultFramework("garbage-collect-test")
 	containerNamePrefix := "gc-test-container-"
 	podNamePrefix := "gc-test-pod-"
diff --git a/test/e2e_node/image_id_test.go b/test/e2e_node/image_id_test.go
index 090eafa2863..198d204c44a 100644
--- a/test/e2e_node/image_id_test.go
+++ b/test/e2e_node/image_id_test.go
@@ -26,7 +26,7 @@ import (
 	. "github.com/onsi/gomega"
 )
 
-var _ = framework.KubeDescribe("ImageID", func() {
+var _ = framework.KubeDescribe("ImageID [NodeFeature:ImageID]", func() {
 
 	busyBoxImage := "k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff"
 
diff --git a/test/e2e_node/node_container_manager_test.go b/test/e2e_node/node_container_manager_test.go
index 09e6dfad41d..58223e08434 100644
--- a/test/e2e_node/node_container_manager_test.go
+++ b/test/e2e_node/node_container_manager_test.go
@@ -56,7 +56,7 @@ func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration)
 
 var _ = framework.KubeDescribe("Node Container Manager [Serial]", func() {
 	f := framework.NewDefaultFramework("node-container-manager")
-	Describe("Validate Node Allocatable", func() {
+	Describe("Validate Node Allocatable [NodeFeature:NodeAllocatable]", func() {
 		It("set's up the node and runs the test", func() {
 			framework.ExpectNoError(runTest(f))
 		})
diff --git a/test/e2e_node/node_problem_detector_linux.go b/test/e2e_node/node_problem_detector_linux.go
index 3baf3287337..64aa7ac7207 100644
--- a/test/e2e_node/node_problem_detector_linux.go
+++ b/test/e2e_node/node_problem_detector_linux.go
@@ -40,7 +40,7 @@ import (
 	. "github.com/onsi/gomega"
 )
 
-var _ = framework.KubeDescribe("NodeProblemDetector", func() {
+var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDetector]", func() {
 	const (
 		pollInterval   = 1 * time.Second
 		pollConsistent = 5 * time.Second
diff --git a/test/e2e_node/restart_test.go b/test/e2e_node/restart_test.go
index 7b68ea74011..4b50c3479ea 100644
--- a/test/e2e_node/restart_test.go
+++ b/test/e2e_node/restart_test.go
@@ -59,7 +59,7 @@ func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (
 	return runningPods
 }
 
-var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
+var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeature:ContainerRuntimeRestart]", func() {
 	const (
 		// Saturate the node. It's not necessary that all these pods enter
 		// Running/Ready, because we don't know the number of cores in the
diff --git a/test/e2e_node/security_context_test.go b/test/e2e_node/security_context_test.go
index a6e2f619bc8..3d0aa231cc1 100644
--- a/test/e2e_node/security_context_test.go
+++ b/test/e2e_node/security_context_test.go
@@ -156,7 +156,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 			nginxPid = strings.TrimSpace(output)
 		})
 
-		It("should show its pid in the host PID namespace", func() {
+		It("should show its pid in the host PID namespace [NodeFeature:HostAccess]", func() {
 			busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID())
 			createAndWaitHostPidPod(busyboxPodName, true)
 			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -176,7 +176,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 			}
 		})
 
-		It("should not show its pid in the non-hostpid containers", func() {
+		It("should not show its pid in the non-hostpid containers [NodeFeature:HostAccess]", func() {
 			busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID())
 			createAndWaitHostPidPod(busyboxPodName, false)
 			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -232,7 +232,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 			framework.Logf("Got host shared memory ID %q", hostSharedMemoryID)
 		})
 
-		It("should show the shared memory ID in the host IPC containers", func() {
+		It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func() {
 			ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID())
 			createAndWaitHostIPCPod(ipcutilsPodName, true)
 			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@@ -247,7 +247,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 			}
 		})
 
-		It("should not show the shared memory ID in the non-hostIPC containers", func() {
+		It("should not show the shared memory ID in the non-hostIPC containers [NodeFeature:HostAccess]", func() {
 			ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID())
 			createAndWaitHostIPCPod(ipcutilsPodName, false)
 			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@@ -315,7 +315,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 			framework.Logf("Opened a new tcp port %q", listeningPort)
 		})
 
-		It("should listen on same port in the host network containers", func() {
+		It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func() {
 			busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID())
 			createAndWaitHostNetworkPod(busyboxPodName, true)
 			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -329,7 +329,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 			}
 		})
 
-		It("shouldn't show the same port in the non-hostnetwork containers", func() {
+		It("shouldn't show the same port in the non-hostnetwork containers [NodeFeature:HostAccess]", func() {
 			busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID())
 			createAndWaitHostNetworkPod(busyboxPodName, false)
 			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -555,7 +555,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 			return podName
 		}
 
-		It("should run the container as privileged when true", func() {
[NodeFeature:HostAccess]", func() { podName := createAndWaitUserPod(true) logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) if err != nil {