Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-25 04:33:26 +00:00)
test/e2e_node: Add NodeFeature tags to non-conformance tests
Serial tests are not considered for conformance tests.
commit 90750c77c3
parent ff62f037b8
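These bracketed tags exist for spec selection: Ginkgo matches focus/skip regular expressions against the full spec name, so a conformance run can drop every [Serial] and [NodeFeature:...] spec with a single skip pattern. Below is a minimal, self-contained Go sketch of that matching; the spec names and the exact skip expression are illustrative, not the runner's literal configuration.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The kind of expression a conformance run passes via --ginkgo.skip:
	// drop Serial specs and anything gated behind a NodeFeature tag.
	skip := regexp.MustCompile(`\[(Serial|NodeFeature:[^\]]*)\]`)

	specs := []string{
		"Container Manager Misc [Serial] Validate OOM score adjustments [NodeFeature:OOMScoreAdj]",
		"GarbageCollect [Serial][NodeFeature:GarbageCollect]",
		"ImageID [NodeFeature:ImageID]",
		"some conformance-eligible spec [NodeConformance]", // stays in the run
	}
	for _, s := range specs {
		fmt.Printf("skipped=%-5v %s\n", skip.MatchString(s), s)
	}
}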
@@ -75,7 +75,7 @@ func validateOOMScoreAdjSettingIsInRange(pid int, expectedMinOOMScoreAdj, expect
 
 var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 	f := framework.NewDefaultFramework("kubelet-container-manager")
-	Describe("Validate OOM score adjustments", func() {
+	Describe("Validate OOM score adjustments [NodeFeature:OOMScoreAdj]", func() {
 		Context("once the node is setup", func() {
 			It("container runtime's oom-score-adj should be -999", func() {
 				runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
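The -999 assertion ultimately reads the runtime's oom_score_adj from procfs. The real helpers (getPidsForProcess, validateOOMScoreAdjSettingIsInRange) live outside this hunk; the following is a hedged, self-contained sketch of the check, not the upstream implementation.

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// oomScoreAdj reads /proc/<pid>/oom_score_adj, the knob the kubelet sets to
// -999 for the container runtime so the kernel OOM killer spares it.
func oomScoreAdj(pid int) (int, error) {
	b, err := os.ReadFile(fmt.Sprintf("/proc/%d/oom_score_adj", pid))
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(string(b)))
}

// validateOOMScoreAdjSettingIsInRange mirrors the helper named in the hunk
// header above; this body is illustrative only.
func validateOOMScoreAdjSettingIsInRange(pid, min, max int) error {
	v, err := oomScoreAdj(pid)
	if err != nil {
		return err
	}
	if v < min || v > max {
		return fmt.Errorf("pid %d: oom_score_adj %d outside [%d, %d]", pid, v, min, max)
	}
	return nil
}

func main() {
	fmt.Println(validateOOMScoreAdjSettingIsInRange(os.Getpid(), -1000, 1000))
}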
@@ -40,7 +40,7 @@ const (
 	bestEffortPodName = "best-effort"
 )
 
-var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:CriticalPod]", func() {
 	f := framework.NewDefaultFramework("critical-pod-test")
 
 	Context("when we need to admit a critical pod", func() {
@@ -94,7 +94,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive]", fun
 
 // ImageGCNoEviction tests that the node does not evict pods when inodes are consumed by images
 // Disk pressure is induced by pulling large images
-var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
 	f := framework.NewDefaultFramework("image-gc-eviction-test")
 	pressureTimeout := 10 * time.Minute
 	expectedNodeCondition := v1.NodeDiskPressure
@@ -123,7 +123,7 @@ var _ = framework.KubeDescribe("ImageGCNoEviction [Slow] [Serial] [Disruptive]",
 
 // MemoryAllocatableEviction tests that the node responds to node memory pressure by evicting only responsible pods.
 // Node memory pressure is only encountered because we reserve the majority of the node's capacity via kube-reserved.
-var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
 	f := framework.NewDefaultFramework("memory-allocatable-eviction-test")
 	expectedNodeCondition := v1.NodeMemoryPressure
 	pressureTimeout := 10 * time.Minute
@@ -155,7 +155,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru
 
 // LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
 // Disk pressure is induced by running pods which consume disk space.
-var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
 	f := framework.NewDefaultFramework("localstorage-eviction-test")
 	pressureTimeout := 10 * time.Minute
 	expectedNodeCondition := v1.NodeDiskPressure
@@ -183,7 +183,7 @@ var _ = framework.KubeDescribe("LocalStorageEviction [Slow] [Serial] [Disruptive
 // LocalStorageEviction tests that the node responds to node disk pressure by evicting only responsible pods
 // Disk pressure is induced by running pods which consume disk space, which exceed the soft eviction threshold.
 // Note: This test's purpose is to test Soft Evictions. Local storage was chosen since it is the least costly to run.
-var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
 	f := framework.NewDefaultFramework("localstorage-eviction-test")
 	pressureTimeout := 10 * time.Minute
 	expectedNodeCondition := v1.NodeDiskPressure
@@ -218,7 +218,7 @@ var _ = framework.KubeDescribe("LocalStorageSoftEviction [Slow] [Serial] [Disrup
 })
 
 // LocalStorageCapacityIsolationEviction tests that container and volume local storage limits are enforced through evictions
-var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:LocalStorageCapacityIsolation]", func() {
+var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Serial] [Disruptive] [Feature:LocalStorageCapacityIsolation][NodeFeature:Eviction]", func() {
 	f := framework.NewDefaultFramework("localstorage-eviction-test")
 	evictionTestTimeout := 10 * time.Minute
 	Context(fmt.Sprintf(testContextFmt, "evictions due to pod local storage violations"), func() {
@@ -271,7 +271,7 @@ var _ = framework.KubeDescribe("LocalStorageCapacityIsolationEviction [Slow] [Se
 // PriorityMemoryEvictionOrdering tests that the node responds to node memory pressure by evicting pods.
 // This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
 // the higher priority pod.
-var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
 	f := framework.NewDefaultFramework("priority-memory-eviction-ordering-test")
 	expectedNodeCondition := v1.NodeMemoryPressure
 	pressureTimeout := 10 * time.Minute
@@ -317,7 +317,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
 // PriorityLocalStorageEvictionOrdering tests that the node responds to node disk pressure by evicting pods.
 // This test tests that the guaranteed pod is never evicted, and that the lower-priority pod is evicted before
 // the higher priority pod.
-var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive]", func() {
+var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Serial] [Disruptive][NodeFeature:Eviction]", func() {
 	f := framework.NewDefaultFramework("priority-disk-eviction-ordering-test")
 	expectedNodeCondition := v1.NodeDiskPressure
 	pressureTimeout := 10 * time.Minute
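Every eviction spec above follows the same shape: pick an expectedNodeCondition (v1.NodeMemoryPressure or v1.NodeDiskPressure), induce pressure, and poll until the kubelet reports that condition within pressureTimeout. A self-contained sketch of the predicate such a poll checks; hasNodeCondition is an assumed name for this sketch, while the v1 types are the real k8s.io/api ones.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// hasNodeCondition reports whether the node currently has the given condition
// set to True: the predicate an eviction spec polls until pressureTimeout.
func hasNodeCondition(node *v1.Node, want v1.NodeConditionType) bool {
	for _, c := range node.Status.Conditions {
		if c.Type == want && c.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}

func main() {
	node := &v1.Node{Status: v1.NodeStatus{Conditions: []v1.NodeCondition{
		{Type: v1.NodeDiskPressure, Status: v1.ConditionTrue},
	}}}
	fmt.Println(hasNodeCondition(node, v1.NodeDiskPressure))   // true
	fmt.Println(hasNodeCondition(node, v1.NodeMemoryPressure)) // false
}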
@@ -71,7 +71,7 @@ type testRun struct {
 
 // GarbageCollect tests that the Kubelet conforms to the Kubelet Garbage Collection Policy, found here:
 // http://kubernetes.io/docs/admin/garbage-collection/
-var _ = framework.KubeDescribe("GarbageCollect [Serial]", func() {
+var _ = framework.KubeDescribe("GarbageCollect [Serial][NodeFeature:GarbageCollect]", func() {
 	f := framework.NewDefaultFramework("garbage-collect-test")
 	containerNamePrefix := "gc-test-container-"
 	podNamePrefix := "gc-test-pod-"
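The linked garbage-collection policy is driven by kubelet configuration. A hedged sketch of the image-GC thresholds involved, using the internal kubeletconfig package that another hunk in this commit references; the field names are believed accurate for this era of the tree and the values are illustrative.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
)

func main() {
	var cfg kubeletconfig.KubeletConfiguration
	// Image GC starts when disk usage crosses the high threshold and frees
	// images until usage falls back to the low threshold.
	cfg.ImageGCHighThresholdPercent = 85
	cfg.ImageGCLowThresholdPercent = 80
	fmt.Printf("image GC band: %d%%-%d%%\n",
		cfg.ImageGCLowThresholdPercent, cfg.ImageGCHighThresholdPercent)
}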
@@ -26,7 +26,7 @@ import (
 	. "github.com/onsi/gomega"
 )
 
-var _ = framework.KubeDescribe("ImageID", func() {
+var _ = framework.KubeDescribe("ImageID [NodeFeature:ImageID]", func() {
 
 	busyBoxImage := "k8s.gcr.io/busybox@sha256:4bdd623e848417d96127e16037743f0cd8b528c026e9175e22a84f639eca58ff"
 
@@ -56,7 +56,7 @@ func setDesiredConfiguration(initialConfig *kubeletconfig.KubeletConfiguration)
 
 var _ = framework.KubeDescribe("Node Container Manager [Serial]", func() {
 	f := framework.NewDefaultFramework("node-container-manager")
-	Describe("Validate Node Allocatable", func() {
+	Describe("Validate Node Allocatable [NodeFeature:NodeAllocatable]", func() {
 		It("sets up the node and runs the test", func() {
 			framework.ExpectNoError(runTest(f))
 		})
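For reference, the invariant the Node Allocatable suite exercises is arithmetic over resource quantities: allocatable = capacity - kube-reserved - system-reserved - hard eviction threshold. A self-contained sketch with illustrative values; resource.Quantity is the real apimachinery type.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	capacity := resource.MustParse("8Gi")         // node memory capacity
	kubeReserved := resource.MustParse("1Gi")     // reserved for kubelet/runtime
	systemReserved := resource.MustParse("500Mi") // reserved for OS daemons
	evictionHard := resource.MustParse("100Mi")   // hard eviction threshold

	allocatable := capacity.DeepCopy()
	allocatable.Sub(kubeReserved)
	allocatable.Sub(systemReserved)
	allocatable.Sub(evictionHard)

	// 8192Mi - 1024Mi - 500Mi - 100Mi = 6568Mi
	fmt.Println(allocatable.String())
}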
@@ -40,7 +40,7 @@ import (
 	. "github.com/onsi/gomega"
 )
 
-var _ = framework.KubeDescribe("NodeProblemDetector", func() {
+var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDetector]", func() {
 	const (
 		pollInterval   = 1 * time.Second
 		pollConsistent = 5 * time.Second
@@ -59,7 +59,7 @@ func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (
 	return runningPods
 }
 
-var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
+var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeature:ContainerRuntimeRestart]", func() {
 	const (
 		// Saturate the node. It's not necessary that all these pods enter
 		// Running/Ready, because we don't know the number of cores in the
@@ -156,7 +156,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 			nginxPid = strings.TrimSpace(output)
 		})
 
-		It("should show its pid in the host PID namespace", func() {
+		It("should show its pid in the host PID namespace [NodeFeature:HostAccess]", func() {
 			busyboxPodName := "busybox-hostpid-" + string(uuid.NewUUID())
 			createAndWaitHostPidPod(busyboxPodName, true)
 			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
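createAndWaitHostPidPod is defined elsewhere in this file; what matters for the [NodeFeature:HostAccess] tag is that it flips PodSpec.HostPID. A hedged sketch of the pod it presumably builds; the helper shown here and its command are illustrative, while HostPID is the real v1.PodSpec field.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// hostPidPod builds a busybox pod that optionally shares the host PID
// namespace, the property the two hostpid specs above assert on.
func hostPidPod(name string, hostPID bool) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			HostPID:       hostPID,
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:  name,
				Image: "busybox",
				// List processes; with HostPID=true this sees the host's.
				Command: []string{"sh", "-c", "ps"},
			}},
		},
	}
}

func main() {
	fmt.Println(hostPidPod("busybox-hostpid-demo", true).Spec.HostPID) // true
}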
@@ -176,7 +176,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 			}
 		})
 
-		It("should not show its pid in the non-hostpid containers", func() {
+		It("should not show its pid in the non-hostpid containers [NodeFeature:HostAccess]", func() {
 			busyboxPodName := "busybox-non-hostpid-" + string(uuid.NewUUID())
 			createAndWaitHostPidPod(busyboxPodName, false)
 			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -232,7 +232,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 			framework.Logf("Got host shared memory ID %q", hostSharedMemoryID)
 		})
 
-		It("should show the shared memory ID in the host IPC containers", func() {
+		It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func() {
 			ipcutilsPodName := "ipcutils-hostipc-" + string(uuid.NewUUID())
 			createAndWaitHostIPCPod(ipcutilsPodName, true)
 			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@@ -247,7 +247,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 			}
 		})
 
-		It("should not show the shared memory ID in the non-hostIPC containers", func() {
+		It("should not show the shared memory ID in the non-hostIPC containers [NodeFeature:HostAccess]", func() {
 			ipcutilsPodName := "ipcutils-non-hostipc-" + string(uuid.NewUUID())
 			createAndWaitHostIPCPod(ipcutilsPodName, false)
 			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
@@ -315,7 +315,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 			framework.Logf("Opened a new tcp port %q", listeningPort)
 		})
 
-		It("should listen on same port in the host network containers", func() {
+		It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func() {
 			busyboxPodName := "busybox-hostnetwork-" + string(uuid.NewUUID())
 			createAndWaitHostNetworkPod(busyboxPodName, true)
 			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -329,7 +329,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 			}
 		})
 
-		It("shouldn't show the same port in the non-hostnetwork containers", func() {
+		It("shouldn't show the same port in the non-hostnetwork containers [NodeFeature:HostAccess]", func() {
 			busyboxPodName := "busybox-non-hostnetwork-" + string(uuid.NewUUID())
 			createAndWaitHostNetworkPod(busyboxPodName, false)
 			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
@@ -555,7 +555,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
 			return podName
 		}
 
-		It("should run the container as privileged when true", func() {
+		It("should run the container as privileged when true [NodeFeature:HostAccess]", func() {
 			podName := createAndWaitUserPod(true)
 			logs, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
 			if err != nil {
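Finally, the privileged spec: createAndWaitUserPod(true) is a file-local helper not shown in this diff, but the mechanism it exercises is the Privileged *bool on v1.SecurityContext. A minimal illustrative sketch, not the helper's actual body.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	privileged := true
	c := v1.Container{
		Name:  "privileged-demo",
		Image: "busybox",
		// Privileged grants the container broad host access, which is why
		// these specs carry the [NodeFeature:HostAccess] tag.
		SecurityContext: &v1.SecurityContext{Privileged: &privileged},
	}
	fmt.Println(*c.SecurityContext.Privileged) // true
}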
|
Loading…
Reference in New Issue
Block a user