Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-28 22:17:14 +00:00
Merge pull request #44520 from dashpole/test_eviction_fix

Automatic merge from submit-queue (batch tested with PRs 44520, 45253, 45838, 44685, 45901)

Ensure ordering of using dynamic kubelet config and setting up tests.

This PR simply places the body of the eviction test within its own context. This ensures that the kubelet config is set before the pods are created, and that the kubelet config is reverted only after the pods are deleted.
Commit: 85775105f1
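The fix relies on Ginkgo's nesting rules: BeforeEach blocks registered in an outer container run before those in a nested container, and AfterEach blocks run in the reverse order, innermost first. A minimal, self-contained sketch of that ordering (the package, spec names, and trace slice are illustrative only, not taken from the e2e_node suite):

package ordering_test

import (
	"testing"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func TestOrdering(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "nested setup/teardown ordering")
}

var _ = Describe("nested containers", func() {
	var trace []string

	// Outer container: plays the role of tempSetCurrentKubeletConfig,
	// i.e. "set kubelet config" before the spec, "revert" after it.
	BeforeEach(func() { trace = []string{"set config"} })
	AfterEach(func() { trace = append(trace, "revert config") })

	Context("With kubeconfig updated", func() {
		// Inner container: plays the role of runEvictionTest's pod setup/cleanup.
		BeforeEach(func() { trace = append(trace, "create pods") })
		AfterEach(func() { trace = append(trace, "delete pods") })

		It("sets the config before creating pods", func() {
			// The outer BeforeEach has already run by the time the inner one runs.
			Expect(trace).To(Equal([]string{"set config", "create pods"}))
		})
		// After the spec, Ginkgo runs the inner AfterEach ("delete pods")
		// before the outer one ("revert config").
	})
})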
@@ -65,7 +65,9 @@ var _ = framework.KubeDescribe("AllocatableEviction [Slow] [Serial] [Disruptive]
 	}
 	evictionTestTimeout := 40 * time.Minute
 	testCondition := "Memory Pressure"
-	kubeletConfigUpdate := func(initialConfig *componentconfig.KubeletConfiguration) {
+	Context(fmt.Sprintf("when we run containers that should cause %s", testCondition), func() {
+		tempSetCurrentKubeletConfig(f, func(initialConfig *componentconfig.KubeletConfiguration) {
 		initialConfig.EvictionHard = "memory.available<10%"
 		// Set large system and kube reserved values to trigger allocatable thresholds far before hard eviction thresholds.
 		initialConfig.SystemReserved = componentconfig.ConfigurationMap(map[string]string{"memory": "1Gi"})
@@ -73,8 +75,12 @@ var _ = framework.KubeDescribe("AllocatableEviction [Slow] [Serial] [Disruptive]
 		initialConfig.EnforceNodeAllocatable = []string{cm.NodeAllocatableEnforcementKey}
 		initialConfig.ExperimentalNodeAllocatableIgnoreEvictionThreshold = false
 		initialConfig.CgroupsPerQOS = true
-	}
-	runEvictionTest(f, testCondition, podTestSpecs, evictionTestTimeout, hasMemoryPressure, kubeletConfigUpdate)
+		})
+		// Place the remainder of the test within a context so that the kubelet config is set before and after the test.
+		Context("With kubeconfig updated", func() {
+			runEvictionTest(f, testCondition, podTestSpecs, evictionTestTimeout, hasMemoryPressure)
+		})
+	})
 })
 
 // Returns TRUE if the node has Memory Pressure, FALSE otherwise
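The in-hunk comment about large reserved values is easier to see with numbers. Node Allocatable is roughly capacity minus system-reserved, kube-reserved, and the hard eviction threshold (and ExperimentalNodeAllocatableIgnoreEvictionThreshold = false keeps the threshold in that subtraction), so reserving memory pushes the allocatable limit well below the point where node-level hard eviction would fire. A rough sketch, assuming a hypothetical 8Gi node and treating kube-reserved as zero because that line falls outside the visible hunk:

package main

import "fmt"

func main() {
	const gi = int64(1) << 30

	capacity := 8 * gi            // hypothetical node memory, not from the diff
	systemReserved := 1 * gi      // initialConfig.SystemReserved["memory"] = "1Gi" (from the hunk above)
	kubeReserved := 0 * gi        // set in the elided part of the test; treated as zero here
	hardEviction := capacity / 10 // EvictionHard = "memory.available<10%"

	// Node Allocatable: the memory budget pods may consume before the
	// allocatable threshold (the condition this test drives past) is hit.
	allocatable := capacity - systemReserved - kubeReserved - hardEviction

	fmt.Printf("allocatable limit:      %.1f Gi of pod usage\n", float64(allocatable)/float64(gi))
	fmt.Printf("hard eviction trips at: %.1f Gi of node usage\n", float64(capacity-hardEviction)/float64(gi))
	// With these numbers the allocatable limit sits at ~6.2 Gi while the
	// node-level hard eviction threshold is only reached near ~7.2 Gi,
	// so the allocatable threshold fires first, as the comment intends.
}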
@@ -114,11 +114,16 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flak
 	}
 	evictionTestTimeout := 30 * time.Minute
 	testCondition := "Disk Pressure due to Inodes"
-	kubeletConfigUpdate := func(initialConfig *componentconfig.KubeletConfiguration) {
-		initialConfig.EvictionHard = "nodefs.inodesFree<50%"
-	}
-
-	runEvictionTest(f, testCondition, podTestSpecs, evictionTestTimeout, hasInodePressure, kubeletConfigUpdate)
+	Context(fmt.Sprintf("when we run containers that should cause %s", testCondition), func() {
+		tempSetCurrentKubeletConfig(f, func(initialConfig *componentconfig.KubeletConfiguration) {
+			initialConfig.EvictionHard = "nodefs.inodesFree<50%"
+		})
+		// Place the remainder of the test within a context so that the kubelet config is set before and after the test.
+		Context("With kubeconfig updated", func() {
+			runEvictionTest(f, testCondition, podTestSpecs, evictionTestTimeout, hasInodePressure)
+		})
+	})
 })
 
 // Struct used by runEvictionTest that specifies the pod, and when that pod should be evicted, relative to other pods
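Both call sites now invoke tempSetCurrentKubeletConfig in the outer Context and move runEvictionTest into the inner "With kubeconfig updated" Context. A plausible, self-contained sketch of the pattern such a helper follows, runnable under the same kind of Ginkgo bootstrap as the earlier sketch; KubeletConfig, currentConfig, applyConfig, and tempSetConfig are hypothetical stand-ins, not the real e2e_node helpers:

package evictionsketch_test

import (
	. "github.com/onsi/ginkgo"
)

// KubeletConfig is a hypothetical stand-in for componentconfig.KubeletConfiguration.
type KubeletConfig struct{ EvictionHard string }

var liveConfig = KubeletConfig{EvictionHard: "memory.available<100Mi"}

func currentConfig() KubeletConfig { return liveConfig }
func applyConfig(c KubeletConfig)  { liveConfig = c } // stand-in for pushing dynamic kubelet config

// tempSetConfig mirrors the role tempSetCurrentKubeletConfig plays in the diff:
// called at spec-tree construction time, it registers a BeforeEach that applies
// an updated config and an AfterEach that restores the original.
func tempSetConfig(update func(*KubeletConfig)) {
	var saved KubeletConfig
	BeforeEach(func() {
		saved = currentConfig()
		updated := saved
		update(&updated)
		applyConfig(updated)
	})
	AfterEach(func() {
		applyConfig(saved)
	})
}

var _ = Describe("InodeEviction (sketch)", func() {
	Context("when we run containers that should cause Disk Pressure due to Inodes", func() {
		tempSetConfig(func(c *KubeletConfig) { c.EvictionHard = "nodefs.inodesFree<50%" })

		Context("With kubeconfig updated", func() {
			BeforeEach(func() {
				// Pod creation goes here; the config BeforeEach above has already run.
			})
			AfterEach(func() {
				// Pod deletion goes here; the config AfterEach above has not run yet.
			})
			It("runs with the temporary eviction threshold in place", func() {})
		})
	})
})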
@@ -137,11 +142,7 @@ type podTestSpec struct
 // It ensures that all lower evictionPriority pods are eventually evicted.
 // runEvictionTest then cleans up the testing environment by deleting provided nodes, and ensures that testCondition no longer exists
 func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs []podTestSpec, evictionTestTimeout time.Duration,
-	hasPressureCondition func(*framework.Framework, string) (bool, error), updateFunction func(initialConfig *componentconfig.KubeletConfiguration)) {
-
-	Context(fmt.Sprintf("when we run containers that should cause %s", testCondition), func() {
-
-		tempSetCurrentKubeletConfig(f, updateFunction)
+	hasPressureCondition func(*framework.Framework, string) (bool, error)) {
 	BeforeEach(func() {
 		By("seting up pods to be used by tests")
 		for _, spec := range podTestSpecs {
@@ -302,7 +303,6 @@ func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs
 			time.Sleep(postTestConditionMonitoringPeriod)
 		}
 	})
-	})
 }
 
 // Returns TRUE if the node has disk pressure due to inodes exists on the node, FALSE otherwise