Merge pull request #46441 from dashpole/eviction_time

Automatic merge from submit-queue

Shorten eviction tests, and increase test suite timeout

After #43590, the eviction manager is less aggressive when evicting pods. Because of that, many runs in the flaky suite time out.
To shorten the inode eviction test, I raised the nodefs.inodesFree hard-eviction threshold from 50% to 70%, so eviction triggers after fewer inodes have been consumed.
To shorten the allocatable eviction test, I now set KubeReserved based on the node's actual memory capacity (Capacity - 300Mi, which leaves roughly 50Mb allocatable after the default 250Mb hard-eviction threshold), so the memory-hog pod is evicted almost immediately. This shortens the test from 40 minutes to 10 minutes.
While this should be enough to stop hitting the flaky suite timeout, the suite timeout is also raised (TIMEOUT=2h): it is better to bound runtime with lower individual test timeouts than with a low suite timeout, since hitting the suite timeout means that even successful test runs are not reported.
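Below is a minimal, illustrative sketch of that reserved-memory arithmetic. The 3800Mi capacity is a made-up example value; resource.MustParse and Quantity.Sub are the same apimachinery helpers the test now uses, while the real test reads capacity from the node via getNodeCPUAndMemoryCapacity:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Hypothetical node memory capacity (the real test reads this from the node).
	kubeReserved := resource.MustParse("3800Mi")

	// Allocatable = Capacity - KubeReserved - hard eviction threshold (250Mb by default),
	// so reserving Capacity - 300Mi leaves only ~50Mi allocatable and the memory-hog
	// pod crosses the allocatable limit almost immediately.
	kubeReserved.Sub(resource.MustParse("300Mi"))

	fmt.Println("kube-reserved memory:", kubeReserved.String()) // prints: kube-reserved memory: 3500Mi
}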

/assign @Random-Liu @mtaufen 

issue: #31362
Commit f4d2c7b931, authored by Kubernetes Submit Queue on 2017-06-13 12:58:22 -07:00, committed by GitHub.
4 changed files with 60 additions and 65 deletions


@@ -20,7 +20,7 @@ import (
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
nodeutil "k8s.io/kubernetes/pkg/api/v1/node"
"k8s.io/kubernetes/pkg/apis/componentconfig"
@@ -34,44 +34,30 @@ import (
// Eviction Policy is described here:
// https://github.com/kubernetes/kubernetes/blob/master/docs/proposals/kubelet-eviction.md
var _ = framework.KubeDescribe("AllocatableEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
f := framework.NewDefaultFramework("allocatable-eviction-test")
var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
f := framework.NewDefaultFramework("memory-allocatable-eviction-test")
podTestSpecs := []podTestSpec{
{
evictionPriority: 1, // This pod should be evicted before the innocent pod
pod: *getMemhogPod("memory-hog-pod", "memory-hog", v1.ResourceRequirements{}),
pod: getMemhogPod("memory-hog-pod", "memory-hog", v1.ResourceRequirements{}),
},
{
evictionPriority: 0, // This pod should never be evicted
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "innocent-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/busybox:1.24",
Name: "normal-memory-usage-container",
Command: []string{
"sh",
"-c", //make one big (5 Gb) file
"dd if=/dev/urandom of=largefile bs=5000000000 count=1; while true; do sleep 5; done",
},
},
},
},
},
pod: getInnocentPod(),
},
}
evictionTestTimeout := 40 * time.Minute
evictionTestTimeout := 10 * time.Minute
testCondition := "Memory Pressure"
Context(fmt.Sprintf("when we run containers that should cause %s", testCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *componentconfig.KubeletConfiguration) {
initialConfig.EvictionHard = "memory.available<10%"
// Set large system and kube reserved values to trigger allocatable thresholds far before hard eviction thresholds.
initialConfig.SystemReserved = componentconfig.ConfigurationMap(map[string]string{"memory": "1Gi"})
initialConfig.KubeReserved = componentconfig.ConfigurationMap(map[string]string{"memory": "1Gi"})
kubeReserved := getNodeCPUAndMemoryCapacity(f)[v1.ResourceMemory]
// The default hard eviction threshold is 250Mb, so Allocatable = Capacity - Reserved - 250Mb
// We want Allocatable = 50Mb, so set Reserved = Capacity - Allocatable - 250Mb = Capacity - 300Mb
kubeReserved.Sub(resource.MustParse("300Mi"))
initialConfig.KubeReserved = componentconfig.ConfigurationMap(map[string]string{"memory": kubeReserved.String()})
initialConfig.EnforceNodeAllocatable = []string{cm.NodeAllocatableEnforcementKey}
initialConfig.ExperimentalNodeAllocatableIgnoreEvictionThreshold = false
initialConfig.CgroupsPerQOS = true


@@ -18,6 +18,7 @@ package e2e_node
import (
"fmt"
"path/filepath"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -45,22 +46,19 @@ const (
var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flaky]", func() {
f := framework.NewDefaultFramework("inode-eviction-test")
volumeMountPath := "/test-empty-dir-mnt"
podTestSpecs := []podTestSpec{
{
evictionPriority: 1, // This pod should be evicted before the normal memory usage pod
pod: v1.Pod{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "container-inode-hog-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/busybox:1.24",
Name: "container-inode-hog-pod",
Command: []string{
"sh",
"-c", // Make 100 billion small files (more than we have inodes)
"i=0; while [[ $i -lt 100000000000 ]]; do touch smallfile$i.txt; sleep 0.001; i=$((i+=1)); done;",
},
Image: "gcr.io/google_containers/busybox:1.24",
Name: "container-inode-hog-container",
Command: getInodeConsumingCommand(""),
},
},
},
@@ -68,21 +66,17 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flak
},
{
evictionPriority: 1, // This pod should be evicted before the normal memory usage pod
pod: v1.Pod{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "volume-inode-hog-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/busybox:1.24",
Name: "volume-inode-hog-pod",
Command: []string{
"sh",
"-c", // Make 100 billion small files (more than we have inodes)
"i=0; while [[ $i -lt 100000000000 ]]; do touch /test-empty-dir-mnt/smallfile$i.txt; sleep 0.001; i=$((i+=1)); done;",
},
Image: "gcr.io/google_containers/busybox:1.24",
Name: "volume-inode-hog-container",
Command: getInodeConsumingCommand(volumeMountPath),
VolumeMounts: []v1.VolumeMount{
{MountPath: "/test-empty-dir-mnt", Name: "test-empty-dir"},
{MountPath: volumeMountPath, Name: "test-empty-dir"},
},
},
},
@@ -94,23 +88,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flak
},
{
evictionPriority: 0, // This pod should never be evicted
pod: v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "normal-memory-usage-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/busybox:1.24",
Name: "normal-memory-usage-pod",
Command: []string{
"sh",
"-c", //make one big (5 Gb) file
"dd if=/dev/urandom of=largefile bs=5000000000 count=1; while true; do sleep 5; done",
},
},
},
},
},
pod: getInnocentPod(),
},
}
evictionTestTimeout := 30 * time.Minute
@@ -118,7 +96,7 @@ var _ = framework.KubeDescribe("InodeEviction [Slow] [Serial] [Disruptive] [Flak
Context(fmt.Sprintf("when we run containers that should cause %s", testCondition), func() {
tempSetCurrentKubeletConfig(f, func(initialConfig *componentconfig.KubeletConfiguration) {
initialConfig.EvictionHard = "nodefs.inodesFree<50%"
initialConfig.EvictionHard = "nodefs.inodesFree<70%"
})
// Place the remainder of the test within a context so that the kubelet config is set before and after the test.
Context("With kubeconfig updated", func() {
@@ -133,7 +111,7 @@ type podTestSpec struct {
// If two are ranked at 1, either is permitted to fail before the other.
// The test ends when all other than the 0 have been evicted
evictionPriority int
pod v1.Pod
pod *v1.Pod
}
// runEvictionTest sets up a testing environment given the provided nodes, and checks a few things:
@@ -148,7 +126,7 @@ func runEvictionTest(f *framework.Framework, testCondition string, podTestSpecs
By("seting up pods to be used by tests")
for _, spec := range podTestSpecs {
By(fmt.Sprintf("creating pod with container: %s", spec.pod.Name))
f.PodClient().CreateSync(&spec.pod)
f.PodClient().CreateSync(spec.pod)
}
})
@@ -342,3 +320,32 @@ func hasInodePressure(f *framework.Framework, testCondition string) (bool, error
}
return hasPressure, nil
}
// returns a pod that does not use any resources
func getInnocentPod() *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "innocent-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
Containers: []v1.Container{
{
Image: "gcr.io/google_containers/busybox:1.24",
Name: "innocent-container",
Command: []string{
"sh",
"-c", //make one large file
"dd if=/dev/urandom of=largefile bs=5000000000 count=1; while true; do sleep 5; done",
},
},
},
},
}
}
func getInodeConsumingCommand(path string) []string {
return []string{
"sh",
"-c",
fmt.Sprintf("i=0; while true; do touch %s${i}.txt; sleep 0.001; i=$((i+=1)); done;", filepath.Join(path, "smallfile")),
}
}


@@ -7,3 +7,5 @@ GINKGO_FLAGS='--focus="\[Flaky\]"'
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
KUBELET_ARGS='--cgroups-per-qos=true --cgroup-root=/'
PARALLELISM=1
TIMEOUT=2h


@@ -57,7 +57,7 @@ var _ = framework.KubeDescribe("LocalStorageAllocatableEviction [Slow] [Serial]
podTestSpecs = []podTestSpec{
{
evictionPriority: 1, // This pod should be evicted before the innocent pod
pod: v1.Pod{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "container-disk-hog-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
@@ -74,7 +74,7 @@ var _ = framework.KubeDescribe("LocalStorageAllocatableEviction [Slow] [Serial]
{
evictionPriority: 0, // This pod should never be evicted
pod: v1.Pod{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "idle-pod"},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
@@ -151,7 +151,7 @@ func runLocalStorageEvictionTest(f *framework.Framework, conditionType v1.NodeCo
By("seting up pods to be used by tests")
for _, spec := range *podTestSpecsP {
By(fmt.Sprintf("creating pod with container: %s", spec.pod.Name))
f.PodClient().CreateSync(&spec.pod)
f.PodClient().CreateSync(spec.pod)
}
})