Mirror of https://github.com/k3s-io/kubernetes.git, synced 2026-01-06 07:57:35 +00:00
test/e2e_node
@@ -22,8 +22,8 @@ import (
 	"time"
 
 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 
 	. "github.com/onsi/ginkgo"
@@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
 			// Wait for the memory pressure condition to disappear from the node status before continuing.
 			By("waiting for the memory pressure condition on the node to disappear before ending the test.")
 			Eventually(func() error {
-				nodeList, err := f.ClientSet.Core().Nodes().List(api.ListOptions{})
+				nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
 				if err != nil {
 					return fmt.Errorf("tried to get node list but got error: %v", err)
 				}
@@ -59,8 +59,8 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
 					return fmt.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items)
 				}
 				node := nodeList.Items[0]
-				_, pressure := api.GetNodeCondition(&node.Status, api.NodeMemoryPressure)
-				if pressure != nil && pressure.Status == api.ConditionTrue {
+				_, pressure := v1.GetNodeCondition(&node.Status, v1.NodeMemoryPressure)
+				if pressure != nil && pressure.Status == v1.ConditionTrue {
 					return fmt.Errorf("node is still reporting memory pressure condition: %s", pressure)
 				}
 				return nil
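
For reference, a minimal sketch of the node-condition check that the two hunks above migrate to the versioned types. The package and helper names are illustrative; only the v1.GetNodeCondition call and the ConditionTrue comparison come from the diff itself.

// memorypressure_sketch.go (illustrative file and package names)
package e2enode

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// nodeMemoryPressureCleared returns nil once the node no longer reports the
// MemoryPressure condition as True; otherwise it returns an error, which is
// the error-or-nil shape the Eventually(...) poll in the test expects.
func nodeMemoryPressureCleared(node *v1.Node) error {
	_, pressure := v1.GetNodeCondition(&node.Status, v1.NodeMemoryPressure)
	if pressure != nil && pressure.Status == v1.ConditionTrue {
		return fmt.Errorf("node is still reporting memory pressure condition: %s", pressure)
	}
	return nil
}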
@@ -104,13 +104,13 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
 			// Finally, try starting a new pod and wait for it to be scheduled and running.
 			// This is the final check to try to prevent interference with subsequent tests.
 			podName := "admit-best-effort-pod"
-			f.PodClient().CreateSync(&api.Pod{
-				ObjectMeta: api.ObjectMeta{
+			f.PodClient().CreateSync(&v1.Pod{
+				ObjectMeta: v1.ObjectMeta{
 					Name: podName,
 				},
-				Spec: api.PodSpec{
-					RestartPolicy: api.RestartPolicyNever,
-					Containers: []api.Container{
+				Spec: v1.PodSpec{
+					RestartPolicy: v1.RestartPolicyNever,
+					Containers: []v1.Container{
 						{
 							Image: framework.GetPauseImageNameForHostArch(),
 							Name: podName,
@@ -124,25 +124,25 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
 				By("creating a guaranteed pod, a burstable pod, and a besteffort pod.")
 
 				// A pod is guaranteed only when requests and limits are specified for all the containers and they are equal.
-				guaranteed := createMemhogPod(f, "guaranteed-", "guaranteed", api.ResourceRequirements{
-					Requests: api.ResourceList{
+				guaranteed := createMemhogPod(f, "guaranteed-", "guaranteed", v1.ResourceRequirements{
+					Requests: v1.ResourceList{
 						"cpu": resource.MustParse("100m"),
 						"memory": resource.MustParse("100Mi"),
 					},
-					Limits: api.ResourceList{
+					Limits: v1.ResourceList{
 						"cpu": resource.MustParse("100m"),
 						"memory": resource.MustParse("100Mi"),
 					}})
 
 				// A pod is burstable if limits and requests do not match across all containers.
-				burstable := createMemhogPod(f, "burstable-", "burstable", api.ResourceRequirements{
-					Requests: api.ResourceList{
+				burstable := createMemhogPod(f, "burstable-", "burstable", v1.ResourceRequirements{
+					Requests: v1.ResourceList{
 						"cpu": resource.MustParse("100m"),
 						"memory": resource.MustParse("100Mi"),
 					}})
 
 				// A pod is besteffort if none of its containers have specified any requests or limits.
-				besteffort := createMemhogPod(f, "besteffort-", "besteffort", api.ResourceRequirements{})
+				besteffort := createMemhogPod(f, "besteffort-", "besteffort", v1.ResourceRequirements{})
 
 				// We poll until timeout or all pods are killed.
 				// Inside the func, we check that all pods are in a valid phase with
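
The three ResourceRequirements shapes above match the QoS classes described in the comments: guaranteed, burstable, and besteffort. A rough sketch of that rule for a single container follows; it is a simplification (real QoS is decided across all containers in the pod) and the helper names are illustrative, not part of this commit.

// qos_sketch.go (illustrative names; simplified single-container QoS rule)
package e2enode

import "k8s.io/kubernetes/pkg/api/v1"

func qosForSingleContainer(res v1.ResourceRequirements) string {
	switch {
	case len(res.Requests) == 0 && len(res.Limits) == 0:
		// No requests or limits at all: besteffort.
		return "BestEffort"
	case len(res.Limits) > 0 && equalResourceLists(res.Requests, res.Limits):
		// Requests and limits present and equal for every resource: guaranteed.
		return "Guaranteed"
	default:
		// Anything in between (e.g. requests only): burstable.
		return "Burstable"
	}
}

func equalResourceLists(a, b v1.ResourceList) bool {
	if len(a) != len(b) {
		return false
	}
	for name, qa := range a {
		qb, ok := b[name]
		if !ok || qa.Cmp(qb) != 0 {
			return false
		}
	}
	return true
}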
@@ -174,7 +174,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
 					// see the eviction manager reporting a pressure condition for a while without the besteffort failing,
 					// and we see that the manager did in fact evict the besteffort (this should be in the Kubelet log), we
 					// will have more reason to believe the phase is out of date.
-					nodeList, err := f.ClientSet.Core().Nodes().List(api.ListOptions{})
+					nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
 					if err != nil {
 						glog.Errorf("tried to get node list but got error: %v", err)
 					}
@@ -182,7 +182,7 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
 						glog.Errorf("expected 1 node, but see %d. List: %v", len(nodeList.Items), nodeList.Items)
 					}
 					node := nodeList.Items[0]
-					_, pressure := api.GetNodeCondition(&node.Status, api.NodeMemoryPressure)
+					_, pressure := v1.GetNodeCondition(&node.Status, v1.NodeMemoryPressure)
 					glog.Infof("node pressure condition: %s", pressure)
 
 					// NOTE/TODO(mtaufen): Also log (at least temporarily) the actual memory consumption on the node.
@@ -198,15 +198,15 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
 
 					}
 
-					if bestPh == api.PodRunning {
-						Expect(burstPh).NotTo(Equal(api.PodFailed), "burstable pod failed before best effort pod")
-						Expect(gteedPh).NotTo(Equal(api.PodFailed), "guaranteed pod failed before best effort pod")
-					} else if burstPh == api.PodRunning {
-						Expect(gteedPh).NotTo(Equal(api.PodFailed), "guaranteed pod failed before burstable pod")
+					if bestPh == v1.PodRunning {
+						Expect(burstPh).NotTo(Equal(v1.PodFailed), "burstable pod failed before best effort pod")
+						Expect(gteedPh).NotTo(Equal(v1.PodFailed), "guaranteed pod failed before best effort pod")
+					} else if burstPh == v1.PodRunning {
+						Expect(gteedPh).NotTo(Equal(v1.PodFailed), "guaranteed pod failed before burstable pod")
 					}
 
 					// When both besteffort and burstable have been evicted, the test has completed.
-					if bestPh == api.PodFailed && burstPh == api.PodFailed {
+					if bestPh == v1.PodFailed && burstPh == v1.PodFailed {
 						return nil
 					}
 					return fmt.Errorf("besteffort and burstable have not yet both been evicted.")
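
A sketch of the eviction-order invariant those phase comparisons encode, factored into a standalone function. The function name and packaging are illustrative; the comparisons mirror the hunk above: besteffort is expected to fail first, then burstable, and the guaranteed pod should outlive both.

// evictionorder_sketch.go (illustrative names only)
package e2enode

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// checkEvictionOrder returns an error if a higher-QoS pod has failed while a
// lower-QoS pod is still running.
func checkEvictionOrder(bestPh, burstPh, gteedPh v1.PodPhase) error {
	if bestPh == v1.PodRunning {
		if burstPh == v1.PodFailed {
			return fmt.Errorf("burstable pod failed before best effort pod")
		}
		if gteedPh == v1.PodFailed {
			return fmt.Errorf("guaranteed pod failed before best effort pod")
		}
	} else if burstPh == v1.PodRunning {
		if gteedPh == v1.PodFailed {
			return fmt.Errorf("guaranteed pod failed before burstable pod")
		}
	}
	return nil
}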
@@ -219,12 +219,12 @@ var _ = framework.KubeDescribe("MemoryEviction [Slow] [Serial] [Disruptive]", fu
 
 })
 
-func createMemhogPod(f *framework.Framework, genName string, ctnName string, res api.ResourceRequirements) *api.Pod {
-	env := []api.EnvVar{
+func createMemhogPod(f *framework.Framework, genName string, ctnName string, res v1.ResourceRequirements) *v1.Pod {
+	env := []v1.EnvVar{
 		{
 			Name: "MEMORY_LIMIT",
-			ValueFrom: &api.EnvVarSource{
-				ResourceFieldRef: &api.ResourceFieldSelector{
+			ValueFrom: &v1.EnvVarSource{
+				ResourceFieldRef: &v1.ResourceFieldSelector{
 					Resource: "limits.memory",
 				},
 			},
@@ -243,13 +243,13 @@ func createMemhogPod(f *framework.Framework, genName string, ctnName string, res
 		memLimit = "$(MEMORY_LIMIT)"
 	}
 
-	pod := &api.Pod{
-		ObjectMeta: api.ObjectMeta{
+	pod := &v1.Pod{
+		ObjectMeta: v1.ObjectMeta{
 			GenerateName: genName,
 		},
-		Spec: api.PodSpec{
-			RestartPolicy: api.RestartPolicyNever,
-			Containers: []api.Container{
+		Spec: v1.PodSpec{
+			RestartPolicy: v1.RestartPolicyNever,
+			Containers: []v1.Container{
 				{
 					Name: ctnName,
 					Image: "gcr.io/google-containers/stress:v1",
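
The MEMORY_LIMIT environment variable built in createMemhogPod is a downward-API reference to the container's own limits.memory, and Kubernetes expands $(VAR) references in a container's args against its environment. That is why memLimit above can fall back to the literal "$(MEMORY_LIMIT)" when no explicit memory limit is computed. A minimal sketch of the wiring follows; the flag name passed to the stress image is a placeholder, not the test's actual argument.

// stress_sketch.go (illustrative names only)
package e2enode

import "k8s.io/kubernetes/pkg/api/v1"

// memhogContainer shows how the downward-API env var and the $(MEMORY_LIMIT)
// reference fit together: the EnvVar resolves limits.memory at runtime, and a
// "$(MEMORY_LIMIT)" token in Args is substituted with that value by the kubelet.
func memhogContainer(name, memLimit string) v1.Container {
	return v1.Container{
		Name:  name,
		Image: "gcr.io/google-containers/stress:v1",
		Env: []v1.EnvVar{
			{
				Name: "MEMORY_LIMIT",
				ValueFrom: &v1.EnvVarSource{
					ResourceFieldRef: &v1.ResourceFieldSelector{
						Resource: "limits.memory",
					},
				},
			},
		},
		// "-mem-total" is a placeholder flag name; memLimit may be a computed
		// value or the literal "$(MEMORY_LIMIT)", expanded from Env at start.
		Args: []string{"-mem-total", memLimit},
	}
}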