Update test/e2e for test/e2e/framework refactoring

Tim St. Clair
2016-04-07 10:21:31 -07:00
parent a55b4f2e77
commit b0d3f32e88
88 changed files with 2969 additions and 2887 deletions


@@ -27,6 +27,7 @@ import (
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
@@ -64,19 +65,19 @@ const (
// 7. Observe that the pod in pending status schedules on that node.
//
// Flaky issue #20015. We have no clear path for how to test this functionality in a non-flaky way.
-var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
+var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
var c *client.Client
var unfilledNodeName, recoveredNodeName string
-framework := NewDefaultFramework("node-outofdisk")
+f := framework.NewDefaultFramework("node-outofdisk")
BeforeEach(func() {
-c = framework.Client
+c = f.Client
-nodelist := ListSchedulableNodesOrDie(c)
+nodelist := framework.ListSchedulableNodesOrDie(c)
// Skip this test on small clusters. No need to fail since it is not a use
// case that any cluster of small size needs to support.
-SkipUnlessNodeCountIsAtLeast(2)
+framework.SkipUnlessNodeCountIsAtLeast(2)
unfilledNodeName = nodelist.Items[0].Name
for _, node := range nodelist.Items[1:] {
@@ -86,7 +87,7 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
AfterEach(func() {
-nodelist := ListSchedulableNodesOrDie(c)
+nodelist := framework.ListSchedulableNodesOrDie(c)
Expect(len(nodelist.Items)).ToNot(BeZero())
for _, node := range nodelist.Items {
if unfilledNodeName == node.Name || recoveredNodeName == node.Name {
@@ -98,11 +99,11 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
It("runs out of disk space", func() {
unfilledNode, err := c.Nodes().Get(unfilledNodeName)
-expectNoError(err)
+framework.ExpectNoError(err)
By(fmt.Sprintf("Calculating CPU availability on node %s", unfilledNode.Name))
milliCpu, err := availCpu(c, unfilledNode)
-expectNoError(err)
+framework.ExpectNoError(err)
// Per pod CPU should be just enough to fit only (numNodeOODPods - 1) pods on the given
// node. We compute this value by dividing the available CPU capacity on the node by
@@ -111,7 +112,7 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
// subtracting 1% from the value, we directly use 0.99 as the multiplier.
podCPU := int64(float64(milliCpu/(numNodeOODPods-1)) * 0.99)
-ns := framework.Namespace.Name
+ns := f.Namespace.Name
podClient := c.Pods(ns)
By("Creating pods and waiting for all but one pods to be scheduled")
@@ -120,9 +121,9 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
name := fmt.Sprintf("pod-node-outofdisk-%d", i)
createOutOfDiskPod(c, ns, name, podCPU)
-expectNoError(framework.WaitForPodRunning(name))
+framework.ExpectNoError(f.WaitForPodRunning(name))
pod, err := podClient.Get(name)
-expectNoError(err)
+framework.ExpectNoError(err)
Expect(pod.Spec.NodeName).To(Equal(unfilledNodeName))
}
@@ -140,7 +141,7 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
}.AsSelector()
options := api.ListOptions{FieldSelector: selector}
schedEvents, err := c.Events(ns).List(options)
-expectNoError(err)
+framework.ExpectNoError(err)
if len(schedEvents.Items) > 0 {
return true, nil
@@ -149,7 +150,7 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
}
})
-nodelist := ListSchedulableNodesOrDie(c)
+nodelist := framework.ListSchedulableNodesOrDie(c)
Expect(len(nodelist.Items)).To(BeNumerically(">", 1))
nodeToRecover := nodelist.Items[1]
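
The elided wrapper around the event check in the hunk above is a wait.Poll loop. A self-contained sketch of that pattern, assuming illustrative interval/timeout values and selector fields (the test's real constants and full selector live outside the visible hunk):

err := wait.Poll(2*time.Second, 5*time.Minute, func() (bool, error) {
	selector := fields.Set{
		"involvedObject.kind":      "Pod",
		"involvedObject.namespace": ns,
		"reason":                   "FailedScheduling", // assumed event reason
	}.AsSelector()
	options := api.ListOptions{FieldSelector: selector}
	schedEvents, err := c.Events(ns).List(options)
	framework.ExpectNoError(err)
	// Done once at least one matching scheduler event has been recorded.
	return len(schedEvents.Items) > 0, nil
})
framework.ExpectNoError(err)
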
@@ -159,9 +160,9 @@ var _ = KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", func() {
recoveredNodeName = nodeToRecover.Name
By(fmt.Sprintf("Verifying that pod %s schedules on node %s", pendingPodName, recoveredNodeName))
-expectNoError(framework.WaitForPodRunning(pendingPodName))
+framework.ExpectNoError(f.WaitForPodRunning(pendingPodName))
pendingPod, err := podClient.Get(pendingPodName)
-expectNoError(err)
+framework.ExpectNoError(err)
Expect(pendingPod.Spec.NodeName).To(Equal(recoveredNodeName))
})
})
@@ -191,7 +192,7 @@ func createOutOfDiskPod(c *client.Client, ns, name string, milliCPU int64) {
}
_, err := podClient.Create(pod)
-expectNoError(err)
+framework.ExpectNoError(err)
}
// availCpu calculates the available CPU on a given node by subtracting the CPU requested by
@@ -218,7 +219,7 @@ func availCpu(c *client.Client, node *api.Node) (int64, error) {
// is in turn obtained internally from cadvisor.
func availSize(c *client.Client, node *api.Node) (uint64, error) {
statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
Logf("Querying stats for node %s using url %s", node.Name, statsResource)
framework.Logf("Querying stats for node %s using url %s", node.Name, statsResource)
res, err := c.Get().AbsPath(statsResource).Timeout(time.Minute).Do().Raw()
if err != nil {
return 0, fmt.Errorf("error querying cAdvisor API: %v", err)
@@ -236,21 +237,21 @@ func availSize(c *client.Client, node *api.Node) (uint64, error) {
// below the lowDiskSpaceThreshold mark.
func fillDiskSpace(c *client.Client, node *api.Node) {
avail, err := availSize(c, node)
expectNoError(err, "Node %s: couldn't obtain available disk size %v", node.Name, err)
framework.ExpectNoError(err, "Node %s: couldn't obtain available disk size %v", node.Name, err)
fillSize := (avail - lowDiskSpaceThreshold + (100 * mb))
Logf("Node %s: disk space available %d bytes", node.Name, avail)
framework.Logf("Node %s: disk space available %d bytes", node.Name, avail)
By(fmt.Sprintf("Node %s: creating a file of size %d bytes to fill the available disk space", node.Name, fillSize))
cmd := fmt.Sprintf("fallocate -l %d test.img", fillSize)
-expectNoError(issueSSHCommand(cmd, testContext.Provider, node))
+framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node))
-ood := waitForNodeToBe(c, node.Name, api.NodeOutOfDisk, true, nodeOODTimeOut)
+ood := framework.WaitForNodeToBe(c, node.Name, api.NodeOutOfDisk, true, nodeOODTimeOut)
Expect(ood).To(BeTrue(), "Node %s did not run out of disk within %v", node.Name, nodeOODTimeOut)
avail, err = availSize(c, node)
Logf("Node %s: disk space available %d bytes", node.Name, avail)
framework.Logf("Node %s: disk space available %d bytes", node.Name, avail)
Expect(avail < lowDiskSpaceThreshold).To(BeTrue())
}
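
The fillSize arithmetic above aims to leave the node roughly 100 MB below the low-disk-space mark. A worked example with hypothetical numbers (lowDiskSpaceThreshold and mb are consts defined outside this diff; the values below are assumptions):

avail := uint64(20 << 30)                  // assume 20 GiB currently free
lowDiskSpaceThreshold := uint64(256 << 20) // assume a 256 MiB threshold
mb := uint64(1 << 20)
fillSize := avail - lowDiskSpaceThreshold + (100 * mb)
// After writing fillSize bytes, free space is avail - fillSize, i.e.
// lowDiskSpaceThreshold - 100*mb (~156 MiB here): safely under the
// threshold, so the NodeOutOfDisk condition should flip to true.
_ = fillSize
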
@@ -258,8 +259,8 @@ func fillDiskSpace(c *client.Client, node *api.Node) {
func recoverDiskSpace(c *client.Client, node *api.Node) {
By(fmt.Sprintf("Recovering disk space on node %s", node.Name))
cmd := "rm -f test.img"
-expectNoError(issueSSHCommand(cmd, testContext.Provider, node))
+framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node))
-ood := waitForNodeToBe(c, node.Name, api.NodeOutOfDisk, false, nodeOODTimeOut)
+ood := framework.WaitForNodeToBe(c, node.Name, api.NodeOutOfDisk, false, nodeOODTimeOut)
Expect(ood).To(BeTrue(), "Node %s's out of disk condition status did not change to false within %v", node.Name, nodeOODTimeOut)
}