Make all e2e tests use Framework

Jeff Grafton 2016-02-09 15:50:07 -08:00
parent c1e79e4264
commit 220b5e3e8e
5 changed files with 18 additions and 52 deletions
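
Every file below gets the same treatment: the hand-rolled `var c *client.Client` plus `loadClient()` boilerplate in each BeforeEach is replaced by a shared Framework object created with `NewFramework(...)`, and call sites switch from `c` to `f.Client`. As a minimal sketch of the surface these diffs assume (the real helper lived in test/e2e/framework.go, also managed a per-test namespace, and differs in detail; `BaseName` is an assumed field name):

	// Sketch only: approximates the Framework surface the converted tests rely on.
	package e2e

	import (
		. "github.com/onsi/ginkgo"

		client "k8s.io/kubernetes/pkg/client/unversioned"
	)

	// Framework owns the per-spec client so individual tests no longer load it.
	type Framework struct {
		BaseName string         // assumed field name for the "cadvisor"/"ssh"/... prefix
		Client   *client.Client // what the converted call sites reach via f.Client
	}

	// NewFramework registers Ginkgo hooks at Describe-construction time, which is
	// why converted specs call it directly in the Describe body rather than inside
	// their own BeforeEach.
	func NewFramework(baseName string) *Framework {
		f := &Framework{BaseName: baseName}
		BeforeEach(func() {
			// loadClient and expectNoError are the existing e2e helpers that the
			// old per-test boilerplate called; the Framework centralizes them.
			c, err := loadClient()
			expectNoError(err)
			f.Client = c
		})
		AfterEach(func() {
			f.Client = nil
		})
		return f
	}

A skip raised in a BeforeEach prevents later setup from running while AfterEach still executes, which is also why the restart test below can replace its `skipped` flag with a simple `ps != nil` check.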

test/e2e/cadvisor.go

@@ -32,16 +32,11 @@ const (
 )
 
 var _ = Describe("Cadvisor", func() {
-	var c *client.Client
 
-	BeforeEach(func() {
-		var err error
-		c, err = loadClient()
-		expectNoError(err)
-	})
+	f := NewFramework("cadvisor")
 
 	It("should be healthy on every node.", func() {
-		CheckCadvisorHealthOnAllNodes(c, 5*time.Minute)
+		CheckCadvisorHealthOnAllNodes(f.Client, 5*time.Minute)
 	})
 })

test/e2e/monitoring.go

@@ -33,18 +33,14 @@ import (
 // TODO: quinton: debug issue #6541 and then remove Pending flag here.
 var _ = Describe("[Flaky] Monitoring", func() {
-	var c *client.Client
+	f := NewFramework("monitoring")
 
 	BeforeEach(func() {
-		var err error
-		c, err = loadClient()
-		expectNoError(err)
-
 		SkipUnlessProviderIs("gce")
 	})
 
 	It("should verify monitoring pods and all cluster nodes are available on influxdb using heapster.", func() {
-		testMonitoringUsingHeapsterInfluxdb(c)
+		testMonitoringUsingHeapsterInfluxdb(f.Client)
 	})
 })

test/e2e/namespace.go

@@ -105,22 +105,12 @@ func extinguish(c *client.Client, totalNS int, maxAllowedAfterDel int, maxSecond
 // rate of approximately 1 per second.
 var _ = Describe("Namespaces [Serial]", func() {
-	//This namespace is modified throughout the course of the test.
-	var c *client.Client
-	var err error = nil
-	BeforeEach(func() {
-		By("Creating a kubernetes client")
-		c, err = loadClient()
-		Expect(err).NotTo(HaveOccurred())
-	})
-
-	AfterEach(func() {
-	})
+	f := NewFramework("namespaces")
 
 	It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)",
-		func() { extinguish(c, 100, 10, 150) })
+		func() { extinguish(f.Client, 100, 10, 150) })
 
 	// On hold until etcd3; see #7372
 	It("should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]",
-		func() { extinguish(c, 100, 0, 150) })
+		func() { extinguish(f.Client, 100, 0, 150) })
 })

test/e2e/restart.go

@@ -49,37 +49,28 @@ const (
 )
 
 var _ = Describe("Restart [Disruptive]", func() {
-	var c *client.Client
+	f := NewFramework("restart")
 	var ps *podStore
-	var skipped bool
 
 	BeforeEach(func() {
-		var err error
-		c, err = loadClient()
-		Expect(err).NotTo(HaveOccurred())
-
 		// This test requires the ability to restart all nodes, so the provider
 		// check must be identical to that call.
-		skipped = true
 		SkipUnlessProviderIs("gce", "gke")
-		skipped = false
 
-		ps = newPodStore(c, api.NamespaceSystem, labels.Everything(), fields.Everything())
+		ps = newPodStore(f.Client, api.NamespaceSystem, labels.Everything(), fields.Everything())
 	})
 
 	AfterEach(func() {
-		if skipped {
-			return
+		if ps != nil {
+			ps.Stop()
 		}
-		ps.Stop()
 	})
 
 	It("should restart all nodes and ensure all nodes and pods recover", func() {
 		nn := testContext.CloudConfig.NumNodes
 
 		By("ensuring all nodes are ready")
-		nodeNamesBefore, err := checkNodesReady(c, nodeReadyInitialTimeout, nn)
+		nodeNamesBefore, err := checkNodesReady(f.Client, nodeReadyInitialTimeout, nn)
 		Expect(err).NotTo(HaveOccurred())
 		Logf("Got the following nodes before restart: %v", nodeNamesBefore)
@@ -90,7 +81,7 @@ var _ = Describe("Restart [Disruptive]", func() {
 			podNamesBefore[i] = p.ObjectMeta.Name
 		}
 		ns := api.NamespaceSystem
-		if !checkPodsRunningReady(c, ns, podNamesBefore, podReadyBeforeTimeout) {
+		if !checkPodsRunningReady(f.Client, ns, podNamesBefore, podReadyBeforeTimeout) {
 			Failf("At least one pod wasn't running and ready at test start.")
 		}
@@ -99,7 +90,7 @@ var _ = Describe("Restart [Disruptive]", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("ensuring all nodes are ready after the restart")
-		nodeNamesAfter, err := checkNodesReady(c, restartNodeReadyAgainTimeout, nn)
+		nodeNamesAfter, err := checkNodesReady(f.Client, restartNodeReadyAgainTimeout, nn)
 		Expect(err).NotTo(HaveOccurred())
 		Logf("Got the following nodes after restart: %v", nodeNamesAfter)
@@ -119,7 +110,7 @@ var _ = Describe("Restart [Disruptive]", func() {
 		podNamesAfter, err := waitForNPods(ps, len(podNamesBefore), restartPodReadyAgainTimeout)
 		Expect(err).NotTo(HaveOccurred())
 		remaining := restartPodReadyAgainTimeout - time.Since(podCheckStart)
-		if !checkPodsRunningReady(c, ns, podNamesAfter, remaining) {
+		if !checkPodsRunningReady(f.Client, ns, podNamesAfter, remaining) {
 			Failf("At least one pod wasn't running and ready after the restart.")
 		}
 	})

test/e2e/ssh.go

@@ -20,20 +20,14 @@ import (
 	"fmt"
 	"strings"
 
-	client "k8s.io/kubernetes/pkg/client/unversioned"
-
 	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
 )
 
 var _ = Describe("SSH", func() {
-	var c *client.Client
+	f := NewFramework("ssh")
 
 	BeforeEach(func() {
-		var err error
-		c, err = loadClient()
-		Expect(err).NotTo(HaveOccurred())
 		// When adding more providers here, also implement their functionality in util.go's getSigner(...).
 		SkipUnlessProviderIs(providersWithSSH...)
 	})
@@ -41,7 +35,7 @@ var _ = Describe("SSH", func() {
 	It("should SSH to all nodes and run commands", func() {
 		// Get all nodes' external IPs.
 		By("Getting all nodes' SSH-able IP addresses")
-		hosts, err := NodeSSHHosts(c)
+		hosts, err := NodeSSHHosts(f.Client)
 		if err != nil {
 			Failf("Error getting node hostnames: %v", err)
 		}
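
After the conversion each spec has almost no setup of its own. Assembled from the post-change (+) lines of the first diff above, the entire cadvisor test now reads:

	var _ = Describe("Cadvisor", func() {
		f := NewFramework("cadvisor")

		It("should be healthy on every node.", func() {
			CheckCadvisorHealthOnAllNodes(f.Client, 5*time.Minute)
		})
	})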