Merge pull request #20939 from ixdy/use-framework

Auto commit by PR queue bot
k8s-merge-robot 2016-02-11 05:18:39 -08:00
commit 3522f7bea8
5 changed files with 18 additions and 52 deletions
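All five files receive the same mechanical migration: instead of each spec declaring its own *client.Client and populating it via loadClient() in a BeforeEach, the spec constructs the shared e2e Framework, which registers its own per-test setup and exposes the client as f.Client. A minimal before/after sketch of the pattern, assuming the Framework field names implied by the f.Client usage in the diffs below (doSomething is a hypothetical stand-in for a suite's test body):

package e2e

import (
	. "github.com/onsi/ginkgo"
	client "k8s.io/kubernetes/pkg/client/unversioned"
)

// Before: every suite carried its own client-loading boilerplate.
var _ = Describe("Example (before)", func() {
	var c *client.Client

	BeforeEach(func() {
		var err error
		c, err = loadClient() // repeated in each spec file
		expectNoError(err)
	})

	It("runs against the cluster", func() {
		doSomething(c) // doSomething is hypothetical
	})
})

// After: NewFramework registers the setup itself, so the spec only
// references f.Client.
var _ = Describe("Example (after)", func() {
	f := NewFramework("example")

	It("runs against the cluster", func() {
		doSomething(f.Client)
	})
})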

@@ -32,16 +32,11 @@ const (
 )
 
 var _ = Describe("Cadvisor", func() {
-	var c *client.Client
-
-	BeforeEach(func() {
-		var err error
-		c, err = loadClient()
-		expectNoError(err)
-	})
+	f := NewFramework("cadvisor")
 
 	It("should be healthy on every node.", func() {
-		CheckCadvisorHealthOnAllNodes(c, 5*time.Minute)
+		CheckCadvisorHealthOnAllNodes(f.Client, 5*time.Minute)
 	})
 })

@@ -33,18 +33,14 @@ import (
 // TODO: quinton: debug issue #6541 and then remove Pending flag here.
 var _ = Describe("[Flaky] Monitoring", func() {
-	var c *client.Client
+	f := NewFramework("monitoring")
 
 	BeforeEach(func() {
-		var err error
-		c, err = loadClient()
-		expectNoError(err)
-
 		SkipUnlessProviderIs("gce")
 	})
 
 	It("should verify monitoring pods and all cluster nodes are available on influxdb using heapster.", func() {
-		testMonitoringUsingHeapsterInfluxdb(c)
+		testMonitoringUsingHeapsterInfluxdb(f.Client)
 	})
 })

@@ -105,22 +105,12 @@ func extinguish(c *client.Client, totalNS int, maxAllowedAfterDel int, maxSecond
 // rate of approximately 1 per second.
 var _ = Describe("Namespaces [Serial]", func() {
-	//This namespace is modified throughout the course of the test.
-	var c *client.Client
-	var err error = nil
-	BeforeEach(func() {
-		By("Creating a kubernetes client")
-		c, err = loadClient()
-		Expect(err).NotTo(HaveOccurred())
-	})
-
-	AfterEach(func() {
-	})
+	f := NewFramework("namespaces")
 
 	It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)",
-		func() { extinguish(c, 100, 10, 150) })
+		func() { extinguish(f.Client, 100, 10, 150) })
 
 	// On hold until etcd3; see #7372
 	It("should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]",
-		func() { extinguish(c, 100, 0, 150) })
+		func() { extinguish(f.Client, 100, 0, 150) })
 })

@@ -49,37 +49,28 @@ const (
 )
 
 var _ = Describe("Restart [Disruptive]", func() {
-	var c *client.Client
+	f := NewFramework("restart")
 	var ps *podStore
 	var skipped bool
 
 	BeforeEach(func() {
-		var err error
-		c, err = loadClient()
-		Expect(err).NotTo(HaveOccurred())
-
 		// This test requires the ability to restart all nodes, so the provider
 		// check must be identical to that call.
 		skipped = true
 		SkipUnlessProviderIs("gce", "gke")
 		skipped = false
-		ps = newPodStore(c, api.NamespaceSystem, labels.Everything(), fields.Everything())
+		ps = newPodStore(f.Client, api.NamespaceSystem, labels.Everything(), fields.Everything())
 	})
 
 	AfterEach(func() {
 		if skipped {
 			return
 		}
 		if ps != nil {
 			ps.Stop()
 		}
 	})
 
 	It("should restart all nodes and ensure all nodes and pods recover", func() {
 		nn := testContext.CloudConfig.NumNodes
 
 		By("ensuring all nodes are ready")
-		nodeNamesBefore, err := checkNodesReady(c, nodeReadyInitialTimeout, nn)
+		nodeNamesBefore, err := checkNodesReady(f.Client, nodeReadyInitialTimeout, nn)
 		Expect(err).NotTo(HaveOccurred())
 		Logf("Got the following nodes before restart: %v", nodeNamesBefore)
@@ -90,7 +81,7 @@ var _ = Describe("Restart [Disruptive]", func() {
 			podNamesBefore[i] = p.ObjectMeta.Name
 		}
 		ns := api.NamespaceSystem
-		if !checkPodsRunningReady(c, ns, podNamesBefore, podReadyBeforeTimeout) {
+		if !checkPodsRunningReady(f.Client, ns, podNamesBefore, podReadyBeforeTimeout) {
 			Failf("At least one pod wasn't running and ready at test start.")
 		}
@@ -99,7 +90,7 @@ var _ = Describe("Restart [Disruptive]", func() {
 		Expect(err).NotTo(HaveOccurred())
 
 		By("ensuring all nodes are ready after the restart")
-		nodeNamesAfter, err := checkNodesReady(c, restartNodeReadyAgainTimeout, nn)
+		nodeNamesAfter, err := checkNodesReady(f.Client, restartNodeReadyAgainTimeout, nn)
 		Expect(err).NotTo(HaveOccurred())
 		Logf("Got the following nodes after restart: %v", nodeNamesAfter)
@@ -119,7 +110,7 @@ var _ = Describe("Restart [Disruptive]", func() {
 		podNamesAfter, err := waitForNPods(ps, len(podNamesBefore), restartPodReadyAgainTimeout)
 		Expect(err).NotTo(HaveOccurred())
 		remaining := restartPodReadyAgainTimeout - time.Since(podCheckStart)
-		if !checkPodsRunningReady(c, ns, podNamesAfter, remaining) {
+		if !checkPodsRunningReady(f.Client, ns, podNamesAfter, remaining) {
 			Failf("At least one pod wasn't running and ready after the restart.")
 		}
 	})

@@ -20,20 +20,14 @@ import (
 	"fmt"
 	"strings"
 
-	client "k8s.io/kubernetes/pkg/client/unversioned"
-
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 )
 
 var _ = Describe("SSH", func() {
-	var c *client.Client
+	f := NewFramework("ssh")
 
 	BeforeEach(func() {
-		var err error
-		c, err = loadClient()
-		Expect(err).NotTo(HaveOccurred())
-
 		// When adding more providers here, also implement their functionality in util.go's getSigner(...).
 		SkipUnlessProviderIs(providersWithSSH...)
 	})
@@ -41,7 +35,7 @@ var _ = Describe("SSH", func() {
 	It("should SSH to all nodes and run commands", func() {
 		// Get all nodes' external IPs.
 		By("Getting all nodes' SSH-able IP addresses")
-		hosts, err := NodeSSHHosts(c)
+		hosts, err := NodeSSHHosts(f.Client)
 		if err != nil {
 			Failf("Error getting node hostnames: %v", err)
 		}