Merge pull request #16358 from kubernetes/revert-16084-improve_e2e

Revert "All e2e tests should use NewFramework"
Jerzy Szczepkowski 2015-10-27 13:53:36 +01:00
commit 465c5b0791
11 changed files with 131 additions and 101 deletions
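
The change being reverted had every e2e test obtain its Framework through the NewFramework constructor; this commit goes back to building a Framework literal and wiring the namespace lifecycle by hand inside each test's own BeforeEach/AfterEach. Below is a minimal, self-contained sketch of the two styles. FakeFramework, newFramework, and the hook bodies are stand-ins rather than the real e2e code, and the sketch assumes NewFramework registered the Ginkgo hooks itself, which is what the explicit beforeEach()/afterEach() calls added back throughout this diff suggest.

package e2esketch

import (
	. "github.com/onsi/ginkgo"
)

// FakeFramework is a hypothetical stand-in for the e2e Framework struct.
type FakeFramework struct {
	BaseName string
}

func (f *FakeFramework) beforeEach() { /* create the test namespace, clients, ... */ }
func (f *FakeFramework) afterEach()  { /* collect logs, delete the namespace, ... */ }

// newFramework mirrors the constructor style being reverted: the helper
// registers the lifecycle hooks itself, so tests never call them directly.
func newFramework(baseName string) *FakeFramework {
	f := &FakeFramework{BaseName: baseName}
	BeforeEach(f.beforeEach)
	AfterEach(f.afterEach)
	return f
}

// Style removed by this revert: construction implies hook registration.
var _ = Describe("constructor style", func() {
	f := newFramework("example")
	It("gets a namespace without extra wiring", func() { _ = f })
})

// Style restored by this revert: the test owns the ordering of setup and
// teardown relative to its own BeforeEach/AfterEach bodies.
var _ = Describe("literal style", func() {
	f := FakeFramework{BaseName: "example"}
	BeforeEach(func() {
		f.beforeEach() // must run before any setup that needs the namespace
	})
	AfterEach(func() {
		defer f.afterEach() // deferred so it runs even if an assertion fails
		// test-specific cleanup goes here
	})
	It("gets a namespace set up explicitly", func() { _ = f })
})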


@@ -92,7 +92,7 @@ var masterPush = func(_ string) error {
return err
}
var nodeUpgrade = func(f *Framework, replicas int, v string) error {
var nodeUpgrade = func(f Framework, replicas int, v string) error {
// Perform the upgrade.
var err error
switch testContext.Provider {
@@ -150,6 +150,8 @@ var _ = Describe("Skipped", func() {
svcName, replicas := "baz", 2
var rcName, ip, v string
var ingress api.LoadBalancerIngress
f := Framework{BaseName: "cluster-upgrade"}
var w *WebserverTest
BeforeEach(func() {
// The version is determined once at the beginning of the test so that
@@ -160,12 +162,9 @@ var _ = Describe("Skipped", func() {
v, err = realVersion(testContext.UpgradeTarget)
expectNoError(err)
Logf("Version for %q is %q", testContext.UpgradeTarget, v)
})
f := NewFramework("cluster-upgrade")
var w *WebserverTest
BeforeEach(func() {
By("Setting up the service, RC, and pods")
f.beforeEach()
w = NewWebserverTest(f.Client, f.Namespace.Name, svcName)
rc := w.CreateWebserverRC(replicas)
rcName = rc.ObjectMeta.Name
@@ -193,7 +192,9 @@ var _ = Describe("Skipped", func() {
// - volumes
// - persistent volumes
})
AfterEach(func() {
f.afterEach()
w.Cleanup()
})
@@ -344,7 +345,7 @@ func checkMasterVersion(c *client.Client, want string) error {
return nil
}
func testNodeUpgrade(f *Framework, nUp func(f *Framework, n int, v string) error, replicas int, v string) {
func testNodeUpgrade(f Framework, nUp func(f Framework, n int, v string) error, replicas int, v string) {
Logf("Starting node upgrade")
expectNoError(nUp(f, replicas, v))
Logf("Node upgrade complete")
@@ -411,7 +412,7 @@ func runCmd(command string, args ...string) (string, string, error) {
return stdout, stderr, nil
}
func validate(f *Framework, svcNameWant, rcNameWant string, ingress api.LoadBalancerIngress, podsWant int) error {
func validate(f Framework, svcNameWant, rcNameWant string, ingress api.LoadBalancerIngress, podsWant int) error {
Logf("Beginning cluster validation")
// Verify RC.
rcs, err := f.Client.ReplicationControllers(f.Namespace.Name).List(labels.Everything(), fields.Everything())


@@ -29,14 +29,17 @@ import (
)
var _ = Describe("Probing container", func() {
framework := NewFramework("container-probe")
framework := Framework{BaseName: "container-probe"}
var podClient client.PodInterface
probe := webserverProbeBuilder{}
BeforeEach(func() {
framework.beforeEach()
podClient = framework.Client.Pods(framework.Namespace.Name)
})
AfterEach(framework.afterEach)
It("with readiness probe should not be ready before initial delay and never restart [Conformance]", func() {
p, err := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
expectNoError(err)


@@ -185,7 +185,7 @@ func getContainerRestarts(c *client.Client, ns string, labelSelector labels.Sele
var _ = Describe("DaemonRestart", func() {
framework := NewFramework("daemonrestart")
framework := Framework{BaseName: "daemonrestart"}
rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(util.NewUUID())
labelSelector := labels.Set(map[string]string{"name": rcName}).AsSelector()
existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc)
@@ -197,9 +197,11 @@ var _ = Describe("DaemonRestart", func() {
var tracker *podTracker
BeforeEach(func() {
// These tests require SSH
// TODO(11834): Enable this test in GKE once experimental API there is switched on
SkipUnlessProviderIs("gce", "aws")
framework.beforeEach()
ns = framework.Namespace.Name
// All the restart tests need an rc and a watch on pods of the rc.
@@ -244,6 +246,7 @@ var _ = Describe("DaemonRestart", func() {
})
AfterEach(func() {
defer framework.afterEach()
close(stopCh)
expectNoError(DeleteRC(framework.Client, ns, rcName))
})


@@ -48,14 +48,7 @@ const (
)
var _ = Describe("Daemon set", func() {
var f *Framework
AfterEach(func() {
err := clearDaemonSetNodeLabels(f.Client)
Expect(err).NotTo(HaveOccurred())
})
f = NewFramework("daemonsets")
f := &Framework{BaseName: "daemonsets"}
image := "gcr.io/google_containers/serve_hostname:1.1"
dsName := "daemon-set"
@@ -64,12 +57,19 @@ var _ = Describe("Daemon set", func() {
var c *client.Client
BeforeEach(func() {
f.beforeEach()
ns = f.Namespace.Name
c = f.Client
err := clearDaemonSetNodeLabels(c)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
defer f.afterEach()
err := clearDaemonSetNodeLabels(f.Client)
Expect(err).NotTo(HaveOccurred())
})
It("should run and stop simple daemon", func() {
label := map[string]string{daemonsetNameLabel: dsName}


@@ -80,39 +80,10 @@ var _ = Describe("Density", func() {
var additionalPodsPrefix string
var ns string
var uuid string
// Gathers data prior to framework namespace teardown
AfterEach(func() {
// Remove any remaining pods from this test if the
// replication controller still exists and the replica count
// isn't 0. This means the controller wasn't cleaned up
// during the test so clean it up here. We want to do it separately
// to not cause a timeout on Namespace removal.
rc, err := c.ReplicationControllers(ns).Get(RCName)
if err == nil && rc.Spec.Replicas != 0 {
By("Cleaning up the replication controller")
err := DeleteRC(c, ns, RCName)
expectNoError(err)
}
By("Removing additional pods if any")
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
c.Pods(ns).Delete(name, nil)
}
expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))
// Verify latency metrics
highLatencyRequests, err := HighLatencyRequests(c)
expectNoError(err)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
})
framework := NewFramework("density")
framework.NamespaceDeletionTimeout = time.Hour
framework := Framework{BaseName: "density", NamespaceDeletionTimeout: time.Hour}
BeforeEach(func() {
framework.beforeEach()
c = framework.Client
ns = framework.Namespace.Name
var err error
@@ -144,6 +115,37 @@ var _ = Describe("Density", func() {
}
})
AfterEach(func() {
// We can't call it explicitly at the end, because it will not be called
// if Expect() fails.
defer framework.afterEach()
// Remove any remaining pods from this test if the
// replication controller still exists and the replica count
// isn't 0. This means the controller wasn't cleaned up
// during the test so clean it up here. We want to do it separately
// to not cause a timeout on Namespace removal.
rc, err := c.ReplicationControllers(ns).Get(RCName)
if err == nil && rc.Spec.Replicas != 0 {
By("Cleaning up the replication controller")
err := DeleteRC(c, ns, RCName)
expectNoError(err)
}
By("Removing additional pods if any")
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
c.Pods(ns).Delete(name, nil)
}
expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))
// Verify latency metrics
highLatencyRequests, err := HighLatencyRequests(c)
expectNoError(err)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
})
// Tests with "Skipped" substring in their name will be skipped when running
// e2e test suite without --ginkgo.focus & --ginkgo.skip flags.
type Density struct {

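The comment restored in the Density AfterEach above ("it will not be called if Expect() fails") is the reason afterEach is deferred rather than called last: a failing Ginkgo/Gomega assertion aborts the rest of the function body, and only deferred calls still run. A small stand-alone sketch of that behaviour follows; expectZero and the panic/recover pair are rough stand-ins for how a failed assertion aborts and is recovered, not Ginkgo API.

package main

import "fmt"

// expectZero stands in for a failing assertion: it panics, roughly the way a
// failed Expect(...) aborts the rest of a spec body.
func expectZero(n int) {
	if n != 0 {
		panic(fmt.Sprintf("expected 0, got %d", n))
	}
}

func cleanup() { fmt.Println("cleanup ran") }

// Cleanup written as a plain call after the assertion: skipped on failure.
func explicitCleanup() {
	defer func() { _ = recover() }() // stand-in for the framework catching the failure
	expectZero(7)
	cleanup() // never reached when expectZero fails
}

// Cleanup deferred up front, as in the restored AfterEach blocks: still runs.
func deferredCleanup() {
	defer func() { _ = recover() }()
	defer cleanup()
	expectZero(7)
}

func main() {
	explicitCleanup() // prints nothing
	deferredCleanup() // prints "cleanup ran"
}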

@@ -31,7 +31,7 @@ import (
var _ = Describe("Etcd failure", func() {
var skipped bool
framework := NewFramework("etcd-failure")
framework := Framework{BaseName: "etcd-failure"}
BeforeEach(func() {
// This test requires:
@@ -43,6 +43,8 @@ var _ = Describe("Etcd failure", func() {
SkipUnlessProviderIs("gce")
skipped = false
framework.beforeEach()
Expect(RunRC(RCConfig{
Client: framework.Client,
Name: "baz",
@@ -52,6 +54,14 @@ var _ = Describe("Etcd failure", func() {
})).NotTo(HaveOccurred())
})
AfterEach(func() {
if skipped {
return
}
framework.afterEach()
})
It("should recover from network partition with master", func() {
etcdFailTest(
framework,
@@ -69,12 +79,12 @@ var _ = Describe("Etcd failure", func() {
})
})
func etcdFailTest(framework *Framework, failCommand, fixCommand string) {
func etcdFailTest(framework Framework, failCommand, fixCommand string) {
doEtcdFailure(failCommand, fixCommand)
checkExistingRCRecovers(framework)
ServeImageOrFail(framework, "basic", "gcr.io/google_containers/serve_hostname:1.1")
ServeImageOrFail(&framework, "basic", "gcr.io/google_containers/serve_hostname:1.1")
}
// For this duration, etcd will be failed by executing a failCommand on the master.
@@ -100,7 +110,7 @@ func masterExec(cmd string) {
}
}
func checkExistingRCRecovers(f *Framework) {
func checkExistingRCRecovers(f Framework) {
By("assert that the pre-existing replication controller recovers")
podClient := f.Client.Pods(f.Namespace.Name)
rcSelector := labels.Set{"name": "baz"}.AsSelector()

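Besides the lifecycle wiring, the hunks in this file change the helper signatures from *Framework back to Framework, so callers pass the struct by value and take its address only where a helper such as ServeImageOrFail still expects a pointer. A tiny sketch of that calling convention; the stub type and helper names below are illustrative, not the real ones.

package e2esketch

// Framework is a stub with just enough shape to show the calling convention.
type Framework struct{ BaseName string }

// Reverted-to style: the helper takes the framework by value (a copy).
func etcdFailTestSketch(f Framework) string { return f.BaseName }

// A helper that still wants a pointer is handed the value's address.
func serveImageOrFailSketch(f *Framework) string { return f.BaseName }

func example() {
	framework := Framework{BaseName: "etcd-failure"}
	_ = etcdFailTestSketch(framework)       // pass by value
	_ = serveImageOrFailSketch(&framework)  // pass the address where a pointer is expected
}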

@@ -45,31 +45,10 @@ var _ = Describe("[Performance Suite] Latency", func() {
var additionalPodsPrefix string
var ns string
var uuid string
AfterEach(func() {
By("Removing additional pods if any")
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
c.Pods(ns).Delete(name, nil)
}
By(fmt.Sprintf("Destroying namespace for this suite %v", ns))
if err := c.Namespaces().Delete(ns); err != nil {
Failf("Couldn't delete ns %s", err)
}
expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))
// Verify latency metrics
highLatencyRequests, err := HighLatencyRequests(c)
expectNoError(err)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
})
framework := NewFramework("latency")
framework.NamespaceDeletionTimeout = time.Hour
framework := Framework{BaseName: "latency", NamespaceDeletionTimeout: time.Hour}
BeforeEach(func() {
framework.beforeEach()
c = framework.Client
ns = framework.Namespace.Name
var err error
@@ -100,6 +79,27 @@ var _ = Describe("[Performance Suite] Latency", func() {
}
})
AfterEach(func() {
defer framework.afterEach()
By("Removing additional pods if any")
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
c.Pods(ns).Delete(name, nil)
}
By(fmt.Sprintf("Destroying namespace for this suite %v", ns))
if err := c.Namespaces().Delete(ns); err != nil {
Failf("Couldn't delete ns %s", err)
}
expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))
// Verify latency metrics
highLatencyRequests, err := HighLatencyRequests(c)
expectNoError(err)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
})
// Skipped to avoid running in e2e
It("[Skipped] pod start latency should be acceptable", func() {
runLatencyTest(nodeCount, c, ns)


@@ -52,22 +52,10 @@ var _ = Describe("Load capacity", func() {
var nodeCount int
var ns string
var configs []*RCConfig
// Gathers metrics before teardown
// TODO add flag that allows to skip cleanup on failure
AfterEach(func() {
deleteAllRC(configs)
// Verify latency metrics
highLatencyRequests, err := HighLatencyRequests(c)
expectNoError(err, "Too many instances metrics above the threshold")
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0))
})
framework := NewFramework("load")
framework.NamespaceDeletionTimeout = time.Hour
framework := Framework{BaseName: "load", NamespaceDeletionTimeout: time.Hour}
BeforeEach(func() {
framework.beforeEach()
c = framework.Client
ns = framework.Namespace.Name
nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
@@ -84,6 +72,20 @@ var _ = Describe("Load capacity", func() {
expectNoError(resetMetrics(c))
})
// TODO add flag that allows to skip cleanup on failure
AfterEach(func() {
// We can't call it explicitly at the end, because it will not be called
// if Expect() fails.
defer framework.afterEach()
deleteAllRC(configs)
// Verify latency metrics
highLatencyRequests, err := HighLatencyRequests(c)
expectNoError(err, "Too many instances metrics above the threshold")
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0))
})
type Load struct {
podsPerNode int
image string


@@ -384,15 +384,18 @@ func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replica
}
var _ = Describe("Nodes", func() {
framework := NewFramework("resize-nodes")
framework := Framework{BaseName: "resize-nodes"}
var c *client.Client
var ns string
BeforeEach(func() {
framework.beforeEach()
c = framework.Client
ns = framework.Namespace.Name
})
AfterEach(framework.afterEach)
Describe("Resize", func() {
var skipped bool


@@ -175,13 +175,24 @@ func waitForStableCluster(c *client.Client) int {
}
var _ = Describe("SchedulerPredicates", func() {
framework := Framework{BaseName: "sched-pred"}
var c *client.Client
var nodeList *api.NodeList
var totalPodCapacity int64
var RCName string
var ns string
BeforeEach(func() {
framework.beforeEach()
c = framework.Client
ns = framework.Namespace.Name
var err error
nodeList, err = c.Nodes().List(labels.Everything(), fields.Everything())
expectNoError(err)
})
AfterEach(func() {
defer framework.afterEach()
rc, err := c.ReplicationControllers(ns).Get(RCName)
if err == nil && rc.Spec.Replicas != 0 {
By("Cleaning up the replication controller")
@@ -190,16 +201,6 @@ var _ = Describe("SchedulerPredicates", func() {
}
})
framework := NewFramework("sched-pred")
BeforeEach(func() {
c = framework.Client
ns = framework.Namespace.Name
var err error
nodeList, err = c.Nodes().List(labels.Everything(), fields.Everything())
expectNoError(err)
})
// This test verifies that max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
// and cannot be run in parallel with any other test that touches Nodes or Pods. It is so because to check
// if max-pods is working we need to fully saturate the cluster and keep it in this state for few seconds.


@@ -210,14 +210,19 @@ var _ = Describe("ServiceLoadBalancer", func() {
var repoRoot string
var client *client.Client
framework := NewFramework("servicelb")
framework := Framework{BaseName: "servicelb"}
BeforeEach(func() {
framework.beforeEach()
client = framework.Client
ns = framework.Namespace.Name
repoRoot = testContext.RepoRoot
})
AfterEach(func() {
framework.afterEach()
})
It("should support simple GET on Ingress ips", func() {
for _, t := range getLoadBalancerControllers(repoRoot, client) {
By(fmt.Sprintf("Starting loadbalancer controller %v in namespace %v", t.getName(), ns))