Merge pull request #16084 from smarterclayton/improve_e2e

Auto commit by PR queue bot
Committed by k8s-merge-robot on 2015-10-27 01:09:23 -07:00
commit 2d29d266e7
11 changed files with 101 additions and 131 deletions

Changed file: e2e test "cluster-upgrade"

@@ -92,7 +92,7 @@ var masterPush = func(_ string) error {
 	return err
 }
 
-var nodeUpgrade = func(f Framework, replicas int, v string) error {
+var nodeUpgrade = func(f *Framework, replicas int, v string) error {
 	// Perform the upgrade.
 	var err error
 	switch testContext.Provider {
@@ -150,8 +150,6 @@ var _ = Describe("Skipped", func() {
 	svcName, replicas := "baz", 2
 	var rcName, ip, v string
 	var ingress api.LoadBalancerIngress
-	f := Framework{BaseName: "cluster-upgrade"}
-	var w *WebserverTest
 
 	BeforeEach(func() {
 		// The version is determined once at the beginning of the test so that
@@ -162,9 +160,12 @@ var _ = Describe("Skipped", func() {
 		v, err = realVersion(testContext.UpgradeTarget)
 		expectNoError(err)
 		Logf("Version for %q is %q", testContext.UpgradeTarget, v)
+	})
+
+	f := NewFramework("cluster-upgrade")
+	var w *WebserverTest
+
+	BeforeEach(func() {
 		By("Setting up the service, RC, and pods")
-		f.beforeEach()
 		w = NewWebserverTest(f.Client, f.Namespace.Name, svcName)
 		rc := w.CreateWebserverRC(replicas)
 		rcName = rc.ObjectMeta.Name
@@ -192,9 +193,7 @@ var _ = Describe("Skipped", func() {
 		// - volumes
 		// - persistent volumes
 	})
 
 	AfterEach(func() {
-		f.afterEach()
 		w.Cleanup()
 	})
@@ -345,7 +344,7 @@ func checkMasterVersion(c *client.Client, want string) error {
 	return nil
 }
 
-func testNodeUpgrade(f Framework, nUp func(f Framework, n int, v string) error, replicas int, v string) {
+func testNodeUpgrade(f *Framework, nUp func(f *Framework, n int, v string) error, replicas int, v string) {
 	Logf("Starting node upgrade")
 	expectNoError(nUp(f, replicas, v))
 	Logf("Node upgrade complete")
@@ -412,7 +411,7 @@ func runCmd(command string, args ...string) (string, string, error) {
 	return stdout, stderr, nil
 }
 
-func validate(f Framework, svcNameWant, rcNameWant string, ingress api.LoadBalancerIngress, podsWant int) error {
+func validate(f *Framework, svcNameWant, rcNameWant string, ingress api.LoadBalancerIngress, podsWant int) error {
 	Logf("Beginning cluster validation")
 	// Verify RC.
 	rcs, err := f.Client.ReplicationControllers(f.Namespace.Name).List(labels.Everything(), fields.Everything())

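Every file in this commit applies the same two changes: the Framework{BaseName: ...} literal plus explicit framework.beforeEach()/framework.afterEach() calls are replaced by NewFramework(...), and helpers switch from a Framework value to a *Framework pointer. The constructor itself is not part of this diff; the sketch below is only an assumed shape of such a self-registering helper, included to make the pattern easier to follow.

// Assumed shape only, not code from this PR: a constructor that registers
// the framework's per-spec setup and teardown with Ginkgo, so individual
// specs no longer wire beforeEach/afterEach by hand.
package e2e

import (
	"time"

	. "github.com/onsi/ginkgo"
)

type Framework struct {
	BaseName                 string
	NamespaceDeletionTimeout time.Duration
	// Client, Namespace and the other fields of the real framework are elided here.
}

func (f *Framework) beforeEach() { /* build the client, create the test namespace */ }
func (f *Framework) afterEach()  { /* collect diagnostics, delete the test namespace */ }

func NewFramework(baseName string) *Framework {
	f := &Framework{BaseName: baseName}
	// Registering the hooks here means every Describe that calls NewFramework
	// gets the same per-spec setup and teardown without repeating the wiring.
	BeforeEach(f.beforeEach)
	AfterEach(f.afterEach)
	return f
}

Under that assumption, a spec just calls framework := NewFramework("foo") at the top of its Describe and reads framework.Client / framework.Namespace inside BeforeEach or It blocks, which is exactly the shape the diffs above and below move to.
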
Changed file: e2e test "container-probe"

@@ -29,17 +29,14 @@ import (
 )
 
 var _ = Describe("Probing container", func() {
-	framework := Framework{BaseName: "container-probe"}
+	framework := NewFramework("container-probe")
 	var podClient client.PodInterface
 	probe := webserverProbeBuilder{}
 
 	BeforeEach(func() {
-		framework.beforeEach()
 		podClient = framework.Client.Pods(framework.Namespace.Name)
 	})
 
-	AfterEach(framework.afterEach)
-
 	It("with readiness probe should not be ready before initial delay and never restart [Conformance]", func() {
 		p, err := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
 		expectNoError(err)

Changed file: e2e test "daemonrestart"

@@ -185,7 +185,7 @@ func getContainerRestarts(c *client.Client, ns string, labelSelector labels.Sele
 var _ = Describe("DaemonRestart", func() {
-	framework := Framework{BaseName: "daemonrestart"}
+	framework := NewFramework("daemonrestart")
 	rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(util.NewUUID())
 	labelSelector := labels.Set(map[string]string{"name": rcName}).AsSelector()
 	existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc)
@@ -197,11 +197,9 @@ var _ = Describe("DaemonRestart", func() {
 	var tracker *podTracker
 
 	BeforeEach(func() {
 		// These tests require SSH
 		// TODO(11834): Enable this test in GKE once experimental API there is switched on
 		SkipUnlessProviderIs("gce", "aws")
-		framework.beforeEach()
 		ns = framework.Namespace.Name
 
 		// All the restart tests need an rc and a watch on pods of the rc.
@@ -247,7 +245,6 @@ var _ = Describe("DaemonRestart", func() {
 	})
 
 	AfterEach(func() {
-		defer framework.afterEach()
 		close(stopCh)
 		expectNoError(DeleteRC(framework.Client, ns, rcName))
 	})

Changed file: e2e test "daemonsets"

@@ -48,7 +48,14 @@ const (
 )
 
 var _ = Describe("Daemon set", func() {
-	f := &Framework{BaseName: "daemonsets"}
+	var f *Framework
+
+	AfterEach(func() {
+		err := clearDaemonSetNodeLabels(f.Client)
+		Expect(err).NotTo(HaveOccurred())
+	})
+
+	f = NewFramework("daemonsets")
 
 	image := "gcr.io/google_containers/serve_hostname:1.1"
 	dsName := "daemon-set"
@@ -57,19 +64,12 @@ var _ = Describe("Daemon set", func() {
 	var c *client.Client
 
 	BeforeEach(func() {
-		f.beforeEach()
 		ns = f.Namespace.Name
 		c = f.Client
 		err := clearDaemonSetNodeLabels(c)
 		Expect(err).NotTo(HaveOccurred())
 	})
 
-	AfterEach(func() {
-		defer f.afterEach()
-		err := clearDaemonSetNodeLabels(f.Client)
-		Expect(err).NotTo(HaveOccurred())
-	})
-
 	It("should run and stop simple daemon", func() {
 		label := map[string]string{daemonsetNameLabel: dsName}

Changed file: e2e test "density"

@@ -80,10 +80,39 @@ var _ = Describe("Density", func() {
 	var additionalPodsPrefix string
 	var ns string
 	var uuid string
-	framework := Framework{BaseName: "density", NamespaceDeletionTimeout: time.Hour}
+
+	// Gathers data prior to framework namespace teardown
+	AfterEach(func() {
+		// Remove any remaining pods from this test if the
+		// replication controller still exists and the replica count
+		// isn't 0. This means the controller wasn't cleaned up
+		// during the test so clean it up here. We want to do it separately
+		// to not cause a timeout on Namespace removal.
+		rc, err := c.ReplicationControllers(ns).Get(RCName)
+		if err == nil && rc.Spec.Replicas != 0 {
+			By("Cleaning up the replication controller")
+			err := DeleteRC(c, ns, RCName)
+			expectNoError(err)
+		}
+
+		By("Removing additional pods if any")
+		for i := 1; i <= nodeCount; i++ {
+			name := additionalPodsPrefix + "-" + strconv.Itoa(i)
+			c.Pods(ns).Delete(name, nil)
+		}
+
+		expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))
+
+		// Verify latency metrics
+		highLatencyRequests, err := HighLatencyRequests(c)
+		expectNoError(err)
+		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
+	})
+
+	framework := NewFramework("density")
+	framework.NamespaceDeletionTimeout = time.Hour
 
 	BeforeEach(func() {
-		framework.beforeEach()
 		c = framework.Client
 		ns = framework.Namespace.Name
 		var err error
@@ -115,37 +144,6 @@ var _ = Describe("Density", func() {
 		}
 	})
 
-	AfterEach(func() {
-		// We can't call it explicitly at the end, because it will not be called
-		// if Expect() fails.
-		defer framework.afterEach()
-
-		// Remove any remaining pods from this test if the
-		// replication controller still exists and the replica count
-		// isn't 0. This means the controller wasn't cleaned up
-		// during the test so clean it up here. We want to do it separately
-		// to not cause a timeout on Namespace removal.
-		rc, err := c.ReplicationControllers(ns).Get(RCName)
-		if err == nil && rc.Spec.Replicas != 0 {
-			By("Cleaning up the replication controller")
-			err := DeleteRC(c, ns, RCName)
-			expectNoError(err)
-		}
-
-		By("Removing additional pods if any")
-		for i := 1; i <= nodeCount; i++ {
-			name := additionalPodsPrefix + "-" + strconv.Itoa(i)
-			c.Pods(ns).Delete(name, nil)
-		}
-
-		expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))
-
-		// Verify latency metrics
-		highLatencyRequests, err := HighLatencyRequests(c)
-		expectNoError(err)
-		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
-	})
-
 	// Tests with "Skipped" substring in their name will be skipped when running
 	// e2e test suite without --ginkgo.focus & --ginkgo.skip flags.
 	type Density struct {

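The density diff above (and the daemon set, latency, load, and scheduler predicates diffs around it) also moves the spec's own AfterEach ahead of the NewFramework call. The comment added above, "Gathers data prior to framework namespace teardown" (and "Gathers metrics before teardown" in the load diff below), gives the reason: the spec's cleanup and metric collection must run before the framework deletes the test namespace. A minimal sketch of the resulting layout, with placeholder names, looks like this:

// Illustrative layout only; "example" and the hook bodies are placeholders.
var _ = Describe("Example", func() {
	var c *client.Client
	var ns string

	// Declared before NewFramework so that, per the comments in this PR, it
	// runs ahead of the framework's namespace teardown.
	AfterEach(func() {
		// spec-specific cleanup and metrics gathering, while the namespace
		// and the objects in it still exist
	})

	framework := NewFramework("example")
	framework.NamespaceDeletionTimeout = time.Hour

	BeforeEach(func() {
		c = framework.Client
		ns = framework.Namespace.Name
	})

	It("uses the client and namespace set up by the framework", func() {
		_, _ = c, ns
	})
})
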
Changed file: e2e test "etcd-failure"

@@ -31,7 +31,7 @@ import (
 var _ = Describe("Etcd failure", func() {
 	var skipped bool
-	framework := Framework{BaseName: "etcd-failure"}
+	framework := NewFramework("etcd-failure")
 
 	BeforeEach(func() {
 		// This test requires:
@@ -43,8 +43,6 @@ var _ = Describe("Etcd failure", func() {
 		SkipUnlessProviderIs("gce")
 		skipped = false
 
-		framework.beforeEach()
-
 		Expect(RunRC(RCConfig{
 			Client: framework.Client,
 			Name:   "baz",
@@ -54,14 +52,6 @@ var _ = Describe("Etcd failure", func() {
 		})).NotTo(HaveOccurred())
 	})
 
-	AfterEach(func() {
-		if skipped {
-			return
-		}
-
-		framework.afterEach()
-	})
-
 	It("should recover from network partition with master", func() {
 		etcdFailTest(
 			framework,
@@ -79,12 +69,12 @@ var _ = Describe("Etcd failure", func() {
 	})
 })
 
-func etcdFailTest(framework Framework, failCommand, fixCommand string) {
+func etcdFailTest(framework *Framework, failCommand, fixCommand string) {
 	doEtcdFailure(failCommand, fixCommand)
 
 	checkExistingRCRecovers(framework)
 
-	ServeImageOrFail(&framework, "basic", "gcr.io/google_containers/serve_hostname:1.1")
+	ServeImageOrFail(framework, "basic", "gcr.io/google_containers/serve_hostname:1.1")
 }
 
 // For this duration, etcd will be failed by executing a failCommand on the master.
@@ -110,7 +100,7 @@ func masterExec(cmd string) {
 	}
 }
 
-func checkExistingRCRecovers(f Framework) {
+func checkExistingRCRecovers(f *Framework) {
 	By("assert that the pre-existing replication controller recovers")
 	podClient := f.Client.Pods(f.Namespace.Name)
 	rcSelector := labels.Set{"name": "baz"}.AsSelector()

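A smaller change that recurs above: etcdFailTest and checkExistingRCRecovers (like the cluster-upgrade helpers earlier) now take *Framework, so the &framework at the ServeImageOrFail call site goes away. Since NewFramework hands back a pointer, every helper shares the one instance whose Client and Namespace fields the framework's BeforeEach fills in at spec time; the change looks purely mechanical, dropping the value copy and the ad-hoc address-of. Shown schematically (signatures taken from the diff, bodies trimmed):

// Before (value parameter; a pointer had to be taken at one call site):
//   func etcdFailTest(framework Framework, failCommand, fixCommand string) {
//       ...
//       ServeImageOrFail(&framework, "basic", "gcr.io/google_containers/serve_hostname:1.1")
//   }
//
// After (the *Framework returned by NewFramework flows straight through):
func etcdFailTest(framework *Framework, failCommand, fixCommand string) {
	doEtcdFailure(failCommand, fixCommand)
	checkExistingRCRecovers(framework)
	ServeImageOrFail(framework, "basic", "gcr.io/google_containers/serve_hostname:1.1")
}
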
Changed file: e2e test "latency"

@@ -45,10 +45,31 @@ var _ = Describe("[Performance Suite] Latency", func() {
 	var additionalPodsPrefix string
 	var ns string
 	var uuid string
-	framework := Framework{BaseName: "latency", NamespaceDeletionTimeout: time.Hour}
+
+	AfterEach(func() {
+		By("Removing additional pods if any")
+		for i := 1; i <= nodeCount; i++ {
+			name := additionalPodsPrefix + "-" + strconv.Itoa(i)
+			c.Pods(ns).Delete(name, nil)
+		}
+
+		By(fmt.Sprintf("Destroying namespace for this suite %v", ns))
+		if err := c.Namespaces().Delete(ns); err != nil {
+			Failf("Couldn't delete ns %s", err)
+		}
+
+		expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))
+
+		// Verify latency metrics
+		highLatencyRequests, err := HighLatencyRequests(c)
+		expectNoError(err)
+		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
+	})
+
+	framework := NewFramework("latency")
+	framework.NamespaceDeletionTimeout = time.Hour
 
 	BeforeEach(func() {
-		framework.beforeEach()
 		c = framework.Client
 		ns = framework.Namespace.Name
 		var err error
@@ -79,27 +100,6 @@ var _ = Describe("[Performance Suite] Latency", func() {
 		}
 	})
 
-	AfterEach(func() {
-		defer framework.afterEach()
-
-		By("Removing additional pods if any")
-		for i := 1; i <= nodeCount; i++ {
-			name := additionalPodsPrefix + "-" + strconv.Itoa(i)
-			c.Pods(ns).Delete(name, nil)
-		}
-
-		By(fmt.Sprintf("Destroying namespace for this suite %v", ns))
-		if err := c.Namespaces().Delete(ns); err != nil {
-			Failf("Couldn't delete ns %s", err)
-		}
-
-		expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "after"))
-
-		// Verify latency metrics
-		highLatencyRequests, err := HighLatencyRequests(c)
-		expectNoError(err)
-		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
-	})
-
 	// Skipped to avoid running in e2e
 	It("[Skipped] pod start latency should be acceptable", func() {
 		runLatencyTest(nodeCount, c, ns)

Changed file: e2e test "load"

@@ -52,10 +52,22 @@ var _ = Describe("Load capacity", func() {
 	var nodeCount int
 	var ns string
 	var configs []*RCConfig
-	framework := Framework{BaseName: "load", NamespaceDeletionTimeout: time.Hour}
+
+	// Gathers metrics before teardown
+	// TODO add flag that allows to skip cleanup on failure
+	AfterEach(func() {
+		deleteAllRC(configs)
+
+		// Verify latency metrics
+		highLatencyRequests, err := HighLatencyRequests(c)
+		expectNoError(err, "Too many instances metrics above the threshold")
+		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0))
+	})
+
+	framework := NewFramework("load")
+	framework.NamespaceDeletionTimeout = time.Hour
 
 	BeforeEach(func() {
-		framework.beforeEach()
 		c = framework.Client
 		ns = framework.Namespace.Name
 		nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
@@ -72,20 +84,6 @@ var _ = Describe("Load capacity", func() {
 		expectNoError(resetMetrics(c))
 	})
 
-	// TODO add flag that allows to skip cleanup on failure
-	AfterEach(func() {
-		// We can't call it explicitly at the end, because it will not be called
-		// if Expect() fails.
-		defer framework.afterEach()
-
-		deleteAllRC(configs)
-
-		// Verify latency metrics
-		highLatencyRequests, err := HighLatencyRequests(c)
-		expectNoError(err, "Too many instances metrics above the threshold")
-		Expect(highLatencyRequests).NotTo(BeNumerically(">", 0))
-	})
-
 	type Load struct {
 		podsPerNode int
 		image       string

Changed file: e2e test "resize-nodes"

@@ -384,18 +384,15 @@ func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replica
 }
 
 var _ = Describe("Nodes", func() {
-	framework := Framework{BaseName: "resize-nodes"}
+	framework := NewFramework("resize-nodes")
 	var c *client.Client
 	var ns string
 
 	BeforeEach(func() {
-		framework.beforeEach()
 		c = framework.Client
 		ns = framework.Namespace.Name
 	})
 
-	AfterEach(framework.afterEach)
-
 	Describe("Resize", func() {
 		var skipped bool

Changed file: e2e test "sched-pred"

@@ -175,24 +175,13 @@ func waitForStableCluster(c *client.Client) int {
 }
 
 var _ = Describe("SchedulerPredicates", func() {
-	framework := Framework{BaseName: "sched-pred"}
 	var c *client.Client
 	var nodeList *api.NodeList
 	var totalPodCapacity int64
 	var RCName string
 	var ns string
 
-	BeforeEach(func() {
-		framework.beforeEach()
-		c = framework.Client
-		ns = framework.Namespace.Name
-		var err error
-		nodeList, err = c.Nodes().List(labels.Everything(), fields.Everything())
-		expectNoError(err)
-	})
-
 	AfterEach(func() {
-		defer framework.afterEach()
 		rc, err := c.ReplicationControllers(ns).Get(RCName)
 		if err == nil && rc.Spec.Replicas != 0 {
 			By("Cleaning up the replication controller")
@@ -201,6 +190,16 @@ var _ = Describe("SchedulerPredicates", func() {
 		}
 	})
 
+	framework := NewFramework("sched-pred")
+
+	BeforeEach(func() {
+		c = framework.Client
+		ns = framework.Namespace.Name
+		var err error
+		nodeList, err = c.Nodes().List(labels.Everything(), fields.Everything())
+		expectNoError(err)
+	})
+
 	// This test verifies that max-pods flag works as advertised. It assumes that cluster add-on pods stay stable
 	// and cannot be run in parallel with any other test that touches Nodes or Pods. It is so because to check
 	// if max-pods is working we need to fully saturate the cluster and keep it in this state for few seconds.

Changed file: e2e test "servicelb"

@@ -210,19 +210,14 @@ var _ = Describe("ServiceLoadBalancer", func() {
 	var repoRoot string
 	var client *client.Client
 
-	framework := Framework{BaseName: "servicelb"}
+	framework := NewFramework("servicelb")
 
 	BeforeEach(func() {
-		framework.beforeEach()
 		client = framework.Client
 		ns = framework.Namespace.Name
 		repoRoot = testContext.RepoRoot
 	})
 
-	AfterEach(func() {
-		framework.afterEach()
-	})
-
 	It("should support simple GET on Ingress ips", func() {
 		for _, t := range getLoadBalancerControllers(repoRoot, client) {
 			By(fmt.Sprintf("Starting loadbalancer controller %v in namespace %v", t.getName(), ns))