Merge pull request #8651 from lavalamp/e2e-timeout

Start e2e framework; print events
Dawn Chen 2015-05-22 15:14:23 -07:00
commit 4ca2595ed3
24 changed files with 433 additions and 280 deletions

Godeps/Godeps.json

@@ -391,8 +391,8 @@
},
{
"ImportPath": "github.com/onsi/ginkgo",
"Comment": "v1.1.0-44-gae043a2",
"Rev": "ae043a2b2a91d6441adedc96d2c01958a78ee516"
"Comment": "v1.2.0-beta-9-gfbb6632",
"Rev": "fbb663242655b700c623e9629d7781db98957501"
},
{
"ImportPath": "github.com/onsi/gomega",

@@ -1,5 +1,13 @@
## HEAD
Improvements:
- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
## 1.2.0-beta
Ginkgo now requires Go 1.4+
Improvements:
- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
@@ -26,6 +34,8 @@ Improvements:
- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
- `ginkgo -notify` now works on Linux
Bug Fixes:
@@ -34,6 +44,8 @@ Bug Fixes:
- Fix incorrect failure message when a panic occurs during a parallel test run
- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
- Be more consistent about handling SIGTERM as well as SIGINT
- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
- Fixed a long-standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
## 1.1.0 (8/2/2014)

@@ -59,7 +59,7 @@ Agouti allows you run WebDriver integration tests. Learn more about Agouti [her
## Set Me Up!
You'll need Golang v1.2+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
You'll need Golang v1.4+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)

@@ -20,7 +20,7 @@ import (
"fmt"
)
const VERSION = "1.1.0"
const VERSION = "1.2.0-beta"
type GinkgoConfigType struct {
RandomSeed int64

@@ -46,15 +46,19 @@ func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
passed := true
for _, suite := range suites {
runner := testrunner.New(suite, 1, false, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.Tags, nil)
runner := testrunner.New(suite, 1, false, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, nil)
fmt.Printf("Compiling %s...\n", suite.PackageName)
err := runner.Compile()
path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName)))
err := runner.CompileTo(path)
if err != nil {
fmt.Println(err.Error())
passed = false
} else {
fmt.Printf(" compiled %s.test\n", filepath.Join(suite.Path, suite.PackageName))
fmt.Printf(" compiled %s.test\n", suite.PackageName)
}
runner.CleanUp()
}
if passed {

@@ -58,7 +58,7 @@ passing `ginkgo watch` the `-r` flag will recursively detect all test suites und
`watch` does not detect *new* packages. Moreover, changes in package X only rerun the tests for package X, tests for packages
that depend on X are not rerun.
[OSX only] To receive (desktop) notifications when a test run completes:
[OSX & Linux only] To receive (desktop) notifications when a test run completes:
ginkgo -notify

@@ -4,6 +4,7 @@ import (
"fmt"
"os"
"os/exec"
"runtime"
"github.com/onsi/ginkgo/ginkgo/testsuite"
)
@@ -20,9 +21,15 @@ func NewNotifier(commandFlags *RunWatchAndBuildCommandFlags) *Notifier {
func (n *Notifier) VerifyNotificationsAreAvailable() {
if n.commandFlags.Notify {
_, err := exec.LookPath("terminal-notifier")
if err != nil {
fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed.
onLinux := (runtime.GOOS == "linux")
onOSX := (runtime.GOOS == "darwin")
if onOSX {
_, err := exec.LookPath("terminal-notifier")
if err != nil {
fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed.
OSX:
To remedy this:
@@ -32,7 +39,22 @@ To learn more about terminal-notifier:
https://github.com/alloy/terminal-notifier
`)
os.Exit(1)
os.Exit(1)
}
} else if onLinux {
_, err := exec.LookPath("notify-send")
if err != nil {
fmt.Printf(`--notify requires terminal-notifier or notify-send, which you don't seem to have installed.
Linux:
Download and install notify-send for your distribution
`)
os.Exit(1)
}
}
}
}
@@ -46,16 +68,34 @@ func (n *Notifier) SendSuiteCompletionNotification(suite testsuite.TestSuite, su
}
func (n *Notifier) SendNotification(title string, subtitle string) {
args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"}
terminal := os.Getenv("TERM_PROGRAM")
if terminal == "iTerm.app" {
args = append(args, "-activate", "com.googlecode.iterm2")
} else if terminal == "Apple_Terminal" {
args = append(args, "-activate", "com.apple.Terminal")
}
if n.commandFlags.Notify {
exec.Command("terminal-notifier", args...).Run()
onLinux := (runtime.GOOS == "linux")
onOSX := (runtime.GOOS == "darwin")
if onOSX {
_, err := exec.LookPath("terminal-notifier")
if err == nil {
args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"}
terminal := os.Getenv("TERM_PROGRAM")
if terminal == "iTerm.app" {
args = append(args, "-activate", "com.googlecode.iterm2")
} else if terminal == "Apple_Terminal" {
args = append(args, "-activate", "com.apple.Terminal")
}
exec.Command("terminal-notifier", args...).Run()
}
} else if onLinux {
_, err := exec.LookPath("notify-send")
if err == nil {
args := []string{"-a", "ginkgo", title, subtitle}
exec.Command("notify-send", args...).Run()
}
}
}
}

@@ -71,7 +71,7 @@ func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
runners := []*testrunner.TestRunner{}
for _, suite := range suites {
runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.Tags, additionalArgs))
runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, additionalArgs))
}
numSuites := 0

@@ -11,6 +11,7 @@ type RunWatchAndBuildCommandFlags struct {
Recurse bool
Race bool
Cover bool
CoverPkg string
SkipPackage string
Tags string
@@ -87,11 +88,11 @@ func (c *RunWatchAndBuildCommandFlags) computeNodes() {
func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
onWindows := (runtime.GOOS == "windows")
onOSX := (runtime.GOOS == "darwin")
c.FlagSet.BoolVar(&(c.Recurse), "r", false, "Find and run test suites under the current directory recursively")
c.FlagSet.BoolVar(&(c.Race), "race", false, "Run tests with race detection enabled")
c.FlagSet.BoolVar(&(c.Cover), "cover", false, "Run tests with coverage analysis, will generate coverage profiles with the package name in the current directory")
c.FlagSet.StringVar(&(c.CoverPkg), "coverpkg", "", "Run tests with coverage on the given external modules")
c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored.")
c.FlagSet.StringVar(&(c.Tags), "tags", "", "A list of build tags to consider satisfied during the build")
@@ -101,7 +102,7 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
c.FlagSet.IntVar(&(c.NumCompilers), "compilers", 0, "The number of concurrent compilations to run (0 will autodetect)")
c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes")
c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging")
if onOSX {
if !onWindows {
c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes")
}
}

@@ -3,6 +3,7 @@ package main
import (
"fmt"
"runtime"
"sync"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/ginkgo/interrupthandler"
@@ -10,28 +11,21 @@ import (
"github.com/onsi/ginkgo/ginkgo/testsuite"
)
type compilationInput struct {
runner *testrunner.TestRunner
result chan compilationOutput
}
type compilationOutput struct {
runner *testrunner.TestRunner
err error
}
type SuiteRunner struct {
notifier *Notifier
interruptHandler *interrupthandler.InterruptHandler
}
type compiler struct {
runner *testrunner.TestRunner
compilationError chan error
}
func (c *compiler) compile() {
retries := 0
err := c.runner.Compile()
for err != nil && retries < 5 { //We retry because Go sometimes steps on itself when multiple compiles happen in parallel. This is ugly, but should help resolve flakiness...
err = c.runner.Compile()
retries++
}
c.compilationError <- err
}
func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.InterruptHandler) *SuiteRunner {
return &SuiteRunner{
notifier: notifier,
@@ -39,63 +33,110 @@ func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.Inter
}
}
func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) {
runResult := testrunner.PassingRunResult()
func (r *SuiteRunner) compileInParallel(runners []*testrunner.TestRunner, numCompilers int, willCompile func(suite testsuite.TestSuite)) chan compilationOutput {
//we return this to the consumer; it will return each runner in order as it compiles
compilationOutputs := make(chan compilationOutput, len(runners))
compilers := make([]*compiler, len(runners))
for i, runner := range runners {
compilers[i] = &compiler{
runner: runner,
compilationError: make(chan error, 1),
}
//an array of channels - the nth runner's compilation output is sent to the nth channel in this array
//we read from these channels in order to ensure we run the suites in order
orderedCompilationOutputs := []chan compilationOutput{}
for _ = range runners {
orderedCompilationOutputs = append(orderedCompilationOutputs, make(chan compilationOutput, 1))
}
compilerChannel := make(chan *compiler)
//we're going to spin up numCompilers compilers - they're going to run concurrently and will consume this channel
//we prefill the channel then close it; this ensures we compile things in the correct order
workPool := make(chan compilationInput, len(runners))
for i, runner := range runners {
workPool <- compilationInput{runner, orderedCompilationOutputs[i]}
}
close(workPool)
//pick a reasonable numCompilers
if numCompilers == 0 {
numCompilers = runtime.NumCPU()
}
//a WaitGroup to help us wait for all compilers to shut down
wg := &sync.WaitGroup{}
wg.Add(numCompilers)
//spin up the concurrent compilers
for i := 0; i < numCompilers; i++ {
go func() {
for compiler := range compilerChannel {
if willCompile != nil {
willCompile(compiler.runner.Suite)
defer wg.Done()
for input := range workPool {
if r.interruptHandler.WasInterrupted() {
return
}
compiler.compile()
if willCompile != nil {
willCompile(input.runner.Suite)
}
//We retry because Go sometimes steps on itself when multiple compiles happen in parallel. This is ugly, but should help resolve flakiness...
var err error
retries := 0
for retries <= 5 {
if r.interruptHandler.WasInterrupted() {
return
}
if err = input.runner.Compile(); err == nil {
break
}
retries++
}
input.result <- compilationOutput{input.runner, err}
}
}()
}
//read from the compilation output channels *in order* and send them to the caller
//close the compilationOutputs channel to tell the caller we're done
go func() {
for _, compiler := range compilers {
compilerChannel <- compiler
defer close(compilationOutputs)
for _, orderedCompilationOutput := range orderedCompilationOutputs {
select {
case compilationOutput := <-orderedCompilationOutput:
compilationOutputs <- compilationOutput
case <-r.interruptHandler.C:
//interrupt detected, wait for the compilers to shut down then bail
//this ensures we clean up after ourselves so we don't leave any compilation processes running
wg.Wait()
return
}
}
close(compilerChannel)
}()
return compilationOutputs
}
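
The prefill-then-close work pool above is the heart of compileInParallel and is easy to lose in the diff. Here is a minimal, generic sketch of just that idiom, with hypothetical names that are not part of this commit: fill a buffered channel with every job, close it, and let a fixed number of workers range over it; closing the channel is what lets each worker's range loop terminate.

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := []string{"suite-a", "suite-b", "suite-c", "suite-d"}

	// Prefill the channel with all work, then close it. Workers drain the
	// buffer; once it is empty, the closed channel ends their range loops.
	work := make(chan string, len(jobs))
	for _, j := range jobs {
		work <- j
	}
	close(work)

	const numWorkers = 2
	var wg sync.WaitGroup
	wg.Add(numWorkers)
	for i := 0; i < numWorkers; i++ {
		go func(id int) {
			defer wg.Done()
			for j := range work {
				fmt.Printf("worker %d compiled %s\n", id, j)
			}
		}(i)
	}
	wg.Wait() // all jobs handled, all workers exited
}
```
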
func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) {
runResult := testrunner.PassingRunResult()
compilationOutputs := r.compileInParallel(runners, numCompilers, willCompile)
numSuitesThatRan := 0
suitesThatFailed := []testsuite.TestSuite{}
for i, runner := range runners {
if r.interruptHandler.WasInterrupted() {
break
}
compilationError := <-compilers[i].compilationError
if compilationError != nil {
fmt.Print(compilationError.Error())
for compilationOutput := range compilationOutputs {
if compilationOutput.err != nil {
fmt.Print(compilationOutput.err.Error())
}
numSuitesThatRan++
suiteRunResult := testrunner.FailingRunResult()
if compilationError == nil {
suiteRunResult = compilers[i].runner.Run()
if compilationOutput.err == nil {
suiteRunResult = compilationOutput.runner.Run()
}
r.notifier.SendSuiteCompletionNotification(runner.Suite, suiteRunResult.Passed)
r.notifier.SendSuiteCompletionNotification(compilationOutput.runner.Suite, suiteRunResult.Passed)
runResult = runResult.Merge(suiteRunResult)
if !suiteRunResult.Passed {
suitesThatFailed = append(suitesThatFailed, runner.Suite)
suitesThatFailed = append(suitesThatFailed, compilationOutput.runner.Suite)
if !keepGoing {
break
}
}
if i < len(runners)-1 && !config.DefaultReporterConfig.Succinct {
if numSuitesThatRan < len(runners) && !config.DefaultReporterConfig.Succinct {
fmt.Println("")
}
}

@@ -22,30 +22,48 @@ import (
)
type TestRunner struct {
Suite testsuite.TestSuite
compiled bool
Suite testsuite.TestSuite
compiled bool
compilationTargetPath string
numCPU int
parallelStream bool
race bool
cover bool
coverPkg string
tags string
additionalArgs []string
}
func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool, cover bool, tags string, additionalArgs []string) *TestRunner {
return &TestRunner{
func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool, cover bool, coverPkg string, tags string, additionalArgs []string) *TestRunner {
runner := &TestRunner{
Suite: suite,
numCPU: numCPU,
parallelStream: parallelStream,
race: race,
cover: cover,
coverPkg: coverPkg,
tags: tags,
additionalArgs: additionalArgs,
}
if !suite.Precompiled {
dir, err := ioutil.TempDir("", "ginkgo")
if err != nil {
panic(fmt.Sprintf("couldn't create temporary directory... might be time to rm -rf:\n%s", err.Error()))
}
runner.compilationTargetPath = filepath.Join(dir, suite.PackageName+".test")
}
return runner
}
func (t *TestRunner) Compile() error {
return t.CompileTo(t.compilationTargetPath)
}
func (t *TestRunner) CompileTo(path string) error {
if t.compiled {
return nil
}
@@ -54,15 +72,16 @@ func (t *TestRunner) Compile() error {
return nil
}
os.Remove(t.compiledArtifact())
args := []string{"test", "-c", "-i"}
args := []string{"test", "-c", "-i", "-o", path}
if t.race {
args = append(args, "-race")
}
if t.cover {
if t.cover || t.coverPkg != "" {
args = append(args, "-cover", "-covermode=atomic")
}
if t.coverPkg != "" {
args = append(args, fmt.Sprintf("-coverpkg=%s", t.coverPkg))
}
if t.tags != "" {
args = append(args, fmt.Sprintf("-tags=%s", t.tags))
}
@@ -78,10 +97,11 @@ func (t *TestRunner) Compile() error {
if len(output) > 0 {
return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, fixedOutput)
}
return fmt.Errorf("")
return fmt.Errorf("Failed to compile %s", t.Suite.PackageName)
}
t.compiled = true
return nil
}
@@ -134,12 +154,7 @@ func (t *TestRunner) CleanUp() {
if t.Suite.Precompiled {
return
}
os.Remove(t.compiledArtifact())
}
func (t *TestRunner) compiledArtifact() string {
compiledArtifact, _ := filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName)))
return compiledArtifact
os.RemoveAll(filepath.Dir(t.compilationTargetPath))
}
func (t *TestRunner) runSerialGinkgoSuite() RunResult {
@@ -196,7 +211,7 @@ func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult {
os.Stdout.Sync()
if t.cover {
if t.cover || t.coverPkg != "" {
t.combineCoverprofiles()
}
@@ -257,21 +272,16 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult {
fmt.Println("")
case <-time.After(time.Second):
//the aggregator never got back to us! something must have gone wrong
fmt.Println("")
fmt.Println("")
fmt.Println(" ----------------------------------------------------------- ")
fmt.Println(" | |")
fmt.Println(" | Ginkgo timed out waiting for all parallel nodes to end! |")
fmt.Println(" | Here is some salvaged output: |")
fmt.Println(" | |")
fmt.Println(" ----------------------------------------------------------- ")
fmt.Println("")
fmt.Println("")
fmt.Println(`
-------------------------------------------------------------------
| |
| Ginkgo timed out waiting for all parallel nodes to report back! |
| |
-------------------------------------------------------------------
`)
os.Stdout.Sync()
time.Sleep(time.Second)
for _, writer := range writers {
writer.Close()
}
@@ -283,7 +293,7 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult {
os.Stdout.Sync()
}
if t.cover {
if t.cover || t.coverPkg != "" {
t.combineCoverprofiles()
}
@@ -292,7 +302,7 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult {
func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd {
args := []string{"--test.timeout=24h"}
if t.cover {
if t.cover || t.coverPkg != "" {
coverprofile := "--test.coverprofile=" + t.Suite.PackageName + ".coverprofile"
if t.numCPU > 1 {
coverprofile = fmt.Sprintf("%s.%d", coverprofile, node)
@@ -303,7 +313,12 @@ func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.
args = append(args, ginkgoArgs...)
args = append(args, t.additionalArgs...)
cmd := exec.Command(t.compiledArtifact(), args...)
path := t.compilationTargetPath
if t.Suite.Precompiled {
path, _ = filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName)))
}
cmd := exec.Command(path, args...)
cmd.Dir = t.Suite.Path
cmd.Stderr = stream

@@ -57,7 +57,7 @@ func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalA
runners := []*testrunner.TestRunner{}
for _, suite := range suites {
runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Race, w.commandFlags.Cover, w.commandFlags.Tags, additionalArgs))
runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Race, w.commandFlags.Cover, w.commandFlags.CoverPkg, w.commandFlags.Tags, additionalArgs))
}
return runners

@@ -130,6 +130,7 @@ type Done chan<- interface{}
// IsMeasurement: true if the current test is a measurement
// FileName: the name of the file containing the current test
// LineNumber: the line number for the current test
// Failed: if the current test has failed, this will be true (useful in an AfterEach)
type GinkgoTestDescription struct {
FullTestText string
ComponentTexts []string
@@ -139,6 +140,8 @@ type GinkgoTestDescription struct {
FileName string
LineNumber int
Failed bool
}
//CurrentGinkgoTestDescription returns information about the currently running test.
@@ -157,6 +160,7 @@ func CurrentGinkgoTestDescription() GinkgoTestDescription {
IsMeasurement: summary.IsMeasurement,
FileName: subjectCodeLocation.FileName,
LineNumber: subjectCodeLocation.LineNumber,
Failed: summary.HasFailureState(),
}
}
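
The new `Failed` field pays off inside an `AfterEach`, which runs after the spec's outcome is known; that is exactly how the e2e framework later in this commit uses it to decide whether to print events. A minimal sketch of the pattern, in a hypothetical spec file that is not part of this diff:

```go
package demo_test

import (
	"fmt"

	. "github.com/onsi/ginkgo"
)

var _ = Describe("diagnostics on failure", func() {
	AfterEach(func() {
		// Failed reflects the outcome of the spec that just finished.
		if CurrentGinkgoTestDescription().Failed {
			fmt.Println("spec failed -- dump extra diagnostics here")
		}
	})

	It("passes, so the AfterEach stays quiet", func() {})
})
```
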

@@ -1,11 +1,12 @@
package integration_test
import (
"os"
"os/exec"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"os"
"os/exec"
)
var _ = Describe("Coverage Specs", func() {
@@ -31,4 +32,23 @@ var _ = Describe("Coverage Specs", func() {
Ω(parallelCoverProfileOutput).Should(Equal(serialCoverProfileOutput))
})
It("runs coverage analysis on external packages in series and in parallel", func() {
session := startGinkgo("./_fixtures/coverage_fixture", "-coverpkg=github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture,github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("coverage: 71.4% of statements in github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture, github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture"))
serialCoverProfileOutput, err := exec.Command("go", "tool", "cover", "-func=./_fixtures/coverage_fixture/coverage_fixture.coverprofile").CombinedOutput()
Ω(err).ShouldNot(HaveOccurred())
os.RemoveAll("./_fixtures/coverage_fixture/coverage_fixture.coverprofile")
Eventually(startGinkgo("./_fixtures/coverage_fixture", "-coverpkg=github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture,github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture", "-nodes=4")).Should(gexec.Exit(0))
parallelCoverProfileOutput, err := exec.Command("go", "tool", "cover", "-func=./_fixtures/coverage_fixture/coverage_fixture.coverprofile").CombinedOutput()
Ω(err).ShouldNot(HaveOccurred())
Ω(parallelCoverProfileOutput).Should(Equal(serialCoverProfileOutput))
})
})

@@ -1,10 +1,11 @@
package integration_test
import (
"strings"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
"strings"
)
var _ = Describe("SuiteSetup", func() {
@@ -171,7 +172,7 @@ var _ = Describe("SuiteSetup", func() {
output := string(session.Out.Contents())
Ω(output).Should(ContainSubstring("Node 1 disappeared before completing BeforeSuite"))
Ω(output).Should(ContainSubstring("Ginkgo timed out waiting for all parallel nodes to end"))
Ω(output).Should(ContainSubstring("Ginkgo timed out waiting for all parallel nodes to report back!"))
})
})
})

@@ -0,0 +1,25 @@
package integration_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
)
var _ = Describe("TestDescription", func() {
var pathToTest string
BeforeEach(func() {
pathToTest = tmpPath("test_description")
copyIn("test_description", pathToTest)
})
It("should capture and emit information about the current test", func() {
session := startGinkgo(pathToTest, "--noColor")
Eventually(session).Should(gexec.Exit(1))
Ω(session).Should(gbytes.Say("TestDescription should pass:false"))
Ω(session).Should(gbytes.Say("TestDescription should fail:true"))
})
})

@@ -1,64 +0,0 @@
package remote_test
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/internal/remote"
. "github.com/onsi/gomega"
"os"
)
var _ = Describe("OutputInterceptor", func() {
var interceptor OutputInterceptor
BeforeEach(func() {
interceptor = NewOutputInterceptor()
})
It("should capture all stdout/stderr output", func() {
err := interceptor.StartInterceptingOutput()
Ω(err).ShouldNot(HaveOccurred())
fmt.Fprint(os.Stdout, "STDOUT")
fmt.Fprint(os.Stderr, "STDERR")
print("PRINT")
output, err := interceptor.StopInterceptingAndReturnOutput()
Ω(output).Should(Equal("STDOUTSTDERRPRINT"))
Ω(err).ShouldNot(HaveOccurred())
})
It("should error if told to intercept output twice", func() {
err := interceptor.StartInterceptingOutput()
Ω(err).ShouldNot(HaveOccurred())
print("A")
err = interceptor.StartInterceptingOutput()
Ω(err).Should(HaveOccurred())
print("B")
output, err := interceptor.StopInterceptingAndReturnOutput()
Ω(output).Should(Equal("AB"))
Ω(err).ShouldNot(HaveOccurred())
})
It("should allow multiple interception sessions", func() {
err := interceptor.StartInterceptingOutput()
Ω(err).ShouldNot(HaveOccurred())
print("A")
output, err := interceptor.StopInterceptingAndReturnOutput()
Ω(output).Should(Equal("A"))
Ω(err).ShouldNot(HaveOccurred())
err = interceptor.StartInterceptingOutput()
Ω(err).ShouldNot(HaveOccurred())
print("B")
output, err = interceptor.StopInterceptingAndReturnOutput()
Ω(output).Should(Equal("B"))
Ω(err).ShouldNot(HaveOccurred())
})
})

@@ -14,10 +14,8 @@ func NewOutputInterceptor() OutputInterceptor {
}
type outputInterceptor struct {
stdoutPlaceholder *os.File
stderrPlaceholder *os.File
redirectFile *os.File
intercepting bool
redirectFile *os.File
intercepting bool
}
func (interceptor *outputInterceptor) StartInterceptingOutput() error {
@@ -33,19 +31,6 @@ func (interceptor *outputInterceptor) StartInterceptingOutput() error {
return err
}
interceptor.stdoutPlaceholder, err = ioutil.TempFile("", "ginkgo-output")
if err != nil {
return err
}
interceptor.stderrPlaceholder, err = ioutil.TempFile("", "ginkgo-output")
if err != nil {
return err
}
syscall.Dup2(1, int(interceptor.stdoutPlaceholder.Fd()))
syscall.Dup2(2, int(interceptor.stderrPlaceholder.Fd()))
syscall.Dup2(int(interceptor.redirectFile.Fd()), 1)
syscall.Dup2(int(interceptor.redirectFile.Fd()), 2)
@@ -57,18 +42,9 @@ func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string,
return "", errors.New("Not intercepting output!")
}
syscall.Dup2(int(interceptor.stdoutPlaceholder.Fd()), 1)
syscall.Dup2(int(interceptor.stderrPlaceholder.Fd()), 2)
for _, f := range []*os.File{interceptor.redirectFile, interceptor.stdoutPlaceholder, interceptor.stderrPlaceholder} {
f.Close()
}
interceptor.redirectFile.Close()
output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
for _, f := range []*os.File{interceptor.redirectFile, interceptor.stdoutPlaceholder, interceptor.stderrPlaceholder} {
os.Remove(f.Name())
}
os.Remove(interceptor.redirectFile.Name())
interceptor.intercepting = false
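
The interceptor works at the file-descriptor level so that output from child processes and cgo code is captured too, not just writes that go through Go's `os.Stdout`. A minimal, self-contained sketch of that mechanism using `syscall.Dup`/`syscall.Dup2` (Unix-only, illustrative names; this is not ginkgo's API):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"syscall"
)

func main() {
	// Keep a duplicate of the real stdout so it can be restored later.
	savedStdout, _ := syscall.Dup(1)

	// Point fd 1 at a temp file; from now on anything written to stdout,
	// by this process or its children, lands in the file.
	redirect, _ := ioutil.TempFile("", "capture")
	syscall.Dup2(int(redirect.Fd()), 1)

	fmt.Println("captured")

	// Restore the original stdout, then read back what was captured.
	syscall.Dup2(savedStdout, 1)
	syscall.Close(savedStdout)
	content, _ := ioutil.ReadFile(redirect.Name())
	redirect.Close()
	os.Remove(redirect.Name())
	fmt.Printf("got: %q\n", string(content))
}
```
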

@@ -115,7 +115,7 @@ func (spec *Spec) Run(writer io.Writer) {
}()
for sample := 0; sample < spec.subject.Samples(); sample++ {
spec.state, spec.failure = spec.runSample(sample, writer)
spec.runSample(sample, writer)
if spec.state != types.SpecStatePassed {
return
@@ -123,9 +123,9 @@
}
}
func (spec *Spec) runSample(sample int, writer io.Writer) (specState types.SpecState, specFailure types.SpecFailure) {
specState = types.SpecStatePassed
specFailure = types.SpecFailure{}
func (spec *Spec) runSample(sample int, writer io.Writer) {
spec.state = types.SpecStatePassed
spec.failure = types.SpecFailure{}
innerMostContainerIndexToUnwind := -1
defer func() {
@@ -134,9 +134,9 @@ func (spec *Spec) runSample(sample int, writer io.Writer) (specState types.SpecS
for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
spec.announceSetupNode(writer, "AfterEach", container, afterEach)
afterEachState, afterEachFailure := afterEach.Run()
if afterEachState != types.SpecStatePassed && specState == types.SpecStatePassed {
specState = afterEachState
specFailure = afterEachFailure
if afterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
spec.state = afterEachState
spec.failure = afterEachFailure
}
}
}
@@ -146,8 +146,8 @@ func (spec *Spec) runSample(sample int, writer io.Writer) (specState types.SpecS
innerMostContainerIndexToUnwind = i
for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
specState, specFailure = beforeEach.Run()
if specState != types.SpecStatePassed {
spec.state, spec.failure = beforeEach.Run()
if spec.state != types.SpecStatePassed {
return
}
}
@@ -156,17 +156,15 @@ func (spec *Spec) runSample(sample int, writer io.Writer) (specState types.SpecS
for _, container := range spec.containers {
for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
specState, specFailure = justBeforeEach.Run()
if specState != types.SpecStatePassed {
spec.state, spec.failure = justBeforeEach.Run()
if spec.state != types.SpecStatePassed {
return
}
}
}
spec.announceSubject(writer, spec.subject)
specState, specFailure = spec.subject.Run()
return
spec.state, spec.failure = spec.subject.Run()
}
func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {

@@ -120,6 +120,7 @@ var _ = Describe("Suite", func() {
Ω(description.FileName).Should(ContainSubstring("suite_test.go"))
Ω(description.LineNumber).Should(BeNumerically(">", 50))
Ω(description.LineNumber).Should(BeNumerically("<", 150))
Ω(description.Failed).Should(BeFalse())
})
Measure("should run measurements", func(b Benchmarker) {

test/e2e/framework.go

@@ -0,0 +1,96 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Framework supports common operations used by e2e tests; it will keep a client & a namespace for you.
// Eventual goal is to merge this with integration test framework.
type Framework struct {
BaseName string
Namespace *api.Namespace
Client *client.Client
}
// NewFramework makes a new framework and sets up a BeforeEach/AfterEach for
// you (you can write additional before/after each functions).
func NewFramework(baseName string) *Framework {
f := &Framework{
BaseName: baseName,
}
BeforeEach(f.beforeEach)
AfterEach(f.afterEach)
return f
}
// beforeEach gets a client and makes a namespace.
func (f *Framework) beforeEach() {
By("Creating a kubernetes client")
c, err := loadClient()
Expect(err).NotTo(HaveOccurred())
f.Client = c
By("Building a namespace api object")
namespace, err := createTestingNS(f.BaseName, f.Client)
Expect(err).NotTo(HaveOccurred())
f.Namespace = namespace
}
// afterEach deletes the namespace, after reading its events.
func (f *Framework) afterEach() {
// Print events if the test failed.
if CurrentGinkgoTestDescription().Failed {
By(fmt.Sprintf("Collecting events from namespace %q.", f.Namespace.Name))
events, err := f.Client.Events(f.Namespace.Name).List(labels.Everything(), fields.Everything())
Expect(err).NotTo(HaveOccurred())
for _, e := range events.Items {
Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
}
// Note that we don't wait for any cleanup to propagate, which means
// that if you delete a bunch of pods right before ending your test,
// you may or may not see the killing/deletion/cleanup events.
}
By(fmt.Sprintf("Destroying namespace %q for this suite.", f.Namespace.Name))
if err := f.Client.Namespaces().Delete(f.Namespace.Name); err != nil {
Failf("Couldn't delete ns %q: %s", f.Namespace.Name, err)
}
// Paranoia-- prevent reuse!
f.Namespace = nil
f.Client = nil
}
// WaitForPodRunning waits for the pod to run in the namespace.
func (f *Framework) WaitForPodRunning(podName string) error {
return waitForPodRunningInNamespace(f.Client, podName, f.Namespace.Name)
}
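
A sketch of how a spec consumes the framework; only `NewFramework`, `Client`, and `Namespace` come from this commit, and the spec body is illustrative. The networking rewrite below follows the same shape:

```go
package e2e

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Example", func() {
	// Registers the framework's BeforeEach/AfterEach: each spec gets a fresh
	// client and namespace, and events are printed when a spec fails.
	f := NewFramework("example")

	It("can list events in the per-test namespace", func() {
		_, err := f.Client.Events(f.Namespace.Name).List(labels.Everything(), fields.Everything())
		Expect(err).NotTo(HaveOccurred())
	})
})
```
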

@@ -19,10 +19,10 @@ package e2e
import (
"fmt"
"net/http"
"strings"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
@@ -31,20 +31,17 @@ import (
. "github.com/onsi/gomega"
)
func LaunchNetTestPodPerNode(nodes *api.NodeList, name string, c *client.Client, ns string) []string {
func LaunchNetTestPodPerNode(f *Framework, nodes *api.NodeList, name string) []string {
podNames := []string{}
totalPods := len(nodes.Items)
Expect(totalPods).NotTo(Equal(0))
for i, node := range nodes.Items {
podName := fmt.Sprintf("%s-%d", name, i)
podNames = append(podNames, podName)
Logf("Creating pod %s on node %s", podName, node.Name)
_, err := c.Pods(ns).Create(&api.Pod{
for _, node := range nodes.Items {
pod, err := f.Client.Pods(f.Namespace.Name).Create(&api.Pod{
ObjectMeta: api.ObjectMeta{
Name: podName,
GenerateName: name + "-",
Labels: map[string]string{
"name": name,
},
@@ -59,7 +56,7 @@ func LaunchNetTestPodPerNode(nodes *api.NodeList, name string, c *client.Client,
//peers >= totalPods should be asserted by the container.
//the nettest container finds peers by looking up list of svc endpoints.
fmt.Sprintf("-peers=%d", totalPods),
"-namespace=" + ns},
"-namespace=" + f.Namespace.Name},
Ports: []api.ContainerPort{{ContainerPort: 8080}},
},
},
@@ -68,16 +65,16 @@ func LaunchNetTestPodPerNode(nodes *api.NodeList, name string, c *client.Client,
},
})
Expect(err).NotTo(HaveOccurred())
Logf("Created pod %s on node %s", pod.ObjectMeta.Name, node.Name)
podNames = append(podNames, pod.ObjectMeta.Name)
}
return podNames
}
var _ = Describe("Networking", func() {
f := NewFramework("nettest")
//This namespace is modified throughout the course of the test.
var namespace *api.Namespace
var svcname = "nettest"
var c *client.Client = nil
BeforeEach(func() {
//Assert basic external connectivity.
@@ -91,22 +88,6 @@ var _ = Describe("Networking", func() {
if resp.StatusCode != http.StatusOK {
Failf("Unexpected error code, expected 200, got %v (%v)", resp.StatusCode, resp)
}
By("Creating a kubernetes client")
c, err = loadClient()
Expect(err).NotTo(HaveOccurred())
By("Building a namespace api object")
namespace, err = createTestingNS("nettest", c)
Expect(err).NotTo(HaveOccurred())
})
AfterEach(func() {
By(fmt.Sprintf("Destroying namespace for this suite %v", namespace.Name))
if err := c.Namespaces().Delete(namespace.Name); err != nil {
Failf("Couldn't delete ns %s", err)
}
})
// First test because it has no dependencies on variables created later on.
@@ -120,8 +101,8 @@
}
for _, test := range tests {
By(fmt.Sprintf("testing: %s", test.path))
data, err := c.RESTClient.Get().
Namespace(namespace.Name).
data, err := f.Client.RESTClient.Get().
Namespace(f.Namespace.Name).
AbsPath(test.path).
DoRaw()
if err != nil {
@@ -139,8 +120,8 @@
return
}
By(fmt.Sprintf("Creating a service named [%s] in namespace %s", svcname, namespace.Name))
svc, err := c.Services(namespace.Name).Create(&api.Service{
By(fmt.Sprintf("Creating a service named %q in namespace %q", svcname, f.Namespace.Name))
svc, err := f.Client.Services(f.Namespace.Name).Create(&api.Service{
ObjectMeta: api.ObjectMeta{
Name: svcname,
Labels: map[string]string{
@@ -166,26 +147,26 @@
defer func() {
defer GinkgoRecover()
By("Cleaning up the service")
if err = c.Services(namespace.Name).Delete(svc.Name); err != nil {
if err = f.Client.Services(f.Namespace.Name).Delete(svc.Name); err != nil {
Failf("unable to delete svc %v: %v", svc.Name, err)
}
}()
By("Creating a webserver (pending) pod on each node")
nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
nodes, err := f.Client.Nodes().List(labels.Everything(), fields.Everything())
if err != nil {
Failf("Failed to list nodes: %v", err)
}
podNames := LaunchNetTestPodPerNode(nodes, svcname, c, namespace.Name)
podNames := LaunchNetTestPodPerNode(f, nodes, svcname)
// Clean up the pods
defer func() {
defer GinkgoRecover()
By("Cleaning up the webserver pods")
for _, podName := range podNames {
if err = c.Pods(namespace.Name).Delete(podName, nil); err != nil {
if err = f.Client.Pods(f.Namespace.Name).Delete(podName, nil); err != nil {
Logf("Failed to delete pod %s: %v", podName, err)
}
}
@@ -193,63 +174,67 @@
By("Waiting for the webserver pods to transition to Running state")
for _, podName := range podNames {
err = waitForPodRunningInNamespace(c, podName, namespace.Name)
err = f.WaitForPodRunning(podName)
Expect(err).NotTo(HaveOccurred())
}
By("Waiting for connectivity to be verified")
const maxAttempts = 60
passed := false
//once response OK, evaluate response body for pass/fail.
var body []byte
getDetails := func() ([]byte, error) {
return f.Client.Get().
Namespace(f.Namespace.Name).
Prefix("proxy").
Resource("services").
Name(svc.Name).
Suffix("read").
DoRaw()
}
for i := 0; i < maxAttempts && !passed; i++ {
time.Sleep(2 * time.Second)
Logf("About to make a proxy status call")
start := time.Now()
body, err = c.Get().
Namespace(namespace.Name).
getStatus := func() ([]byte, error) {
return f.Client.Get().
Namespace(f.Namespace.Name).
Prefix("proxy").
Resource("services").
Name(svc.Name).
Suffix("status").
DoRaw()
}
for i := 0; !passed; i++ { // Timeout will keep us from going forever.
time.Sleep(2 * time.Second)
Logf("About to make a proxy status call")
start := time.Now()
body, err = getStatus()
Logf("Proxy status call returned in %v", time.Since(start))
if err != nil {
Logf("Attempt %v/%v: service/pod still starting. (error: '%v')", i, maxAttempts, err)
Logf("Attempt %v: service/pod still starting. (error: '%v')", i, err)
continue
}
//Finally, we pass/fail the test based on the container's response body, i.e. whether or not it was able to find peers.
switch string(body) {
case "pass":
// Finally, we pass/fail the test based on the container's response body, i.e. whether or not it was able to find peers.
switch {
case string(body) == "pass":
Logf("Passed on attempt %v. Cleaning up.", i)
passed = true
case "running":
Logf("Attempt %v/%v: test still running", i, maxAttempts)
case "fail":
if body, err = c.Get().
Namespace(namespace.Name).Prefix("proxy").
Resource("services").
Name(svc.Name).Suffix("read").
DoRaw(); err != nil {
case string(body) == "running":
Logf("Attempt %v: test still running", i)
case string(body) == "fail":
if body, err = getDetails(); err != nil {
Failf("Failed on attempt %v. Cleaning up. Error reading details: %v", i, err)
} else {
Failf("Failed on attempt %v. Cleaning up. Details:\n%s", i, string(body))
}
case strings.Contains(string(body), "no endpoints available"):
Logf("Attempt %v: waiting on service/endpoints", i)
default:
Logf("Unexpected response: %q", body)
Logf("Unexpected response:\n%s", body)
}
}
if !passed {
if body, err = c.Get().
Namespace(namespace.Name).
Prefix("proxy").
Resource("services").
Name(svc.Name).
Suffix("read").
DoRaw(); err != nil {
if body, err = getDetails(); err != nil {
Failf("Timed out. Cleaning up. Error reading details: %v", err)
} else {
Failf("Timed out. Cleaning up. Details:\n%s", string(body))

@@ -55,8 +55,7 @@ const (
)
var _ = Describe("Reboot", func() {
var c *client.Client = nil
var c *client.Client
BeforeEach(func() {
var err error

@@ -27,8 +27,7 @@ import (
)
var _ = Describe("SSH", func() {
var c *client.Client = nil
var c *client.Client
BeforeEach(func() {
var err error