update to latest ginkgo

This commit is contained in:
Daniel Smith 2015-05-21 18:48:09 -07:00
parent d9d12fd3f7
commit 2a6cf783ec
20 changed files with 286 additions and 212 deletions

4
Godeps/Godeps.json generated
View File

@ -371,8 +371,8 @@
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo", "ImportPath": "github.com/onsi/ginkgo",
"Comment": "v1.1.0-44-gae043a2", "Comment": "v1.2.0-beta-9-gfbb6632",
"Rev": "ae043a2b2a91d6441adedc96d2c01958a78ee516" "Rev": "fbb663242655b700c623e9629d7781db98957501"
}, },
{ {
"ImportPath": "github.com/onsi/gomega", "ImportPath": "github.com/onsi/gomega",

View File

@ -1,5 +1,13 @@
## HEAD ## HEAD
Improvements
- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
## 1.2.0-beta
Ginkgo now requires Go 1.4+
Improvements: Improvements:
- Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does. - Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
@ -26,6 +34,8 @@ Improvements:
- Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs. - Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
- `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory - `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
- The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump. - The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
- `ginkgo -notify` now works on Linux
Bug Fixes: Bug Fixes:
@ -34,6 +44,8 @@ Bug Fixes:
- Fix incorrect failure message when a panic occurs during a parallel test run - Fix incorrect failure message when a panic occurs during a parallel test run
- Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests. - Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
- Be more consistent about handling SIGTERM as well as SIGINT - Be more consistent about handling SIGTERM as well as SIGINT
- When interrupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
## 1.1.0 (8/2/2014) ## 1.1.0 (8/2/2014)

View File

@ -59,7 +59,7 @@ Agouti allows you run WebDriver integration tests. Learn more about Agouti [her
## Set Me Up! ## Set Me Up!
You'll need Golang v1.2+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!) You'll need Golang v1.4+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
```bash ```bash

View File

@ -20,7 +20,7 @@ import (
"fmt" "fmt"
) )
const VERSION = "1.1.0" const VERSION = "1.2.0-beta"
type GinkgoConfigType struct { type GinkgoConfigType struct {
RandomSeed int64 RandomSeed int64

View File

@ -46,15 +46,19 @@ func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
passed := true passed := true
for _, suite := range suites { for _, suite := range suites {
runner := testrunner.New(suite, 1, false, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.Tags, nil) runner := testrunner.New(suite, 1, false, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, nil)
fmt.Printf("Compiling %s...\n", suite.PackageName) fmt.Printf("Compiling %s...\n", suite.PackageName)
err := runner.Compile()
path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName)))
err := runner.CompileTo(path)
if err != nil { if err != nil {
fmt.Println(err.Error()) fmt.Println(err.Error())
passed = false passed = false
} else { } else {
fmt.Printf(" compiled %s.test\n", filepath.Join(suite.Path, suite.PackageName)) fmt.Printf(" compiled %s.test\n", suite.PackageName)
} }
runner.CleanUp()
} }
if passed { if passed {

View File

@ -58,7 +58,7 @@ passing `ginkgo watch` the `-r` flag will recursively detect all test suites und
`watch` does not detect *new* packages. Moreover, changes in package X only rerun the tests for package X, tests for packages `watch` does not detect *new* packages. Moreover, changes in package X only rerun the tests for package X, tests for packages
that depend on X are not rerun. that depend on X are not rerun.
[OSX only] To receive (desktop) notifications when a test run completes: [OSX & Linux only] To receive (desktop) notifications when a test run completes:
ginkgo -notify ginkgo -notify

View File

@ -4,6 +4,7 @@ import (
"fmt" "fmt"
"os" "os"
"os/exec" "os/exec"
"runtime"
"github.com/onsi/ginkgo/ginkgo/testsuite" "github.com/onsi/ginkgo/ginkgo/testsuite"
) )
@ -20,9 +21,15 @@ func NewNotifier(commandFlags *RunWatchAndBuildCommandFlags) *Notifier {
func (n *Notifier) VerifyNotificationsAreAvailable() { func (n *Notifier) VerifyNotificationsAreAvailable() {
if n.commandFlags.Notify { if n.commandFlags.Notify {
_, err := exec.LookPath("terminal-notifier") onLinux := (runtime.GOOS == "linux")
if err != nil { onOSX := (runtime.GOOS == "darwin")
fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed. if onOSX {
_, err := exec.LookPath("terminal-notifier")
if err != nil {
fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed.
OSX:
To remedy this: To remedy this:
@ -32,7 +39,22 @@ To learn more about terminal-notifier:
https://github.com/alloy/terminal-notifier https://github.com/alloy/terminal-notifier
`) `)
os.Exit(1) os.Exit(1)
}
} else if onLinux {
_, err := exec.LookPath("notify-send")
if err != nil {
fmt.Printf(`--notify requires terminal-notifier or notify-send, which you don't seem to have installed.
Linux:
Download and install notify-send for your distribution
`)
os.Exit(1)
}
} }
} }
} }
@ -46,16 +68,34 @@ func (n *Notifier) SendSuiteCompletionNotification(suite testsuite.TestSuite, su
} }
func (n *Notifier) SendNotification(title string, subtitle string) { func (n *Notifier) SendNotification(title string, subtitle string) {
args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"}
terminal := os.Getenv("TERM_PROGRAM")
if terminal == "iTerm.app" {
args = append(args, "-activate", "com.googlecode.iterm2")
} else if terminal == "Apple_Terminal" {
args = append(args, "-activate", "com.apple.Terminal")
}
if n.commandFlags.Notify { if n.commandFlags.Notify {
exec.Command("terminal-notifier", args...).Run() onLinux := (runtime.GOOS == "linux")
onOSX := (runtime.GOOS == "darwin")
if onOSX {
_, err := exec.LookPath("terminal-notifier")
if err == nil {
args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"}
terminal := os.Getenv("TERM_PROGRAM")
if terminal == "iTerm.app" {
args = append(args, "-activate", "com.googlecode.iterm2")
} else if terminal == "Apple_Terminal" {
args = append(args, "-activate", "com.apple.Terminal")
}
exec.Command("terminal-notifier", args...).Run()
}
} else if onLinux {
_, err := exec.LookPath("notify-send")
if err == nil {
args := []string{"-a", "ginkgo", title, subtitle}
exec.Command("notify-send", args...).Run()
}
}
} }
} }

View File

@ -71,7 +71,7 @@ func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
runners := []*testrunner.TestRunner{} runners := []*testrunner.TestRunner{}
for _, suite := range suites { for _, suite := range suites {
runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.Tags, additionalArgs)) runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, additionalArgs))
} }
numSuites := 0 numSuites := 0

View File

@ -11,6 +11,7 @@ type RunWatchAndBuildCommandFlags struct {
Recurse bool Recurse bool
Race bool Race bool
Cover bool Cover bool
CoverPkg string
SkipPackage string SkipPackage string
Tags string Tags string
@ -87,11 +88,11 @@ func (c *RunWatchAndBuildCommandFlags) computeNodes() {
func (c *RunWatchAndBuildCommandFlags) flags(mode int) { func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
onWindows := (runtime.GOOS == "windows") onWindows := (runtime.GOOS == "windows")
onOSX := (runtime.GOOS == "darwin")
c.FlagSet.BoolVar(&(c.Recurse), "r", false, "Find and run test suites under the current directory recursively") c.FlagSet.BoolVar(&(c.Recurse), "r", false, "Find and run test suites under the current directory recursively")
c.FlagSet.BoolVar(&(c.Race), "race", false, "Run tests with race detection enabled") c.FlagSet.BoolVar(&(c.Race), "race", false, "Run tests with race detection enabled")
c.FlagSet.BoolVar(&(c.Cover), "cover", false, "Run tests with coverage analysis, will generate coverage profiles with the package name in the current directory") c.FlagSet.BoolVar(&(c.Cover), "cover", false, "Run tests with coverage analysis, will generate coverage profiles with the package name in the current directory")
c.FlagSet.StringVar(&(c.CoverPkg), "coverpkg", "", "Run tests with coverage on the given external modules")
c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored.") c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored.")
c.FlagSet.StringVar(&(c.Tags), "tags", "", "A list of build tags to consider satisfied during the build") c.FlagSet.StringVar(&(c.Tags), "tags", "", "A list of build tags to consider satisfied during the build")
@ -101,7 +102,7 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
c.FlagSet.IntVar(&(c.NumCompilers), "compilers", 0, "The number of concurrent compilations to run (0 will autodetect)") c.FlagSet.IntVar(&(c.NumCompilers), "compilers", 0, "The number of concurrent compilations to run (0 will autodetect)")
c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes") c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes")
c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging") c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging")
if onOSX { if !onWindows {
c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes") c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes")
} }
} }

View File

@ -3,6 +3,7 @@ package main
import ( import (
"fmt" "fmt"
"runtime" "runtime"
"sync"
"github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/ginkgo/interrupthandler" "github.com/onsi/ginkgo/ginkgo/interrupthandler"
@ -10,28 +11,21 @@ import (
"github.com/onsi/ginkgo/ginkgo/testsuite" "github.com/onsi/ginkgo/ginkgo/testsuite"
) )
type compilationInput struct {
runner *testrunner.TestRunner
result chan compilationOutput
}
type compilationOutput struct {
runner *testrunner.TestRunner
err error
}
type SuiteRunner struct { type SuiteRunner struct {
notifier *Notifier notifier *Notifier
interruptHandler *interrupthandler.InterruptHandler interruptHandler *interrupthandler.InterruptHandler
} }
type compiler struct {
runner *testrunner.TestRunner
compilationError chan error
}
func (c *compiler) compile() {
retries := 0
err := c.runner.Compile()
for err != nil && retries < 5 { //We retry because Go sometimes steps on itself when multiple compiles happen in parallel. This is ugly, but should help resolve flakiness...
err = c.runner.Compile()
retries++
}
c.compilationError <- err
}
func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.InterruptHandler) *SuiteRunner { func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.InterruptHandler) *SuiteRunner {
return &SuiteRunner{ return &SuiteRunner{
notifier: notifier, notifier: notifier,
@ -39,63 +33,110 @@ func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.Inter
} }
} }
func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) { func (r *SuiteRunner) compileInParallel(runners []*testrunner.TestRunner, numCompilers int, willCompile func(suite testsuite.TestSuite)) chan compilationOutput {
runResult := testrunner.PassingRunResult() //we return this to the consumer, it will return each runner in order as it compiles
compilationOutputs := make(chan compilationOutput, len(runners))
compilers := make([]*compiler, len(runners)) //an array of channels - the nth runner's compilation output is sent to the nth channel in this array
for i, runner := range runners { //we read from these channels in order to ensure we run the suites in order
compilers[i] = &compiler{ orderedCompilationOutputs := []chan compilationOutput{}
runner: runner, for _ = range runners {
compilationError: make(chan error, 1), orderedCompilationOutputs = append(orderedCompilationOutputs, make(chan compilationOutput, 1))
}
} }
compilerChannel := make(chan *compiler) //we're going to spin up numCompilers compilers - they're going to run concurrently and will consume this channel
//we prefill the channel then close it, this ensures we compile things in the correct order
workPool := make(chan compilationInput, len(runners))
for i, runner := range runners {
workPool <- compilationInput{runner, orderedCompilationOutputs[i]}
}
close(workPool)
//pick a reasonable numCompilers
if numCompilers == 0 { if numCompilers == 0 {
numCompilers = runtime.NumCPU() numCompilers = runtime.NumCPU()
} }
//a WaitGroup to help us wait for all compilers to shut down
wg := &sync.WaitGroup{}
wg.Add(numCompilers)
//spin up the concurrent compilers
for i := 0; i < numCompilers; i++ { for i := 0; i < numCompilers; i++ {
go func() { go func() {
for compiler := range compilerChannel { defer wg.Done()
if willCompile != nil { for input := range workPool {
willCompile(compiler.runner.Suite) if r.interruptHandler.WasInterrupted() {
return
} }
compiler.compile()
if willCompile != nil {
willCompile(input.runner.Suite)
}
//We retry because Go sometimes steps on itself when multiple compiles happen in parallel. This is ugly, but should help resolve flakiness...
var err error
retries := 0
for retries <= 5 {
if r.interruptHandler.WasInterrupted() {
return
}
if err = input.runner.Compile(); err == nil {
break
}
retries++
}
input.result <- compilationOutput{input.runner, err}
} }
}() }()
} }
//read from the compilation output channels *in order* and send them to the caller
//close the compilationOutputs channel to tell the caller we're done
go func() { go func() {
for _, compiler := range compilers { defer close(compilationOutputs)
compilerChannel <- compiler for _, orderedCompilationOutput := range orderedCompilationOutputs {
select {
case compilationOutput := <-orderedCompilationOutput:
compilationOutputs <- compilationOutput
case <-r.interruptHandler.C:
//interrupt detected, wait for the compilers to shut down then bail
//this ensures we clean up after ourselves as we don't leave any compilation processes running
wg.Wait()
return
}
} }
close(compilerChannel)
}() }()
return compilationOutputs
}
func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) {
runResult := testrunner.PassingRunResult()
compilationOutputs := r.compileInParallel(runners, numCompilers, willCompile)
numSuitesThatRan := 0 numSuitesThatRan := 0
suitesThatFailed := []testsuite.TestSuite{} suitesThatFailed := []testsuite.TestSuite{}
for i, runner := range runners { for compilationOutput := range compilationOutputs {
if r.interruptHandler.WasInterrupted() { if compilationOutput.err != nil {
break fmt.Print(compilationOutput.err.Error())
}
compilationError := <-compilers[i].compilationError
if compilationError != nil {
fmt.Print(compilationError.Error())
} }
numSuitesThatRan++ numSuitesThatRan++
suiteRunResult := testrunner.FailingRunResult() suiteRunResult := testrunner.FailingRunResult()
if compilationError == nil { if compilationOutput.err == nil {
suiteRunResult = compilers[i].runner.Run() suiteRunResult = compilationOutput.runner.Run()
} }
r.notifier.SendSuiteCompletionNotification(runner.Suite, suiteRunResult.Passed) r.notifier.SendSuiteCompletionNotification(compilationOutput.runner.Suite, suiteRunResult.Passed)
runResult = runResult.Merge(suiteRunResult) runResult = runResult.Merge(suiteRunResult)
if !suiteRunResult.Passed { if !suiteRunResult.Passed {
suitesThatFailed = append(suitesThatFailed, runner.Suite) suitesThatFailed = append(suitesThatFailed, compilationOutput.runner.Suite)
if !keepGoing { if !keepGoing {
break break
} }
} }
if i < len(runners)-1 && !config.DefaultReporterConfig.Succinct { if numSuitesThatRan < len(runners) && !config.DefaultReporterConfig.Succinct {
fmt.Println("") fmt.Println("")
} }
} }

View File

@ -22,30 +22,48 @@ import (
) )
type TestRunner struct { type TestRunner struct {
Suite testsuite.TestSuite Suite testsuite.TestSuite
compiled bool
compiled bool
compilationTargetPath string
numCPU int numCPU int
parallelStream bool parallelStream bool
race bool race bool
cover bool cover bool
coverPkg string
tags string tags string
additionalArgs []string additionalArgs []string
} }
func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool, cover bool, tags string, additionalArgs []string) *TestRunner { func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool, cover bool, coverPkg string, tags string, additionalArgs []string) *TestRunner {
return &TestRunner{ runner := &TestRunner{
Suite: suite, Suite: suite,
numCPU: numCPU, numCPU: numCPU,
parallelStream: parallelStream, parallelStream: parallelStream,
race: race, race: race,
cover: cover, cover: cover,
coverPkg: coverPkg,
tags: tags, tags: tags,
additionalArgs: additionalArgs, additionalArgs: additionalArgs,
} }
if !suite.Precompiled {
dir, err := ioutil.TempDir("", "ginkgo")
if err != nil {
panic(fmt.Sprintf("coulnd't create temporary directory... might be time to rm -rf:\n%s", err.Error()))
}
runner.compilationTargetPath = filepath.Join(dir, suite.PackageName+".test")
}
return runner
} }
func (t *TestRunner) Compile() error { func (t *TestRunner) Compile() error {
return t.CompileTo(t.compilationTargetPath)
}
func (t *TestRunner) CompileTo(path string) error {
if t.compiled { if t.compiled {
return nil return nil
} }
@ -54,15 +72,16 @@ func (t *TestRunner) Compile() error {
return nil return nil
} }
os.Remove(t.compiledArtifact()) args := []string{"test", "-c", "-i", "-o", path}
args := []string{"test", "-c", "-i"}
if t.race { if t.race {
args = append(args, "-race") args = append(args, "-race")
} }
if t.cover { if t.cover || t.coverPkg != "" {
args = append(args, "-cover", "-covermode=atomic") args = append(args, "-cover", "-covermode=atomic")
} }
if t.coverPkg != "" {
args = append(args, fmt.Sprintf("-coverpkg=%s", t.coverPkg))
}
if t.tags != "" { if t.tags != "" {
args = append(args, fmt.Sprintf("-tags=%s", t.tags)) args = append(args, fmt.Sprintf("-tags=%s", t.tags))
} }
@ -78,10 +97,11 @@ func (t *TestRunner) Compile() error {
if len(output) > 0 { if len(output) > 0 {
return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, fixedOutput) return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, fixedOutput)
} }
return fmt.Errorf("") return fmt.Errorf("Failed to compile %s", t.Suite.PackageName)
} }
t.compiled = true t.compiled = true
return nil return nil
} }
@ -134,12 +154,7 @@ func (t *TestRunner) CleanUp() {
if t.Suite.Precompiled { if t.Suite.Precompiled {
return return
} }
os.Remove(t.compiledArtifact()) os.RemoveAll(filepath.Dir(t.compilationTargetPath))
}
func (t *TestRunner) compiledArtifact() string {
compiledArtifact, _ := filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName)))
return compiledArtifact
} }
func (t *TestRunner) runSerialGinkgoSuite() RunResult { func (t *TestRunner) runSerialGinkgoSuite() RunResult {
@ -196,7 +211,7 @@ func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult {
os.Stdout.Sync() os.Stdout.Sync()
if t.cover { if t.cover || t.coverPkg != "" {
t.combineCoverprofiles() t.combineCoverprofiles()
} }
@ -257,21 +272,16 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult {
fmt.Println("") fmt.Println("")
case <-time.After(time.Second): case <-time.After(time.Second):
//the aggregator never got back to us! something must have gone wrong //the aggregator never got back to us! something must have gone wrong
fmt.Println("") fmt.Println(`
fmt.Println("") -------------------------------------------------------------------
fmt.Println(" ----------------------------------------------------------- ") | |
fmt.Println(" | |") | Ginkgo timed out waiting for all parallel nodes to report back! |
fmt.Println(" | Ginkgo timed out waiting for all parallel nodes to end! |") | |
fmt.Println(" | Here is some salvaged output: |") -------------------------------------------------------------------
fmt.Println(" | |") `)
fmt.Println(" ----------------------------------------------------------- ")
fmt.Println("")
fmt.Println("")
os.Stdout.Sync() os.Stdout.Sync()
time.Sleep(time.Second)
for _, writer := range writers { for _, writer := range writers {
writer.Close() writer.Close()
} }
@ -283,7 +293,7 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult {
os.Stdout.Sync() os.Stdout.Sync()
} }
if t.cover { if t.cover || t.coverPkg != "" {
t.combineCoverprofiles() t.combineCoverprofiles()
} }
@ -292,7 +302,7 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult {
func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd { func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd {
args := []string{"--test.timeout=24h"} args := []string{"--test.timeout=24h"}
if t.cover { if t.cover || t.coverPkg != "" {
coverprofile := "--test.coverprofile=" + t.Suite.PackageName + ".coverprofile" coverprofile := "--test.coverprofile=" + t.Suite.PackageName + ".coverprofile"
if t.numCPU > 1 { if t.numCPU > 1 {
coverprofile = fmt.Sprintf("%s.%d", coverprofile, node) coverprofile = fmt.Sprintf("%s.%d", coverprofile, node)
@ -303,7 +313,12 @@ func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.
args = append(args, ginkgoArgs...) args = append(args, ginkgoArgs...)
args = append(args, t.additionalArgs...) args = append(args, t.additionalArgs...)
cmd := exec.Command(t.compiledArtifact(), args...) path := t.compilationTargetPath
if t.Suite.Precompiled {
path, _ = filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName)))
}
cmd := exec.Command(path, args...)
cmd.Dir = t.Suite.Path cmd.Dir = t.Suite.Path
cmd.Stderr = stream cmd.Stderr = stream

View File

@ -57,7 +57,7 @@ func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalA
runners := []*testrunner.TestRunner{} runners := []*testrunner.TestRunner{}
for _, suite := range suites { for _, suite := range suites {
runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Race, w.commandFlags.Cover, w.commandFlags.Tags, additionalArgs)) runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Race, w.commandFlags.Cover, w.commandFlags.CoverPkg, w.commandFlags.Tags, additionalArgs))
} }
return runners return runners

View File

@ -130,6 +130,7 @@ type Done chan<- interface{}
// IsMeasurement: true if the current test is a measurement // IsMeasurement: true if the current test is a measurement
// FileName: the name of the file containing the current test // FileName: the name of the file containing the current test
// LineNumber: the line number for the current test // LineNumber: the line number for the current test
// Failed: if the current test has failed, this will be true (useful in an AfterEach)
type GinkgoTestDescription struct { type GinkgoTestDescription struct {
FullTestText string FullTestText string
ComponentTexts []string ComponentTexts []string
@ -139,6 +140,8 @@ type GinkgoTestDescription struct {
FileName string FileName string
LineNumber int LineNumber int
Failed bool
} }
//CurrentGinkgoTestDescription returns information about the current running test. //CurrentGinkgoTestDescription returns information about the current running test.
@ -157,6 +160,7 @@ func CurrentGinkgoTestDescription() GinkgoTestDescription {
IsMeasurement: summary.IsMeasurement, IsMeasurement: summary.IsMeasurement,
FileName: subjectCodeLocation.FileName, FileName: subjectCodeLocation.FileName,
LineNumber: subjectCodeLocation.LineNumber, LineNumber: subjectCodeLocation.LineNumber,
Failed: summary.HasFailureState(),
} }
} }

View File

@ -1,11 +1,12 @@
package integration_test package integration_test
import ( import (
"os"
"os/exec"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec" "github.com/onsi/gomega/gexec"
"os"
"os/exec"
) )
var _ = Describe("Coverage Specs", func() { var _ = Describe("Coverage Specs", func() {
@ -31,4 +32,23 @@ var _ = Describe("Coverage Specs", func() {
Ω(parallelCoverProfileOutput).Should(Equal(serialCoverProfileOutput)) Ω(parallelCoverProfileOutput).Should(Equal(serialCoverProfileOutput))
}) })
It("runs coverage analysis on external packages in series and in parallel", func() {
session := startGinkgo("./_fixtures/coverage_fixture", "-coverpkg=github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture,github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture")
Eventually(session).Should(gexec.Exit(0))
output := session.Out.Contents()
Ω(output).Should(ContainSubstring("coverage: 71.4% of statements in github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture, github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture"))
serialCoverProfileOutput, err := exec.Command("go", "tool", "cover", "-func=./_fixtures/coverage_fixture/coverage_fixture.coverprofile").CombinedOutput()
Ω(err).ShouldNot(HaveOccurred())
os.RemoveAll("./_fixtures/coverage_fixture/coverage_fixture.coverprofile")
Eventually(startGinkgo("./_fixtures/coverage_fixture", "-coverpkg=github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture,github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture", "-nodes=4")).Should(gexec.Exit(0))
parallelCoverProfileOutput, err := exec.Command("go", "tool", "cover", "-func=./_fixtures/coverage_fixture/coverage_fixture.coverprofile").CombinedOutput()
Ω(err).ShouldNot(HaveOccurred())
Ω(parallelCoverProfileOutput).Should(Equal(serialCoverProfileOutput))
})
}) })

View File

@ -1,10 +1,11 @@
package integration_test package integration_test
import ( import (
"strings"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec" "github.com/onsi/gomega/gexec"
"strings"
) )
var _ = Describe("SuiteSetup", func() { var _ = Describe("SuiteSetup", func() {
@ -171,7 +172,7 @@ var _ = Describe("SuiteSetup", func() {
output := string(session.Out.Contents()) output := string(session.Out.Contents())
Ω(output).Should(ContainSubstring("Node 1 disappeared before completing BeforeSuite")) Ω(output).Should(ContainSubstring("Node 1 disappeared before completing BeforeSuite"))
Ω(output).Should(ContainSubstring("Ginkgo timed out waiting for all parallel nodes to end")) Ω(output).Should(ContainSubstring("Ginkgo timed out waiting for all parallel nodes to report back!"))
}) })
}) })
}) })

View File

@ -0,0 +1,25 @@
package integration_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gbytes"
"github.com/onsi/gomega/gexec"
)
var _ = Describe("TestDescription", func() {
var pathToTest string
BeforeEach(func() {
pathToTest = tmpPath("test_description")
copyIn("test_description", pathToTest)
})
It("should capture and emit information about the current test", func() {
session := startGinkgo(pathToTest, "--noColor")
Eventually(session).Should(gexec.Exit(1))
Ω(session).Should(gbytes.Say("TestDescription should pass:false"))
Ω(session).Should(gbytes.Say("TestDescription should fail:true"))
})
})

View File

@ -1,64 +0,0 @@
package remote_test
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/internal/remote"
. "github.com/onsi/gomega"
"os"
)
var _ = Describe("OutputInterceptor", func() {
var interceptor OutputInterceptor
BeforeEach(func() {
interceptor = NewOutputInterceptor()
})
It("should capture all stdout/stderr output", func() {
err := interceptor.StartInterceptingOutput()
Ω(err).ShouldNot(HaveOccurred())
fmt.Fprint(os.Stdout, "STDOUT")
fmt.Fprint(os.Stderr, "STDERR")
print("PRINT")
output, err := interceptor.StopInterceptingAndReturnOutput()
Ω(output).Should(Equal("STDOUTSTDERRPRINT"))
Ω(err).ShouldNot(HaveOccurred())
})
It("should error if told to intercept output twice", func() {
err := interceptor.StartInterceptingOutput()
Ω(err).ShouldNot(HaveOccurred())
print("A")
err = interceptor.StartInterceptingOutput()
Ω(err).Should(HaveOccurred())
print("B")
output, err := interceptor.StopInterceptingAndReturnOutput()
Ω(output).Should(Equal("AB"))
Ω(err).ShouldNot(HaveOccurred())
})
It("should allow multiple interception sessions", func() {
err := interceptor.StartInterceptingOutput()
Ω(err).ShouldNot(HaveOccurred())
print("A")
output, err := interceptor.StopInterceptingAndReturnOutput()
Ω(output).Should(Equal("A"))
Ω(err).ShouldNot(HaveOccurred())
err = interceptor.StartInterceptingOutput()
Ω(err).ShouldNot(HaveOccurred())
print("B")
output, err = interceptor.StopInterceptingAndReturnOutput()
Ω(output).Should(Equal("B"))
Ω(err).ShouldNot(HaveOccurred())
})
})

View File

@ -14,10 +14,8 @@ func NewOutputInterceptor() OutputInterceptor {
} }
type outputInterceptor struct { type outputInterceptor struct {
stdoutPlaceholder *os.File redirectFile *os.File
stderrPlaceholder *os.File intercepting bool
redirectFile *os.File
intercepting bool
} }
func (interceptor *outputInterceptor) StartInterceptingOutput() error { func (interceptor *outputInterceptor) StartInterceptingOutput() error {
@ -33,19 +31,6 @@ func (interceptor *outputInterceptor) StartInterceptingOutput() error {
return err return err
} }
interceptor.stdoutPlaceholder, err = ioutil.TempFile("", "ginkgo-output")
if err != nil {
return err
}
interceptor.stderrPlaceholder, err = ioutil.TempFile("", "ginkgo-output")
if err != nil {
return err
}
syscall.Dup2(1, int(interceptor.stdoutPlaceholder.Fd()))
syscall.Dup2(2, int(interceptor.stderrPlaceholder.Fd()))
syscall.Dup2(int(interceptor.redirectFile.Fd()), 1) syscall.Dup2(int(interceptor.redirectFile.Fd()), 1)
syscall.Dup2(int(interceptor.redirectFile.Fd()), 2) syscall.Dup2(int(interceptor.redirectFile.Fd()), 2)
@ -57,18 +42,9 @@ func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string,
return "", errors.New("Not intercepting output!") return "", errors.New("Not intercepting output!")
} }
syscall.Dup2(int(interceptor.stdoutPlaceholder.Fd()), 1) interceptor.redirectFile.Close()
syscall.Dup2(int(interceptor.stderrPlaceholder.Fd()), 2)
for _, f := range []*os.File{interceptor.redirectFile, interceptor.stdoutPlaceholder, interceptor.stderrPlaceholder} {
f.Close()
}
output, err := ioutil.ReadFile(interceptor.redirectFile.Name()) output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
os.Remove(interceptor.redirectFile.Name())
for _, f := range []*os.File{interceptor.redirectFile, interceptor.stdoutPlaceholder, interceptor.stderrPlaceholder} {
os.Remove(f.Name())
}
interceptor.intercepting = false interceptor.intercepting = false

View File

@ -115,7 +115,7 @@ func (spec *Spec) Run(writer io.Writer) {
}() }()
for sample := 0; sample < spec.subject.Samples(); sample++ { for sample := 0; sample < spec.subject.Samples(); sample++ {
spec.state, spec.failure = spec.runSample(sample, writer) spec.runSample(sample, writer)
if spec.state != types.SpecStatePassed { if spec.state != types.SpecStatePassed {
return return
@ -123,9 +123,9 @@ func (spec *Spec) Run(writer io.Writer) {
} }
} }
func (spec *Spec) runSample(sample int, writer io.Writer) (specState types.SpecState, specFailure types.SpecFailure) { func (spec *Spec) runSample(sample int, writer io.Writer) {
specState = types.SpecStatePassed spec.state = types.SpecStatePassed
specFailure = types.SpecFailure{} spec.failure = types.SpecFailure{}
innerMostContainerIndexToUnwind := -1 innerMostContainerIndexToUnwind := -1
defer func() { defer func() {
@ -134,9 +134,9 @@ func (spec *Spec) runSample(sample int, writer io.Writer) (specState types.SpecS
for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) { for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
spec.announceSetupNode(writer, "AfterEach", container, afterEach) spec.announceSetupNode(writer, "AfterEach", container, afterEach)
afterEachState, afterEachFailure := afterEach.Run() afterEachState, afterEachFailure := afterEach.Run()
if afterEachState != types.SpecStatePassed && specState == types.SpecStatePassed { if afterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
specState = afterEachState spec.state = afterEachState
specFailure = afterEachFailure spec.failure = afterEachFailure
} }
} }
} }
@ -146,8 +146,8 @@ func (spec *Spec) runSample(sample int, writer io.Writer) (specState types.SpecS
innerMostContainerIndexToUnwind = i innerMostContainerIndexToUnwind = i
for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) { for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
spec.announceSetupNode(writer, "BeforeEach", container, beforeEach) spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
specState, specFailure = beforeEach.Run() spec.state, spec.failure = beforeEach.Run()
if specState != types.SpecStatePassed { if spec.state != types.SpecStatePassed {
return return
} }
} }
@ -156,17 +156,15 @@ func (spec *Spec) runSample(sample int, writer io.Writer) (specState types.SpecS
for _, container := range spec.containers { for _, container := range spec.containers {
for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) { for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach) spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
specState, specFailure = justBeforeEach.Run() spec.state, spec.failure = justBeforeEach.Run()
if specState != types.SpecStatePassed { if spec.state != types.SpecStatePassed {
return return
} }
} }
} }
spec.announceSubject(writer, spec.subject) spec.announceSubject(writer, spec.subject)
specState, specFailure = spec.subject.Run() spec.state, spec.failure = spec.subject.Run()
return
} }
func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) { func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {

View File

@ -120,6 +120,7 @@ var _ = Describe("Suite", func() {
Ω(description.FileName).Should(ContainSubstring("suite_test.go")) Ω(description.FileName).Should(ContainSubstring("suite_test.go"))
Ω(description.LineNumber).Should(BeNumerically(">", 50)) Ω(description.LineNumber).Should(BeNumerically(">", 50))
Ω(description.LineNumber).Should(BeNumerically("<", 150)) Ω(description.LineNumber).Should(BeNumerically("<", 150))
Ω(description.Failed).Should(BeFalse())
}) })
Measure("should run measurements", func(b Benchmarker) { Measure("should run measurements", func(b Benchmarker) {