Update vendored ginkgo.

This includes onsi/ginkgo#262, which improves the JUnit stacktraces.
This commit is contained in:
Ryan Hitchman 2016-07-25 13:40:15 -07:00
parent 2f545e1f45
commit ea43036dc0
10 changed files with 20 additions and 10 deletions

View File

@@ -1,6 +1,5 @@
language: go language: go
go: go:
- 1.4
- 1.5 - 1.5
- 1.6 - 1.6
- tip - tip

View File

@@ -46,7 +46,7 @@ func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
passed := true passed := true
for _, suite := range suites { for _, suite := range suites {
runner := testrunner.New(suite, 1, false, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, nil) runner := testrunner.New(suite, 1, false, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, r.commandFlags.GCFlags, nil)
fmt.Printf("Compiling %s...\n", suite.PackageName) fmt.Printf("Compiling %s...\n", suite.PackageName)
path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName))) path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName)))

View File

@@ -71,7 +71,7 @@ func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
runners := []*testrunner.TestRunner{} runners := []*testrunner.TestRunner{}
for _, suite := range suites { for _, suite := range suites {
runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, additionalArgs)) runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, r.commandFlags.GCFlags, additionalArgs))
} }
numSuites := 0 numSuites := 0

View File

@@ -14,6 +14,7 @@ type RunWatchAndBuildCommandFlags struct {
CoverPkg string CoverPkg string
SkipPackage string SkipPackage string
Tags string Tags string
GCFlags string
//for run and watch commands //for run and watch commands
NumCPU int NumCPU int
@@ -96,6 +97,7 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
c.FlagSet.StringVar(&(c.CoverPkg), "coverpkg", "", "Run tests with coverage on the given external modules") c.FlagSet.StringVar(&(c.CoverPkg), "coverpkg", "", "Run tests with coverage on the given external modules")
c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored.") c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored.")
c.FlagSet.StringVar(&(c.Tags), "tags", "", "A list of build tags to consider satisfied during the build") c.FlagSet.StringVar(&(c.Tags), "tags", "", "A list of build tags to consider satisfied during the build")
c.FlagSet.StringVar(&(c.GCFlags), "gcflags", "", "Arguments to pass on each go tool compile invocation.")
if mode == runMode || mode == watchMode { if mode == runMode || mode == watchMode {
config.Flags(c.FlagSet, "", false) config.Flags(c.FlagSet, "", false)

View File

@@ -33,10 +33,11 @@ type TestRunner struct {
cover bool cover bool
coverPkg string coverPkg string
tags string tags string
gcFlags string
additionalArgs []string additionalArgs []string
} }
func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool, cover bool, coverPkg string, tags string, additionalArgs []string) *TestRunner { func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool, cover bool, coverPkg string, tags string, gcFlags string, additionalArgs []string) *TestRunner {
runner := &TestRunner{ runner := &TestRunner{
Suite: suite, Suite: suite,
numCPU: numCPU, numCPU: numCPU,
@@ -46,6 +47,7 @@ func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool,
coverPkg: coverPkg, coverPkg: coverPkg,
tags: tags, tags: tags,
additionalArgs: additionalArgs, additionalArgs: additionalArgs,
gcFlags: gcFlags,
} }
if !suite.Precompiled { if !suite.Precompiled {
@@ -85,6 +87,9 @@ func (t *TestRunner) CompileTo(path string) error {
if t.tags != "" { if t.tags != "" {
args = append(args, fmt.Sprintf("-tags=%s", t.tags)) args = append(args, fmt.Sprintf("-tags=%s", t.tags))
} }
if t.gcFlags != "" {
args = append(args, fmt.Sprintf("-gcflags=%s", t.gcFlags))
}
cmd := exec.Command("go", args...) cmd := exec.Command("go", args...)

View File

@@ -57,7 +57,7 @@ func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalA
runners := []*testrunner.TestRunner{} runners := []*testrunner.TestRunner{}
for _, suite := range suites { for _, suite := range suites {
runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Race, w.commandFlags.Cover, w.commandFlags.CoverPkg, w.commandFlags.Tags, additionalArgs)) runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Race, w.commandFlags.Cover, w.commandFlags.CoverPkg, w.commandFlags.Tags, w.commandFlags.GCFlags, additionalArgs))
} }
return runners return runners

View File

@@ -8,4 +8,4 @@ import "syscall"
// use the nearly identical syscall.Dup3 instead // use the nearly identical syscall.Dup3 instead
func syscallDup(oldfd int, newfd int) (err error) { func syscallDup(oldfd int, newfd int) (err error) {
return syscall.Dup3(oldfd, newfd, 0) return syscall.Dup3(oldfd, newfd, 0)
} }

View File

@@ -6,4 +6,4 @@ import "golang.org/x/sys/unix"
func syscallDup(oldfd int, newfd int) (err error) { func syscallDup(oldfd int, newfd int) (err error) {
return unix.Dup2(oldfd, newfd) return unix.Dup2(oldfd, newfd)
} }

View File

@@ -8,4 +8,4 @@ import "syscall"
func syscallDup(oldfd int, newfd int) (err error) { func syscallDup(oldfd int, newfd int) (err error) {
return syscall.Dup2(oldfd, newfd) return syscall.Dup2(oldfd, newfd)
} }

View File

@@ -74,6 +74,10 @@ func (reporter *JUnitReporter) AfterSuiteDidRun(setupSummary *types.SetupSummary
reporter.handleSetupSummary("AfterSuite", setupSummary) reporter.handleSetupSummary("AfterSuite", setupSummary)
} }
func failureMessage(failure types.SpecFailure) string {
return fmt.Sprintf("%s\n%s\n%s", failure.ComponentCodeLocation.String(), failure.Message, failure.Location.String())
}
func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) { func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *types.SetupSummary) {
if setupSummary.State != types.SpecStatePassed { if setupSummary.State != types.SpecStatePassed {
testCase := JUnitTestCase{ testCase := JUnitTestCase{
@@ -83,7 +87,7 @@ func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *typ
testCase.FailureMessage = &JUnitFailureMessage{ testCase.FailureMessage = &JUnitFailureMessage{
Type: reporter.failureTypeForState(setupSummary.State), Type: reporter.failureTypeForState(setupSummary.State),
Message: fmt.Sprintf("%s\n%s", setupSummary.Failure.ComponentCodeLocation.String(), setupSummary.Failure.Message), Message: failureMessage(setupSummary.Failure),
} }
testCase.Time = setupSummary.RunTime.Seconds() testCase.Time = setupSummary.RunTime.Seconds()
reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) reporter.suite.TestCases = append(reporter.suite.TestCases, testCase)
@@ -98,7 +102,7 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked { if specSummary.State == types.SpecStateFailed || specSummary.State == types.SpecStateTimedOut || specSummary.State == types.SpecStatePanicked {
testCase.FailureMessage = &JUnitFailureMessage{ testCase.FailureMessage = &JUnitFailureMessage{
Type: reporter.failureTypeForState(specSummary.State), Type: reporter.failureTypeForState(specSummary.State),
Message: fmt.Sprintf("%s\n%s", specSummary.Failure.ComponentCodeLocation.String(), specSummary.Failure.Message), Message: failureMessage(specSummary.Failure),
} }
} }
if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending {