Merge pull request #43273 from smarterclayton/parallel_ginkgo

Automatic merge from submit-queue (batch tested with PRs 43273, 44287, 44281)

bump(github.com/onsi/ginkgo):v1.2.0-94-g5ca1211

Picks up parallel execution improvements in Ginkgo that distribute jobs more evenly across parallel runs. Opened after observing long-tail runs in OpenShift — testing there showed a much more compact run in terms of CPU: https://github.com/onsi/ginkgo/issues/333

We'll need some soak time to be sure this has no issues.
This commit is contained in:
Kubernetes Submit Queue 2017-04-10 16:19:11 -07:00 committed by GitHub
commit 9e929c66ff
19 changed files with 438 additions and 114 deletions

101
Godeps/Godeps.json generated
View File

@ -1800,123 +1800,128 @@
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo", "ImportPath": "github.com/onsi/ginkgo",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/config", "ImportPath": "github.com/onsi/ginkgo/config",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/ginkgo", "ImportPath": "github.com/onsi/ginkgo/ginkgo",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/ginkgo/convert", "ImportPath": "github.com/onsi/ginkgo/ginkgo/convert",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/ginkgo/interrupthandler", "ImportPath": "github.com/onsi/ginkgo/ginkgo/interrupthandler",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/ginkgo/nodot", "ImportPath": "github.com/onsi/ginkgo/ginkgo/nodot",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/ginkgo/testrunner", "ImportPath": "github.com/onsi/ginkgo/ginkgo/testrunner",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/ginkgo/testsuite", "ImportPath": "github.com/onsi/ginkgo/ginkgo/testsuite",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/ginkgo/watch", "ImportPath": "github.com/onsi/ginkgo/ginkgo/watch",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/internal/codelocation", "ImportPath": "github.com/onsi/ginkgo/internal/codelocation",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/internal/containernode", "ImportPath": "github.com/onsi/ginkgo/internal/containernode",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/internal/failer", "ImportPath": "github.com/onsi/ginkgo/internal/failer",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/internal/leafnodes", "ImportPath": "github.com/onsi/ginkgo/internal/leafnodes",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/internal/remote", "ImportPath": "github.com/onsi/ginkgo/internal/remote",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/internal/spec", "ImportPath": "github.com/onsi/ginkgo/internal/spec",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
},
{
"ImportPath": "github.com/onsi/ginkgo/internal/spec_iterator",
"Comment": "v1.2.0-95-g67b9df7",
"Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/internal/specrunner", "ImportPath": "github.com/onsi/ginkgo/internal/specrunner",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/internal/suite", "ImportPath": "github.com/onsi/ginkgo/internal/suite",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/internal/testingtproxy", "ImportPath": "github.com/onsi/ginkgo/internal/testingtproxy",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/internal/writer", "ImportPath": "github.com/onsi/ginkgo/internal/writer",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/reporters", "ImportPath": "github.com/onsi/ginkgo/reporters",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/reporters/stenographer", "ImportPath": "github.com/onsi/ginkgo/reporters/stenographer",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable", "ImportPath": "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty", "ImportPath": "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/ginkgo/types", "ImportPath": "github.com/onsi/ginkgo/types",
"Comment": "v1.2.0-90-gbb93381", "Comment": "v1.2.0-95-g67b9df7",
"Rev": "bb93381d543b0e5725244abe752214a110791d01" "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
}, },
{ {
"ImportPath": "github.com/onsi/gomega", "ImportPath": "github.com/onsi/gomega",

28
Godeps/LICENSES generated
View File

@ -62894,6 +62894,34 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
================================================================================ ================================================================================
================================================================================
= vendor/github.com/onsi/ginkgo/internal/spec_iterator licensed under: =
Copyright (c) 2013-2014 Onsi Fakhouri
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
= vendor/github.com/onsi/ginkgo/LICENSE 570603114d52313cb86c0206401c9af7 -
================================================================================
================================================================================ ================================================================================
= vendor/github.com/onsi/ginkgo/internal/suite licensed under: = = vendor/github.com/onsi/ginkgo/internal/suite licensed under: =

17
vendor/BUILD vendored
View File

@ -5412,6 +5412,7 @@ go_library(
tags = ["automanaged"], tags = ["automanaged"],
deps = [ deps = [
"//vendor:github.com/onsi/ginkgo/config", "//vendor:github.com/onsi/ginkgo/config",
"//vendor:github.com/onsi/ginkgo/internal/spec_iterator",
"//vendor:github.com/onsi/ginkgo/reporters", "//vendor:github.com/onsi/ginkgo/reporters",
"//vendor:github.com/onsi/ginkgo/reporters/stenographer", "//vendor:github.com/onsi/ginkgo/reporters/stenographer",
"//vendor:github.com/onsi/ginkgo/types", "//vendor:github.com/onsi/ginkgo/types",
@ -5421,7 +5422,6 @@ go_library(
go_library( go_library(
name = "github.com/onsi/ginkgo/internal/spec", name = "github.com/onsi/ginkgo/internal/spec",
srcs = [ srcs = [
"github.com/onsi/ginkgo/internal/spec/index_computer.go",
"github.com/onsi/ginkgo/internal/spec/spec.go", "github.com/onsi/ginkgo/internal/spec/spec.go",
"github.com/onsi/ginkgo/internal/spec/specs.go", "github.com/onsi/ginkgo/internal/spec/specs.go",
], ],
@ -5444,6 +5444,7 @@ go_library(
"//vendor:github.com/onsi/ginkgo/config", "//vendor:github.com/onsi/ginkgo/config",
"//vendor:github.com/onsi/ginkgo/internal/leafnodes", "//vendor:github.com/onsi/ginkgo/internal/leafnodes",
"//vendor:github.com/onsi/ginkgo/internal/spec", "//vendor:github.com/onsi/ginkgo/internal/spec",
"//vendor:github.com/onsi/ginkgo/internal/spec_iterator",
"//vendor:github.com/onsi/ginkgo/internal/writer", "//vendor:github.com/onsi/ginkgo/internal/writer",
"//vendor:github.com/onsi/ginkgo/reporters", "//vendor:github.com/onsi/ginkgo/reporters",
"//vendor:github.com/onsi/ginkgo/types", "//vendor:github.com/onsi/ginkgo/types",
@ -5460,6 +5461,7 @@ go_library(
"//vendor:github.com/onsi/ginkgo/internal/failer", "//vendor:github.com/onsi/ginkgo/internal/failer",
"//vendor:github.com/onsi/ginkgo/internal/leafnodes", "//vendor:github.com/onsi/ginkgo/internal/leafnodes",
"//vendor:github.com/onsi/ginkgo/internal/spec", "//vendor:github.com/onsi/ginkgo/internal/spec",
"//vendor:github.com/onsi/ginkgo/internal/spec_iterator",
"//vendor:github.com/onsi/ginkgo/internal/specrunner", "//vendor:github.com/onsi/ginkgo/internal/specrunner",
"//vendor:github.com/onsi/ginkgo/internal/writer", "//vendor:github.com/onsi/ginkgo/internal/writer",
"//vendor:github.com/onsi/ginkgo/reporters", "//vendor:github.com/onsi/ginkgo/reporters",
@ -16600,3 +16602,16 @@ go_test(
"//vendor:k8s.io/client-go/kubernetes/fake", "//vendor:k8s.io/client-go/kubernetes/fake",
], ],
) )
go_library(
name = "github.com/onsi/ginkgo/internal/spec_iterator",
srcs = [
"github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go",
"github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go",
"github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go",
"github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go",
"github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go",
],
tags = ["automanaged"],
deps = ["//vendor:github.com/onsi/ginkgo/internal/spec"],
)

View File

@ -1,6 +1,6 @@
![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png) ![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)
[![Build Status](https://travis-ci.org/onsi/ginkgo.png)](https://travis-ci.org/onsi/ginkgo) [![Build Status](https://travis-ci.org/onsi/ginkgo.svg)](https://travis-ci.org/onsi/ginkgo)
Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!

View File

@ -125,14 +125,12 @@ func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSui
aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct) aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)
numberOfSpecsToRun := 0
totalNumberOfSpecs := 0 totalNumberOfSpecs := 0
for _, configAndSuite := range aggregator.aggregatedSuiteBeginnings { if len(aggregator.aggregatedSuiteBeginnings) > 0 {
numberOfSpecsToRun += configAndSuite.summary.NumberOfSpecsThatWillBeRun totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization
totalNumberOfSpecs += configAndSuite.summary.NumberOfTotalSpecs
} }
aggregator.stenographer.AnnounceNumberOfSpecs(numberOfSpecsToRun, totalNumberOfSpecs, aggregator.config.Succinct) aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct)
aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct) aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
aggregator.flushCompletedSpecs() aggregator.flushCompletedSpecs()
} }

View File

@ -9,13 +9,16 @@ package remote
import ( import (
"encoding/json" "encoding/json"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/types"
"io/ioutil" "io/ioutil"
"net" "net"
"net/http" "net/http"
"sync" "sync"
"github.com/onsi/ginkgo/internal/spec_iterator"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/ginkgo/types"
) )
/* /*
@ -29,6 +32,7 @@ type Server struct {
lock *sync.Mutex lock *sync.Mutex
beforeSuiteData types.RemoteBeforeSuiteData beforeSuiteData types.RemoteBeforeSuiteData
parallelTotal int parallelTotal int
counter int
} }
//Create a new server, automatically selecting a port //Create a new server, automatically selecting a port
@ -63,6 +67,8 @@ func (server *Server) Start() {
//synchronization endpoints //synchronization endpoints
mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState) mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData) mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
mux.HandleFunc("/counter", server.handleCounter)
mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility
go httpServer.Serve(server.listener) go httpServer.Serve(server.listener)
} }
@ -202,3 +208,17 @@ func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, req
enc := json.NewEncoder(writer) enc := json.NewEncoder(writer)
enc.Encode(afterSuiteData) enc.Encode(afterSuiteData)
} }
// handleCounter serves the shared spec counter: each call hands out the next
// global spec index so parallel nodes can pull specs one at a time.
func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) {
	var counter spec_iterator.Counter
	server.lock.Lock()
	counter.Index = server.counter
	server.counter++
	server.lock.Unlock()
	json.NewEncoder(writer).Encode(counter)
}
// handleHasCounter is a capability probe kept for backward compatibility:
// a 200 response with an empty body tells older clients that this server
// supports the /counter endpoint.
func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) {
	writer.Write([]byte(""))
}

View File

@ -8,7 +8,6 @@ import (
type Specs struct { type Specs struct {
specs []*Spec specs []*Spec
numberOfOriginalSpecs int
hasProgrammaticFocus bool hasProgrammaticFocus bool
RegexScansFilePath bool RegexScansFilePath bool
} }
@ -16,7 +15,6 @@ type Specs struct {
func NewSpecs(specs []*Spec) *Specs { func NewSpecs(specs []*Spec) *Specs {
return &Specs{ return &Specs{
specs: specs, specs: specs,
numberOfOriginalSpecs: len(specs),
} }
} }
@ -24,10 +22,6 @@ func (e *Specs) Specs() []*Spec {
return e.specs return e.specs
} }
func (e *Specs) NumberOfOriginalSpecs() int {
return e.numberOfOriginalSpecs
}
func (e *Specs) HasProgrammaticFocus() bool { func (e *Specs) HasProgrammaticFocus() bool {
return e.hasProgrammaticFocus return e.hasProgrammaticFocus
} }
@ -114,15 +108,6 @@ func (e *Specs) SkipMeasurements() {
} }
} }
func (e *Specs) TrimForParallelization(total int, node int) {
startIndex, count := ParallelizedIndexRange(len(e.specs), total, node)
if count == 0 {
e.specs = make([]*Spec, 0)
} else {
e.specs = e.specs[startIndex : startIndex+count]
}
}
//sort.Interface //sort.Interface
func (e *Specs) Len() int { func (e *Specs) Len() int {

View File

@ -1,4 +1,4 @@
package spec package spec_iterator
func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) { func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
if length == 0 { if length == 0 {

View File

@ -0,0 +1,60 @@
package spec_iterator
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"github.com/onsi/ginkgo/internal/spec"
)
// ParallelIterator distributes specs dynamically: every call to Next asks the
// Ginkgo sync server for the next global spec index over HTTP, so all parallel
// nodes draw from one shared counter.
type ParallelIterator struct {
	specs  []*spec.Spec
	host   string
	client *http.Client
}

// NewParallelIterator returns an iterator over specs that coordinates with the
// sync server listening at host.
func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator {
	iterator := &ParallelIterator{
		specs:  specs,
		host:   host,
		client: &http.Client{},
	}
	return iterator
}
// Next fetches the next spec to run from the server's shared counter.
// It returns ErrClosed once the counter has advanced past the last spec,
// and propagates any HTTP or decoding error otherwise.
func (s *ParallelIterator) Next() (*spec.Spec, error) {
	resp, err := s.client.Get(s.host + "/counter")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		// fmt.Errorf replaces errors.New(fmt.Sprintf(...)) — same message,
		// idiomatic form (staticcheck S1028).
		return nil, fmt.Errorf("unexpected status code %d", resp.StatusCode)
	}

	var counter Counter
	if err := json.NewDecoder(resp.Body).Decode(&counter); err != nil {
		return nil, err
	}

	if counter.Index >= len(s.specs) {
		return nil, ErrClosed
	}

	return s.specs[counter.Index], nil
}
// NumberOfSpecsPriorToIteration reports the total spec count before any
// parallel distribution takes place.
func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int {
	return len(s.specs)
}

// NumberOfSpecsToProcessIfKnown returns false: the sync server decides at
// runtime how many specs this node will receive.
func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
	return -1, false
}

// NumberOfSpecsThatWillBeRunIfKnown returns false for the same reason —
// the per-node workload is unknowable up front.
func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
	return -1, false
}

View File

@ -0,0 +1,45 @@
package spec_iterator
import (
"github.com/onsi/ginkgo/internal/spec"
)
// SerialIterator walks a fixed slice of specs in order — the single-node,
// non-parallel case.
type SerialIterator struct {
	specs []*spec.Spec
	index int
}

// NewSerialIterator returns an iterator positioned at the first spec.
// (index relies on Go's zero value.)
func NewSerialIterator(specs []*spec.Spec) *SerialIterator {
	return &SerialIterator{specs: specs}
}
// Next returns the spec at the cursor and advances it, or ErrClosed once the
// slice is exhausted.
func (s *SerialIterator) Next() (*spec.Spec, error) {
	if s.index >= len(s.specs) {
		return nil, ErrClosed
	}
	// Named "next" rather than "spec" to avoid shadowing the spec package.
	next := s.specs[s.index]
	s.index++
	return next, nil
}
// NumberOfSpecsPriorToIteration reports the total spec count before any
// parallel distribution (for a serial iterator, simply the slice length).
func (s *SerialIterator) NumberOfSpecsPriorToIteration() int {
	return len(s.specs)
}

// NumberOfSpecsToProcessIfKnown is always known for a serial run: every spec
// in the slice will be processed.
func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
	return len(s.specs), true
}

// NumberOfSpecsThatWillBeRunIfKnown counts the specs that are neither
// skipped nor pending; always known for a serial run.
func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
	runnable := 0
	for _, candidate := range s.specs {
		if candidate.Skipped() || candidate.Pending() {
			continue
		}
		runnable++
	}
	return runnable, true
}

View File

@ -0,0 +1,47 @@
package spec_iterator
import "github.com/onsi/ginkgo/internal/spec"
// ShardedParallelIterator statically partitions the spec slice: each parallel
// node walks only its pre-computed [index, maxIndex) shard. This is the
// fallback used when the sync server does not offer a shared counter.
type ShardedParallelIterator struct {
	specs    []*spec.Spec
	index    int
	maxIndex int
}

// NewShardedParallelIterator computes this node's shard of specs out of
// total parallel nodes and returns an iterator over that range.
func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator {
	start, count := ParallelizedIndexRange(len(specs), total, node)
	return &ShardedParallelIterator{
		specs:    specs,
		index:    start,
		maxIndex: start + count,
	}
}
// Next returns the spec at the cursor and advances it, or ErrClosed once the
// node's shard is exhausted.
func (s *ShardedParallelIterator) Next() (*spec.Spec, error) {
	if s.index >= s.maxIndex {
		return nil, ErrClosed
	}
	// Named "next" rather than "spec" to avoid shadowing the spec package.
	next := s.specs[s.index]
	s.index++
	return next, nil
}
// NumberOfSpecsPriorToIteration reports the total spec count across all
// nodes, before sharding.
func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int {
	return len(s.specs)
}

// NumberOfSpecsToProcessIfKnown is always known for a sharded run: the
// remaining portion of this node's shard.
func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
	return s.maxIndex - s.index, true
}

// NumberOfSpecsThatWillBeRunIfKnown counts the remaining shard specs that
// are neither skipped nor pending; always known for a sharded run.
func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
	runnable := 0
	for _, candidate := range s.specs[s.index:s.maxIndex] {
		if !candidate.Skipped() && !candidate.Pending() {
			runnable++
		}
	}
	return runnable, true
}

View File

@ -0,0 +1,20 @@
package spec_iterator
import (
"errors"
"github.com/onsi/ginkgo/internal/spec"
)
// ErrClosed is the sentinel returned by SpecIterator.Next once every spec has
// been handed out; callers stop iterating when they see it.
var ErrClosed = errors.New("no more specs to run")

// SpecIterator abstracts how specs are handed to a spec runner: serially,
// statically sharded per node, or dynamically via the sync server's counter.
type SpecIterator interface {
	// Next returns the next spec to process, or ErrClosed when exhausted.
	Next() (*spec.Spec, error)
	// NumberOfSpecsPriorToIteration is the total spec count before any
	// parallel distribution.
	NumberOfSpecsPriorToIteration() int
	// NumberOfSpecsToProcessIfKnown reports how many specs this iterator
	// will yield, returning false when that cannot be known up front.
	NumberOfSpecsToProcessIfKnown() (int, bool)
	// NumberOfSpecsThatWillBeRunIfKnown reports how many non-skipped,
	// non-pending specs will run, returning false when unknowable.
	NumberOfSpecsThatWillBeRunIfKnown() (int, bool)
}

// Counter is the JSON payload served by the /counter endpoint: the next
// global spec index to execute.
type Counter struct {
	Index int `json:"index"`
}

View File

@ -7,6 +7,8 @@ import (
"sync" "sync"
"syscall" "syscall"
"github.com/onsi/ginkgo/internal/spec_iterator"
"github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/internal/leafnodes" "github.com/onsi/ginkgo/internal/leafnodes"
"github.com/onsi/ginkgo/internal/spec" "github.com/onsi/ginkgo/internal/spec"
@ -20,7 +22,7 @@ import (
type SpecRunner struct { type SpecRunner struct {
description string description string
beforeSuiteNode leafnodes.SuiteNode beforeSuiteNode leafnodes.SuiteNode
specs *spec.Specs iterator spec_iterator.SpecIterator
afterSuiteNode leafnodes.SuiteNode afterSuiteNode leafnodes.SuiteNode
reporters []reporters.Reporter reporters []reporters.Reporter
startTime time.Time startTime time.Time
@ -29,14 +31,15 @@ type SpecRunner struct {
writer Writer.WriterInterface writer Writer.WriterInterface
config config.GinkgoConfigType config config.GinkgoConfigType
interrupted bool interrupted bool
processedSpecs []*spec.Spec
lock *sync.Mutex lock *sync.Mutex
} }
func New(description string, beforeSuiteNode leafnodes.SuiteNode, specs *spec.Specs, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner { func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
return &SpecRunner{ return &SpecRunner{
description: description, description: description,
beforeSuiteNode: beforeSuiteNode, beforeSuiteNode: beforeSuiteNode,
specs: specs, iterator: iterator,
afterSuiteNode: afterSuiteNode, afterSuiteNode: afterSuiteNode,
reporters: reporters, reporters: reporters,
writer: writer, writer: writer,
@ -79,7 +82,18 @@ func (runner *SpecRunner) performDryRun() {
runner.reportBeforeSuite(summary) runner.reportBeforeSuite(summary)
} }
for _, spec := range runner.specs.Specs() { for {
spec, err := runner.iterator.Next()
if err == spec_iterator.ErrClosed {
break
}
if err != nil {
fmt.Println("failed to iterate over tests:\n" + err.Error())
break
}
runner.processedSpecs = append(runner.processedSpecs, spec)
summary := spec.Summary(runner.suiteID) summary := spec.Summary(runner.suiteID)
runner.reportSpecWillRun(summary) runner.reportSpecWillRun(summary)
if summary.State == types.SpecStateInvalid { if summary.State == types.SpecStateInvalid {
@ -130,9 +144,21 @@ func (runner *SpecRunner) runAfterSuite() bool {
func (runner *SpecRunner) runSpecs() bool { func (runner *SpecRunner) runSpecs() bool {
suiteFailed := false suiteFailed := false
skipRemainingSpecs := false skipRemainingSpecs := false
for _, spec := range runner.specs.Specs() { for {
spec, err := runner.iterator.Next()
if err == spec_iterator.ErrClosed {
break
}
if err != nil {
fmt.Println("failed to iterate over tests:\n" + err.Error())
suiteFailed = true
break
}
runner.processedSpecs = append(runner.processedSpecs, spec)
if runner.wasInterrupted() { if runner.wasInterrupted() {
return suiteFailed break
} }
if skipRemainingSpecs { if skipRemainingSpecs {
spec.Skip() spec.Skip()
@ -244,7 +270,7 @@ func (runner *SpecRunner) wasInterrupted() bool {
func (runner *SpecRunner) reportSuiteWillBegin() { func (runner *SpecRunner) reportSuiteWillBegin() {
runner.startTime = time.Now() runner.startTime = time.Now()
summary := runner.summary(true) summary := runner.suiteWillBeginSummary()
for _, reporter := range runner.reporters { for _, reporter := range runner.reporters {
reporter.SpecSuiteWillBegin(runner.config, summary) reporter.SpecSuiteWillBegin(runner.config, summary)
} }
@ -286,17 +312,17 @@ func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, fail
} }
func (runner *SpecRunner) reportSuiteDidEnd(success bool) { func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
summary := runner.summary(success) summary := runner.suiteDidEndSummary(success)
summary.RunTime = time.Since(runner.startTime) summary.RunTime = time.Since(runner.startTime)
for _, reporter := range runner.reporters { for _, reporter := range runner.reporters {
reporter.SpecSuiteDidEnd(summary) reporter.SpecSuiteDidEnd(summary)
} }
} }
func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool) (count int) { func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) {
count = 0 count = 0
for _, spec := range runner.specs.Specs() { for _, spec := range runner.processedSpecs {
if filter(spec) { if filter(spec) {
count++ count++
} }
@ -305,32 +331,37 @@ func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool)
return count return count
} }
func (runner *SpecRunner) summary(success bool) *types.SuiteSummary { func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary {
numberOfSpecsThatWillBeRun := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
return !ex.Skipped() && !ex.Pending() return !ex.Skipped() && !ex.Pending()
}) })
numberOfPendingSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
return ex.Pending() return ex.Pending()
}) })
numberOfSkippedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
return ex.Skipped() return ex.Skipped()
}) })
numberOfPassedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
return ex.Passed() return ex.Passed()
}) })
numberOfFlakedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
return ex.Flaked() return ex.Flaked()
}) })
numberOfFailedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
return ex.Failed() return ex.Failed()
}) })
if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun { if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
var known bool
numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
if !known {
numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration()
}
numberOfFailedSpecs = numberOfSpecsThatWillBeRun numberOfFailedSpecs = numberOfSpecsThatWillBeRun
} }
@ -339,8 +370,8 @@ func (runner *SpecRunner) summary(success bool) *types.SuiteSummary {
SuiteSucceeded: success, SuiteSucceeded: success,
SuiteID: runner.suiteID, SuiteID: runner.suiteID,
NumberOfSpecsBeforeParallelization: runner.specs.NumberOfOriginalSpecs(), NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
NumberOfTotalSpecs: len(runner.specs.Specs()), NumberOfTotalSpecs: len(runner.processedSpecs),
NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun, NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun,
NumberOfPendingSpecs: numberOfPendingSpecs, NumberOfPendingSpecs: numberOfPendingSpecs,
NumberOfSkippedSpecs: numberOfSkippedSpecs, NumberOfSkippedSpecs: numberOfSkippedSpecs,
@ -349,3 +380,29 @@ func (runner *SpecRunner) summary(success bool) *types.SuiteSummary {
NumberOfFlakedSpecs: numberOfFlakedSpecs, NumberOfFlakedSpecs: numberOfFlakedSpecs,
} }
} }
func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary {
numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown()
if !known {
numTotal = -1
}
numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
if !known {
numToRun = -1
}
return &types.SuiteSummary{
SuiteDescription: runner.description,
SuiteID: runner.suiteID,
NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
NumberOfTotalSpecs: numTotal,
NumberOfSpecsThatWillBeRun: numToRun,
NumberOfPendingSpecs: -1,
NumberOfSkippedSpecs: -1,
NumberOfPassedSpecs: -1,
NumberOfFailedSpecs: -1,
NumberOfFlakedSpecs: -1,
}
}

View File

@ -2,8 +2,11 @@ package suite
import ( import (
"math/rand" "math/rand"
"net/http"
"time" "time"
"github.com/onsi/ginkgo/internal/spec_iterator"
"github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/internal/containernode" "github.com/onsi/ginkgo/internal/containernode"
"github.com/onsi/ginkgo/internal/failer" "github.com/onsi/ginkgo/internal/failer"
@ -52,18 +55,18 @@ func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []report
r := rand.New(rand.NewSource(config.RandomSeed)) r := rand.New(rand.NewSource(config.RandomSeed))
suite.topLevelContainer.Shuffle(r) suite.topLevelContainer.Shuffle(r)
specs := suite.generateSpecs(description, config) iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config)
suite.runner = specrunner.New(description, suite.beforeSuiteNode, specs, suite.afterSuiteNode, reporters, writer, config) suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config)
suite.running = true suite.running = true
success := suite.runner.Run() success := suite.runner.Run()
if !success { if !success {
t.Fail() t.Fail()
} }
return success, specs.HasProgrammaticFocus() return success, hasProgrammaticFocus
} }
func (suite *Suite) generateSpecs(description string, config config.GinkgoConfigType) *spec.Specs { func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) {
specsSlice := []*spec.Spec{} specsSlice := []*spec.Spec{}
suite.topLevelContainer.BackPropagateProgrammaticFocus() suite.topLevelContainer.BackPropagateProgrammaticFocus()
for _, collatedNodes := range suite.topLevelContainer.Collate() { for _, collatedNodes := range suite.topLevelContainer.Collate() {
@ -83,10 +86,19 @@ func (suite *Suite) generateSpecs(description string, config config.GinkgoConfig
specs.SkipMeasurements() specs.SkipMeasurements()
} }
var iterator spec_iterator.SpecIterator
if config.ParallelTotal > 1 { if config.ParallelTotal > 1 {
specs.TrimForParallelization(config.ParallelTotal, config.ParallelNode) iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost)
resp, err := http.Get(config.SyncHost + "/has-counter")
if err != nil || resp.StatusCode != http.StatusOK {
iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode)
} }
return specs } else {
iterator = spec_iterator.NewSerialIterator(specs.Specs())
}
return iterator, specs.HasProgrammaticFocus()
} }
func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) { func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {

View File

@ -29,9 +29,10 @@ func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer st
func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct) reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
if config.ParallelTotal > 1 { if config.ParallelTotal > 1 {
reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, summary.NumberOfTotalSpecs, summary.NumberOfSpecsBeforeParallelization, reporter.config.Succinct) reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct)
} } else {
reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct) reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
}
} }
func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {

View File

@ -11,10 +11,11 @@ package reporters
import ( import (
"encoding/xml" "encoding/xml"
"fmt" "fmt"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/types"
"os" "os"
"strings" "strings"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/types"
) )
type JUnitTestSuite struct { type JUnitTestSuite struct {
@ -58,7 +59,6 @@ func NewJUnitReporter(filename string) *JUnitReporter {
func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
reporter.suite = JUnitTestSuite{ reporter.suite = JUnitTestSuite{
Tests: summary.NumberOfSpecsThatWillBeRun,
TestCases: []JUnitTestCase{}, TestCases: []JUnitTestCase{},
} }
reporter.testSuiteName = summary.SuiteDescription reporter.testSuiteName = summary.SuiteDescription
@ -116,6 +116,7 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
} }
func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun
reporter.suite.Time = summary.RunTime.Seconds() reporter.suite.Time = summary.RunTime.Seconds()
reporter.suite.Failures = summary.NumberOfFailedSpecs reporter.suite.Failures = summary.NumberOfFailedSpecs
file, err := os.Create(reporter.filename) file, err := os.Create(reporter.filename)

View File

@ -74,14 +74,18 @@ func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, s
stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct) stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
} }
func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) { func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
stenographer.registerCall("AnnounceParallelRun", node, nodes, specsToRun, totalSpecs, succinct) stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct)
} }
func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct) stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
} }
func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct)
}
func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct) stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
} }

View File

@ -37,7 +37,8 @@ const (
type Stenographer interface { type Stenographer interface {
AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
AnnounceAggregatedParallelRun(nodes int, succinct bool) AnnounceAggregatedParallelRun(nodes int, succinct bool)
AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) AnnounceParallelRun(node int, nodes int, succinct bool)
AnnounceTotalNumberOfSpecs(total int, succinct bool)
AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)
@ -98,17 +99,15 @@ func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64
s.printNewLine() s.printNewLine()
} }
func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) { func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
if succinct { if succinct {
s.print(0, "- node #%d ", node) s.print(0, "- node #%d ", node)
return return
} }
s.println(0, s.println(0,
"Parallel test node %s/%s. Assigned %s of %s specs.", "Parallel test node %s/%s.",
s.colorize(boldStyle, "%d", node), s.colorize(boldStyle, "%d", node),
s.colorize(boldStyle, "%d", nodes), s.colorize(boldStyle, "%d", nodes),
s.colorize(boldStyle, "%d", specsToRun),
s.colorize(boldStyle, "%d", totalSpecs),
) )
s.printNewLine() s.printNewLine()
} }
@ -140,6 +139,20 @@ func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, s
s.printNewLine() s.printNewLine()
} }
func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
if succinct {
s.print(0, "- %d specs ", total)
s.stream()
return
}
s.println(0,
"Will run %s specs",
s.colorize(boldStyle, "%d", total),
)
s.printNewLine()
}
func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
if succinct && summary.SuiteSucceeded { if succinct && summary.SuiteSucceeded {
s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime) s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)

View File

@ -7,6 +7,19 @@ import (
const GINKGO_FOCUS_EXIT_CODE = 197 const GINKGO_FOCUS_EXIT_CODE = 197
/*
SuiteSummary represents the a summary of the test suite and is passed to both
Reporter.SpecSuiteWillBegin
Reporter.SpecSuiteDidEnd
this is unfortunate as these two methods should receive different objects. When running in parallel
each node does not deterministically know how many specs it will end up running.
Unfortunately making such a change would break backward compatibility.
Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unkown fields
with -1.
*/
type SuiteSummary struct { type SuiteSummary struct {
SuiteDescription string SuiteDescription string
SuiteSucceeded bool SuiteSucceeded bool