bump(github.com/onsi/ginkgo):v1.2.0-94-g5ca1211
Picks up parallel execution improvements in Ginkgo that distribute jobs more evenly across parallel nodes.
parent eed7d11255
commit f654505f24
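
Before the file-by-file diff, a rough sketch of the scheduling idea this bump picks up (illustration only, not code from the commit; the durations and node count below are made up). With the old static sharding each node is handed a fixed slice of the spec list up front, so one slow spec can leave its node far behind; with the counter-based model a node that finishes early simply pulls more specs.

    package main

    import "fmt"

    func sum(xs []int) (t int) {
        for _, x := range xs {
            t += x
        }
        return
    }

    func main() {
        // Hypothetical per-spec durations: one slow spec, nine fast ones.
        durations := []int{9, 1, 1, 1, 1, 1, 1, 1, 1, 1}

        // Old behaviour: the spec list is split into fixed shards up front.
        half := len(durations) / 2
        fmt.Printf("static shards: node1=%d node2=%d\n", sum(durations[:half]), sum(durations[half:])) // 13 vs 5

        // New behaviour: whichever node is free pulls the next spec from a shared counter.
        var busyUntil [2]int
        for _, d := range durations {
            node := 0
            if busyUntil[1] < busyUntil[0] {
                node = 1
            }
            busyUntil[node] += d
        }
        fmt.Printf("counter pull:  node1=%d node2=%d\n", busyUntil[0], busyUntil[1]) // 9 vs 9
    }
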
Godeps/Godeps.json (generated): 101 changed lines

@@ -1800,123 +1800,128 @@
 },
 {
   "ImportPath": "github.com/onsi/ginkgo",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/config",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/ginkgo",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/ginkgo/convert",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/ginkgo/interrupthandler",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/ginkgo/nodot",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/ginkgo/testrunner",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/ginkgo/testsuite",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/ginkgo/watch",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/internal/codelocation",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/internal/containernode",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/internal/failer",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/internal/leafnodes",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/internal/remote",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/internal/spec",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
+{
+  "ImportPath": "github.com/onsi/ginkgo/internal/spec_iterator",
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
+},
 {
   "ImportPath": "github.com/onsi/ginkgo/internal/specrunner",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/internal/suite",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/internal/testingtproxy",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/internal/writer",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/reporters",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/reporters/stenographer",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/ginkgo/types",
-  "Comment": "v1.2.0-90-gbb93381",
-  "Rev": "bb93381d543b0e5725244abe752214a110791d01"
+  "Comment": "v1.2.0-95-g67b9df7",
+  "Rev": "67b9df7f55fe1165fd9ad49aca7754cce01a42b8"
 },
 {
   "ImportPath": "github.com/onsi/gomega",

Godeps/LICENSES (generated): 28 changed lines

@@ -62894,6 +62894,34 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 ================================================================================


+================================================================================
+= vendor/github.com/onsi/ginkgo/internal/spec_iterator licensed under: =
+
+Copyright (c) 2013-2014 Onsi Fakhouri
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+= vendor/github.com/onsi/ginkgo/LICENSE 570603114d52313cb86c0206401c9af7 -
+================================================================================
+
+
 ================================================================================
 = vendor/github.com/onsi/ginkgo/internal/suite licensed under: =


vendor/BUILD (vendored): 17 changed lines

@@ -5412,6 +5412,7 @@ go_library(
     tags = ["automanaged"],
     deps = [
         "//vendor:github.com/onsi/ginkgo/config",
+        "//vendor:github.com/onsi/ginkgo/internal/spec_iterator",
         "//vendor:github.com/onsi/ginkgo/reporters",
         "//vendor:github.com/onsi/ginkgo/reporters/stenographer",
         "//vendor:github.com/onsi/ginkgo/types",

@@ -5421,7 +5422,6 @@ go_library(
 go_library(
     name = "github.com/onsi/ginkgo/internal/spec",
     srcs = [
-        "github.com/onsi/ginkgo/internal/spec/index_computer.go",
         "github.com/onsi/ginkgo/internal/spec/spec.go",
         "github.com/onsi/ginkgo/internal/spec/specs.go",
     ],

@@ -5444,6 +5444,7 @@ go_library(
         "//vendor:github.com/onsi/ginkgo/config",
         "//vendor:github.com/onsi/ginkgo/internal/leafnodes",
         "//vendor:github.com/onsi/ginkgo/internal/spec",
+        "//vendor:github.com/onsi/ginkgo/internal/spec_iterator",
         "//vendor:github.com/onsi/ginkgo/internal/writer",
         "//vendor:github.com/onsi/ginkgo/reporters",
         "//vendor:github.com/onsi/ginkgo/types",

@@ -5460,6 +5461,7 @@ go_library(
         "//vendor:github.com/onsi/ginkgo/internal/failer",
         "//vendor:github.com/onsi/ginkgo/internal/leafnodes",
         "//vendor:github.com/onsi/ginkgo/internal/spec",
+        "//vendor:github.com/onsi/ginkgo/internal/spec_iterator",
         "//vendor:github.com/onsi/ginkgo/internal/specrunner",
         "//vendor:github.com/onsi/ginkgo/internal/writer",
         "//vendor:github.com/onsi/ginkgo/reporters",

@@ -16601,3 +16603,16 @@ go_test(
         "//vendor:k8s.io/client-go/kubernetes/fake",
     ],
 )
+
+go_library(
+    name = "github.com/onsi/ginkgo/internal/spec_iterator",
+    srcs = [
+        "github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go",
+        "github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go",
+        "github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go",
+        "github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go",
+        "github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go",
+    ],
+    tags = ["automanaged"],
+    deps = ["//vendor:github.com/onsi/ginkgo/internal/spec"],
+)

vendor/github.com/onsi/ginkgo/README.md (generated, vendored): 2 changed lines

@@ -1,6 +1,6 @@
 ![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png)

-[![Build Status](https://travis-ci.org/onsi/ginkgo.png)](https://travis-ci.org/onsi/ginkgo)
+[![Build Status](https://travis-ci.org/onsi/ginkgo.svg?branch=master)](https://travis-ci.org/onsi/ginkgo)

 Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)!

vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go (generated, vendored): 8 changed lines

@@ -125,14 +125,12 @@ func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSui

     aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct)

-    numberOfSpecsToRun := 0
     totalNumberOfSpecs := 0
-    for _, configAndSuite := range aggregator.aggregatedSuiteBeginnings {
-        numberOfSpecsToRun += configAndSuite.summary.NumberOfSpecsThatWillBeRun
-        totalNumberOfSpecs += configAndSuite.summary.NumberOfTotalSpecs
+    if len(aggregator.aggregatedSuiteBeginnings) > 0 {
+        totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization
     }

-    aggregator.stenographer.AnnounceNumberOfSpecs(numberOfSpecsToRun, totalNumberOfSpecs, aggregator.config.Succinct)
+    aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct)
     aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct)
     aggregator.flushCompletedSpecs()
 }

vendor/github.com/onsi/ginkgo/internal/remote/server.go (generated, vendored): 26 changed lines

@@ -9,13 +9,16 @@ package remote

 import (
     "encoding/json"
-    "github.com/onsi/ginkgo/config"
-    "github.com/onsi/ginkgo/reporters"
-    "github.com/onsi/ginkgo/types"
     "io/ioutil"
     "net"
     "net/http"
     "sync"
+
+    "github.com/onsi/ginkgo/internal/spec_iterator"
+
+    "github.com/onsi/ginkgo/config"
+    "github.com/onsi/ginkgo/reporters"
+    "github.com/onsi/ginkgo/types"
 )

 /*

@@ -29,6 +32,7 @@ type Server struct {
     lock            *sync.Mutex
     beforeSuiteData types.RemoteBeforeSuiteData
     parallelTotal   int
+    counter         int
 }

 //Create a new server, automatically selecting a port

@@ -63,6 +67,8 @@ func (server *Server) Start() {
     //synchronization endpoints
     mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState)
     mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData)
+    mux.HandleFunc("/counter", server.handleCounter)
+    mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility

     go httpServer.Serve(server.listener)
 }

@@ -202,3 +208,17 @@ func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, req
     enc := json.NewEncoder(writer)
     enc.Encode(afterSuiteData)
 }
+
+func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) {
+    c := spec_iterator.Counter{}
+    server.lock.Lock()
+    c.Index = server.counter
+    server.counter = server.counter + 1
+    server.lock.Unlock()
+
+    json.NewEncoder(writer).Encode(c)
+}
+
+func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) {
+    writer.Write([]byte(""))
+}

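The new /counter endpoint added above is plain JSON over HTTP. A standalone sketch of its request/response shape (my own minimal re-creation for illustration, not the vendored handler; the local counter type mirrors spec_iterator.Counter):

    package main

    import (
        "encoding/json"
        "fmt"
        "net/http"
        "net/http/httptest"
        "sync"
    )

    // counter mirrors the JSON shape of spec_iterator.Counter shown above.
    type counter struct {
        Index int `json:"index"`
    }

    func main() {
        var (
            mu   sync.Mutex
            next int
        )
        // A stand-in for the ginkgo sync server: every GET hands out the next zero-based index.
        srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            mu.Lock()
            c := counter{Index: next}
            next++
            mu.Unlock()
            json.NewEncoder(w).Encode(c)
        }))
        defer srv.Close()

        // Two polls return consecutive indices, regardless of which node asks.
        for i := 0; i < 2; i++ {
            resp, err := http.Get(srv.URL + "/counter")
            if err != nil {
                panic(err)
            }
            var c counter
            json.NewDecoder(resp.Body).Decode(&c)
            resp.Body.Close()
            fmt.Println("got index", c.Index) // 0, then 1
        }
    }
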
vendor/github.com/onsi/ginkgo/internal/spec/specs.go (generated, vendored): 21 changed lines

@@ -7,16 +7,14 @@ import (
 )

 type Specs struct {
-    specs                 []*Spec
-    numberOfOriginalSpecs int
-    hasProgrammaticFocus  bool
-    RegexScansFilePath    bool
+    specs                []*Spec
+    hasProgrammaticFocus bool
+    RegexScansFilePath   bool
 }

 func NewSpecs(specs []*Spec) *Specs {
     return &Specs{
         specs: specs,
-        numberOfOriginalSpecs: len(specs),
     }
 }

@@ -24,10 +22,6 @@ func (e *Specs) Specs() []*Spec {
     return e.specs
 }

-func (e *Specs) NumberOfOriginalSpecs() int {
-    return e.numberOfOriginalSpecs
-}
-
 func (e *Specs) HasProgrammaticFocus() bool {
     return e.hasProgrammaticFocus
 }

@@ -114,15 +108,6 @@ func (e *Specs) SkipMeasurements() {
     }
 }

-func (e *Specs) TrimForParallelization(total int, node int) {
-    startIndex, count := ParallelizedIndexRange(len(e.specs), total, node)
-    if count == 0 {
-        e.specs = make([]*Spec, 0)
-    } else {
-        e.specs = e.specs[startIndex : startIndex+count]
-    }
-}
-
 //sort.Interface

 func (e *Specs) Len() int {

vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go (generated, vendored; moved from vendor/github.com/onsi/ginkgo/internal/spec/index_computer.go)

@@ -1,4 +1,4 @@
-package spec
+package spec_iterator

 func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) {
     if length == 0 {

vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go (generated, vendored, new file): 60 lines

@@ -0,0 +1,60 @@
+package spec_iterator
+
+import (
+    "encoding/json"
+    "errors"
+    "fmt"
+    "net/http"
+
+    "github.com/onsi/ginkgo/internal/spec"
+)
+
+type ParallelIterator struct {
+    specs  []*spec.Spec
+    host   string
+    client *http.Client
+}
+
+func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator {
+    return &ParallelIterator{
+        specs:  specs,
+        host:   host,
+        client: &http.Client{},
+    }
+}
+
+func (s *ParallelIterator) Next() (*spec.Spec, error) {
+    resp, err := s.client.Get(s.host + "/counter")
+    if err != nil {
+        return nil, err
+    }
+    defer resp.Body.Close()
+
+    if resp.StatusCode != http.StatusOK {
+        return nil, errors.New(fmt.Sprintf("unexpected status code %d", resp.StatusCode))
+    }
+
+    var counter Counter
+    err = json.NewDecoder(resp.Body).Decode(&counter)
+    if err != nil {
+        return nil, err
+    }
+
+    if counter.Index >= len(s.specs) {
+        return nil, ErrClosed
+    }
+
+    return s.specs[counter.Index], nil
+}
+
+func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int {
+    return len(s.specs)
+}
+
+func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
+    return -1, false
+}
+
+func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
+    return -1, false
+}

vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go (generated, vendored, new file): 45 lines

@@ -0,0 +1,45 @@
+package spec_iterator
+
+import (
+    "github.com/onsi/ginkgo/internal/spec"
+)
+
+type SerialIterator struct {
+    specs []*spec.Spec
+    index int
+}
+
+func NewSerialIterator(specs []*spec.Spec) *SerialIterator {
+    return &SerialIterator{
+        specs: specs,
+        index: 0,
+    }
+}
+
+func (s *SerialIterator) Next() (*spec.Spec, error) {
+    if s.index >= len(s.specs) {
+        return nil, ErrClosed
+    }
+
+    spec := s.specs[s.index]
+    s.index += 1
+    return spec, nil
+}
+
+func (s *SerialIterator) NumberOfSpecsPriorToIteration() int {
+    return len(s.specs)
+}
+
+func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
+    return len(s.specs), true
+}
+
+func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
+    count := 0
+    for _, s := range s.specs {
+        if !s.Skipped() && !s.Pending() {
+            count += 1
+        }
+    }
+    return count, true
+}

vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go (generated, vendored, new file): 47 lines

@@ -0,0 +1,47 @@
+package spec_iterator
+
+import "github.com/onsi/ginkgo/internal/spec"
+
+type ShardedParallelIterator struct {
+    specs    []*spec.Spec
+    index    int
+    maxIndex int
+}
+
+func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator {
+    startIndex, count := ParallelizedIndexRange(len(specs), total, node)
+
+    return &ShardedParallelIterator{
+        specs:    specs,
+        index:    startIndex,
+        maxIndex: startIndex + count,
+    }
+}
+
+func (s *ShardedParallelIterator) Next() (*spec.Spec, error) {
+    if s.index >= s.maxIndex {
+        return nil, ErrClosed
+    }
+
+    spec := s.specs[s.index]
+    s.index += 1
+    return spec, nil
+}
+
+func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int {
+    return len(s.specs)
+}
+
+func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) {
+    return s.maxIndex - s.index, true
+}
+
+func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) {
+    count := 0
+    for i := s.index; i < s.maxIndex; i += 1 {
+        if !s.specs[i].Skipped() && !s.specs[i].Pending() {
+            count += 1
+        }
+    }
+    return count, true
+}

vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go (generated, vendored, new file): 20 lines

@@ -0,0 +1,20 @@
+package spec_iterator
+
+import (
+    "errors"
+
+    "github.com/onsi/ginkgo/internal/spec"
+)
+
+var ErrClosed = errors.New("no more specs to run")
+
+type SpecIterator interface {
+    Next() (*spec.Spec, error)
+    NumberOfSpecsPriorToIteration() int
+    NumberOfSpecsToProcessIfKnown() (int, bool)
+    NumberOfSpecsThatWillBeRunIfKnown() (int, bool)
+}
+
+type Counter struct {
+    Index int `json:"index"`
+}

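A minimal, self-contained sketch of the iteration contract this interface defines (local stand-in types instead of the vendored packages): callers keep calling Next() until the sentinel closed error comes back, which is how the spec runner below drains an iterator.

    package main

    import (
        "errors"
        "fmt"
    )

    // errClosed mirrors the role of spec_iterator.ErrClosed: normal end of iteration.
    var errClosed = errors.New("no more specs to run")

    // sliceIterator is a toy serial iterator over strings, standing in for specs.
    type sliceIterator struct {
        items []string
        index int
    }

    func (s *sliceIterator) Next() (string, error) {
        if s.index >= len(s.items) {
            return "", errClosed
        }
        item := s.items[s.index]
        s.index++
        return item, nil
    }

    func main() {
        it := &sliceIterator{items: []string{"spec A", "spec B", "spec C"}}
        for {
            item, err := it.Next()
            if errors.Is(err, errClosed) {
                break // the normal way the loop ends
            }
            if err != nil {
                fmt.Println("failed to iterate:", err)
                break
            }
            fmt.Println("running", item)
        }
    }
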
vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go (generated, vendored): 95 changed lines

@@ -7,6 +7,8 @@ import (
     "sync"
     "syscall"

+    "github.com/onsi/ginkgo/internal/spec_iterator"
+
     "github.com/onsi/ginkgo/config"
     "github.com/onsi/ginkgo/internal/leafnodes"
     "github.com/onsi/ginkgo/internal/spec"

@@ -20,7 +22,7 @@ import (
 type SpecRunner struct {
     description     string
     beforeSuiteNode leafnodes.SuiteNode
-    specs           *spec.Specs
+    iterator        spec_iterator.SpecIterator
     afterSuiteNode  leafnodes.SuiteNode
     reporters       []reporters.Reporter
     startTime       time.Time

@@ -29,14 +31,15 @@ type SpecRunner struct {
     writer          Writer.WriterInterface
     config          config.GinkgoConfigType
     interrupted     bool
+    processedSpecs  []*spec.Spec
     lock            *sync.Mutex
 }

-func New(description string, beforeSuiteNode leafnodes.SuiteNode, specs *spec.Specs, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
+func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner {
     return &SpecRunner{
         description:     description,
         beforeSuiteNode: beforeSuiteNode,
-        specs:           specs,
+        iterator:        iterator,
         afterSuiteNode:  afterSuiteNode,
         reporters:       reporters,
         writer:          writer,

@@ -79,7 +82,18 @@ func (runner *SpecRunner) performDryRun() {
         runner.reportBeforeSuite(summary)
     }

-    for _, spec := range runner.specs.Specs() {
+    for {
+        spec, err := runner.iterator.Next()
+        if err == spec_iterator.ErrClosed {
+            break
+        }
+        if err != nil {
+            fmt.Println("failed to iterate over tests:\n" + err.Error())
+            break
+        }
+
+        runner.processedSpecs = append(runner.processedSpecs, spec)
+
         summary := spec.Summary(runner.suiteID)
         runner.reportSpecWillRun(summary)
         if summary.State == types.SpecStateInvalid {

@@ -130,9 +144,21 @@ func (runner *SpecRunner) runAfterSuite() bool {
 func (runner *SpecRunner) runSpecs() bool {
     suiteFailed := false
     skipRemainingSpecs := false
-    for _, spec := range runner.specs.Specs() {
+    for {
+        spec, err := runner.iterator.Next()
+        if err == spec_iterator.ErrClosed {
+            break
+        }
+        if err != nil {
+            fmt.Println("failed to iterate over tests:\n" + err.Error())
+            suiteFailed = true
+            break
+        }
+
+        runner.processedSpecs = append(runner.processedSpecs, spec)
+
         if runner.wasInterrupted() {
-            return suiteFailed
+            break
         }
         if skipRemainingSpecs {
             spec.Skip()

@@ -244,7 +270,7 @@ func (runner *SpecRunner) wasInterrupted() bool {

 func (runner *SpecRunner) reportSuiteWillBegin() {
     runner.startTime = time.Now()
-    summary := runner.summary(true)
+    summary := runner.suiteWillBeginSummary()
     for _, reporter := range runner.reporters {
         reporter.SpecSuiteWillBegin(runner.config, summary)
     }

@@ -286,17 +312,17 @@ func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, fail
 }

 func (runner *SpecRunner) reportSuiteDidEnd(success bool) {
-    summary := runner.summary(success)
+    summary := runner.suiteDidEndSummary(success)
     summary.RunTime = time.Since(runner.startTime)
     for _, reporter := range runner.reporters {
         reporter.SpecSuiteDidEnd(summary)
     }
 }

-func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool) (count int) {
+func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) {
     count = 0

-    for _, spec := range runner.specs.Specs() {
+    for _, spec := range runner.processedSpecs {
         if filter(spec) {
             count++
         }

@@ -305,32 +331,37 @@ func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool)
     return count
 }

-func (runner *SpecRunner) summary(success bool) *types.SuiteSummary {
-    numberOfSpecsThatWillBeRun := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary {
+    numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
         return !ex.Skipped() && !ex.Pending()
     })

-    numberOfPendingSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+    numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
         return ex.Pending()
     })

-    numberOfSkippedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+    numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
         return ex.Skipped()
     })

-    numberOfPassedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+    numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
         return ex.Passed()
     })

-    numberOfFlakedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+    numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
         return ex.Flaked()
     })

-    numberOfFailedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool {
+    numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool {
         return ex.Failed()
     })

     if runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun {
+        var known bool
+        numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
+        if !known {
+            numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration()
+        }
         numberOfFailedSpecs = numberOfSpecsThatWillBeRun
     }

@@ -339,8 +370,8 @@ func (runner *SpecRunner) summary(success bool) *types.SuiteSummary {
         SuiteSucceeded: success,
         SuiteID:        runner.suiteID,

-        NumberOfSpecsBeforeParallelization: runner.specs.NumberOfOriginalSpecs(),
-        NumberOfTotalSpecs:                 len(runner.specs.Specs()),
+        NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
+        NumberOfTotalSpecs:                 len(runner.processedSpecs),
         NumberOfSpecsThatWillBeRun:         numberOfSpecsThatWillBeRun,
         NumberOfPendingSpecs:               numberOfPendingSpecs,
         NumberOfSkippedSpecs:               numberOfSkippedSpecs,

@@ -349,3 +380,29 @@ func (runner *SpecRunner) summary(success bool) *types.SuiteSummary {
         NumberOfFlakedSpecs:                numberOfFlakedSpecs,
     }
 }
+
+func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary {
+    numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown()
+    if !known {
+        numTotal = -1
+    }
+
+    numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown()
+    if !known {
+        numToRun = -1
+    }
+
+    return &types.SuiteSummary{
+        SuiteDescription: runner.description,
+        SuiteID:          runner.suiteID,
+
+        NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(),
+        NumberOfTotalSpecs:                 numTotal,
+        NumberOfSpecsThatWillBeRun:         numToRun,
+        NumberOfPendingSpecs:               -1,
+        NumberOfSkippedSpecs:               -1,
+        NumberOfPassedSpecs:                -1,
+        NumberOfFailedSpecs:                -1,
+        NumberOfFlakedSpecs:                -1,
+    }
+}

vendor/github.com/onsi/ginkgo/internal/suite/suite.go (generated, vendored): 24 changed lines

@@ -2,8 +2,11 @@ package suite

 import (
     "math/rand"
+    "net/http"
     "time"

+    "github.com/onsi/ginkgo/internal/spec_iterator"
+
     "github.com/onsi/ginkgo/config"
     "github.com/onsi/ginkgo/internal/containernode"
     "github.com/onsi/ginkgo/internal/failer"

@@ -52,18 +55,18 @@ func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []report

     r := rand.New(rand.NewSource(config.RandomSeed))
     suite.topLevelContainer.Shuffle(r)
-    specs := suite.generateSpecs(description, config)
-    suite.runner = specrunner.New(description, suite.beforeSuiteNode, specs, suite.afterSuiteNode, reporters, writer, config)
+    iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config)
+    suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config)

     suite.running = true
     success := suite.runner.Run()
     if !success {
         t.Fail()
     }
-    return success, specs.HasProgrammaticFocus()
+    return success, hasProgrammaticFocus
 }

-func (suite *Suite) generateSpecs(description string, config config.GinkgoConfigType) *spec.Specs {
+func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) {
     specsSlice := []*spec.Spec{}
     suite.topLevelContainer.BackPropagateProgrammaticFocus()
     for _, collatedNodes := range suite.topLevelContainer.Collate() {

@@ -83,10 +86,19 @@ func (suite *Suite) generateSpecs(description string, config config.GinkgoConfig
         specs.SkipMeasurements()
     }

+    var iterator spec_iterator.SpecIterator
+
     if config.ParallelTotal > 1 {
-        specs.TrimForParallelization(config.ParallelTotal, config.ParallelNode)
+        iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost)
+        resp, err := http.Get(config.SyncHost + "/has-counter")
+        if err != nil || resp.StatusCode != http.StatusOK {
+            iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode)
+        }
+    } else {
+        iterator = spec_iterator.NewSerialIterator(specs.Specs())
     }
-    return specs
+
+    return iterator, specs.HasProgrammaticFocus()
 }

 func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) {

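A standalone sketch of the backward-compatibility probe used in generateSpecsIterator above (hypothetical helper name, httptest servers standing in for the ginkgo sync server): if GET <SyncHost>/has-counter does not answer 200, the node falls back to the old sharded split.

    package main

    import (
        "fmt"
        "net/http"
        "net/http/httptest"
    )

    // pickStrategy mirrors the decision shape shown in the diff above.
    func pickStrategy(syncHost string) string {
        resp, err := http.Get(syncHost + "/has-counter")
        if err != nil {
            return "sharded (static split; sync server unreachable)"
        }
        defer resp.Body.Close()
        if resp.StatusCode != http.StatusOK {
            return "sharded (static split; old sync server)"
        }
        return "parallel (shared counter)"
    }

    func main() {
        // A "new" sync server that exposes /has-counter.
        newServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            if r.URL.Path == "/has-counter" {
                w.Write([]byte(""))
                return
            }
            http.NotFound(w, r)
        }))
        defer newServer.Close()

        // An "old" sync server with no /has-counter route.
        oldServer := httptest.NewServer(http.HandlerFunc(http.NotFound))
        defer oldServer.Close()

        fmt.Println("new server:", pickStrategy(newServer.URL)) // parallel (shared counter)
        fmt.Println("old server:", pickStrategy(oldServer.URL)) // sharded (static split; old sync server)
    }
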
vendor/github.com/onsi/ginkgo/reporters/default_reporter.go (generated, vendored): 5 changed lines

@@ -29,9 +29,10 @@ func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer st
 func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
     reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct)
     if config.ParallelTotal > 1 {
-        reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, summary.NumberOfTotalSpecs, summary.NumberOfSpecsBeforeParallelization, reporter.config.Succinct)
+        reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct)
+    } else {
+        reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
     }
-    reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct)
 }

 func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) {

vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go (generated, vendored): 7 changed lines

@@ -11,10 +11,11 @@ package reporters
 import (
     "encoding/xml"
     "fmt"
-    "github.com/onsi/ginkgo/config"
-    "github.com/onsi/ginkgo/types"
     "os"
     "strings"
+
+    "github.com/onsi/ginkgo/config"
+    "github.com/onsi/ginkgo/types"
 )

 type JUnitTestSuite struct {

@@ -58,7 +59,6 @@ func NewJUnitReporter(filename string) *JUnitReporter {

 func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) {
     reporter.suite = JUnitTestSuite{
-        Tests:     summary.NumberOfSpecsThatWillBeRun,
         TestCases: []JUnitTestCase{},
     }
     reporter.testSuiteName = summary.SuiteDescription

@@ -116,6 +116,7 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) {
 }

 func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) {
+    reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun
     reporter.suite.Time = summary.RunTime.Seconds()
     reporter.suite.Failures = summary.NumberOfFailedSpecs
     file, err := os.Create(reporter.filename)

vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go (generated, vendored): 8 changed lines

@@ -74,14 +74,18 @@ func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, s
     stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct)
 }

-func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
-    stenographer.registerCall("AnnounceParallelRun", node, nodes, specsToRun, totalSpecs, succinct)
+func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
+    stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct)
 }

 func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) {
     stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct)
 }

+func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
+    stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct)
+}
+
 func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
     stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct)
 }

vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go (generated, vendored): 23 changed lines

@@ -37,7 +37,8 @@ const (
 type Stenographer interface {
     AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool)
     AnnounceAggregatedParallelRun(nodes int, succinct bool)
-    AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool)
+    AnnounceParallelRun(node int, nodes int, succinct bool)
+    AnnounceTotalNumberOfSpecs(total int, succinct bool)
     AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool)
     AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool)

@@ -98,17 +99,15 @@ func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64
     s.printNewLine()
 }

-func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) {
+func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) {
     if succinct {
         s.print(0, "- node #%d ", node)
         return
     }
     s.println(0,
-        "Parallel test node %s/%s. Assigned %s of %s specs.",
+        "Parallel test node %s/%s.",
         s.colorize(boldStyle, "%d", node),
         s.colorize(boldStyle, "%d", nodes),
-        s.colorize(boldStyle, "%d", specsToRun),
-        s.colorize(boldStyle, "%d", totalSpecs),
     )
     s.printNewLine()
 }

@@ -140,6 +139,20 @@ func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, s
     s.printNewLine()
 }

+func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) {
+    if succinct {
+        s.print(0, "- %d specs ", total)
+        s.stream()
+        return
+    }
+    s.println(0,
+        "Will run %s specs",
+        s.colorize(boldStyle, "%d", total),
+    )
+
+    s.printNewLine()
+}
+
 func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) {
     if succinct && summary.SuiteSucceeded {
         s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime)

vendor/github.com/onsi/ginkgo/types/types.go (generated, vendored): 13 changed lines

@@ -7,6 +7,19 @@ import (

 const GINKGO_FOCUS_EXIT_CODE = 197

+/*
+SuiteSummary represents the a summary of the test suite and is passed to both
+Reporter.SpecSuiteWillBegin
+Reporter.SpecSuiteDidEnd
+
+this is unfortunate as these two methods should receive different objects. When running in parallel
+each node does not deterministically know how many specs it will end up running.
+
+Unfortunately making such a change would break backward compatibility.
+
+Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unkown fields
+with -1.
+*/
 type SuiteSummary struct {
     SuiteDescription string
     SuiteSucceeded   bool