Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-05 02:09:56 +00:00)

Merge pull request #8651 from lavalamp/e2e-timeout

Start e2e framework; print events

Commit 4ca2595ed3
Godeps/Godeps.json (4 changes, generated)

@@ -391,8 +391,8 @@
 		},
 		{
 			"ImportPath": "github.com/onsi/ginkgo",
-			"Comment": "v1.1.0-44-gae043a2",
-			"Rev": "ae043a2b2a91d6441adedc96d2c01958a78ee516"
+			"Comment": "v1.2.0-beta-9-gfbb6632",
+			"Rev": "fbb663242655b700c623e9629d7781db98957501"
 		},
 		{
 			"ImportPath": "github.com/onsi/gomega",
Godeps/_workspace/src/github.com/onsi/ginkgo/CHANGELOG.md (12 changes, generated, vendored)

@@ -1,5 +1,13 @@
 ## HEAD
 
+Improvements
+
+- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
+
+## 1.2.0-beta
+
+Ginkgo now requires Go 1.4+
+
 Improvements:
 
 - Call reporters in reverse order when announcing spec completion -- allows custom reporters to emit output before the default reporter does.
@@ -26,6 +34,8 @@ Improvements:
 - Support `bootstrap`ping and `generate`ing [Agouti](http://agouti.org) specs.
 - `ginkgo generate` and `ginkgo bootstrap` now honor the package name already defined in a given directory
 - The `ginkgo` CLI ignores `SIGQUIT`. Prevents its stack dump from interlacing with the underlying test suite's stack dump.
+- The `ginkgo` CLI now compiles tests into a temporary directory instead of the package directory. This necessitates upgrading to Go v1.4+.
+- `ginkgo -notify` now works on Linux
 
 Bug Fixes:
 
@@ -34,6 +44,8 @@ Bug Fixes:
 - Fix incorrect failure message when a panic occurs during a parallel test run
 - Fixed an issue where a pending test within a focused context (or a focused test within a pending context) would skip all other tests.
 - Be more consistent about handling SIGTERM as well as SIGINT
+- When interupted while concurrently compiling test suites in the background, Ginkgo now cleans up the compiled artifacts.
+- Fixed a long standing bug where `ginkgo -p` would hang if a process spawned by one of the Ginkgo parallel nodes does not exit. (Hooray!)
 
 ## 1.1.0 (8/2/2014)
 
Godeps/_workspace/src/github.com/onsi/ginkgo/README.md (2 changes, generated, vendored)

@@ -59,7 +59,7 @@ Agouti allows you run WebDriver integration tests. Learn more about Agouti [her
 
 ## Set Me Up!
 
-You'll need Golang v1.2+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
+You'll need Golang v1.4+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
 
 ```bash
 
Godeps/_workspace/src/github.com/onsi/ginkgo/config/config.go (2 changes, generated, vendored)

@@ -20,7 +20,7 @@ import (
 	"fmt"
 )
 
-const VERSION = "1.1.0"
+const VERSION = "1.2.0-beta"
 
 type GinkgoConfigType struct {
 	RandomSeed int64
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/build_command.go (10 changes, generated, vendored)

@@ -46,15 +46,19 @@ func (r *SpecBuilder) BuildSpecs(args []string, additionalArgs []string) {
 
 	passed := true
 	for _, suite := range suites {
-		runner := testrunner.New(suite, 1, false, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.Tags, nil)
+		runner := testrunner.New(suite, 1, false, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, nil)
 		fmt.Printf("Compiling %s...\n", suite.PackageName)
-		err := runner.Compile()
+
+		path, _ := filepath.Abs(filepath.Join(suite.Path, fmt.Sprintf("%s.test", suite.PackageName)))
+		err := runner.CompileTo(path)
 		if err != nil {
 			fmt.Println(err.Error())
 			passed = false
 		} else {
-			fmt.Printf("    compiled %s.test\n", filepath.Join(suite.Path, suite.PackageName))
+			fmt.Printf("    compiled %s.test\n", suite.PackageName)
 		}
 
+		runner.CleanUp()
 	}
 
 	if passed {
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/main.go (2 changes, generated, vendored)

@@ -58,7 +58,7 @@ passing `ginkgo watch` the `-r` flag will recursively detect all test suites und
 `watch` does not detect *new* packages. Moreover, changes in package X only rerun the tests for package X, tests for packages
 that depend on X are not rerun.
 
-[OSX only] To receive (desktop) notifications when a test run completes:
+[OSX & Linux only] To receive (desktop) notifications when a test run completes:
 
 	ginkgo -notify
 
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/notifications.go (66 changes, generated, vendored)

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"os"
 	"os/exec"
+	"runtime"
 
 	"github.com/onsi/ginkgo/ginkgo/testsuite"
 )
@@ -20,9 +21,15 @@ func NewNotifier(commandFlags *RunWatchAndBuildCommandFlags) *Notifier {
 
 func (n *Notifier) VerifyNotificationsAreAvailable() {
 	if n.commandFlags.Notify {
-		_, err := exec.LookPath("terminal-notifier")
-		if err != nil {
-			fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed.
+		onLinux := (runtime.GOOS == "linux")
+		onOSX := (runtime.GOOS == "darwin")
+		if onOSX {
+			_, err := exec.LookPath("terminal-notifier")
+			if err != nil {
+				fmt.Printf(`--notify requires terminal-notifier, which you don't seem to have installed.
 
+OSX:
+
 To remedy this:
 
@@ -32,7 +39,22 @@ To learn more about terminal-notifier:
 
 https://github.com/alloy/terminal-notifier
 `)
 			os.Exit(1)
+			}
+
+		} else if onLinux {
+
+			_, err := exec.LookPath("notify-send")
+			if err != nil {
+				fmt.Printf(`--notify requires terminal-notifier or notify-send, which you don't seem to have installed.
+
+Linux:
+
+Download and install notify-send for your distribution
+`)
+				os.Exit(1)
+			}
+
 		}
 	}
 }
@@ -46,16 +68,34 @@ func (n *Notifier) SendSuiteCompletionNotification(suite testsuite.TestSuite, su
 }
 
 func (n *Notifier) SendNotification(title string, subtitle string) {
-	args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"}
-
-	terminal := os.Getenv("TERM_PROGRAM")
-	if terminal == "iTerm.app" {
-		args = append(args, "-activate", "com.googlecode.iterm2")
-	} else if terminal == "Apple_Terminal" {
-		args = append(args, "-activate", "com.apple.Terminal")
-	}
-
 	if n.commandFlags.Notify {
-		exec.Command("terminal-notifier", args...).Run()
+		onLinux := (runtime.GOOS == "linux")
+		onOSX := (runtime.GOOS == "darwin")
+
+		if onOSX {
+
+			_, err := exec.LookPath("terminal-notifier")
+			if err == nil {
+				args := []string{"-title", title, "-subtitle", subtitle, "-group", "com.onsi.ginkgo"}
+				terminal := os.Getenv("TERM_PROGRAM")
+				if terminal == "iTerm.app" {
+					args = append(args, "-activate", "com.googlecode.iterm2")
+				} else if terminal == "Apple_Terminal" {
+					args = append(args, "-activate", "com.apple.Terminal")
+				}
+
+				exec.Command("terminal-notifier", args...).Run()
+			}
+
+		} else if onLinux {
+
+			_, err := exec.LookPath("notify-send")
+			if err == nil {
+				args := []string{"-a", "ginkgo", title, subtitle}
+				exec.Command("notify-send", args...).Run()
+			}
+
+		}
 	}
 }
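The shape of this change generalizes beyond Ginkgo: probe runtime.GOOS, check exec.LookPath for the platform's notifier binary, and shell out only if it exists. A minimal standalone sketch of the same pattern, assuming only the standard library (package and function names are illustrative, not from the PR):

package notify

import (
	"os/exec"
	"runtime"
)

// send posts a desktop notification with whichever helper the platform
// provides: terminal-notifier on OS X, notify-send on Linux. It is a
// no-op when the helper binary is not on PATH, mirroring the LookPath
// guard the diff adds.
func send(title, subtitle string) {
	switch runtime.GOOS {
	case "darwin":
		if _, err := exec.LookPath("terminal-notifier"); err == nil {
			exec.Command("terminal-notifier", "-title", title, "-subtitle", subtitle).Run()
		}
	case "linux":
		if _, err := exec.LookPath("notify-send"); err == nil {
			exec.Command("notify-send", "-a", "ginkgo", title, subtitle).Run()
		}
	}
}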
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/run_command.go (2 changes, generated, vendored)

@@ -71,7 +71,7 @@ func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) {
 
 	runners := []*testrunner.TestRunner{}
 	for _, suite := range suites {
-		runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.Tags, additionalArgs))
+		runners = append(runners, testrunner.New(suite, r.commandFlags.NumCPU, r.commandFlags.ParallelStream, r.commandFlags.Race, r.commandFlags.Cover, r.commandFlags.CoverPkg, r.commandFlags.Tags, additionalArgs))
 	}
 
 	numSuites := 0
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/run_watch_and_build_command_flags.go (generated, vendored)

@@ -11,6 +11,7 @@ type RunWatchAndBuildCommandFlags struct {
 	Recurse     bool
 	Race        bool
 	Cover       bool
+	CoverPkg    string
 	SkipPackage string
 	Tags        string
 
@@ -87,11 +88,11 @@ func (c *RunWatchAndBuildCommandFlags) computeNodes() {
 
 func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
 	onWindows := (runtime.GOOS == "windows")
-	onOSX := (runtime.GOOS == "darwin")
 
 	c.FlagSet.BoolVar(&(c.Recurse), "r", false, "Find and run test suites under the current directory recursively")
 	c.FlagSet.BoolVar(&(c.Race), "race", false, "Run tests with race detection enabled")
 	c.FlagSet.BoolVar(&(c.Cover), "cover", false, "Run tests with coverage analysis, will generate coverage profiles with the package name in the current directory")
+	c.FlagSet.StringVar(&(c.CoverPkg), "coverpkg", "", "Run tests with coverage on the given external modules")
 	c.FlagSet.StringVar(&(c.SkipPackage), "skipPackage", "", "A comma-separated list of package names to be skipped. If any part of the package's path matches, that package is ignored.")
 	c.FlagSet.StringVar(&(c.Tags), "tags", "", "A list of build tags to consider satisfied during the build")
 
@@ -101,7 +102,7 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
 	c.FlagSet.IntVar(&(c.NumCompilers), "compilers", 0, "The number of concurrent compilations to run (0 will autodetect)")
 	c.FlagSet.BoolVar(&(c.AutoNodes), "p", false, "Run in parallel with auto-detected number of nodes")
 	c.FlagSet.BoolVar(&(c.ParallelStream), "stream", onWindows, "stream parallel test output in real time: less coherent, but useful for debugging")
-	if onOSX {
+	if !onWindows {
 		c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes")
 	}
 }
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/suite_runner.go (133 changes, generated, vendored)

@@ -3,6 +3,7 @@ package main
 import (
 	"fmt"
 	"runtime"
+	"sync"
 
 	"github.com/onsi/ginkgo/config"
 	"github.com/onsi/ginkgo/ginkgo/interrupthandler"
@@ -10,28 +11,21 @@ import (
 	"github.com/onsi/ginkgo/ginkgo/testsuite"
 )
 
+type compilationInput struct {
+	runner *testrunner.TestRunner
+	result chan compilationOutput
+}
+
+type compilationOutput struct {
+	runner *testrunner.TestRunner
+	err    error
+}
+
 type SuiteRunner struct {
 	notifier         *Notifier
 	interruptHandler *interrupthandler.InterruptHandler
 }
 
-type compiler struct {
-	runner           *testrunner.TestRunner
-	compilationError chan error
-}
-
-func (c *compiler) compile() {
-	retries := 0
-
-	err := c.runner.Compile()
-	for err != nil && retries < 5 { //We retry because Go sometimes steps on itself when multiple compiles happen in parallel. This is ugly, but should help resolve flakiness...
-		err = c.runner.Compile()
-		retries++
-	}
-
-	c.compilationError <- err
-}
-
 func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.InterruptHandler) *SuiteRunner {
 	return &SuiteRunner{
 		notifier: notifier,
@@ -39,63 +33,110 @@ func NewSuiteRunner(notifier *Notifier, interruptHandler *interrupthandler.Inter
 	}
 }
 
-func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) {
-	runResult := testrunner.PassingRunResult()
+func (r *SuiteRunner) compileInParallel(runners []*testrunner.TestRunner, numCompilers int, willCompile func(suite testsuite.TestSuite)) chan compilationOutput {
+	//we return this to the consumer, it will return each runner in order as it compiles
+	compilationOutputs := make(chan compilationOutput, len(runners))
 
-	compilers := make([]*compiler, len(runners))
-	for i, runner := range runners {
-		compilers[i] = &compiler{
-			runner:           runner,
-			compilationError: make(chan error, 1),
-		}
+	//an array of channels - the nth runner's compilation output is sent to the nth channel in this array
+	//we read from these channels in order to ensure we run the suites in order
+	orderedCompilationOutputs := []chan compilationOutput{}
+	for _ = range runners {
+		orderedCompilationOutputs = append(orderedCompilationOutputs, make(chan compilationOutput, 1))
 	}
 
-	compilerChannel := make(chan *compiler)
+	//we're going to spin up numCompilers compilers - they're going to run concurrently and will consume this channel
+	//we prefill the channel then close it, this ensures we compile things in the correct order
+	workPool := make(chan compilationInput, len(runners))
+	for i, runner := range runners {
+		workPool <- compilationInput{runner, orderedCompilationOutputs[i]}
+	}
+	close(workPool)
+
+	//pick a reasonable numCompilers
 	if numCompilers == 0 {
 		numCompilers = runtime.NumCPU()
 	}
 
+	//a WaitGroup to help us wait for all compilers to shut down
+	wg := &sync.WaitGroup{}
+	wg.Add(numCompilers)
+
+	//spin up the concurrent compilers
 	for i := 0; i < numCompilers; i++ {
 		go func() {
-			for compiler := range compilerChannel {
-				if willCompile != nil {
-					willCompile(compiler.runner.Suite)
+			defer wg.Done()
+			for input := range workPool {
+				if r.interruptHandler.WasInterrupted() {
+					return
 				}
-				compiler.compile()
+
+				if willCompile != nil {
+					willCompile(input.runner.Suite)
+				}
+
+				//We retry because Go sometimes steps on itself when multiple compiles happen in parallel. This is ugly, but should help resolve flakiness...
+				var err error
+				retries := 0
+				for retries <= 5 {
+					if r.interruptHandler.WasInterrupted() {
+						return
+					}
+					if err = input.runner.Compile(); err == nil {
+						break
+					}
+					retries++
+				}
+
+				input.result <- compilationOutput{input.runner, err}
 			}
 		}()
 	}
 
+	//read from the compilation output channels *in order* and send them to the caller
+	//close the compilationOutputs channel to tell the caller we're done
 	go func() {
-		for _, compiler := range compilers {
-			compilerChannel <- compiler
+		defer close(compilationOutputs)
+		for _, orderedCompilationOutput := range orderedCompilationOutputs {
+			select {
+			case compilationOutput := <-orderedCompilationOutput:
+				compilationOutputs <- compilationOutput
+			case <-r.interruptHandler.C:
+				//interrupt detected, wait for the compilers to shut down then bail
+				//this ensure we clean up after ourselves as we don't leave any compilation processes running
+				wg.Wait()
+				return
+			}
 		}
-		close(compilerChannel)
 	}()
 
+	return compilationOutputs
+}
+
+func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers int, keepGoing bool, willCompile func(suite testsuite.TestSuite)) (testrunner.RunResult, int) {
+	runResult := testrunner.PassingRunResult()
+
+	compilationOutputs := r.compileInParallel(runners, numCompilers, willCompile)
+
 	numSuitesThatRan := 0
 	suitesThatFailed := []testsuite.TestSuite{}
-	for i, runner := range runners {
-		if r.interruptHandler.WasInterrupted() {
-			break
-		}
-
-		compilationError := <-compilers[i].compilationError
-		if compilationError != nil {
-			fmt.Print(compilationError.Error())
+	for compilationOutput := range compilationOutputs {
+		if compilationOutput.err != nil {
+			fmt.Print(compilationOutput.err.Error())
 		}
 		numSuitesThatRan++
 		suiteRunResult := testrunner.FailingRunResult()
-		if compilationError == nil {
-			suiteRunResult = compilers[i].runner.Run()
+		if compilationOutput.err == nil {
+			suiteRunResult = compilationOutput.runner.Run()
 		}
-		r.notifier.SendSuiteCompletionNotification(runner.Suite, suiteRunResult.Passed)
+		r.notifier.SendSuiteCompletionNotification(compilationOutput.runner.Suite, suiteRunResult.Passed)
 		runResult = runResult.Merge(suiteRunResult)
 		if !suiteRunResult.Passed {
-			suitesThatFailed = append(suitesThatFailed, runner.Suite)
+			suitesThatFailed = append(suitesThatFailed, compilationOutput.runner.Suite)
 			if !keepGoing {
 				break
 			}
 		}
-		if i < len(runners)-1 && !config.DefaultReporterConfig.Succinct {
+		if numSuitesThatRan < len(runners) && !config.DefaultReporterConfig.Succinct {
 			fmt.Println("")
 		}
 	}
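The concurrency pattern compileInParallel introduces is worth isolating: a prefilled-and-closed channel serves as the work queue, every job gets its own one-slot result channel, and a single forwarder drains those slots in submission order, so consumers receive results in order even though the work finishes out of order. A minimal sketch of the same shape, with illustrative names and no interrupt handling:

package ordered

import "sync"

type result struct {
	id  int
	err error
}

// runOrdered executes jobs concurrently but delivers results in
// submission order -- the same shape as ginkgo's compileInParallel.
func runOrdered(jobs []func() error, workers int) <-chan result {
	out := make(chan result, len(jobs))

	// One single-slot channel per job preserves ordering on the read side.
	slots := make([]chan result, len(jobs))
	for i := range slots {
		slots[i] = make(chan result, 1)
	}

	// Prefill and close the work queue so workers drain it and exit.
	type work struct {
		id  int
		job func() error
	}
	queue := make(chan work, len(jobs))
	for i, j := range jobs {
		queue <- work{i, j}
	}
	close(queue)

	var wg sync.WaitGroup
	wg.Add(workers)
	for w := 0; w < workers; w++ {
		go func() {
			defer wg.Done()
			for item := range queue {
				slots[item.id] <- result{item.id, item.job()}
			}
		}()
	}

	// Forward per-job results in order, then wait for workers and close.
	go func() {
		defer close(out)
		for _, slot := range slots {
			out <- <-slot
		}
		wg.Wait()
	}()

	return out
}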
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/testrunner/test_runner.go (77 changes, generated, vendored)

@@ -22,30 +22,48 @@ import (
 )
 
 type TestRunner struct {
 	Suite testsuite.TestSuite
 
-	compiled bool
+	compiled              bool
+	compilationTargetPath string
 
 	numCPU         int
 	parallelStream bool
 	race           bool
 	cover          bool
+	coverPkg       string
 	tags           string
 	additionalArgs []string
 }
 
-func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool, cover bool, tags string, additionalArgs []string) *TestRunner {
-	return &TestRunner{
+func New(suite testsuite.TestSuite, numCPU int, parallelStream bool, race bool, cover bool, coverPkg string, tags string, additionalArgs []string) *TestRunner {
+	runner := &TestRunner{
 		Suite:          suite,
 		numCPU:         numCPU,
 		parallelStream: parallelStream,
 		race:           race,
 		cover:          cover,
+		coverPkg:       coverPkg,
 		tags:           tags,
 		additionalArgs: additionalArgs,
 	}
+
+	if !suite.Precompiled {
+		dir, err := ioutil.TempDir("", "ginkgo")
+		if err != nil {
+			panic(fmt.Sprintf("coulnd't create temporary directory... might be time to rm -rf:\n%s", err.Error()))
+		}
+		runner.compilationTargetPath = filepath.Join(dir, suite.PackageName+".test")
+	}
+
+	return runner
 }
 
 func (t *TestRunner) Compile() error {
+	return t.CompileTo(t.compilationTargetPath)
+}
+
+func (t *TestRunner) CompileTo(path string) error {
 	if t.compiled {
 		return nil
 	}
@@ -54,15 +72,16 @@ func (t *TestRunner) Compile() error {
 		return nil
 	}
 
-	os.Remove(t.compiledArtifact())
-
-	args := []string{"test", "-c", "-i"}
+	args := []string{"test", "-c", "-i", "-o", path}
 	if t.race {
 		args = append(args, "-race")
 	}
-	if t.cover {
+	if t.cover || t.coverPkg != "" {
 		args = append(args, "-cover", "-covermode=atomic")
 	}
+	if t.coverPkg != "" {
+		args = append(args, fmt.Sprintf("-coverpkg=%s", t.coverPkg))
+	}
 	if t.tags != "" {
 		args = append(args, fmt.Sprintf("-tags=%s", t.tags))
 	}
@@ -78,10 +97,11 @@ func (t *TestRunner) Compile() error {
 		if len(output) > 0 {
 			return fmt.Errorf("Failed to compile %s:\n\n%s", t.Suite.PackageName, fixedOutput)
 		}
-		return fmt.Errorf("")
+		return fmt.Errorf("Failed to compile %s", t.Suite.PackageName)
 	}
 
 	t.compiled = true
 
 	return nil
 }
 
@@ -134,12 +154,7 @@ func (t *TestRunner) CleanUp() {
 	if t.Suite.Precompiled {
 		return
 	}
-	os.Remove(t.compiledArtifact())
-}
-
-func (t *TestRunner) compiledArtifact() string {
-	compiledArtifact, _ := filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName)))
-	return compiledArtifact
+	os.RemoveAll(filepath.Dir(t.compilationTargetPath))
 }
 
 func (t *TestRunner) runSerialGinkgoSuite() RunResult {
@@ -196,7 +211,7 @@ func (t *TestRunner) runAndStreamParallelGinkgoSuite() RunResult {
 
 	os.Stdout.Sync()
 
-	if t.cover {
+	if t.cover || t.coverPkg != "" {
 		t.combineCoverprofiles()
 	}
 
@@ -257,21 +272,16 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult {
 		fmt.Println("")
 	case <-time.After(time.Second):
 		//the aggregator never got back to us! something must have gone wrong
-		fmt.Println("")
-		fmt.Println("")
-		fmt.Println("  ----------------------------------------------------------- ")
-		fmt.Println("  |                                                         |")
-		fmt.Println("  |  Ginkgo timed out waiting for all parallel nodes to end!  |")
-		fmt.Println("  |  Here is some salvaged output:                          |")
-		fmt.Println("  |                                                         |")
-		fmt.Println("  ----------------------------------------------------------- ")
-		fmt.Println("")
-		fmt.Println("")
+		fmt.Println(`
+ -------------------------------------------------------------------
+|                                                                   |
+|  Ginkgo timed out waiting for all parallel nodes to report back!  |
+|                                                                   |
+ -------------------------------------------------------------------
+`)
 
 		os.Stdout.Sync()
 
-		time.Sleep(time.Second)
-
 		for _, writer := range writers {
 			writer.Close()
 		}
@@ -283,7 +293,7 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult {
 		os.Stdout.Sync()
 	}
 
-	if t.cover {
+	if t.cover || t.coverPkg != "" {
 		t.combineCoverprofiles()
 	}
 
@@ -292,7 +302,7 @@ func (t *TestRunner) runParallelGinkgoSuite() RunResult {
 
 func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.Cmd {
 	args := []string{"--test.timeout=24h"}
-	if t.cover {
+	if t.cover || t.coverPkg != "" {
 		coverprofile := "--test.coverprofile=" + t.Suite.PackageName + ".coverprofile"
 		if t.numCPU > 1 {
 			coverprofile = fmt.Sprintf("%s.%d", coverprofile, node)
@@ -303,7 +313,12 @@ func (t *TestRunner) cmd(ginkgoArgs []string, stream io.Writer, node int) *exec.
 	args = append(args, ginkgoArgs...)
 	args = append(args, t.additionalArgs...)
 
-	cmd := exec.Command(t.compiledArtifact(), args...)
+	path := t.compilationTargetPath
+	if t.Suite.Precompiled {
+		path, _ = filepath.Abs(filepath.Join(t.Suite.Path, fmt.Sprintf("%s.test", t.Suite.PackageName)))
+	}
+
+	cmd := exec.Command(path, args...)
 
 	cmd.Dir = t.Suite.Path
 	cmd.Stderr = stream
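The move from compiling <package>.test into the package directory to compiling into a throwaway temp directory is the core of this file's change. A minimal sketch of that technique on its own, assuming only the standard library and the go tool on PATH (names are illustrative; the -i flag from the diff is omitted here):

package buildtmp

import (
	"fmt"
	"io/ioutil"
	"os"
	"os/exec"
	"path/filepath"
)

// compileTestBinary compiles a package's test binary into a fresh
// temporary directory and returns its path, keeping the package
// directory itself pristine.
func compileTestBinary(pkgDir, pkgName string) (string, error) {
	dir, err := ioutil.TempDir("", "ginkgo")
	if err != nil {
		return "", err
	}
	target := filepath.Join(dir, pkgName+".test")

	cmd := exec.Command("go", "test", "-c", "-o", target)
	cmd.Dir = pkgDir
	if out, err := cmd.CombinedOutput(); err != nil {
		os.RemoveAll(dir) // remove the temp dir on failure; after a run, a CleanUp-style call removes it too
		return "", fmt.Errorf("failed to compile %s:\n%s", pkgName, out)
	}
	return target, nil
}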
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo/watch_command.go (2 changes, generated, vendored)

@@ -57,7 +57,7 @@ func (w *SpecWatcher) runnersForSuites(suites []testsuite.TestSuite, additionalA
 	runners := []*testrunner.TestRunner{}
 
 	for _, suite := range suites {
-		runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Race, w.commandFlags.Cover, w.commandFlags.Tags, additionalArgs))
+		runners = append(runners, testrunner.New(suite, w.commandFlags.NumCPU, w.commandFlags.ParallelStream, w.commandFlags.Race, w.commandFlags.Cover, w.commandFlags.CoverPkg, w.commandFlags.Tags, additionalArgs))
	}
 
 	return runners
Godeps/_workspace/src/github.com/onsi/ginkgo/ginkgo_dsl.go (4 changes, generated, vendored)

@@ -130,6 +130,7 @@ type Done chan<- interface{}
 // IsMeasurement: true if the current test is a measurement
 // FileName: the name of the file containing the current test
 // LineNumber: the line number for the current test
+// Failed: if the current test has failed, this will be true (useful in an AfterEach)
 type GinkgoTestDescription struct {
 	FullTestText   string
 	ComponentTexts []string
@@ -139,6 +140,8 @@ type GinkgoTestDescription struct {
 
 	FileName   string
 	LineNumber int
+
+	Failed bool
 }
 
 //CurrentGinkgoTestDescripton returns information about the current running test.
@@ -157,6 +160,7 @@ func CurrentGinkgoTestDescription() GinkgoTestDescription {
 		IsMeasurement: summary.IsMeasurement,
 		FileName:      subjectCodeLocation.FileName,
 		LineNumber:    subjectCodeLocation.LineNumber,
+		Failed:        summary.HasFailureState(),
 	}
 }
 
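The new Failed field is what enables failure-aware teardown; the e2e framework added later in this PR uses it to dump events only for failing specs. A minimal sketch of the intended usage in an AfterEach (the suite and spec names here are illustrative):

package mypackage_test

import (
	"fmt"

	. "github.com/onsi/ginkgo"
)

var _ = Describe("failure-aware teardown", func() {
	AfterEach(func() {
		// Failed is the field this PR adds to GinkgoTestDescription;
		// it is set from summary.HasFailureState() for the current spec.
		if CurrentGinkgoTestDescription().Failed {
			fmt.Println("spec failed; collecting diagnostics...")
		}
	})

	It("does some work", func() {
		// ...
	})
})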
Godeps/_workspace/src/github.com/onsi/ginkgo/integration/coverage_test.go (24 changes, generated, vendored)

@@ -1,11 +1,12 @@
 package integration_test
 
 import (
+	"os"
+	"os/exec"
+
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 	"github.com/onsi/gomega/gexec"
-	"os"
-	"os/exec"
 )
 
 var _ = Describe("Coverage Specs", func() {
@@ -31,4 +32,23 @@ var _ = Describe("Coverage Specs", func() {
 
 		Ω(parallelCoverProfileOutput).Should(Equal(serialCoverProfileOutput))
 	})
+
+	It("runs coverage analysis on external packages in series and in parallel", func() {
+		session := startGinkgo("./_fixtures/coverage_fixture", "-coverpkg=github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture,github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture")
+		Eventually(session).Should(gexec.Exit(0))
+		output := session.Out.Contents()
+		Ω(output).Should(ContainSubstring("coverage: 71.4% of statements in github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture, github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture"))
+
+		serialCoverProfileOutput, err := exec.Command("go", "tool", "cover", "-func=./_fixtures/coverage_fixture/coverage_fixture.coverprofile").CombinedOutput()
+		Ω(err).ShouldNot(HaveOccurred())
+
+		os.RemoveAll("./_fixtures/coverage_fixture/coverage_fixture.coverprofile")
+
+		Eventually(startGinkgo("./_fixtures/coverage_fixture", "-coverpkg=github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture,github.com/onsi/ginkgo/integration/_fixtures/coverage_fixture/external_coverage_fixture", "-nodes=4")).Should(gexec.Exit(0))
+
+		parallelCoverProfileOutput, err := exec.Command("go", "tool", "cover", "-func=./_fixtures/coverage_fixture/coverage_fixture.coverprofile").CombinedOutput()
+		Ω(err).ShouldNot(HaveOccurred())
+
+		Ω(parallelCoverProfileOutput).Should(Equal(serialCoverProfileOutput))
+	})
 })
Godeps/_workspace/src/github.com/onsi/ginkgo/integration/suite_setup_test.go (5 changes, generated, vendored)

@@ -1,10 +1,11 @@
 package integration_test
 
 import (
+	"strings"
+
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 	"github.com/onsi/gomega/gexec"
-	"strings"
 )
 
 var _ = Describe("SuiteSetup", func() {
@@ -171,7 +172,7 @@ var _ = Describe("SuiteSetup", func() {
 			output := string(session.Out.Contents())
 
 			Ω(output).Should(ContainSubstring("Node 1 disappeared before completing BeforeSuite"))
-			Ω(output).Should(ContainSubstring("Ginkgo timed out waiting for all parallel nodes to end"))
+			Ω(output).Should(ContainSubstring("Ginkgo timed out waiting for all parallel nodes to report back!"))
 		})
 	})
 })
Godeps/_workspace/src/github.com/onsi/ginkgo/integration/test_description_test.go (25 additions, generated, vendored, new file)

@@ -0,0 +1,25 @@
+package integration_test
+
+import (
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	"github.com/onsi/gomega/gbytes"
+	"github.com/onsi/gomega/gexec"
+)
+
+var _ = Describe("TestDescription", func() {
+	var pathToTest string
+
+	BeforeEach(func() {
+		pathToTest = tmpPath("test_description")
+		copyIn("test_description", pathToTest)
+	})
+
+	It("should capture and emit information about the current test", func() {
+		session := startGinkgo(pathToTest, "--noColor")
+		Eventually(session).Should(gexec.Exit(1))
+
+		Ω(session).Should(gbytes.Say("TestDescription should pass:false"))
+		Ω(session).Should(gbytes.Say("TestDescription should fail:true"))
+	})
+})
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor_test.go (64 deletions, generated, vendored)

@@ -1,64 +0,0 @@
-package remote_test
-
-import (
-	"fmt"
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/ginkgo/internal/remote"
-	. "github.com/onsi/gomega"
-	"os"
-)
-
-var _ = Describe("OutputInterceptor", func() {
-	var interceptor OutputInterceptor
-
-	BeforeEach(func() {
-		interceptor = NewOutputInterceptor()
-	})
-
-	It("should capture all stdout/stderr output", func() {
-		err := interceptor.StartInterceptingOutput()
-		Ω(err).ShouldNot(HaveOccurred())
-
-		fmt.Fprint(os.Stdout, "STDOUT")
-		fmt.Fprint(os.Stderr, "STDERR")
-		print("PRINT")
-
-		output, err := interceptor.StopInterceptingAndReturnOutput()
-
-		Ω(output).Should(Equal("STDOUTSTDERRPRINT"))
-		Ω(err).ShouldNot(HaveOccurred())
-	})
-
-	It("should error if told to intercept output twice", func() {
-		err := interceptor.StartInterceptingOutput()
-		Ω(err).ShouldNot(HaveOccurred())
-
-		print("A")
-
-		err = interceptor.StartInterceptingOutput()
-		Ω(err).Should(HaveOccurred())
-
-		print("B")
-
-		output, err := interceptor.StopInterceptingAndReturnOutput()
-
-		Ω(output).Should(Equal("AB"))
-		Ω(err).ShouldNot(HaveOccurred())
-	})
-
-	It("should allow multiple interception sessions", func() {
-		err := interceptor.StartInterceptingOutput()
-		Ω(err).ShouldNot(HaveOccurred())
-		print("A")
-		output, err := interceptor.StopInterceptingAndReturnOutput()
-		Ω(output).Should(Equal("A"))
-		Ω(err).ShouldNot(HaveOccurred())
-
-		err = interceptor.StartInterceptingOutput()
-		Ω(err).ShouldNot(HaveOccurred())
-		print("B")
-		output, err = interceptor.StopInterceptingAndReturnOutput()
-		Ω(output).Should(Equal("B"))
-		Ω(err).ShouldNot(HaveOccurred())
-	})
-})
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/remote/output_interceptor_unix.go (32 changes, generated, vendored)

@@ -14,10 +14,8 @@ func NewOutputInterceptor() OutputInterceptor {
 }
 
 type outputInterceptor struct {
-	stdoutPlaceholder *os.File
-	stderrPlaceholder *os.File
-	redirectFile      *os.File
-	intercepting      bool
+	redirectFile *os.File
+	intercepting bool
 }
 
 func (interceptor *outputInterceptor) StartInterceptingOutput() error {
@@ -33,19 +31,6 @@ func (interceptor *outputInterceptor) StartInterceptingOutput() error {
 		return err
 	}
 
-	interceptor.stdoutPlaceholder, err = ioutil.TempFile("", "ginkgo-output")
-	if err != nil {
-		return err
-	}
-
-	interceptor.stderrPlaceholder, err = ioutil.TempFile("", "ginkgo-output")
-	if err != nil {
-		return err
-	}
-
-	syscall.Dup2(1, int(interceptor.stdoutPlaceholder.Fd()))
-	syscall.Dup2(2, int(interceptor.stderrPlaceholder.Fd()))
-
 	syscall.Dup2(int(interceptor.redirectFile.Fd()), 1)
 	syscall.Dup2(int(interceptor.redirectFile.Fd()), 2)
 
@@ -57,18 +42,9 @@ func (interceptor *outputInterceptor) StopInterceptingAndReturnOutput() (string,
 		return "", errors.New("Not intercepting output!")
 	}
 
-	syscall.Dup2(int(interceptor.stdoutPlaceholder.Fd()), 1)
-	syscall.Dup2(int(interceptor.stderrPlaceholder.Fd()), 2)
-
-	for _, f := range []*os.File{interceptor.redirectFile, interceptor.stdoutPlaceholder, interceptor.stderrPlaceholder} {
-		f.Close()
-	}
+	interceptor.redirectFile.Close()
 
 	output, err := ioutil.ReadFile(interceptor.redirectFile.Name())
-
-	for _, f := range []*os.File{interceptor.redirectFile, interceptor.stdoutPlaceholder, interceptor.stderrPlaceholder} {
-		os.Remove(f.Name())
-	}
+	os.Remove(interceptor.redirectFile.Name())
 
 	interceptor.intercepting = false
 
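The technique underneath this file is file-descriptor redirection: syscall.Dup2 points fds 1 and 2 at a temp file, so reading the file back yields everything written through any path (os.Stdout, C code, the built-in print). A standalone sketch of the idea that, unlike the simplified interceptor above, also saves and restores the original descriptors (names are illustrative; unix-only):

package intercept

import (
	"io/ioutil"
	"os"
	"syscall"
)

// captureOutput redirects fds 1 and 2 into a temp file while fn runs,
// restores them afterwards, and returns whatever was written.
func captureOutput(fn func()) (string, error) {
	f, err := ioutil.TempFile("", "capture")
	if err != nil {
		return "", err
	}
	defer os.Remove(f.Name())
	defer f.Close()

	// Save the real stdout/stderr so they can be restored.
	savedStdout, _ := syscall.Dup(1)
	savedStderr, _ := syscall.Dup(2)

	syscall.Dup2(int(f.Fd()), 1)
	syscall.Dup2(int(f.Fd()), 2)

	fn()
	os.Stdout.Sync()

	// Put the original descriptors back.
	syscall.Dup2(savedStdout, 1)
	syscall.Dup2(savedStderr, 2)
	syscall.Close(savedStdout)
	syscall.Close(savedStderr)

	out, err := ioutil.ReadFile(f.Name())
	return string(out), err
}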
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/spec/spec.go (26 changes, generated, vendored)

@@ -115,7 +115,7 @@ func (spec *Spec) Run(writer io.Writer) {
 	}()
 
 	for sample := 0; sample < spec.subject.Samples(); sample++ {
-		spec.state, spec.failure = spec.runSample(sample, writer)
+		spec.runSample(sample, writer)
 
 		if spec.state != types.SpecStatePassed {
 			return
@@ -123,9 +123,9 @@ func (spec *Spec) Run(writer io.Writer) {
 	}
 }
 
-func (spec *Spec) runSample(sample int, writer io.Writer) (specState types.SpecState, specFailure types.SpecFailure) {
-	specState = types.SpecStatePassed
-	specFailure = types.SpecFailure{}
+func (spec *Spec) runSample(sample int, writer io.Writer) {
+	spec.state = types.SpecStatePassed
+	spec.failure = types.SpecFailure{}
 	innerMostContainerIndexToUnwind := -1
 
 	defer func() {
@@ -134,9 +134,9 @@ func (spec *Spec) runSample(sample int, writer io.Writer) (specState types.SpecS
 		for _, afterEach := range container.SetupNodesOfType(types.SpecComponentTypeAfterEach) {
 			spec.announceSetupNode(writer, "AfterEach", container, afterEach)
 			afterEachState, afterEachFailure := afterEach.Run()
-			if afterEachState != types.SpecStatePassed && specState == types.SpecStatePassed {
-				specState = afterEachState
-				specFailure = afterEachFailure
+			if afterEachState != types.SpecStatePassed && spec.state == types.SpecStatePassed {
+				spec.state = afterEachState
+				spec.failure = afterEachFailure
 			}
 		}
 	}
@@ -146,8 +146,8 @@ func (spec *Spec) runSample(sample int, writer io.Writer) (specState types.SpecS
 		innerMostContainerIndexToUnwind = i
 		for _, beforeEach := range container.SetupNodesOfType(types.SpecComponentTypeBeforeEach) {
 			spec.announceSetupNode(writer, "BeforeEach", container, beforeEach)
-			specState, specFailure = beforeEach.Run()
-			if specState != types.SpecStatePassed {
+			spec.state, spec.failure = beforeEach.Run()
+			if spec.state != types.SpecStatePassed {
 				return
 			}
 		}
@@ -156,17 +156,15 @@ func (spec *Spec) runSample(sample int, writer io.Writer) (specState types.SpecS
 	for _, container := range spec.containers {
 		for _, justBeforeEach := range container.SetupNodesOfType(types.SpecComponentTypeJustBeforeEach) {
 			spec.announceSetupNode(writer, "JustBeforeEach", container, justBeforeEach)
-			specState, specFailure = justBeforeEach.Run()
-			if specState != types.SpecStatePassed {
+			spec.state, spec.failure = justBeforeEach.Run()
+			if spec.state != types.SpecStatePassed {
 				return
 			}
 		}
 	}
 
 	spec.announceSubject(writer, spec.subject)
-	specState, specFailure = spec.subject.Run()
-
-	return
+	spec.state, spec.failure = spec.subject.Run()
 }
 
 func (spec *Spec) announceSetupNode(writer io.Writer, nodeType string, container *containernode.ContainerNode, setupNode leafnodes.BasicNode) {
Godeps/_workspace/src/github.com/onsi/ginkgo/internal/suite/suite_test.go (1 change, generated, vendored)

@@ -120,6 +120,7 @@ var _ = Describe("Suite", func() {
 			Ω(description.FileName).Should(ContainSubstring("suite_test.go"))
 			Ω(description.LineNumber).Should(BeNumerically(">", 50))
 			Ω(description.LineNumber).Should(BeNumerically("<", 150))
+			Ω(description.Failed).Should(BeFalse())
 		})
 
 		Measure("should run measurements", func(b Benchmarker) {
test/e2e/framework.go (96 additions, new file)

@@ -0,0 +1,96 @@
+/*
+Copyright 2015 The Kubernetes Authors All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e
+
+import (
+	"fmt"
+
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+// Framework supports common operations used by e2e tests; it will keep a client & a namespace for you.
+// Eventual goal is to merge this with integration test framework.
+type Framework struct {
+	BaseName string
+
+	Namespace *api.Namespace
+	Client    *client.Client
+}
+
+// NewFramework makes a new framework and sets up a BeforeEach/AfterEach for
+// you (you can write additional before/after each functions).
+func NewFramework(baseName string) *Framework {
+	f := &Framework{
+		BaseName: baseName,
+	}
+
+	BeforeEach(f.beforeEach)
+	AfterEach(f.afterEach)
+
+	return f
+}
+
+// beforeEach gets a client and makes a namespace.
+func (f *Framework) beforeEach() {
+	By("Creating a kubernetes client")
+	c, err := loadClient()
+	Expect(err).NotTo(HaveOccurred())
+
+	f.Client = c
+
+	By("Building a namespace api object")
+	namespace, err := createTestingNS(f.BaseName, f.Client)
+	Expect(err).NotTo(HaveOccurred())
+
+	f.Namespace = namespace
+}
+
+// afterEach deletes the namespace, after reading its events.
+func (f *Framework) afterEach() {
+	// Print events if the test failed.
+	if CurrentGinkgoTestDescription().Failed {
+		By(fmt.Sprintf("Collecting events from namespace %q.", f.Namespace.Name))
+		events, err := f.Client.Events(f.Namespace.Name).List(labels.Everything(), fields.Everything())
+		Expect(err).NotTo(HaveOccurred())
+
+		for _, e := range events.Items {
+			Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
+		}
+		// Note that we don't wait for any cleanup to propagate, which means
+		// that if you delete a bunch of pods right before ending your test,
+		// you may or may not see the killing/deletion/cleanup events.
+	}
+
+	By(fmt.Sprintf("Destroying namespace %q for this suite.", f.Namespace.Name))
+	if err := f.Client.Namespaces().Delete(f.Namespace.Name); err != nil {
+		Failf("Couldn't delete ns %q: %s", f.Namespace.Name, err)
+	}
+	// Paranoia-- prevent reuse!
+	f.Namespace = nil
+	f.Client = nil
+}
+
+// WaitForPodRunning waits for the pod to run in the namespace.
+func (f *Framework) WaitForPodRunning(podName string) error {
+	return waitForPodRunningInNamespace(f.Client, podName, f.Namespace.Name)
+}
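The networking test below is migrated to this framework; in isolation, consuming it looks roughly like the following sketch (the spec body is illustrative, while NewFramework, f.Client, and f.Namespace come from the file above):

package e2e

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Example", func() {
	// Registers the framework's BeforeEach/AfterEach: a fresh client and
	// namespace per spec, event dumping on failure, namespace teardown.
	f := NewFramework("example")

	It("can list pods in its private namespace", func() {
		pods, err := f.Client.Pods(f.Namespace.Name).List(labels.Everything(), fields.Everything())
		Expect(err).NotTo(HaveOccurred())
		Expect(pods.Items).To(BeEmpty()) // nothing created yet in a fresh namespace
	})
})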
@ -19,10 +19,10 @@ package e2e
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
|
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
|
||||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
|
|
||||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
|
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
|
||||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
|
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
|
||||||
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
|
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
|
||||||
@ -31,20 +31,17 @@ import (
|
|||||||
. "github.com/onsi/gomega"
|
. "github.com/onsi/gomega"
|
||||||
)
|
)
|
||||||
|
|
||||||
func LaunchNetTestPodPerNode(nodes *api.NodeList, name string, c *client.Client, ns string) []string {
|
func LaunchNetTestPodPerNode(f *Framework, nodes *api.NodeList, name string) []string {
|
||||||
podNames := []string{}
|
podNames := []string{}
|
||||||
|
|
||||||
totalPods := len(nodes.Items)
|
totalPods := len(nodes.Items)
|
||||||
|
|
||||||
Expect(totalPods).NotTo(Equal(0))
|
Expect(totalPods).NotTo(Equal(0))
|
||||||
|
|
||||||
for i, node := range nodes.Items {
|
for _, node := range nodes.Items {
|
||||||
podName := fmt.Sprintf("%s-%d", name, i)
|
pod, err := f.Client.Pods(f.Namespace.Name).Create(&api.Pod{
|
||||||
podNames = append(podNames, podName)
|
|
||||||
Logf("Creating pod %s on node %s", podName, node.Name)
|
|
||||||
_, err := c.Pods(ns).Create(&api.Pod{
|
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: api.ObjectMeta{
|
||||||
Name: podName,
|
GenerateName: name + "-",
|
||||||
Labels: map[string]string{
|
Labels: map[string]string{
|
||||||
"name": name,
|
"name": name,
|
||||||
},
|
},
|
||||||
@ -59,7 +56,7 @@ func LaunchNetTestPodPerNode(nodes *api.NodeList, name string, c *client.Client,
|
|||||||
//peers >= totalPods should be asserted by the container.
|
//peers >= totalPods should be asserted by the container.
|
||||||
//the nettest container finds peers by looking up list of svc endpoints.
|
//the nettest container finds peers by looking up list of svc endpoints.
|
||||||
fmt.Sprintf("-peers=%d", totalPods),
|
fmt.Sprintf("-peers=%d", totalPods),
|
||||||
"-namespace=" + ns},
|
"-namespace=" + f.Namespace.Name},
|
||||||
Ports: []api.ContainerPort{{ContainerPort: 8080}},
|
Ports: []api.ContainerPort{{ContainerPort: 8080}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -68,16 +65,16 @@ func LaunchNetTestPodPerNode(nodes *api.NodeList, name string, c *client.Client,
 			},
 		})
 		Expect(err).NotTo(HaveOccurred())
+		Logf("Created pod %s on node %s", pod.ObjectMeta.Name, node.Name)
+		podNames = append(podNames, pod.ObjectMeta.Name)
 	}
 	return podNames
 }
 
 var _ = Describe("Networking", func() {
+	f := NewFramework("nettest")
 
-	//This namespace is modified throughout the course of the test.
-	var namespace *api.Namespace
 	var svcname = "nettest"
-	var c *client.Client = nil
 
 	BeforeEach(func() {
 		//Assert basic external connectivity.
@@ -91,22 +88,6 @@ var _ = Describe("Networking", func() {
 		if resp.StatusCode != http.StatusOK {
 			Failf("Unexpected error code, expected 200, got, %v (%v)", resp.StatusCode, resp)
 		}
-
-		By("Creating a kubernetes client")
-		c, err = loadClient()
-		Expect(err).NotTo(HaveOccurred())
-
-		By("Building a namespace api object")
-		namespace, err = createTestingNS("nettest", c)
-		Expect(err).NotTo(HaveOccurred())
-	})
-
-	AfterEach(func() {
-		By(fmt.Sprintf("Destroying namespace for this suite %v", namespace.Name))
-		if err := c.Namespaces().Delete(namespace.Name); err != nil {
-			Failf("Couldn't delete ns %s", err)
-		}
 	})
 
 	// First test because it has no dependencies on variables created later on.
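The setup and teardown deleted above (loadClient, createTestingNS, and the namespace-destroying AfterEach) is exactly what NewFramework is meant to absorb; the usage in this file implies a shape like the sketch below. Field and method names follow the calls visible in this diff (f.Client, f.Namespace, f.WaitForPodRunning); the bodies are illustrative, not the committed implementation. loadClient, createTestingNS, waitForPodRunningInNamespace, and Failf are the e2e package helpers the old inline code used.

    package e2e

    import (
        "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
        "github.com/GoogleCloudPlatform/kubernetes/pkg/client"

        . "github.com/onsi/ginkgo"
        . "github.com/onsi/gomega"
    )

    // Framework bundles the per-spec client and namespace that every test
    // previously wired up by hand in BeforeEach/AfterEach.
    type Framework struct {
        BaseName  string
        Client    *client.Client
        Namespace *api.Namespace
    }

    func NewFramework(baseName string) *Framework {
        f := &Framework{BaseName: baseName}
        BeforeEach(func() {
            c, err := loadClient() // same helper the old BeforeEach called
            Expect(err).NotTo(HaveOccurred())
            f.Client = c
            f.Namespace, err = createTestingNS(baseName, c)
            Expect(err).NotTo(HaveOccurred())
        })
        AfterEach(func() {
            if err := f.Client.Namespaces().Delete(f.Namespace.Name); err != nil {
                Failf("Couldn't delete ns %s", err)
            }
        })
        return f
    }

    // WaitForPodRunning wraps the package helper with the framework's own
    // client and namespace, matching the call site in this diff.
    func (f *Framework) WaitForPodRunning(podName string) error {
        return waitForPodRunningInNamespace(f.Client, podName, f.Namespace.Name)
    }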
@@ -120,8 +101,8 @@ var _ = Describe("Networking", func() {
 		}
 		for _, test := range tests {
 			By(fmt.Sprintf("testing: %s", test.path))
-			data, err := c.RESTClient.Get().
-				Namespace(namespace.Name).
+			data, err := f.Client.RESTClient.Get().
+				Namespace(f.Namespace.Name).
 				AbsPath(test.path).
 				DoRaw()
 			if err != nil {
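The `for _, test := range tests` loop above is plain Go table-driven testing: each table entry carries a path, and one loop body issues the request and evaluates the response. Out of context it reduces to something like this sketch, using net/http against a placeholder host rather than the Kubernetes REST client:

    package main

    import (
        "fmt"
        "net/http"
    )

    func main() {
        // Table-driven endpoint checks, mirroring the structure of the test above.
        tests := []struct{ path string }{
            {"/healthz"},
            {"/version"},
        }
        for _, test := range tests {
            resp, err := http.Get("http://localhost:8080" + test.path) // placeholder host
            if err != nil {
                fmt.Printf("%s: request failed: %v\n", test.path, err)
                continue
            }
            resp.Body.Close()
            fmt.Printf("%s: %d\n", test.path, resp.StatusCode)
        }
    }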
@@ -139,8 +120,8 @@ var _ = Describe("Networking", func() {
 			return
 		}
 
-		By(fmt.Sprintf("Creating a service named [%s] in namespace %s", svcname, namespace.Name))
-		svc, err := c.Services(namespace.Name).Create(&api.Service{
+		By(fmt.Sprintf("Creating a service named %q in namespace %q", svcname, f.Namespace.Name))
+		svc, err := f.Client.Services(f.Namespace.Name).Create(&api.Service{
 			ObjectMeta: api.ObjectMeta{
 				Name: svcname,
 				Labels: map[string]string{
@@ -166,26 +147,26 @@ var _ = Describe("Networking", func() {
 		defer func() {
 			defer GinkgoRecover()
 			By("Cleaning up the service")
-			if err = c.Services(namespace.Name).Delete(svc.Name); err != nil {
+			if err = f.Client.Services(f.Namespace.Name).Delete(svc.Name); err != nil {
 				Failf("unable to delete svc %v: %v", svc.Name, err)
 			}
 		}()
 
 		By("Creating a webserver (pending) pod on each node")
 
-		nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
+		nodes, err := f.Client.Nodes().List(labels.Everything(), fields.Everything())
 		if err != nil {
 			Failf("Failed to list nodes: %v", err)
 		}
 
-		podNames := LaunchNetTestPodPerNode(nodes, svcname, c, namespace.Name)
+		podNames := LaunchNetTestPodPerNode(f, nodes, svcname)
 
 		// Clean up the pods
 		defer func() {
 			defer GinkgoRecover()
 			By("Cleaning up the webserver pods")
 			for _, podName := range podNames {
-				if err = c.Pods(namespace.Name).Delete(podName, nil); err != nil {
+				if err = f.Client.Pods(f.Namespace.Name).Delete(podName, nil); err != nil {
 					Logf("Failed to delete pod %s: %v", podName, err)
 				}
 			}
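Both deferred cleanup closures above begin with `defer GinkgoRecover()`. Ginkgo's Fail (and the suite's Failf wrapper) report failure by panicking, so a failure raised inside a deferred cleanup needs GinkgoRecover re-armed there for the panic to be routed back to Ginkgo and reported against the spec, rather than escaping as a raw panic. A stripped-down illustration; `deleteThings` is a hypothetical stand-in for the service/pod deletions:

    package e2e

    import . "github.com/onsi/ginkgo"

    // deleteThings is a hypothetical stand-in for the cleanup calls above.
    func deleteThings() error { return nil }

    var _ = Describe("Cleanup", func() {
        It("cleans up safely even when cleanup itself fails", func() {
            defer func() {
                // Failf panics; GinkgoRecover catches that panic inside this
                // deferred closure and hands the failure back to Ginkgo.
                defer GinkgoRecover()
                if err := deleteThings(); err != nil {
                    Failf("cleanup failed: %v", err) // package helper seen in this diff
                }
            }()
            // ... exercise the system under test ...
        })
    })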
@@ -193,63 +174,67 @@ var _ = Describe("Networking", func() {
 
 		By("Waiting for the webserver pods to transition to Running state")
 		for _, podName := range podNames {
-			err = waitForPodRunningInNamespace(c, podName, namespace.Name)
+			err = f.WaitForPodRunning(podName)
 			Expect(err).NotTo(HaveOccurred())
 		}
 
 		By("Waiting for connectivity to be verified")
-		const maxAttempts = 60
 		passed := false
 
 		//once response OK, evaluate response body for pass/fail.
 		var body []byte
+		getDetails := func() ([]byte, error) {
+			return f.Client.Get().
+				Namespace(f.Namespace.Name).
+				Prefix("proxy").
+				Resource("services").
+				Name(svc.Name).
+				Suffix("read").
+				DoRaw()
+		}
 
-		for i := 0; i < maxAttempts && !passed; i++ {
-			time.Sleep(2 * time.Second)
-			Logf("About to make a proxy status call")
-			start := time.Now()
-			body, err = c.Get().
-				Namespace(namespace.Name).
+		getStatus := func() ([]byte, error) {
+			return f.Client.Get().
+				Namespace(f.Namespace.Name).
 				Prefix("proxy").
 				Resource("services").
 				Name(svc.Name).
 				Suffix("status").
 				DoRaw()
+		}
 
+		for i := 0; !passed; i++ { // Timeout will keep us from going forever.
+			time.Sleep(2 * time.Second)
+			Logf("About to make a proxy status call")
+			start := time.Now()
+			body, err = getStatus()
 			Logf("Proxy status call returned in %v", time.Since(start))
 			if err != nil {
-				Logf("Attempt %v/%v: service/pod still starting. (error: '%v')", i, maxAttempts, err)
+				Logf("Attempt %v: service/pod still starting. (error: '%v')", i, err)
 				continue
 			}
-			//Finally, we pass/fail the test based on if the container's response body, as to wether or not it was able to find peers.
-			switch string(body) {
-			case "pass":
+			// Finally, we pass/fail the test based on if the container's response body, as to wether or not it was able to find peers.
+			switch {
+			case string(body) == "pass":
 				Logf("Passed on attempt %v. Cleaning up.", i)
 				passed = true
-			case "running":
-				Logf("Attempt %v/%v: test still running", i, maxAttempts)
-			case "fail":
-				if body, err = c.Get().
-					Namespace(namespace.Name).Prefix("proxy").
-					Resource("services").
-					Name(svc.Name).Suffix("read").
-					DoRaw(); err != nil {
+			case string(body) == "running":
+				Logf("Attempt %v: test still running", i)
+			case string(body) == "fail":
+				if body, err = getDetails(); err != nil {
 					Failf("Failed on attempt %v. Cleaning up. Error reading details: %v", i, err)
 				} else {
 					Failf("Failed on attempt %v. Cleaning up. Details:\n%s", i, string(body))
 				}
+			case strings.Contains(string(body), "no endpoints available"):
+				Logf("Attempt %v: waiting on service/endpoints", i)
 			default:
-				Logf("Unexpected response: %q", body)
+				Logf("Unexpected response:\n%s", body)
 			}
 		}
 
 		if !passed {
-			if body, err = c.Get().
-				Namespace(namespace.Name).
-				Prefix("proxy").
-				Resource("services").
-				Name(svc.Name).
-				Suffix("read").
-				DoRaw(); err != nil {
+			if body, err = getDetails(); err != nil {
 				Failf("Timed out. Cleaning up. Error reading details: %v", err)
 			} else {
 				Failf("Timed out. Cleaning up. Details:\n%s", string(body))
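Two changes in the hunk above are worth calling out: the duplicated request-building chains are hoisted into getStatus/getDetails closures, and the bounded maxAttempts loop becomes an open-ended one that leans on the suite-level timeout this PR introduces (hence the new "Timeout will keep us from going forever" comment). Switching from `switch string(body)` to a tagless `switch` is what allows the new substring case for "no endpoints available" to sit beside the exact matches. A self-contained sketch of that loop-plus-tagless-switch shape, with a stubbed poll in place of the proxy call:

    package main

    import (
        "fmt"
        "strings"
        "time"
    )

    // pollBody stands in for the getStatus closure; it just fakes responses here.
    func pollBody(i int) string {
        if i < 2 {
            return "running"
        }
        return "pass"
    }

    func main() {
        passed := false
        for i := 0; !passed; i++ { // an outer timeout is assumed to bound this loop
            time.Sleep(10 * time.Millisecond)
            body := pollBody(i)
            // A tagless switch lets exact matches and substring checks coexist.
            switch {
            case body == "pass":
                fmt.Println("passed on attempt", i)
                passed = true
            case body == "running":
                fmt.Println("still running, attempt", i)
            case strings.Contains(body, "no endpoints available"):
                fmt.Println("waiting on endpoints, attempt", i)
            default:
                fmt.Printf("unexpected response: %q\n", body)
            }
        }
    }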
@@ -55,8 +55,7 @@ const (
 )
 
 var _ = Describe("Reboot", func() {
-	var c *client.Client = nil
+	var c *client.Client
 
 	BeforeEach(func() {
 		var err error
@@ -27,8 +27,7 @@ import (
 )
 
 var _ = Describe("SSH", func() {
-	var c *client.Client = nil
+	var c *client.Client
 
 	BeforeEach(func() {
 		var err error
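The Reboot and SSH hunks are the same tidy-up: a declared pointer in Go is already nil, so `var c *client.Client = nil` is redundant and `var c *client.Client` says the same thing. A two-line demonstration:

    package main

    import "fmt"

    type Client struct{}

    func main() {
        var a *Client       // zero value for a pointer is nil
        var b *Client = nil // redundant explicit nil
        fmt.Println(a == nil, b == nil) // true true
    }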