Merge pull request #10197 from mesosphere/ginkgo-skip

Use ginkgo's new Skip()
This commit is contained in:
Robert Bailey 2015-06-26 10:44:34 -07:00
commit 198b334227
41 changed files with 494 additions and 244 deletions

4
Godeps/Godeps.json generated
View File

@ -423,8 +423,8 @@
},
{
"ImportPath": "github.com/onsi/ginkgo",
"Comment": "v1.2.0-beta-9-gfbb6632",
"Rev": "fbb663242655b700c623e9629d7781db98957501"
"Comment": "v1.2.0-6-gd981d36",
"Rev": "d981d36e9884231afa909627b9c275e4ba678f90"
},
{
"ImportPath": "github.com/onsi/gomega",

View File

@ -1,6 +1,8 @@
language: go
go:
- 1.3
- 1.4
- tip
install:
- go get -v ./...

View File

@ -1,8 +1,20 @@
## HEAD
Improvements:
- `Skip(message)` can be used to skip the current test.
Bug Fixes:
- Ginkgo tests now fail when you `panic(nil)` (#167)
## 1.2.0 5/31/2015
Improvements
- `ginkgo -coverpkg` calls down to `go test -coverpkg` (#160)
- `ginkgo -afterSuiteHook COMMAND` invokes the passed-in `COMMAND` after a test suite completes (#152)
- Relaxed requirement for Go 1.4+. `ginkgo` now works with Go v1.3+ (#166)
## 1.2.0-beta

View File

@ -59,7 +59,7 @@ Agouti allows you run WebDriver integration tests. Learn more about Agouti [her
## Set Me Up!
You'll need Golang v1.4+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
You'll need Golang v1.3+ (Ubuntu users: you probably have Golang v1.0 -- you'll need to upgrade!)
```bash

View File

@ -20,7 +20,7 @@ import (
"fmt"
)
const VERSION = "1.2.0-beta"
const VERSION = "1.2.0"
type GinkgoConfigType struct {
RandomSeed int64

View File

@ -4,8 +4,11 @@ import (
"fmt"
"os"
"os/exec"
"regexp"
"runtime"
"strings"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/ginkgo/testsuite"
)
@ -99,3 +102,40 @@ func (n *Notifier) SendNotification(title string, subtitle string) {
}
}
}
// RunCommand executes the command configured via -afterSuiteHook (if any)
// once a suite finishes. Before running, it substitutes the suite's pass/fail
// status for "(ginkgo-suite-passed)" and the suite's package name for
// "(ginkgo-suite-name)" inside the command string, then prints the command's
// combined output (colorized unless NoColor is set) and, on failure, also
// sends a desktop notification.
func (n *Notifier) RunCommand(suite testsuite.TestSuite, suitePassed bool) {
	hook := n.commandFlags.AfterSuiteHook
	if hook == "" {
		return
	}

	// Allow for string replacement to pass suite information to the command.
	status := "[FAIL]"
	if suitePassed {
		status = "[PASS]"
	}
	hook = strings.Replace(hook, "(ginkgo-suite-passed)", status, -1)
	hook = strings.Replace(hook, "(ginkgo-suite-name)", suite.PackageName, -1)

	// Break the command line into argv parts, keeping quoted segments intact.
	// NOTE(review): the quoted alternatives are greedy (`'.+'`), so a command
	// with multiple quoted arguments collapses them into one token — confirm
	// whether multi-quoted hooks are expected to work.
	tokenizer := regexp.MustCompile(`'.+'|".+"|\S+`)
	parts := tokenizer.FindAllString(hook, -1)

	output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput()
	if err != nil {
		fmt.Println("Post-suite command failed:")
		if config.DefaultReporterConfig.NoColor {
			fmt.Printf("\t%s\n", output)
		} else {
			fmt.Printf("\t%s%s%s\n", redColor, string(output), defaultStyle)
		}
		n.SendNotification("Ginkgo [ERROR]", fmt.Sprintf(`After suite command "%s" failed`, n.commandFlags.AfterSuiteHook))
		return
	}

	fmt.Println("Post-suite command succeeded:")
	if config.DefaultReporterConfig.NoColor {
		fmt.Printf("\t%s\n", output)
	} else {
		fmt.Printf("\t%s%s%s\n", greenColor, string(output), defaultStyle)
	}
}

View File

@ -20,6 +20,7 @@ type RunWatchAndBuildCommandFlags struct {
NumCompilers int
ParallelStream bool
Notify bool
AfterSuiteHook string
AutoNodes bool
//only for run command
@ -105,6 +106,7 @@ func (c *RunWatchAndBuildCommandFlags) flags(mode int) {
if !onWindows {
c.FlagSet.BoolVar(&(c.Notify), "notify", false, "Send desktop notifications when a test run completes")
}
c.FlagSet.StringVar(&(c.AfterSuiteHook), "afterSuiteHook", "", "Run a command when a suite test run completes")
}
if mode == runMode {

View File

@ -129,6 +129,7 @@ func (r *SuiteRunner) RunSuites(runners []*testrunner.TestRunner, numCompilers i
suiteRunResult = compilationOutput.runner.Run()
}
r.notifier.SendSuiteCompletionNotification(compilationOutput.runner.Suite, suiteRunResult.Passed)
r.notifier.RunCommand(compilationOutput.runner.Suite, suiteRunResult.Passed)
runResult = runResult.Merge(suiteRunResult)
if !suiteRunResult.Passed {
suitesThatFailed = append(suitesThatFailed, compilationOutput.runner.Suite)

View File

@ -100,11 +100,30 @@ func (t *TestRunner) CompileTo(path string) error {
return fmt.Errorf("Failed to compile %s", t.Suite.PackageName)
}
if fileExists(path) == false {
compiledFile := filepath.Join(t.Suite.Path, t.Suite.PackageName+".test")
if fileExists(compiledFile) {
// seems like we are on an old go version that does not support the -o flag on go test
// move the compiled test file to the desired location by hand
err = os.Rename(compiledFile, path)
if err != nil {
return fmt.Errorf("Failed to move compiled file: %s", err)
}
} else {
return fmt.Errorf("Failed to compile %s: output file %q could not be found", t.Suite.PackageName, path)
}
}
t.compiled = true
return nil
}
func fileExists(path string) bool {
_, err := os.Stat(path)
return err == nil || os.IsNotExist(err) == false
}
/*
go test -c -i spits package.test out into the cwd. there's no way to change this.

View File

@ -222,6 +222,17 @@ func buildDefaultReporter() Reporter {
}
}
// Skip notifies Ginkgo that the current spec should be skipped.
//
// message is recorded as the reason for the skip. The optional callerSkip
// adjusts how many extra stack frames to skip when capturing the code
// location reported for the skip (0, the default, points at Skip's
// immediate caller).
func Skip(message string, callerSkip ...int) {
	skip := 0
	if len(callerSkip) > 0 {
		skip = callerSkip[0]
	}

	// Record the skip with the global failer, then panic with the sentinel
	// GINKGO_PANIC to unwind out of the spec body — presumably recovered by
	// the spec runner (the runner is not visible here; confirm).
	globalFailer.Skip(message, codelocation.New(skip+1))
	panic(GINKGO_PANIC)
}
//Fail notifies Ginkgo that the current spec has failed. (Gomega will call Fail for you automatically when an assertion fails.)
func Fail(message string, callerSkip ...int) {
skip := 0

View File

@ -217,7 +217,7 @@ var _ = Describe("Running Specs", func() {
Eventually(session).Should(gexec.Exit(0))
output := string(session.Out.Contents())
Ω(output).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs - 2 nodes •••• SUCCESS! [\d.µs]+`))
Ω(output).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs - 2 nodes •••• SUCCESS! \d+(\.\d+)?[muµ]s`))
Ω(output).Should(ContainSubstring("Test Suite Passed"))
})
})
@ -232,7 +232,7 @@ var _ = Describe("Running Specs", func() {
if nodes > 4 {
nodes = nodes - 1
}
Ω(output).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs - %d nodes •••• SUCCESS! [\d.µs]+`, nodes))
Ω(output).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs - %d nodes •••• SUCCESS! \d+(\.\d+)?[muµ]s`, nodes))
Ω(output).Should(ContainSubstring("Test Suite Passed"))
})
})
@ -272,8 +272,8 @@ var _ = Describe("Running Specs", func() {
output := string(session.Out.Contents())
outputLines := strings.Split(output, "\n")
Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs •••• SUCCESS! [\d.µs]+ PASS`))
Ω(outputLines[1]).Should(MatchRegexp(`\[\d+\] More_ginkgo_tests Suite - 2/2 specs •• SUCCESS! [\d.µs]+ PASS`))
Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs •••• SUCCESS! \d+(\.\d+)?[muµ]s PASS`))
Ω(outputLines[1]).Should(MatchRegexp(`\[\d+\] More_ginkgo_tests Suite - 2/2 specs •• SUCCESS! \d+(\.\d+)?[muµ]s PASS`))
Ω(output).Should(ContainSubstring("Test Suite Passed"))
})
})
@ -290,7 +290,7 @@ var _ = Describe("Running Specs", func() {
output := string(session.Out.Contents())
outputLines := strings.Split(output, "\n")
Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs •••• SUCCESS! [\d.µs]+ PASS`))
Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs •••• SUCCESS! \d+(\.\d+)?[muµ]s PASS`))
Ω(outputLines[1]).Should(MatchRegexp(`\[\d+\] Failing_ginkgo_tests Suite - 2/2 specs`))
Ω(output).Should(ContainSubstring("• Failure"))
Ω(output).ShouldNot(ContainSubstring("More_ginkgo_tests Suite"))
@ -313,7 +313,7 @@ var _ = Describe("Running Specs", func() {
output := string(session.Out.Contents())
outputLines := strings.Split(output, "\n")
Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs •••• SUCCESS! [\d.µs]+ PASS`))
Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs •••• SUCCESS! \d+(\.\d+)?[muµ]s PASS`))
Ω(outputLines[1]).Should(ContainSubstring("Failed to compile C:"))
Ω(output).ShouldNot(ContainSubstring("More_ginkgo_tests Suite"))
Ω(output).Should(ContainSubstring("Test Suite Failed"))
@ -335,11 +335,11 @@ var _ = Describe("Running Specs", func() {
output := string(session.Out.Contents())
outputLines := strings.Split(output, "\n")
Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs •••• SUCCESS! [\d.µs]+ PASS`))
Ω(outputLines[0]).Should(MatchRegexp(`\[\d+\] Passing_ginkgo_tests Suite - 4/4 specs •••• SUCCESS! \d+(\.\d+)?[muµ]s PASS`))
Ω(outputLines[1]).Should(ContainSubstring("Failed to compile B:"))
Ω(output).Should(MatchRegexp(`\[\d+\] Failing_ginkgo_tests Suite - 2/2 specs`))
Ω(output).Should(ContainSubstring("• Failure"))
Ω(output).Should(MatchRegexp(`\[\d+\] More_ginkgo_tests Suite - 2/2 specs •• SUCCESS! [\d.µs]+ PASS`))
Ω(output).Should(MatchRegexp(`\[\d+\] More_ginkgo_tests Suite - 2/2 specs •• SUCCESS! \d+(\.\d+)?[muµ]s PASS`))
Ω(output).Should(ContainSubstring("Test Suite Failed"))
})
})

View File

@ -0,0 +1,43 @@
package integration_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
)
// Integration coverage for ginkgo's Skip(): runs the skip_fixture suite end
// to end and asserts that every flavor of skip (sync, async, goroutine,
// Measure, BeforeEach, AfterEach) is reported with its message and code
// location, that no skipped body runs to completion, and that the summary
// counts all 9 specs as skipped.
var _ = Describe("Skipping Specs", func() {
	var pathToTest string

	BeforeEach(func() {
		// Copy the fixture suite into a fresh scratch directory per spec.
		pathToTest = tmpPath("skipping")
		copyIn("skip_fixture", pathToTest)
	})

	It("should skip in all the possible ways", func() {
		// A suite in which every spec skips should still exit 0 (success).
		session := startGinkgo(pathToTest, "--noColor")
		Eventually(session).Should(gexec.Exit(0))
		output := string(session.Out.Contents())

		// Code placed after a Skip() call must never execute.
		Ω(output).ShouldNot(ContainSubstring("NEVER SEE THIS"))

		// Top-level skips report both the message and the fixture line number.
		Ω(output).Should(ContainSubstring("a top level skip on line 9"))
		Ω(output).Should(ContainSubstring("skip_fixture_test.go:9"))
		Ω(output).Should(ContainSubstring("an async top level skip on line 14"))
		Ω(output).Should(ContainSubstring("skip_fixture_test.go:14"))
		Ω(output).Should(ContainSubstring("a top level goroutine skip on line 21"))
		Ω(output).Should(ContainSubstring("skip_fixture_test.go:21"))

		// Each spec-body variant of Skip is surfaced in the output.
		Ω(output).Should(ContainSubstring("a sync SKIP"))
		Ω(output).Should(ContainSubstring("an async SKIP"))
		Ω(output).Should(ContainSubstring("a goroutine SKIP"))
		Ω(output).Should(ContainSubstring("a measure SKIP"))

		// Skips raised in setup/teardown are attributed to the right phase.
		Ω(output).Should(ContainSubstring("S [SKIPPING] in Spec Setup (BeforeEach) ["))
		Ω(output).Should(ContainSubstring("a BeforeEach SKIP"))
		Ω(output).Should(ContainSubstring("S [SKIPPING] in Spec Teardown (AfterEach) ["))
		Ω(output).Should(ContainSubstring("an AfterEach SKIP"))

		// Final tally: nothing passed or failed, everything skipped.
		Ω(output).Should(ContainSubstring("0 Passed | 0 Failed | 0 Pending | 9 Skipped"))
	})
})

View File

@ -0,0 +1,63 @@
package integration_test
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/onsi/gomega/gexec"
)
// Integration coverage for the -afterSuiteHook flag: verifies that the hook
// command runs after a suite completes, that the (ginkgo-suite-passed) and
// (ginkgo-suite-name) placeholders are substituted, and that hook
// success/failure is reported distinctly from the suite's own outcome.
var _ = Describe("Suite Command Specs", func() {
	var pathToTest string

	BeforeEach(func() {
		// Copy the fixture suite into a fresh scratch directory per spec.
		pathToTest = tmpPath("suite_command")
		copyIn("suite_command_tests", pathToTest)
	})

	It("Runs command after suite echoing out suite data, properly reporting suite name and passing status in successful command output", func() {
		// The echoed line should contain [PASS] and the suite's package name.
		command := "-afterSuiteHook=echo THIS IS A (ginkgo-suite-passed) TEST OF THE (ginkgo-suite-name) SYSTEM, THIS IS ONLY A TEST"
		expected := "THIS IS A [PASS] TEST OF THE suite_command SYSTEM, THIS IS ONLY A TEST"
		session := startGinkgo(pathToTest, command)
		Eventually(session).Should(gexec.Exit(0))
		output := string(session.Out.Contents())

		Ω(output).Should(ContainSubstring("1 Passed"))
		Ω(output).Should(ContainSubstring("0 Failed"))
		Ω(output).Should(ContainSubstring("1 Pending"))
		Ω(output).Should(ContainSubstring("0 Skipped"))
		Ω(output).Should(ContainSubstring("Test Suite Passed"))
		Ω(output).Should(ContainSubstring("Post-suite command succeeded:"))
		Ω(output).Should(ContainSubstring(expected))
	})

	It("Runs command after suite reporting that command failed", func() {
		// A failing hook is reported, but does not fail the suite itself
		// (the ginkgo process still exits 0).
		command := "-afterSuiteHook=exit 1"
		session := startGinkgo(pathToTest, command)
		Eventually(session).Should(gexec.Exit(0))
		output := string(session.Out.Contents())

		Ω(output).Should(ContainSubstring("1 Passed"))
		Ω(output).Should(ContainSubstring("0 Failed"))
		Ω(output).Should(ContainSubstring("1 Pending"))
		Ω(output).Should(ContainSubstring("0 Skipped"))
		Ω(output).Should(ContainSubstring("Test Suite Passed"))
		Ω(output).Should(ContainSubstring("Post-suite command failed:"))
	})

	It("Runs command after suite echoing out suite data, properly reporting suite name and failing status in successful command output", func() {
		// With -failOnPending the pending spec fails the suite (exit 1),
		// so the placeholder should expand to [FAIL] even though the hook
		// command itself succeeds.
		command := "-afterSuiteHook=echo THIS IS A (ginkgo-suite-passed) TEST OF THE (ginkgo-suite-name) SYSTEM, THIS IS ONLY A TEST"
		expected := "THIS IS A [FAIL] TEST OF THE suite_command SYSTEM, THIS IS ONLY A TEST"
		session := startGinkgo(pathToTest, "-failOnPending=true", command)
		Eventually(session).Should(gexec.Exit(1))
		output := string(session.Out.Contents())

		Ω(output).Should(ContainSubstring("1 Passed"))
		Ω(output).Should(ContainSubstring("0 Failed"))
		Ω(output).Should(ContainSubstring("1 Pending"))
		Ω(output).Should(ContainSubstring("0 Skipped"))
		Ω(output).Should(ContainSubstring("Test Suite Failed"))
		Ω(output).Should(ContainSubstring("Post-suite command succeeded:"))
		Ω(output).Should(ContainSubstring(expected))
	})
})

View File

@ -77,3 +77,16 @@ func (f *Failer) Drain(componentType types.SpecComponentType, componentIndex int
return failure, outcome
}
// Skip records a skip outcome for the currently running spec, storing the
// skip message and code location as the spec's failure details. Safe for
// concurrent use (guarded by f.lock).
func (f *Failer) Skip(message string, location types.CodeLocation) {
	f.lock.Lock()
	defer f.lock.Unlock()

	// Only transition out of the default Passed state: if an earlier
	// failure/panic/skip has already set a terminal state, the first
	// outcome wins and this call is a no-op.
	if f.state == types.SpecStatePassed {
		f.state = types.SpecStateSkipped
		f.failure = types.SpecFailure{
			Message:  message,
			Location: location,
		}
	}
}

View File

@ -30,6 +30,22 @@ var _ = Describe("Failer", func() {
})
})
Describe("Skip", func() {
It("should handle failures", func() {
failer.Skip("something skipped", codeLocationA)
failure, state := failer.Drain(types.SpecComponentTypeIt, 3, codeLocationB)
Ω(failure).Should(Equal(types.SpecFailure{
Message: "something skipped",
Location: codeLocationA,
ForwardedPanic: "",
ComponentType: types.SpecComponentTypeIt,
ComponentIndex: 3,
ComponentCodeLocation: codeLocationB,
}))
Ω(state).Should(Equal(types.SpecStateSkipped))
})
})
Describe("Fail", func() {
It("should handle failures", func() {
failer.Fail("something failed", codeLocationA)

View File

@ -68,8 +68,10 @@ func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure)
done := make(chan interface{}, 1)
go func() {
finished := false
defer func() {
if e := recover(); e != nil {
if e := recover(); e != nil || !finished {
r.failer.Panic(codelocation.New(2), e)
select {
case <-done:
@ -81,6 +83,7 @@ func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure)
}()
r.asyncFunc(done)
finished = true
}()
select {
@ -93,8 +96,10 @@ func (r *runner) runAsync() (outcome types.SpecState, failure types.SpecFailure)
return
}
func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure) {
finished := false
defer func() {
if e := recover(); e != nil {
if e := recover(); e != nil || !finished {
r.failer.Panic(codelocation.New(2), e)
}
@ -102,6 +107,7 @@ func (r *runner) runSync() (outcome types.SpecState, failure types.SpecFailure)
}()
r.syncFunc()
finished = true
return
}

View File

@ -96,6 +96,24 @@ func SynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time.
Ω(failure.ForwardedPanic).Should(Equal("ack!"))
})
})
Context("when a panic occurs with a nil value", func() {
BeforeEach(func() {
outcome, failure = build(func() {
didRun = true
innerCodeLocation = codelocation.New(0)
panic(nil)
}, 0, failer, componentCodeLocation).Run()
})
It("should return the nil-valued panic", func() {
Ω(didRun).Should(BeTrue())
Ω(outcome).Should(Equal(types.SpecStatePanicked))
Ω(failure.ForwardedPanic).Should(Equal("<nil>"))
})
})
})
}
@ -230,6 +248,23 @@ func AsynchronousSharedRunnerBehaviors(build func(body interface{}, timeout time
Ω(failure.ForwardedPanic).Should(Equal("ack!"))
})
})
Context("when the function panics with a nil value", func() {
BeforeEach(func() {
outcome, failure = build(func(done Done) {
didRun = true
innerCodeLocation = codelocation.New(0)
panic(nil)
}, 100*time.Millisecond, failer, componentCodeLocation).Run()
})
It("should return the nil-valued panic", func() {
Ω(didRun).Should(BeTrue())
Ω(outcome).Should(Equal(types.SpecStatePanicked))
Ω(failure.ForwardedPanic).Should(Equal("<nil>"))
})
})
})
}

View File

@ -209,7 +209,7 @@ func (aggregator *Aggregator) announceSpec(specSummary *types.SpecSummary) {
case types.SpecStatePending:
aggregator.stenographer.AnnouncePendingSpec(specSummary, aggregator.config.NoisyPendings && !aggregator.config.Succinct)
case types.SpecStateSkipped:
aggregator.stenographer.AnnounceSkippedSpec(specSummary)
aggregator.stenographer.AnnounceSkippedSpec(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
case types.SpecStateTimedOut:
aggregator.stenographer.AnnounceSpecTimedOut(specSummary, aggregator.config.Succinct, aggregator.config.FullTrace)
case types.SpecStatePanicked:

View File

@ -65,7 +65,7 @@ func (reporter *DefaultReporter) SpecDidComplete(specSummary *types.SpecSummary)
case types.SpecStatePending:
reporter.stenographer.AnnouncePendingSpec(specSummary, reporter.config.NoisyPendings && !reporter.config.Succinct)
case types.SpecStateSkipped:
reporter.stenographer.AnnounceSkippedSpec(specSummary)
reporter.stenographer.AnnounceSkippedSpec(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
case types.SpecStateTimedOut:
reporter.stenographer.AnnounceSpecTimedOut(specSummary, reporter.config.Succinct, reporter.config.FullTrace)
case types.SpecStatePanicked:

View File

@ -27,7 +27,7 @@ var _ = Describe("DefaultReporter", func() {
reporterConfig = config.DefaultReporterConfigType{
NoColor: false,
SlowSpecThreshold: 0.1,
NoisyPendings: true,
NoisyPendings: false,
Verbose: true,
FullTrace: true,
}
@ -249,8 +249,8 @@ var _ = Describe("DefaultReporter", func() {
spec.State = types.SpecStatePending
})
It("should announce the pending spec", func() {
Ω(stenographer.Calls()[0]).Should(Equal(call("AnnouncePendingSpec", spec, true)))
It("should announce the pending spec, succinctly", func() {
Ω(stenographer.Calls()[0]).Should(Equal(call("AnnouncePendingSpec", spec, false)))
})
})
@ -260,7 +260,7 @@ var _ = Describe("DefaultReporter", func() {
})
It("should announce the skipped spec", func() {
Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSkippedSpec", spec)))
Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSkippedSpec", spec, false, true)))
})
})
@ -294,6 +294,24 @@ var _ = Describe("DefaultReporter", func() {
})
})
Context("in noisy pendings mode", func() {
BeforeEach(func() {
reporterConfig.Succinct = false
reporterConfig.NoisyPendings = true
reporter = reporters.NewDefaultReporter(reporterConfig, stenographer)
})
Context("When the spec is pending", func() {
BeforeEach(func() {
spec.State = types.SpecStatePending
})
It("should announce the pending spec, noisily", func() {
Ω(stenographer.Calls()[0]).Should(Equal(call("AnnouncePendingSpec", spec, true)))
})
})
})
Context("in succinct mode", func() {
BeforeEach(func() {
reporterConfig.Succinct = true
@ -337,7 +355,7 @@ var _ = Describe("DefaultReporter", func() {
spec.State = types.SpecStatePending
})
It("should announce the pending spec, but never noisily", func() {
It("should announce the pending spec, succinctly", func() {
Ω(stenographer.Calls()[0]).Should(Equal(call("AnnouncePendingSpec", spec, false)))
})
})
@ -348,7 +366,7 @@ var _ = Describe("DefaultReporter", func() {
})
It("should announce the skipped spec", func() {
Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSkippedSpec", spec)))
Ω(stenographer.Calls()[0]).Should(Equal(call("AnnounceSkippedSpec", spec, true, true)))
})
})

View File

@ -117,8 +117,8 @@ func (stenographer *FakeStenographer) AnnouncePendingSpec(spec *types.SpecSummar
stenographer.registerCall("AnnouncePendingSpec", spec, noisy)
}
func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary) {
stenographer.registerCall("AnnounceSkippedSpec", spec)
func (stenographer *FakeStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
stenographer.registerCall("AnnounceSkippedSpec", spec, succinct, fullTrace)
}
func (stenographer *FakeStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {

View File

@ -49,7 +49,7 @@ type Stenographer interface {
AnnounceSuccesfulMeasurement(spec *types.SpecSummary, succinct bool)
AnnouncePendingSpec(spec *types.SpecSummary, noisy bool)
AnnounceSkippedSpec(spec *types.SpecSummary)
AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool)
AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool)
AnnounceSpecPanicked(spec *types.SpecSummary, succinct bool, fullTrace bool)
@ -252,9 +252,21 @@ func (s *consoleStenographer) AnnouncePendingSpec(spec *types.SpecSummary, noisy
}
}
func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary) {
s.print(0, s.colorize(cyanColor, "S"))
s.stream()
// AnnounceSkippedSpec reports a skipped spec to the console. Specs skipped
// at runtime via Skip(...) carry a non-empty spec.Failure and are announced
// with a full block (component, timing, code location, and failure details,
// honoring fullTrace); all other skips — or any skip in succinct mode —
// emit just a streamed cyan "S".
func (s *consoleStenographer) AnnounceSkippedSpec(spec *types.SpecSummary, succinct bool, fullTrace bool) {
	// Skips at runtime will have a non-empty spec.Failure. All others should be succinct.
	if succinct || spec.Failure == (types.SpecFailure{}) {
		s.print(0, s.colorize(cyanColor, "S"))
		s.stream()
	} else {
		// Verbose skip: mirror the failure/timeout announcement layout.
		s.startBlock()
		s.println(0, s.colorize(cyanColor+boldStyle, "S [SKIPPING]%s [%.3f seconds]", s.failureContext(spec.Failure.ComponentType), spec.RunTime.Seconds()))
		indentation := s.printCodeLocationBlock(spec.ComponentTexts, spec.ComponentCodeLocations, spec.Failure.ComponentType, spec.Failure.ComponentIndex, true, succinct)
		s.printNewLine()
		s.printFailure(indentation, spec.State, spec.Failure, fullTrace)
		s.endBlock()
	}
}
func (s *consoleStenographer) AnnounceSpecTimedOut(spec *types.SpecSummary, succinct bool, fullTrace bool) {

View File

@ -232,10 +232,7 @@ var _ = Describe("Addon update", func() {
// This test requires SSH, so the provider check should be identical to
// those tests.
if !providerIs("gce") {
Logf(fmt.Sprintf("Skipping test, which is not implemented for %s", testContext.Provider))
return
}
SkipUnlessProviderIs("gce")
temporaryRemotePathPrefix := "addon-test-dir"
temporaryRemotePath := temporaryRemotePathPrefix + "/" + dir // in home directory on kubernetes-master

View File

@ -29,14 +29,11 @@ var _ = Describe("MasterCerts", func() {
var err error
_, err = loadClient()
Expect(err).NotTo(HaveOccurred())
SkipUnlessProviderIs("gce", "gke")
})
It("should have all expected certs on the master", func() {
if !providerIs("gce", "gke") {
By(fmt.Sprintf("Skipping MasterCerts test for cloud provider %s (only supported for gce and gke)", testContext.Provider))
return
}
for _, certFile := range []string{"kubecfg.key", "kubecfg.crt", "ca.crt"} {
cmd := exec.Command("gcloud", "compute", "ssh", "--project", testContext.CloudConfig.ProjectID,
"--zone", testContext.CloudConfig.Zone, testContext.CloudConfig.MasterName,

View File

@ -240,11 +240,11 @@ var _ = Describe("Skipped", func() {
})
Describe("kube-push", func() {
BeforeEach(func() {
SkipUnlessProviderIs("gce")
})
It("of master should maintain responsive services", func() {
if !providerIs("gce") {
By(fmt.Sprintf("Skipping kube-push test, which is not implemented for %s", testContext.Provider))
return
}
By("Validating cluster before master upgrade")
expectNoError(validate(f, svcName, rcName, ingress, replicas))
By("Performing a master upgrade")
@ -255,11 +255,11 @@ var _ = Describe("Skipped", func() {
})
Describe("upgrade-master", func() {
BeforeEach(func() {
SkipUnlessProviderIs("gce", "gke")
})
It("should maintain responsive services", func() {
if !providerIs("gce", "gke") {
By(fmt.Sprintf("Skipping upgrade test, which is not implemented for %s", testContext.Provider))
return
}
By("Validating cluster before master upgrade")
expectNoError(validate(f, svcName, rcName, ingress, replicas))
By("Performing a master upgrade")
@ -315,10 +315,8 @@ var _ = Describe("Skipped", func() {
})
It("should maintain a functioning cluster", func() {
if !providerIs("gce", "gke") {
By(fmt.Sprintf("Skipping upgrade test, which is not implemented for %s", testContext.Provider))
return
}
SkipUnlessProviderIs("gce", "gke")
By("Validating cluster before master upgrade")
expectNoError(validate(f, svcName, rcName, ingress, replicas))
By("Performing a master upgrade")

View File

@ -36,6 +36,8 @@ func coreDump(dir string) {
return
}
provider := testContext.Provider
// requires ssh
if !providerIs("gce", "gke") {
fmt.Printf("Skipping SSH core dump, which is not implemented for %s", provider)
return

View File

@ -185,10 +185,8 @@ var _ = Describe("DNS", func() {
f := NewFramework("dns")
It("should provide DNS for the cluster", func() {
if providerIs("vagrant") {
By("Skipping test which is broken for vagrant (See https://github.com/GoogleCloudPlatform/kubernetes/issues/3580)")
return
}
// TODO: support DNS on vagrant #3580
SkipIfProviderIs("vagrant")
podClient := f.Client.Pods(api.NamespaceDefault)
@ -224,11 +222,10 @@ var _ = Describe("DNS", func() {
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd)
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
})
It("should provide DNS for services", func() {
if providerIs("vagrant") {
By("Skipping test which is broken for vagrant (See https://github.com/GoogleCloudPlatform/kubernetes/issues/3580)")
return
}
// TODO: support DNS on vagrant #3580
SkipIfProviderIs("vagrant")
podClient := f.Client.Pods(api.NamespaceDefault)

View File

@ -34,6 +34,13 @@ import (
var _ = Describe("Cluster level logging using Elasticsearch", func() {
f := NewFramework("es-logging")
BeforeEach(func() {
// TODO: For now assume we are only testing cluster logging with Elasticsearch
// on GCE. Once we are sure that Elasticsearch cluster level logging
// works for other providers we should widen this scope of this test.
SkipUnlessProviderIs("gce")
})
It("should check that logs from pods on all nodes are ingested into Elasticsearch", func() {
ClusterLevelLoggingWithElasticsearch(f)
})
@ -55,14 +62,6 @@ func bodyToJSON(body []byte) (map[string]interface{}, error) {
// ClusterLevelLoggingWithElasticsearch is an end to end test for cluster level logging.
func ClusterLevelLoggingWithElasticsearch(f *Framework) {
// TODO: For now assume we are only testing cluster logging with Elasticsearch
// on GCE. Once we are sure that Elasticsearch cluster level logging
// works for other providers we should widen this scope of this test.
if !providerIs("gce") {
Logf("Skipping cluster level logging test for provider %s", testContext.Provider)
return
}
// graceTime is how long to keep retrying requests for status information.
const graceTime = 2 * time.Minute
// ingestionTimeout is how long to keep retrying to wait for all the

View File

@ -17,7 +17,6 @@ limitations under the License.
package e2e
import (
"fmt"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
@ -31,9 +30,15 @@ import (
var _ = Describe("Etcd failure", func() {
var skipped bool
framework := Framework{BaseName: "etcd-failure"}
BeforeEach(func() {
// These tests requires SSH, so the provider check should be identical to those tests.
skipped = true
SkipUnlessProviderIs("gce")
skipped = false
framework.beforeEach()
Expect(RunRC(RCConfig{
@ -45,7 +50,13 @@ var _ = Describe("Etcd failure", func() {
})).NotTo(HaveOccurred())
})
AfterEach(framework.afterEach)
AfterEach(func() {
if skipped {
return
}
framework.afterEach()
})
It("should recover from network partition with master", func() {
etcdFailTest(
@ -65,13 +76,6 @@ var _ = Describe("Etcd failure", func() {
})
func etcdFailTest(framework Framework, failCommand, fixCommand string) {
// This test requires SSH, so the provider check should be identical to
// those tests.
if !providerIs("gce") {
By(fmt.Sprintf("Skippingt test, which is not implemented for %s", testContext.Provider))
return
}
doEtcdFailure(failCommand, fixCommand)
checkExistingRCRecovers(framework)

View File

@ -109,16 +109,15 @@ var _ = Describe("Kubectl client", func() {
Describe("Guestbook application", func() {
var guestbookPath string
BeforeEach(func() {
guestbookPath = filepath.Join(testContext.RepoRoot, "examples/guestbook")
// requires ExternalLoadBalancer support
SkipUnlessProviderIs("gce", "gke", "aws")
})
It("should create and stop a working application", func() {
if !providerIs("gce", "gke", "aws") {
By(fmt.Sprintf("Skipping guestbook, uses createExternalLoadBalancer, a (gce|gke|aws) feature"))
return
}
defer cleanup(guestbookPath, ns, frontendSelector, redisMasterSelector, redisSlaveSelector)
By("creating all guestbook components")

View File

@ -39,14 +39,11 @@ var _ = Describe("Monitoring", func() {
var err error
c, err = loadClient()
expectNoError(err)
SkipUnlessProviderIs("gce")
})
It("should verify monitoring pods and all cluster nodes are available on influxdb using heapster.", func() {
if !providerIs("gce") {
By(fmt.Sprintf("Skipping Monitoring test, which is only supported for provider gce (not %s)",
testContext.Provider))
return
}
testMonitoringUsingHeapsterInfluxdb(c)
})
})

View File

@ -103,10 +103,8 @@ var _ = Describe("Networking", func() {
//Now we can proceed with the test.
It("should function for intra-pod communication", func() {
if testContext.Provider == "vagrant" {
By("Skipping test which is broken for vagrant (See https://github.com/GoogleCloudPlatform/kubernetes/issues/3580)")
return
}
// TODO: support DNS on vagrant #3580
SkipIfProviderIs("vagrant")
By(fmt.Sprintf("Creating a service named %q in namespace %q", svcname, f.Namespace.Name))
svc, err := f.Client.Services(f.Namespace.Name).Create(&api.Service{

View File

@ -39,7 +39,6 @@ var _ = Describe("Pod Disks", func() {
podClient client.PodInterface
host0Name string
host1Name string
numHosts int
)
BeforeEach(func() {
@ -47,29 +46,21 @@ var _ = Describe("Pod Disks", func() {
c, err = loadClient()
expectNoError(err)
SkipUnlessNodeCountIsAtLeast(2)
podClient = c.Pods(api.NamespaceDefault)
nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
expectNoError(err, "Failed to list nodes for e2e cluster.")
numHosts = len(nodes.Items)
Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")
if len(nodes.Items) >= 2 {
host1Name = nodes.Items[1].ObjectMeta.Name
}
if len(nodes.Items) >= 1 {
host0Name = nodes.Items[0].ObjectMeta.Name
}
host0Name = nodes.Items[0].ObjectMeta.Name
host1Name = nodes.Items[1].ObjectMeta.Name
})
It("should schedule a pod w/ a RW PD, remove it, then schedule it on another host", func() {
if !providerIs("gce", "aws") {
By(fmt.Sprintf("Skipping PD test, which is only supported for providers gce & aws (not %s)",
testContext.Provider))
return
}
Expect(numHosts >= 2).To(BeTrue(), "At least 2 nodes required")
SkipUnlessProviderIs("gce", "aws")
By("creating PD")
diskName, err := createPD()
@ -122,13 +113,7 @@ var _ = Describe("Pod Disks", func() {
})
It("should schedule a pod w/ a readonly PD on two hosts, then remove both.", func() {
if testContext.Provider != "gce" {
By(fmt.Sprintf("Skipping PD test, which is only supported for provider gce (not %s)",
testContext.Provider))
return
}
Expect(numHosts >= 2).To(BeTrue(), "At least 2 nodes required")
SkipUnlessProviderIs("gce")
By("creating PD")
diskName, err := createPD()

View File

@ -47,11 +47,9 @@ var _ = Describe("ReplicationController", func() {
})
It("should serve a basic image on each replica with a private image", func() {
if !providerIs("gce", "gke") {
By(fmt.Sprintf("Skipping private variant, which is only supported for providers gce and gke (not %s)",
testContext.Provider))
return
}
// requires private images
SkipUnlessProviderIs("gce", "gke")
ServeImageOrFail(c, "private", "gcr.io/_b_k8s_authenticated_test/serve_hostname:1.1")
})
})

View File

@ -50,6 +50,12 @@ var _ = Describe("Reboot", func() {
var err error
c, err = loadClient()
Expect(err).NotTo(HaveOccurred())
// These tests requires SSH, so the provider check should be identical to there
// (the limiting factor is the implementation of util.go's getSigner(...)).
// Cluster must support node reboot
SkipUnlessProviderIs("gce", "aws")
})
It("each node by ordering clean reboot and ensure they function upon restart", func() {
@ -90,15 +96,6 @@ var _ = Describe("Reboot", func() {
})
func testReboot(c *client.Client, rebootCmd string) {
// This test requires SSH, so the provider check should be identical to
// there (the limiting factor is the implementation of util.go's
// getSigner(...)).
provider := testContext.Provider
if !providerIs("aws", "gce") {
By(fmt.Sprintf("Skipping reboot test, which is not implemented for %s", provider))
return
}
// Get all nodes, and kick off the test on each.
nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
if err != nil {
@ -106,7 +103,7 @@ func testReboot(c *client.Client, rebootCmd string) {
}
result := make(chan bool, len(nodelist.Items))
for _, n := range nodelist.Items {
go rebootNode(c, provider, n.ObjectMeta.Name, rebootCmd, result)
go rebootNode(c, testContext.Provider, n.ObjectMeta.Name, rebootCmd, result)
}
// Wait for all to finish and check the final result.

View File

@ -386,8 +386,6 @@ func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replica
}
var _ = Describe("Nodes", func() {
supportedProviders := []string{"aws", "gce", "gke"}
var testName string
var c *client.Client
var ns string
@ -408,17 +406,20 @@ var _ = Describe("Nodes", func() {
})
Describe("Resize", func() {
var skipped bool
BeforeEach(func() {
if !providerIs(supportedProviders...) {
Failf("Nodes.Resize test is only supported for providers %v (not %s). You can avoid this failure by using ginkgo.skip=Nodes.Resize in your environment.",
supportedProviders, testContext.Provider)
}
skipped = true
SkipUnlessProviderIs("gce", "gke", "aws")
SkipUnlessNodeCountIsAtLeast(2)
skipped = false
})
AfterEach(func() {
if !providerIs(supportedProviders...) {
if skipped {
return
}
By("restoring the original node instance group size")
if err := resizeGroup(testContext.CloudConfig.NumNodes); err != nil {
Failf("Couldn't restore the original node instance group size: %v", err)
@ -431,15 +432,7 @@ var _ = Describe("Nodes", func() {
}
})
testName = "should be able to delete nodes."
It(testName, func() {
Logf("starting test %s", testName)
if testContext.CloudConfig.NumNodes < 2 {
Failf("Failing test %s as it requires at least 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
return
}
It("should be able to delete nodes", func() {
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-delete-node"
@ -461,16 +454,8 @@ var _ = Describe("Nodes", func() {
Expect(err).NotTo(HaveOccurred())
})
testName = "should be able to add nodes."
It(testName, func() {
// TODO: Bug here - testName is not correct
Logf("starting test %s", testName)
if testContext.CloudConfig.NumNodes < 2 {
Failf("Failing test %s as it requires at least 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
return
}
// TODO: Bug here - testName is not correct
It("should be able to add nodes", func() {
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-add-node"
@ -497,68 +482,62 @@ var _ = Describe("Nodes", func() {
})
Describe("Network", func() {
BeforeEach(func() {
if !providerIs(supportedProviders...) {
Failf("Nodes.Network test is only supported for providers %v (not %s). You can avoid this failure by using ginkgo.skip=Nodes.Network in your environment.",
supportedProviders, testContext.Provider)
}
})
Context("when a minion node becomes unreachable", func() {
BeforeEach(func() {
SkipUnlessProviderIs("gce", "gke", "aws")
SkipUnlessNodeCountIsAtLeast(2)
})
// TODO marekbiskup 2015-06-19 #10085
// This test has nothing to do with resizing nodes so it should be moved elsewhere.
// Two things are tested here:
// 1. pods from an uncontactable node are rescheduled
// 2. when a node joins the cluster, it can host new pods.
// Factor out the cases into two separate tests.
// TODO marekbiskup 2015-06-19 #10085
// This test has nothing to do with resizing nodes so it should be moved elsewhere.
// Two things are tested here:
// 1. pods from an uncontactable node are rescheduled
// 2. when a node joins the cluster, it can host new pods.
// Factor out the cases into two separate tests.
It("[replication controller] recreates pods scheduled on the unreachable minion node "+
"AND allows scheduling of pods on a minion after it rejoins the cluster", func() {
testName = "Uncontactable nodes, have their pods recreated by a replication controller, and can host new pods after rejoining."
It(testName, func() {
if testContext.CloudConfig.NumNodes < 2 {
By(fmt.Sprintf("skipping %s test, which requires at least 2 nodes (not %d)",
testName, testContext.CloudConfig.NumNodes))
return
}
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-net"
newSVCByName(c, ns, name)
replicas := testContext.CloudConfig.NumNodes
newRCByName(c, ns, name, replicas)
err := verifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-net"
newSVCByName(c, ns, name)
replicas := testContext.CloudConfig.NumNodes
newRCByName(c, ns, name, replicas)
err := verifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
By("choose a node with at least one pod - we will block some network traffic on this node")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
pods, err := c.Pods(ns).List(label, fields.Everything()) // list pods after all have been scheduled
Expect(err).NotTo(HaveOccurred())
nodeName := pods.Items[0].Spec.NodeName
node, err := c.Nodes().Get(nodeName)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("block network traffic from node %s", node.Name))
performTemporaryNetworkFailure(c, ns, name, replicas, pods.Items[0].Name, node)
Logf("Waiting for node %s to be ready", node.Name)
waitForNodeToBe(c, node.Name, true, 2*time.Minute)
By("verify wheter new pods can be created on the re-attached node")
// increasing the RC size is not a valid way to test this
// since we have no guarantees the pod will be scheduled on our node.
additionalPod := "additionalpod"
err = newPodOnNode(c, ns, additionalPod, node.Name)
Expect(err).NotTo(HaveOccurred())
err = verifyPods(c, ns, additionalPod, true, 1)
Expect(err).NotTo(HaveOccurred())
// verify that it is really on the requested node
{
pod, err := c.Pods(ns).Get(additionalPod)
By("choose a node with at least one pod - we will block some network traffic on this node")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
pods, err := c.Pods(ns).List(label, fields.Everything()) // list pods after all have been scheduled
Expect(err).NotTo(HaveOccurred())
if pod.Spec.NodeName != node.Name {
Logf("Pod %s found on invalid node: %s instead of %s", pod.Spec.NodeName, node.Name)
nodeName := pods.Items[0].Spec.NodeName
node, err := c.Nodes().Get(nodeName)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("block network traffic from node %s", node.Name))
performTemporaryNetworkFailure(c, ns, name, replicas, pods.Items[0].Name, node)
Logf("Waiting for node %s to be ready", node.Name)
waitForNodeToBe(c, node.Name, true, 2*time.Minute)
By("verify wheter new pods can be created on the re-attached node")
// increasing the RC size is not a valid way to test this
// since we have no guarantees the pod will be scheduled on our node.
additionalPod := "additionalpod"
err = newPodOnNode(c, ns, additionalPod, node.Name)
Expect(err).NotTo(HaveOccurred())
err = verifyPods(c, ns, additionalPod, true, 1)
Expect(err).NotTo(HaveOccurred())
// verify that it is really on the requested node
{
pod, err := c.Pods(ns).Get(additionalPod)
Expect(err).NotTo(HaveOccurred())
if pod.Spec.NodeName != node.Name {
Logf("Pod %s found on invalid node: %s instead of %s", pod.Spec.NodeName, node.Name)
}
}
}
})
})
})
})

View File

@ -53,27 +53,32 @@ const (
var _ = Describe("Restart", func() {
var c *client.Client
var ps *podStore
var skipped bool
BeforeEach(func() {
var err error
c, err = loadClient()
Expect(err).NotTo(HaveOccurred())
// This test requires the ability to restart all nodes, so the provider
// check must be identical to that call.
skipped = true
SkipUnlessProviderIs("gce")
skipped = false
ps = newPodStore(c, api.NamespaceDefault, labels.Everything(), fields.Everything())
})
AfterEach(func() {
if skipped {
return
}
ps.Stop()
})
It("should restart all nodes and ensure all nodes and pods recover", func() {
// This test requires the ability to restart all nodes, so the provider
// check must be identical to that call.
provider := testContext.Provider
nn := testContext.CloudConfig.NumNodes
if !providerIs("gce") {
By(fmt.Sprintf("Skipping reboot test, which is not implemented for %s", provider))
return
}
By("ensuring all nodes are ready")
nodeNamesBefore, err := checkNodesReady(c, nodeReadyInitialTimeout, nn)
@ -92,7 +97,7 @@ var _ = Describe("Restart", func() {
}
By("restarting all of the nodes")
err = restartNodes(provider, restartPerNodeTimeout)
err = restartNodes(testContext.Provider, restartPerNodeTimeout)
Expect(err).NotTo(HaveOccurred())
By("ensuring all nodes are ready after the restart")

View File

@ -230,10 +230,8 @@ var _ = Describe("Services", func() {
})
It("should be able to create a functioning external load balancer", func() {
if !providerIs("gce", "gke", "aws") {
By(fmt.Sprintf("Skipping service external load balancer test; uses ServiceTypeLoadBalancer, a (gce|gke|aws) feature"))
return
}
// requires ExternalLoadBalancer
SkipUnlessProviderIs("gce", "gke", "aws")
serviceName := "external-lb-test"
ns := namespaces[0]
@ -741,10 +739,8 @@ var _ = Describe("Services", func() {
})
It("should correctly serve identically named services in different namespaces on different external IP addresses", func() {
if !providerIs("gce", "gke", "aws") {
By(fmt.Sprintf("Skipping service namespace collision test; uses ServiceTypeLoadBalancer, a (gce|gke|aws) feature"))
return
}
// requires ExternalLoadBalancer
SkipUnlessProviderIs("gce", "gke", "aws")
serviceNames := []string{"s0"} // Could add more here, but then it takes longer.
labels := map[string]string{

View File

@ -26,15 +26,9 @@ import (
)
var _ = Describe("Shell", func() {
defer GinkgoRecover()
It("should pass tests for services.sh", func() {
SkipUnlessProviderIs("gce", "gke")
It(fmt.Sprintf("should pass tests for services.sh"), func() {
// The services script only works on gce/gke
if !providerIs("gce", "gke") {
By(fmt.Sprintf("Skipping Shell test services.sh, which is only supported for provider gce and gke (not %s)",
testContext.Provider))
return
}
runCmdTest(filepath.Join(testContext.RepoRoot, "hack/e2e-suite/services.sh"))
})
})
@ -48,7 +42,5 @@ func runCmdTest(path string) {
if err := cmd.Run(); err != nil {
Fail(fmt.Sprintf("Error running %v:\nCommand output:\n%v\n", cmd, cmd.Stdout))
return
}
return
}

View File

@ -33,17 +33,12 @@ var _ = Describe("SSH", func() {
var err error
c, err = loadClient()
Expect(err).NotTo(HaveOccurred())
// When adding more providers here, also implement their functionality in util.go's getSigner(...).
SkipUnlessProviderIs("gce", "gke")
})
It("should SSH to all nodes and run commands", func() {
// When adding more providers here, also implement their functionality
// in util.go's getSigner(...).
provider := testContext.Provider
if !providerIs("gce", "gke") {
By(fmt.Sprintf("Skipping SSH test, which is not implemented for %s", provider))
return
}
// Get all nodes' external IPs.
By("Getting all nodes' SSH-able IP addresses")
hosts, err := NodeSSHHosts(c)
@ -70,7 +65,7 @@ var _ = Describe("SSH", func() {
for _, testCase := range testCases {
By(fmt.Sprintf("SSH'ing to all nodes and running %s", testCase.cmd))
for _, host := range hosts {
stdout, stderr, code, err := SSH(testCase.cmd, host, provider)
stdout, stderr, code, err := SSH(testCase.cmd, host, testContext.Provider)
stdout, stderr = strings.TrimSpace(stdout), strings.TrimSpace(stderr)
if err != testCase.expectedError {
Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError)
@ -96,7 +91,7 @@ var _ = Describe("SSH", func() {
// Quickly test that SSH itself errors correctly.
By("SSH'ing to a nonexistent host")
if _, _, _, err = SSH(`echo "hello"`, "i.do.not.exist", provider); err == nil {
if _, _, _, err = SSH(`echo "hello"`, "i.do.not.exist", testContext.Provider); err == nil {
Failf("Expected error trying to SSH to nonexistent host.")
}
})

View File

@ -183,6 +183,28 @@ func Failf(format string, a ...interface{}) {
Fail(fmt.Sprintf(format, a...), 1)
}
// Skipf skips the current Ginkgo spec, formatting the skip message
// with fmt.Sprintf (printf-style format plus args).
func Skipf(format string, args ...interface{}) {
	Skip(fmt.Sprintf(format, args...))
}
// SkipUnlessNodeCountIsAtLeast skips the current spec when the cluster
// under test has fewer than minNodeCount nodes (per the cloud config).
func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
	numNodes := testContext.CloudConfig.NumNodes
	if numNodes < minNodeCount {
		Skipf("Requires at least %d nodes (not %d)", minNodeCount, numNodes)
	}
}
// SkipIfProviderIs skips the current spec when the configured cloud
// provider is one of unsupportedProviders.
func SkipIfProviderIs(unsupportedProviders ...string) {
	if !providerIs(unsupportedProviders...) {
		return
	}
	Skipf("Not supported for providers %v (found %s)", unsupportedProviders, testContext.Provider)
}
// SkipUnlessProviderIs skips the current spec unless the configured
// cloud provider is one of supportedProviders.
func SkipUnlessProviderIs(supportedProviders ...string) {
	if providerIs(supportedProviders...) {
		return
	}
	Skipf("Only supported for providers %v (not %s)", supportedProviders, testContext.Provider)
}
func providerIs(providers ...string) bool {
for _, provider := range providers {
if strings.ToLower(provider) == strings.ToLower(testContext.Provider) {