Use Ginkgo Skip() to avoid failing when tests are skipped

- Added util methods: Skipf, SkipUnlessNodeCountIsAtLeast, SkipIfProviderIs,
  and SkipUnlessProviderIs
This commit is contained in:
Karl Isenberg 2015-06-22 14:14:54 -07:00
parent 57f62976c5
commit 32a09cfcee
19 changed files with 173 additions and 216 deletions

View File

@ -232,10 +232,7 @@ var _ = Describe("Addon update", func() {
// This test requires SSH, so the provider check should be identical to
// those tests.
if !providerIs("gce") {
Logf(fmt.Sprintf("Skipping test, which is not implemented for %s", testContext.Provider))
return
}
SkipUnlessProviderIs("gce")
temporaryRemotePathPrefix := "addon-test-dir"
temporaryRemotePath := temporaryRemotePathPrefix + "/" + dir // in home directory on kubernetes-master

View File

@ -29,14 +29,11 @@ var _ = Describe("MasterCerts", func() {
var err error
_, err = loadClient()
Expect(err).NotTo(HaveOccurred())
SkipUnlessProviderIs("gce", "gke")
})
It("should have all expected certs on the master", func() {
if !providerIs("gce", "gke") {
By(fmt.Sprintf("Skipping MasterCerts test for cloud provider %s (only supported for gce and gke)", testContext.Provider))
return
}
for _, certFile := range []string{"kubecfg.key", "kubecfg.crt", "ca.crt"} {
cmd := exec.Command("gcloud", "compute", "ssh", "--project", testContext.CloudConfig.ProjectID,
"--zone", testContext.CloudConfig.Zone, testContext.CloudConfig.MasterName,

View File

@ -240,11 +240,11 @@ var _ = Describe("Skipped", func() {
})
Describe("kube-push", func() {
BeforeEach(func() {
SkipUnlessProviderIs("gce")
})
It("of master should maintain responsive services", func() {
if !providerIs("gce") {
By(fmt.Sprintf("Skipping kube-push test, which is not implemented for %s", testContext.Provider))
return
}
By("Validating cluster before master upgrade")
expectNoError(validate(f, svcName, rcName, ingress, replicas))
By("Performing a master upgrade")
@ -255,11 +255,11 @@ var _ = Describe("Skipped", func() {
})
Describe("upgrade-master", func() {
BeforeEach(func() {
SkipUnlessProviderIs("gce", "gke")
})
It("should maintain responsive services", func() {
if !providerIs("gce", "gke") {
By(fmt.Sprintf("Skipping upgrade test, which is not implemented for %s", testContext.Provider))
return
}
By("Validating cluster before master upgrade")
expectNoError(validate(f, svcName, rcName, ingress, replicas))
By("Performing a master upgrade")
@ -315,10 +315,8 @@ var _ = Describe("Skipped", func() {
})
It("should maintain a functioning cluster", func() {
if !providerIs("gce", "gke") {
By(fmt.Sprintf("Skipping upgrade test, which is not implemented for %s", testContext.Provider))
return
}
SkipUnlessProviderIs("gce", "gke")
By("Validating cluster before master upgrade")
expectNoError(validate(f, svcName, rcName, ingress, replicas))
By("Performing a master upgrade")

View File

@ -36,6 +36,8 @@ func coreDump(dir string) {
return
}
provider := testContext.Provider
// requires ssh
if !providerIs("gce", "gke") {
fmt.Printf("Skipping SSH core dump, which is not implemented for %s", provider)
return

View File

@ -144,10 +144,8 @@ var _ = Describe("DNS", func() {
f := NewFramework("dns")
It("should provide DNS for the cluster", func() {
if providerIs("vagrant") {
By("Skipping test which is broken for vagrant (See https://github.com/GoogleCloudPlatform/kubernetes/issues/3580)")
return
}
// TODO: support DNS on vagrant #3580
SkipIfProviderIs("vagrant")
podClient := f.Client.Pods(api.NamespaceDefault)
@ -208,11 +206,10 @@ var _ = Describe("DNS", func() {
Logf("DNS probes using %s succeeded\n", pod.Name)
})
It("should provide DNS for services", func() {
if providerIs("vagrant") {
By("Skipping test which is broken for vagrant (See https://github.com/GoogleCloudPlatform/kubernetes/issues/3580)")
return
}
// TODO: support DNS on vagrant #3580
SkipIfProviderIs("vagrant")
podClient := f.Client.Pods(api.NamespaceDefault)

View File

@ -34,6 +34,13 @@ import (
var _ = Describe("Cluster level logging using Elasticsearch", func() {
f := NewFramework("es-logging")
BeforeEach(func() {
// TODO: For now assume we are only testing cluster logging with Elasticsearch
// on GCE. Once we are sure that Elasticsearch cluster level logging
// works for other providers we should widen this scope of this test.
SkipUnlessProviderIs("gce")
})
It("should check that logs from pods on all nodes are ingested into Elasticsearch", func() {
ClusterLevelLoggingWithElasticsearch(f)
})
@ -55,14 +62,6 @@ func bodyToJSON(body []byte) (map[string]interface{}, error) {
// ClusterLevelLoggingWithElasticsearch is an end to end test for cluster level logging.
func ClusterLevelLoggingWithElasticsearch(f *Framework) {
// TODO: For now assume we are only testing cluster logging with Elasticsearch
// on GCE. Once we are sure that Elasticsearch cluster level logging
// works for other providers we should widen this scope of this test.
if !providerIs("gce") {
Logf("Skipping cluster level logging test for provider %s", testContext.Provider)
return
}
// graceTime is how long to keep retrying requests for status information.
const graceTime = 2 * time.Minute
// ingestionTimeout is how long to keep retrying to wait for all the

View File

@ -17,7 +17,6 @@ limitations under the License.
package e2e
import (
"fmt"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
@ -31,9 +30,15 @@ import (
var _ = Describe("Etcd failure", func() {
var skipped bool
framework := Framework{BaseName: "etcd-failure"}
BeforeEach(func() {
// These tests requires SSH, so the provider check should be identical to those tests.
skipped = true
SkipUnlessProviderIs("gce")
skipped = false
framework.beforeEach()
Expect(RunRC(RCConfig{
@ -45,7 +50,13 @@ var _ = Describe("Etcd failure", func() {
})).NotTo(HaveOccurred())
})
AfterEach(framework.afterEach)
AfterEach(func() {
if skipped {
return
}
framework.afterEach()
})
It("should recover from network partition with master", func() {
etcdFailTest(
@ -65,13 +76,6 @@ var _ = Describe("Etcd failure", func() {
})
func etcdFailTest(framework Framework, failCommand, fixCommand string) {
// This test requires SSH, so the provider check should be identical to
// those tests.
if !providerIs("gce") {
By(fmt.Sprintf("Skippingt test, which is not implemented for %s", testContext.Provider))
return
}
doEtcdFailure(failCommand, fixCommand)
checkExistingRCRecovers(framework)

View File

@ -109,16 +109,15 @@ var _ = Describe("Kubectl client", func() {
Describe("Guestbook application", func() {
var guestbookPath string
BeforeEach(func() {
guestbookPath = filepath.Join(testContext.RepoRoot, "examples/guestbook")
// requires ExternalLoadBalancer support
SkipUnlessProviderIs("gce", "gke", "aws")
})
It("should create and stop a working application", func() {
if !providerIs("gce", "gke", "aws") {
By(fmt.Sprintf("Skipping guestbook, uses createExternalLoadBalancer, a (gce|gke|aws) feature"))
return
}
defer cleanup(guestbookPath, ns, frontendSelector, redisMasterSelector, redisSlaveSelector)
By("creating all guestbook components")

View File

@ -39,14 +39,11 @@ var _ = Describe("Monitoring", func() {
var err error
c, err = loadClient()
expectNoError(err)
SkipUnlessProviderIs("gce")
})
It("should verify monitoring pods and all cluster nodes are available on influxdb using heapster.", func() {
if !providerIs("gce") {
By(fmt.Sprintf("Skipping Monitoring test, which is only supported for provider gce (not %s)",
testContext.Provider))
return
}
testMonitoringUsingHeapsterInfluxdb(c)
})
})

View File

@ -103,10 +103,8 @@ var _ = Describe("Networking", func() {
//Now we can proceed with the test.
It("should function for intra-pod communication", func() {
if testContext.Provider == "vagrant" {
By("Skipping test which is broken for vagrant (See https://github.com/GoogleCloudPlatform/kubernetes/issues/3580)")
return
}
// TODO: support DNS on vagrant #3580
SkipIfProviderIs("vagrant")
By(fmt.Sprintf("Creating a service named %q in namespace %q", svcname, f.Namespace.Name))
svc, err := f.Client.Services(f.Namespace.Name).Create(&api.Service{

View File

@ -39,7 +39,6 @@ var _ = Describe("Pod Disks", func() {
podClient client.PodInterface
host0Name string
host1Name string
numHosts int
)
BeforeEach(func() {
@ -47,29 +46,21 @@ var _ = Describe("Pod Disks", func() {
c, err = loadClient()
expectNoError(err)
SkipUnlessNodeCountIsAtLeast(2)
podClient = c.Pods(api.NamespaceDefault)
nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
expectNoError(err, "Failed to list nodes for e2e cluster.")
numHosts = len(nodes.Items)
Expect(len(nodes.Items)).To(BeNumerically(">=", 2), "Requires at least 2 nodes")
if len(nodes.Items) >= 2 {
host1Name = nodes.Items[1].ObjectMeta.Name
}
if len(nodes.Items) >= 1 {
host0Name = nodes.Items[0].ObjectMeta.Name
}
host0Name = nodes.Items[0].ObjectMeta.Name
host1Name = nodes.Items[1].ObjectMeta.Name
})
It("should schedule a pod w/ a RW PD, remove it, then schedule it on another host", func() {
if !providerIs("gce", "aws") {
By(fmt.Sprintf("Skipping PD test, which is only supported for providers gce & aws (not %s)",
testContext.Provider))
return
}
Expect(numHosts >= 2).To(BeTrue(), "At least 2 nodes required")
SkipUnlessProviderIs("gce", "aws")
By("creating PD")
diskName, err := createPD()
@ -122,13 +113,7 @@ var _ = Describe("Pod Disks", func() {
})
It("should schedule a pod w/ a readonly PD on two hosts, then remove both.", func() {
if testContext.Provider != "gce" {
By(fmt.Sprintf("Skipping PD test, which is only supported for provider gce (not %s)",
testContext.Provider))
return
}
Expect(numHosts >= 2).To(BeTrue(), "At least 2 nodes required")
SkipUnlessProviderIs("gce")
By("creating PD")
diskName, err := createPD()

View File

@ -47,11 +47,9 @@ var _ = Describe("ReplicationController", func() {
})
It("should serve a basic image on each replica with a private image", func() {
if !providerIs("gce", "gke") {
By(fmt.Sprintf("Skipping private variant, which is only supported for providers gce and gke (not %s)",
testContext.Provider))
return
}
// requires private images
SkipUnlessProviderIs("gce", "gke")
ServeImageOrFail(c, "private", "gcr.io/_b_k8s_authenticated_test/serve_hostname:1.1")
})
})

View File

@ -50,6 +50,12 @@ var _ = Describe("Reboot", func() {
var err error
c, err = loadClient()
Expect(err).NotTo(HaveOccurred())
// These tests requires SSH, so the provider check should be identical to there
// (the limiting factor is the implementation of util.go's getSigner(...)).
// Cluster must support node reboot
SkipUnlessProviderIs("gce", "aws")
})
It("each node by ordering clean reboot and ensure they function upon restart", func() {
@ -90,15 +96,6 @@ var _ = Describe("Reboot", func() {
})
func testReboot(c *client.Client, rebootCmd string) {
// This test requires SSH, so the provider check should be identical to
// there (the limiting factor is the implementation of util.go's
// getSigner(...)).
provider := testContext.Provider
if !providerIs("aws", "gce") {
By(fmt.Sprintf("Skipping reboot test, which is not implemented for %s", provider))
return
}
// Get all nodes, and kick off the test on each.
nodelist, err := listNodes(c, labels.Everything(), fields.Everything())
if err != nil {
@ -106,7 +103,7 @@ func testReboot(c *client.Client, rebootCmd string) {
}
result := make(chan bool, len(nodelist.Items))
for _, n := range nodelist.Items {
go rebootNode(c, provider, n.ObjectMeta.Name, rebootCmd, result)
go rebootNode(c, testContext.Provider, n.ObjectMeta.Name, rebootCmd, result)
}
// Wait for all to finish and check the final result.

View File

@ -386,8 +386,6 @@ func performTemporaryNetworkFailure(c *client.Client, ns, rcName string, replica
}
var _ = Describe("Nodes", func() {
supportedProviders := []string{"aws", "gce", "gke"}
var testName string
var c *client.Client
var ns string
@ -408,17 +406,20 @@ var _ = Describe("Nodes", func() {
})
Describe("Resize", func() {
var skipped bool
BeforeEach(func() {
if !providerIs(supportedProviders...) {
Failf("Nodes.Resize test is only supported for providers %v (not %s). You can avoid this failure by using ginkgo.skip=Nodes.Resize in your environment.",
supportedProviders, testContext.Provider)
}
skipped = true
SkipUnlessProviderIs("gce", "gke", "aws")
SkipUnlessNodeCountIsAtLeast(2)
skipped = false
})
AfterEach(func() {
if !providerIs(supportedProviders...) {
if skipped {
return
}
By("restoring the original node instance group size")
if err := resizeGroup(testContext.CloudConfig.NumNodes); err != nil {
Failf("Couldn't restore the original node instance group size: %v", err)
@ -431,15 +432,7 @@ var _ = Describe("Nodes", func() {
}
})
testName = "should be able to delete nodes."
It(testName, func() {
Logf("starting test %s", testName)
if testContext.CloudConfig.NumNodes < 2 {
Failf("Failing test %s as it requires at least 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
return
}
It("should be able to delete nodes", func() {
// Create a replication controller for a service that serves its hostname.
// The source for the Docker containter kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-delete-node"
@ -461,16 +454,8 @@ var _ = Describe("Nodes", func() {
Expect(err).NotTo(HaveOccurred())
})
testName = "should be able to add nodes."
It(testName, func() {
// TODO: Bug here - testName is not correct
Logf("starting test %s", testName)
if testContext.CloudConfig.NumNodes < 2 {
Failf("Failing test %s as it requires at least 2 nodes (not %d)", testName, testContext.CloudConfig.NumNodes)
return
}
// TODO: Bug here - testName is not correct
It("should be able to add nodes", func() {
// Create a replication controller for a service that serves its hostname.
// The source for the Docker containter kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-add-node"
@ -497,68 +482,62 @@ var _ = Describe("Nodes", func() {
})
Describe("Network", func() {
BeforeEach(func() {
if !providerIs(supportedProviders...) {
Failf("Nodes.Network test is only supported for providers %v (not %s). You can avoid this failure by using ginkgo.skip=Nodes.Network in your environment.",
supportedProviders, testContext.Provider)
}
})
Context("when a minion node becomes unreachable", func() {
BeforeEach(func() {
SkipUnlessProviderIs("gce", "gke", "aws")
SkipUnlessNodeCountIsAtLeast(2)
})
// TODO marekbiskup 2015-06-19 #10085
// This test has nothing to do with resizing nodes so it should be moved elsewhere.
// Two things are tested here:
// 1. pods from a uncontactable nodes are rescheduled
// 2. when a node joins the cluster, it can host new pods.
// Factor out the cases into two separate tests.
// TODO marekbiskup 2015-06-19 #10085
// This test has nothing to do with resizing nodes so it should be moved elsewhere.
// Two things are tested here:
// 1. pods from a uncontactable nodes are rescheduled
// 2. when a node joins the cluster, it can host new pods.
// Factor out the cases into two separate tests.
It("[replication controller] recreates pods scheduled on the unreachable minion node "+
"AND allows scheduling of pods on a minion after it rejoins the cluster", func() {
testName = "Uncontactable nodes, have their pods recreated by a replication controller, and can host new pods after rejoining."
It(testName, func() {
if testContext.CloudConfig.NumNodes < 2 {
By(fmt.Sprintf("skipping %s test, which requires at least 2 nodes (not %d)",
testName, testContext.CloudConfig.NumNodes))
return
}
// Create a replication controller for a service that serves its hostname.
// The source for the Docker containter kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-net"
newSVCByName(c, ns, name)
replicas := testContext.CloudConfig.NumNodes
newRCByName(c, ns, name, replicas)
err := verifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
// Create a replication controller for a service that serves its hostname.
// The source for the Docker containter kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-net"
newSVCByName(c, ns, name)
replicas := testContext.CloudConfig.NumNodes
newRCByName(c, ns, name, replicas)
err := verifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
By("choose a node with at least one pod - we will block some network traffic on this node")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
pods, err := c.Pods(ns).List(label, fields.Everything()) // list pods after all have been scheduled
Expect(err).NotTo(HaveOccurred())
nodeName := pods.Items[0].Spec.NodeName
node, err := c.Nodes().Get(nodeName)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("block network traffic from node %s", node.Name))
performTemporaryNetworkFailure(c, ns, name, replicas, pods.Items[0].Name, node)
Logf("Waiting for node %s to be ready", node.Name)
waitForNodeToBe(c, node.Name, true, 2*time.Minute)
By("verify wheter new pods can be created on the re-attached node")
// increasing the RC size is not a valid way to test this
// since we have no guarantees the pod will be scheduled on our node.
additionalPod := "additionalpod"
err = newPodOnNode(c, ns, additionalPod, node.Name)
Expect(err).NotTo(HaveOccurred())
err = verifyPods(c, ns, additionalPod, true, 1)
Expect(err).NotTo(HaveOccurred())
// verify that it is really on the requested node
{
pod, err := c.Pods(ns).Get(additionalPod)
By("choose a node with at least one pod - we will block some network traffic on this node")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
pods, err := c.Pods(ns).List(label, fields.Everything()) // list pods after all have been scheduled
Expect(err).NotTo(HaveOccurred())
if pod.Spec.NodeName != node.Name {
Logf("Pod %s found on invalid node: %s instead of %s", pod.Spec.NodeName, node.Name)
nodeName := pods.Items[0].Spec.NodeName
node, err := c.Nodes().Get(nodeName)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("block network traffic from node %s", node.Name))
performTemporaryNetworkFailure(c, ns, name, replicas, pods.Items[0].Name, node)
Logf("Waiting for node %s to be ready", node.Name)
waitForNodeToBe(c, node.Name, true, 2*time.Minute)
By("verify wheter new pods can be created on the re-attached node")
// increasing the RC size is not a valid way to test this
// since we have no guarantees the pod will be scheduled on our node.
additionalPod := "additionalpod"
err = newPodOnNode(c, ns, additionalPod, node.Name)
Expect(err).NotTo(HaveOccurred())
err = verifyPods(c, ns, additionalPod, true, 1)
Expect(err).NotTo(HaveOccurred())
// verify that it is really on the requested node
{
pod, err := c.Pods(ns).Get(additionalPod)
Expect(err).NotTo(HaveOccurred())
if pod.Spec.NodeName != node.Name {
Logf("Pod %s found on invalid node: %s instead of %s", pod.Spec.NodeName, node.Name)
}
}
}
})
})
})
})

View File

@ -53,27 +53,32 @@ const (
var _ = Describe("Restart", func() {
var c *client.Client
var ps *podStore
var skipped bool
BeforeEach(func() {
var err error
c, err = loadClient()
Expect(err).NotTo(HaveOccurred())
// This test requires the ability to restart all nodes, so the provider
// check must be identical to that call.
skipped = true
SkipUnlessProviderIs("gce")
skipped = false
ps = newPodStore(c, api.NamespaceDefault, labels.Everything(), fields.Everything())
})
AfterEach(func() {
if skipped {
return
}
ps.Stop()
})
It("should restart all nodes and ensure all nodes and pods recover", func() {
// This test requires the ability to restart all nodes, so the provider
// check must be identical to that call.
provider := testContext.Provider
nn := testContext.CloudConfig.NumNodes
if !providerIs("gce") {
By(fmt.Sprintf("Skipping reboot test, which is not implemented for %s", provider))
return
}
By("ensuring all nodes are ready")
nodeNamesBefore, err := checkNodesReady(c, nodeReadyInitialTimeout, nn)
@ -92,7 +97,7 @@ var _ = Describe("Restart", func() {
}
By("restarting all of the nodes")
err = restartNodes(provider, restartPerNodeTimeout)
err = restartNodes(testContext.Provider, restartPerNodeTimeout)
Expect(err).NotTo(HaveOccurred())
By("ensuring all nodes are ready after the restart")

View File

@ -230,10 +230,8 @@ var _ = Describe("Services", func() {
})
It("should be able to create a functioning external load balancer", func() {
if !providerIs("gce", "gke", "aws") {
By(fmt.Sprintf("Skipping service external load balancer test; uses ServiceTypeLoadBalancer, a (gce|gke|aws) feature"))
return
}
// requires ExternalLoadBalancer
SkipUnlessProviderIs("gce", "gke", "aws")
serviceName := "external-lb-test"
ns := namespaces[0]
@ -741,10 +739,8 @@ var _ = Describe("Services", func() {
})
It("should correctly serve identically named services in different namespaces on different external IP addresses", func() {
if !providerIs("gce", "gke", "aws") {
By(fmt.Sprintf("Skipping service namespace collision test; uses ServiceTypeLoadBalancer, a (gce|gke|aws) feature"))
return
}
// requires ExternalLoadBalancer
SkipUnlessProviderIs("gce", "gke", "aws")
serviceNames := []string{"s0"} // Could add more here, but then it takes longer.
labels := map[string]string{

View File

@ -26,15 +26,9 @@ import (
)
var _ = Describe("Shell", func() {
defer GinkgoRecover()
It("should pass tests for services.sh", func() {
SkipUnlessProviderIs("gce", "gke")
It(fmt.Sprintf("should pass tests for services.sh"), func() {
// The services script only works on gce/gke
if !providerIs("gce", "gke") {
By(fmt.Sprintf("Skipping Shell test services.sh, which is only supported for provider gce and gke (not %s)",
testContext.Provider))
return
}
runCmdTest(filepath.Join(testContext.RepoRoot, "hack/e2e-suite/services.sh"))
})
})
@ -48,7 +42,5 @@ func runCmdTest(path string) {
if err := cmd.Run(); err != nil {
Fail(fmt.Sprintf("Error running %v:\nCommand output:\n%v\n", cmd, cmd.Stdout))
return
}
return
}

View File

@ -33,17 +33,12 @@ var _ = Describe("SSH", func() {
var err error
c, err = loadClient()
Expect(err).NotTo(HaveOccurred())
// When adding more providers here, also implement their functionality in util.go's getSigner(...).
SkipUnlessProviderIs("gce", "gke")
})
It("should SSH to all nodes and run commands", func() {
// When adding more providers here, also implement their functionality
// in util.go's getSigner(...).
provider := testContext.Provider
if !providerIs("gce", "gke") {
By(fmt.Sprintf("Skipping SSH test, which is not implemented for %s", provider))
return
}
// Get all nodes' external IPs.
By("Getting all nodes' SSH-able IP addresses")
hosts, err := NodeSSHHosts(c)
@ -70,7 +65,7 @@ var _ = Describe("SSH", func() {
for _, testCase := range testCases {
By(fmt.Sprintf("SSH'ing to all nodes and running %s", testCase.cmd))
for _, host := range hosts {
stdout, stderr, code, err := SSH(testCase.cmd, host, provider)
stdout, stderr, code, err := SSH(testCase.cmd, host, testContext.Provider)
stdout, stderr = strings.TrimSpace(stdout), strings.TrimSpace(stderr)
if err != testCase.expectedError {
Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError)
@ -96,7 +91,7 @@ var _ = Describe("SSH", func() {
// Quickly test that SSH itself errors correctly.
By("SSH'ing to a nonexistent host")
if _, _, _, err = SSH(`echo "hello"`, "i.do.not.exist", provider); err == nil {
if _, _, _, err = SSH(`echo "hello"`, "i.do.not.exist", testContext.Provider); err == nil {
Failf("Expected error trying to SSH to nonexistent host.")
}
})

View File

@ -182,6 +182,28 @@ func Failf(format string, a ...interface{}) {
Fail(fmt.Sprintf(format, a...), 1)
}
// Skipf skips the current Ginkgo test with a printf-style formatted message.
func Skipf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	Skip(msg)
}
// SkipUnlessNodeCountIsAtLeast skips the current Ginkgo test unless the
// cluster under test has at least minNodeCount nodes (per testContext).
func SkipUnlessNodeCountIsAtLeast(minNodeCount int) {
	actual := testContext.CloudConfig.NumNodes
	if actual >= minNodeCount {
		return
	}
	Skipf("Requires at least %d nodes (not %d)", minNodeCount, actual)
}
// SkipIfProviderIs skips the current Ginkgo test when the configured cloud
// provider matches any entry in unsupportedProviders.
func SkipIfProviderIs(unsupportedProviders ...string) {
	if !providerIs(unsupportedProviders...) {
		return
	}
	Skipf("Not supported for providers %v (found %s)", unsupportedProviders, testContext.Provider)
}
// SkipUnlessProviderIs skips the current Ginkgo test unless the configured
// cloud provider matches one of supportedProviders.
func SkipUnlessProviderIs(supportedProviders ...string) {
	if providerIs(supportedProviders...) {
		return
	}
	Skipf("Only supported for providers %v (not %s)", supportedProviders, testContext.Provider)
}
func providerIs(providers ...string) bool {
for _, provider := range providers {
if strings.ToLower(provider) == strings.ToLower(testContext.Provider) {