Merge pull request #81087 from johnSchnake/addSkipsIfSSHNotPresent

Ensure tests which rely on SSH behavior are skipped if unavailable
Kubernetes Prow Robot 2019-08-13 07:56:07 -07:00 committed by GitHub
commit 43fa5ca693
14 changed files with 40 additions and 0 deletions
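
The change is mechanical across all 14 files: each SSH-dependent spec (or its BeforeEach) calls framework.SkipUnlessSSHKeyPresent() before doing any work, so an environment that has no SSH key records a skip instead of a late failure. A rough sketch of the pattern only (the spec name and body below are invented for illustration; just the framework guard calls come from this PR):

package example

import (
	"github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.KubeDescribe("SSH-dependent behavior [Disruptive]", func() {
	f := framework.NewDefaultFramework("ssh-guard-example")

	ginkgo.It("should reach nodes over SSH", func() {
		// Guard clauses run first: limit the test to providers whose nodes the
		// suite can reach, then skip outright when no SSH key is configured,
		// rather than failing later when an SSH session is opened.
		framework.SkipUnlessProviderIs("gce", "gke")
		framework.SkipUnlessSSHKeyPresent()

		// ... test body that shells into nodes of f.ClientSet ...
		_ = f
	})
})

Where a whole Context is SSH-dependent (as in the network-partition and load-capacity hunks below), the same call sits in a ginkgo.BeforeEach instead of each individual It.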

View File

@@ -44,6 +44,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
// ... so the provider check should be identical to the intersection of
// providers that provide those capabilities.
framework.SkipUnlessProviderIs("gce")
framework.SkipUnlessSSHKeyPresent()
err := framework.RunRC(testutils.RCConfig{
Client: f.ClientSet,

View File

@@ -130,6 +130,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
ginkgo.Context("should return to running and ready state after network partition is healed", func() {
ginkgo.BeforeEach(func() {
framework.SkipUnlessNodeCountIsAtLeast(2)
framework.SkipUnlessSSHKeyPresent()
})
// What happens in this test:
@@ -238,6 +239,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.KubeDescribe("[ReplicationController]", func() {
ginkgo.It("should recreate pods scheduled on the unreachable node "+
"AND allow scheduling of pods on a node after it rejoins the cluster", func() {
framework.SkipUnlessSSHKeyPresent()
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
@@ -304,6 +306,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
})
ginkgo.It("should eagerly create replacement pod during network partition when termination grace is non-zero", func() {
framework.SkipUnlessSSHKeyPresent()
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-net"
@@ -394,6 +398,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
})
ginkgo.It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() {
framework.SkipUnlessSSHKeyPresent()
ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
_, err := c.AppsV1().StatefulSets(ns).Create(ps)
framework.ExpectNoError(err)
@@ -425,6 +431,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.KubeDescribe("[Job]", func() {
ginkgo.It("should create new pods when node is partitioned", func() {
framework.SkipUnlessSSHKeyPresent()
parallelism := int32(2)
completions := int32(4)
backoffLimit := int32(6) // default value
@@ -482,6 +490,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// 3. After enough time passes, all Pods are evicted from the given Node
ginkgo.It("[Feature:TaintEviction] All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+
"AND all pods should be evicted after eviction timeout passes", func() {
framework.SkipUnlessSSHKeyPresent()
ginkgo.By("choose a node - we will block all network traffic on this node")
var podOpts metav1.ListOptions
nodes := framework.GetReadySchedulableNodesOrDie(c)

View File

@@ -685,6 +685,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.It("should correctly scale down after a node is not needed and one node is broken [Feature:ClusterSizeAutoscalingScaleDown]",
func() {
framework.SkipUnlessSSHKeyPresent()
framework.TestUnderTemporaryNetworkFailure(c, "default", getAnyNode(c), func() { simpleScaleDownTest(1) })
})
@@ -876,6 +877,8 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
})
ginkgo.It("Shouldn't perform scale up operation and should list unhealthy status if most of the cluster is broken[Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessSSHKeyPresent()
clusterSize := nodeCount
for clusterSize < unhealthyClusterThreshold+1 {
clusterSize = manuallyIncreaseClusterSize(f, originalSizes)

View File

@@ -408,6 +408,7 @@ var _ = SIGDescribe("Services", func() {
ginkgo.It("should work after restarting kube-proxy [Disruptive]", func() {
// TODO: use the ServiceTestJig here
framework.SkipUnlessProviderIs("gce", "gke")
framework.SkipUnlessSSHKeyPresent()
ns := f.Namespace.Name
numPods, servicePort := 3, defaultServeHostnameServicePort
@@ -464,6 +465,7 @@ var _ = SIGDescribe("Services", func() {
ginkgo.It("should work after restarting apiserver [Disruptive]", func() {
// TODO: use the ServiceTestJig here
framework.SkipUnlessProviderIs("gce", "gke")
framework.SkipUnlessSSHKeyPresent()
ns := f.Namespace.Name
numPods, servicePort := 3, 80
@@ -1666,6 +1668,8 @@ var _ = SIGDescribe("Services", func() {
// This test is for clusters on GCE.
// (It restarts kube-controller-manager, which we don't support on GKE)
framework.SkipUnlessProviderIs("gce")
framework.SkipUnlessSSHKeyPresent()
clusterID, err := gce.GetClusterID(cs)
if err != nil {
e2elog.Failf("framework.GetClusterID(cs) = _, %v; want nil", err)

View File

@@ -56,6 +56,8 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
})
ginkgo.It("should run without error", func() {
framework.SkipUnlessSSHKeyPresent()
ginkgo.By("Getting all nodes and their SSH-able IP addresses")
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero())

View File

@@ -503,6 +503,9 @@ var _ = SIGDescribe("Density", func() {
f.NamespaceDeletionTimeout = time.Hour
ginkgo.BeforeEach(func() {
// Gathering the metrics currently goes through a code path that relies on SSH.
framework.SkipUnlessSSHKeyPresent()
var err error
c = f.ClientSet
ns = f.Namespace.Name

View File

@@ -109,6 +109,10 @@ var _ = SIGDescribe("Load capacity", func() {
var testPhaseDurations *timer.TestPhaseTimer
var profileGathererStopCh chan struct{}
ginkgo.BeforeEach(func() {
framework.SkipUnlessSSHKeyPresent()
})
// Gathers metrics before teardown
// TODO: add a flag that allows skipping cleanup on failure
ginkgo.AfterEach(func() {

View File

@@ -76,6 +76,8 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
// 5. node lifecycle manager generates a status change: [NodeReady=true, status=ConditionTrue]
// 6. node.kubernetes.io/unreachable=:NoExecute taint is taken off the node
ginkgo.It("Checks that the node becomes unreachable", func() {
framework.SkipUnlessSSHKeyPresent()
// find an available node
nodeName := GetNodeThatCanRunPod(f)
ginkgo.By("Finding an available node " + nodeName)

View File

@@ -64,6 +64,8 @@ var _ = utils.SIGDescribe("Detaching volumes", func() {
})
ginkgo.It("should not work when mount is in progress [Slow]", func() {
framework.SkipUnlessSSHKeyPresent()
driver := "attachable-with-long-mount"
driverInstallAs := driver + "-" + suffix

View File

@@ -116,6 +116,8 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa
})
ginkgo.It("should be resizable when mounted", func() {
framework.SkipUnlessSSHKeyPresent()
driver := "dummy-attachable"
node := nodeList.Items[0]

View File

@@ -175,6 +175,8 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() {
})
ginkgo.It("should delete a bound PVC from a clientPod, restart the kube-control-manager, and ensure the kube-controller-manager does not crash", func() {
framework.SkipUnlessSSHKeyPresent()
ginkgo.By("Deleting PVC for volume 2")
err = framework.DeletePersistentVolumeClaim(c, pvc2.Name, ns)
framework.ExpectNoError(err)

View File

@@ -179,6 +179,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
3. Verify that written file is accessible after kubelet restart
*/
ginkgo.It("should test that a file written to the vspehre volume mount before kubelet restart can be read after restart [Disruptive]", func() {
framework.SkipUnlessSSHKeyPresent()
utils.TestKubeletRestartsAndRestoresMount(c, f, clientPod)
})
@@ -194,6 +195,7 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere", func() {
5. Verify that the volume mount is no longer present.
*/
ginkgo.It("should test that a vspehre volume mounted to a pod that is deleted while the kubelet is down unmounts when the kubelet returns [Disruptive]", func() {
framework.SkipUnlessSSHKeyPresent()
utils.TestVolumeUnmountsFromDeletedPod(c, f, clientPod)
})

View File

@@ -81,6 +81,8 @@ var _ = utils.SIGDescribe("Volume Attach Verify [Feature:vsphere][Serial][Disrup
})
ginkgo.It("verify volume remains attached after master kubelet restart", func() {
framework.SkipUnlessSSHKeyPresent()
// Create pod on each node
for i := 0; i < numNodes; i++ {
ginkgo.By(fmt.Sprintf("%d: Creating a test vsphere volume", i))

View File

@@ -99,6 +99,8 @@ var _ = utils.SIGDescribe("Verify Volume Attach Through vpxd Restart [Feature:vs
})
ginkgo.It("verify volume remains attached through vpxd restart", func() {
framework.SkipUnlessSSHKeyPresent()
for vcHost, nodes := range vcNodesMap {
var (
volumePaths []string
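
The helper itself is not part of this diff. For context, a minimal standalone sketch of what such a guard can do is below; the environment-variable name, the skipf callback, and the file check are assumptions for illustration, not the framework's actual implementation (which resolves keys through provider-specific SSH plumbing).

package sketch

import (
	"fmt"
	"os"
)

// skipUnlessSSHKeyPresent is a hypothetical stand-in for the framework helper
// added throughout this PR: if no usable SSH key can be found, the calling
// test is skipped rather than failed.
func skipUnlessSSHKeyPresent(skipf func(format string, args ...interface{})) {
	keyPath := os.Getenv("KUBE_SSH_KEY") // assumed variable name, for illustration only
	if keyPath == "" {
		skipf("no SSH key configured for this environment; skipping SSH-dependent test")
		return
	}
	if _, err := os.Stat(keyPath); err != nil {
		skipf("configured SSH key %q is not readable: %v", keyPath, err)
	}
}

// Example wiring; in the e2e suite the skip would go through framework.Skipf.
func Example() {
	skipUnlessSSHKeyPresent(func(format string, args ...interface{}) {
		fmt.Printf("SKIP: "+format+"\n", args...)
	})
}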