Use e2eskipper package in e2e/apps

toyoda 2020-01-10 15:08:19 +09:00
parent 9269b5d430
commit 91dca8ff8c
8 changed files with 26 additions and 18 deletions
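
A minimal sketch of the pattern applied throughout this commit is shown below: the skip helpers are imported from the dedicated skipper package and called as e2eskipper.Skip* in place of the corresponding framework.Skip* helpers. The test spec is illustrative only (it is not one of the files changed here), and the provider names are placeholder arguments.

package apps

import (
	"github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
)

var _ = framework.KubeDescribe("Skipper example", func() {
	f := framework.NewDefaultFramework("skipper-example")

	ginkgo.BeforeEach(func() {
		// Previously: framework.SkipUnlessProviderIs("gce", "gke")
		// The helper now lives in the e2eskipper package; behavior is unchanged.
		e2eskipper.SkipUnlessProviderIs("gce", "gke")
	})

	ginkgo.It("runs only on the providers named above", func() {
		framework.Logf("running in namespace %s", f.Namespace.Name)
	})
})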

View File

@@ -72,6 +72,7 @@ go_library(
"//test/e2e/framework/rc:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",
"//test/e2e/framework/service:go_default_library",
"//test/e2e/framework/skipper:go_default_library",
"//test/e2e/framework/ssh:go_default_library",
"//test/e2e/framework/statefulset:go_default_library",
"//test/utils:go_default_library",

View File

@@ -35,6 +35,7 @@ import (
"k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/test/e2e/framework"
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -53,7 +54,7 @@ var _ = SIGDescribe("CronJob", func() {
failureCommand := []string{"/bin/false"}
ginkgo.BeforeEach(func() {
-framework.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResourceBeta, f.Namespace.Name)
+e2eskipper.SkipIfMissingResource(f.DynamicClient, CronJobGroupVersionResourceBeta, f.Namespace.Name)
})
// multiple jobs running at once

View File

@@ -35,6 +35,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -204,7 +205,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
ginkgo.BeforeEach(func() {
// These tests require SSH
-framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
+e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
ns = f.Namespace.Name
// All the restart tests need an rc and a watch on pods of the rc.
@@ -258,7 +259,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
ginkgo.It("Controller Manager should not create/delete replicas across restart", func() {
// Requires master ssh access.
framework.SkipUnlessProviderIs("gce", "aws")
e2eskipper.SkipUnlessProviderIs("gce", "aws")
restarter := NewRestartConfig(
framework.GetMasterHost(), "kube-controller", ports.InsecureKubeControllerManagerPort, restartPollInterval, restartTimeout)
restarter.restart()
@@ -289,7 +290,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
ginkgo.It("Scheduler should continue assigning pods to nodes across restart", func() {
// Requires master ssh access.
framework.SkipUnlessProviderIs("gce", "aws")
e2eskipper.SkipUnlessProviderIs("gce", "aws")
restarter := NewRestartConfig(
framework.GetMasterHost(), "kube-scheduler", ports.InsecureSchedulerPort, restartPollInterval, restartTimeout)

View File

@@ -45,6 +45,7 @@ import (
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/replicaset"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
testutil "k8s.io/kubernetes/test/utils"
utilpointer "k8s.io/utils/pointer"
)
@@ -125,7 +126,7 @@ var _ = SIGDescribe("Deployment", func() {
testProportionalScalingDeployment(f)
})
ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func() {
framework.SkipUnlessProviderIs("aws", "azure", "gce", "gke")
e2eskipper.SkipUnlessProviderIs("aws", "azure", "gce", "gke")
testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f)
})
// TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues

View File

@@ -33,6 +33,7 @@ import (
"k8s.io/client-go/util/retry"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -149,7 +150,7 @@ var _ = SIGDescribe("DisruptionController", func() {
}
ginkgo.It(fmt.Sprintf("evictions: %s => %s", c.description, expectation), func() {
if c.skipForBigClusters {
-framework.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1)
+e2eskipper.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1)
}
createPodsOrDie(cs, ns, c.podCount)
if c.replicaSetSize > 0 {

View File

@@ -41,6 +41,7 @@ import (
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
testutils "k8s.io/kubernetes/test/utils"
@@ -120,7 +121,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.ExpectNoError(err)
// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
framework.SkipUnlessProviderIs("gke", "aws")
e2eskipper.SkipUnlessProviderIs("gke", "aws")
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
}
@@ -129,8 +130,8 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.KubeDescribe("Pods", func() {
ginkgo.Context("should return to running and ready state after network partition is healed", func() {
ginkgo.BeforeEach(func() {
-framework.SkipUnlessNodeCountIsAtLeast(2)
-framework.SkipUnlessSSHKeyPresent()
+e2eskipper.SkipUnlessNodeCountIsAtLeast(2)
+e2eskipper.SkipUnlessSSHKeyPresent()
})
// What happens in this test:
@@ -239,7 +240,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.KubeDescribe("[ReplicationController]", func() {
ginkgo.It("should recreate pods scheduled on the unreachable node "+
"AND allow scheduling of pods on a node after it rejoins the cluster", func() {
-framework.SkipUnlessSSHKeyPresent()
+e2eskipper.SkipUnlessSSHKeyPresent()
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
@@ -306,7 +307,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
})
ginkgo.It("should eagerly create replacement pod during network partition when termination grace is non-zero", func() {
-framework.SkipUnlessSSHKeyPresent()
+e2eskipper.SkipUnlessSSHKeyPresent()
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
@@ -363,7 +364,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
ginkgo.BeforeEach(func() {
// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
framework.SkipUnlessProviderIs("gke")
e2eskipper.SkipUnlessProviderIs("gke")
ginkgo.By("creating service " + headlessSvcName + " in namespace " + f.Namespace.Name)
headlessService := e2eservice.CreateServiceSpec(headlessSvcName, "", true, labels)
_, err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(headlessService)
@@ -398,7 +399,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
})
ginkgo.It("should not reschedule stateful pods if there is a network partition [Slow] [Disruptive]", func() {
-framework.SkipUnlessSSHKeyPresent()
+e2eskipper.SkipUnlessSSHKeyPresent()
ps := e2esset.NewStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
_, err := c.AppsV1().StatefulSets(ns).Create(ps)
@@ -431,7 +432,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.KubeDescribe("[Job]", func() {
ginkgo.It("should create new pods when node is partitioned", func() {
-framework.SkipUnlessSSHKeyPresent()
+e2eskipper.SkipUnlessSSHKeyPresent()
parallelism := int32(2)
completions := int32(4)
@@ -479,7 +480,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.KubeDescribe("Pods", func() {
ginkgo.Context("should be evicted from unready Node", func() {
ginkgo.BeforeEach(func() {
-framework.SkipUnlessNodeCountIsAtLeast(2)
+e2eskipper.SkipUnlessNodeCountIsAtLeast(2)
})
// What happens in this test:
@@ -490,7 +491,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// 3. After enough time passes all Pods are evicted from the given Node
ginkgo.It("[Feature:TaintEviction] All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+
"AND all pods should be evicted after eviction timeout passes", func() {
-framework.SkipUnlessSSHKeyPresent()
+e2eskipper.SkipUnlessSSHKeyPresent()
ginkgo.By("choose a node - we will block all network traffic on this node")
var podOpts metav1.ListOptions
nodes, err := e2enode.GetReadySchedulableNodes(c)

View File

@@ -31,6 +31,7 @@ import (
"k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
@@ -51,7 +52,7 @@ var _ = SIGDescribe("ReplicationController", func() {
ginkgo.It("should serve a basic image on each replica with a private image", func() {
// requires private images
framework.SkipUnlessProviderIs("gce", "gke")
e2eskipper.SkipUnlessProviderIs("gce", "gke")
privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate)
TestReplicationControllerServeImageOrFail(f, "private", privateimage.GetE2EImage())
})

View File

@@ -32,6 +32,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -95,7 +96,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
ginkgo.It("should serve a basic image on each replica with a private image", func() {
// requires private images
framework.SkipUnlessProviderIs("gce", "gke")
e2eskipper.SkipUnlessProviderIs("gce", "gke")
privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate)
testReplicaSetServeImageOrFail(f, "private", privateimage.GetE2EImage())
})