From aaac13fc6a1c66325be151cf0f7a70744afbc91c Mon Sep 17 00:00:00 2001
From: carlory
Date: Mon, 20 Jan 2025 21:50:46 +0800
Subject: [PATCH] remove gke from e2e

---
 test/e2e/apps/deployment.go                   |  2 +-
 test/e2e/apps/rc.go                           |  2 +-
 test/e2e/apps/replica_set.go                  |  2 +-
 test/e2e/cloud/gcp/cluster_upgrade.go         |  4 +-
 .../e2e/cloud/gcp/common/upgrade_mechanics.go | 57 -------------------
 test/e2e/cloud/gcp/gke_node_pools.go          |  2 +-
 test/e2e/cloud/gcp/kubelet_security.go        |  2 +-
 test/e2e/cloud/gcp/node_lease.go              | 17 +-----
 test/e2e/cloud/gcp/reboot.go                  | 11 ----
 test/e2e/cloud/gcp/resize_nodes.go            | 15 +----
 test/e2e/cloud/gcp/restart.go                 |  2 +-
 test/e2e/cloud/nodes.go                       |  2 +-
 test/e2e/e2e.go                               |  2 +-
 test/e2e/framework/ssh/ssh.go                 |  2 +-
 test/e2e/framework/util.go                    |  4 +-
 test/e2e/kubectl/kubectl.go                   |  5 --
 test/e2e/network/dns.go                       |  2 +-
 test/e2e/network/service.go                   | 14 +----
 test/e2e/node/crictl.go                       |  2 +-
 test/e2e/node/node_problem_detector.go        |  2 +-
 test/e2e/storage/csimock/base.go              |  2 +-
 test/e2e/storage/drivers/csi.go               |  7 +--
 test/e2e/storage/drivers/in_tree.go           |  2 +-
 test/e2e/storage/helpers.go                   |  2 +-
 test/e2e/storage/testsuites/base.go           |  2 +-
 test/e2e/storage/volume_metrics.go            |  2 +-
 test/e2e/storage/volume_provisioning.go       | 24 ++++----
 .../upgrades/storage/persistent_volumes.go    |  2 +-
 test/e2e/upgrades/storage/volume_mode.go      |  2 +-
 29 files changed, 42 insertions(+), 154 deletions(-)

diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go
index c733ff77eb5..16190febd84 100644
--- a/test/e2e/apps/deployment.go
+++ b/test/e2e/apps/deployment.go
@@ -161,7 +161,7 @@ var _ = SIGDescribe("Deployment", func() {
 		testProportionalScalingDeployment(ctx, f)
 	})
 	ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func(ctx context.Context) {
-		e2eskipper.SkipUnlessProviderIs("aws", "azure", "gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("aws", "azure", "gce")
 		e2eskipper.SkipIfIPv6("aws")
 		nodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
 		framework.ExpectNoError(err)
diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go
index 04d455cb66d..b34f3767374 100644
--- a/test/e2e/apps/rc.go
+++ b/test/e2e/apps/rc.go
@@ -73,7 +73,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 
 	ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) {
 		// requires private images
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate)
 		TestReplicationControllerServeImageOrFail(ctx, f, "private", privateimage.GetE2EImage())
 	})
diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go
index e4c3c24ac12..8e28f1ce525 100644
--- a/test/e2e/apps/replica_set.go
+++ b/test/e2e/apps/replica_set.go
@@ -115,7 +115,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
 
 	ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) {
 		// requires private images
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate)
 		testReplicaSetServeImageOrFail(ctx, f, "private", privateimage.GetE2EImage())
 	})
diff --git a/test/e2e/cloud/gcp/cluster_upgrade.go b/test/e2e/cloud/gcp/cluster_upgrade.go
index 88e4ddc8090..4ab4c4690be 100644
--- a/test/e2e/cloud/gcp/cluster_upgrade.go
+++ b/test/e2e/cloud/gcp/cluster_upgrade.go
@@ -60,7 +60,7 @@ var _ = SIGDescribe("Upgrade", feature.Upgrade, func() {
 	testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests)
 
 	ginkgo.BeforeEach(func() {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 	})
 
 	// Create the frameworks here because we can only create them
@@ -103,7 +103,7 @@ var _ = SIGDescribe("Downgrade", feature.Downgrade, func() {
 	testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests)
 
 	ginkgo.BeforeEach(func() {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 	})
 
 	ginkgo.Describe("cluster downgrade", func() {
diff --git a/test/e2e/cloud/gcp/common/upgrade_mechanics.go b/test/e2e/cloud/gcp/common/upgrade_mechanics.go
index 4df326c1445..95e78c84083 100644
--- a/test/e2e/cloud/gcp/common/upgrade_mechanics.go
+++ b/test/e2e/cloud/gcp/common/upgrade_mechanics.go
@@ -75,8 +75,6 @@ func controlPlaneUpgrade(ctx context.Context, f *framework.Framework, v string,
 	switch framework.TestContext.Provider {
 	case "gce":
 		return controlPlaneUpgradeGCE(v, extraEnvs)
-	case "gke":
-		return e2eproviders.MasterUpgradeGKE(ctx, f.Namespace.Name, v)
 	default:
 		return fmt.Errorf("controlPlaneUpgrade() is not implemented for provider %s", framework.TestContext.Provider)
 	}
@@ -151,8 +149,6 @@ func nodeUpgrade(ctx context.Context, f *framework.Framework, v string, img stri
 	switch framework.TestContext.Provider {
 	case "gce":
 		err = nodeUpgradeGCE(v, img, extraEnvs)
-	case "gke":
-		err = nodeUpgradeGKE(ctx, f.Namespace.Name, v, img)
 	default:
 		err = fmt.Errorf("nodeUpgrade() is not implemented for provider %s", framework.TestContext.Provider)
 	}
@@ -175,59 +171,6 @@ func nodeUpgradeGCE(rawV, img string, extraEnvs []string) error {
 	return err
 }
 
-func nodeUpgradeGKE(ctx context.Context, namespace string, v string, img string) error {
-	framework.Logf("Upgrading nodes to version %q and image %q", v, img)
-	nps, err := nodePoolsGKE()
-	if err != nil {
-		return err
-	}
-	framework.Logf("Found node pools %v", nps)
-	for _, np := range nps {
-		args := []string{
-			"container",
-			"clusters",
-			fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
-			e2eproviders.LocationParamGKE(),
-			"upgrade",
-			framework.TestContext.CloudConfig.Cluster,
-			fmt.Sprintf("--node-pool=%s", np),
-			fmt.Sprintf("--cluster-version=%s", v),
-			"--quiet",
-		}
-		if len(img) > 0 {
-			args = append(args, fmt.Sprintf("--image-type=%s", img))
-		}
-		_, _, err = framework.RunCmd("gcloud", framework.AppendContainerCommandGroupIfNeeded(args)...)
-
-		if err != nil {
-			return err
-		}
-
-		e2enode.WaitForSSHTunnels(ctx, namespace)
-	}
-	return nil
-}
-
-func nodePoolsGKE() ([]string, error) {
-	args := []string{
-		"container",
-		"node-pools",
-		fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
-		e2eproviders.LocationParamGKE(),
-		"list",
-		fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster),
-		"--format=get(name)",
-	}
-	stdout, _, err := framework.RunCmd("gcloud", framework.AppendContainerCommandGroupIfNeeded(args)...)
-	if err != nil {
-		return nil, err
-	}
-	if len(strings.TrimSpace(stdout)) == 0 {
-		return []string{}, nil
-	}
-	return strings.Fields(stdout), nil
-}
-
 func waitForNodesReadyAfterUpgrade(ctx context.Context, f *framework.Framework) error {
 	// Wait for it to complete and validate nodes are healthy.
 	//
diff --git a/test/e2e/cloud/gcp/gke_node_pools.go b/test/e2e/cloud/gcp/gke_node_pools.go
index 99eeb9be74a..e959fbca60d 100644
--- a/test/e2e/cloud/gcp/gke_node_pools.go
+++ b/test/e2e/cloud/gcp/gke_node_pools.go
@@ -36,7 +36,7 @@ var _ = SIGDescribe("GKE node pools", feature.GKENodePool, func() {
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
 	ginkgo.BeforeEach(func() {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 	})
 
 	f.It("should create a cluster with multiple node pools", feature.GKENodePool, func(ctx context.Context) {
diff --git a/test/e2e/cloud/gcp/kubelet_security.go b/test/e2e/cloud/gcp/kubelet_security.go
index c054d921360..944d32488ea 100644
--- a/test/e2e/cloud/gcp/kubelet_security.go
+++ b/test/e2e/cloud/gcp/kubelet_security.go
@@ -44,7 +44,7 @@ var _ = SIGDescribe("Ports Security Check", feature.KubeletSecurity, func() {
 	var nodeName string
 
 	ginkgo.BeforeEach(func(ctx context.Context) {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		var err error
 		node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
 		framework.ExpectNoError(err)
diff --git a/test/e2e/cloud/gcp/node_lease.go b/test/e2e/cloud/gcp/node_lease.go
index 5dc49e695d2..012bb277055 100644
--- a/test/e2e/cloud/gcp/node_lease.go
+++ b/test/e2e/cloud/gcp/node_lease.go
@@ -44,7 +44,7 @@ var _ = SIGDescribe(framework.WithDisruptive(), "NodeLease", func() {
 	var group string
 
 	ginkgo.BeforeEach(func(ctx context.Context) {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		c = f.ClientSet
 		ns = f.Namespace.Name
 		systemPods, err := e2epod.GetPodsInNamespace(ctx, c, ns, map[string]string{})
@@ -62,7 +62,7 @@ var _ = SIGDescribe(framework.WithDisruptive(), "NodeLease", func() {
 
 		ginkgo.BeforeEach(func() {
 			skipped = true
-			e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
+			e2eskipper.SkipUnlessProviderIs("gce", "aws")
 			e2eskipper.SkipUnlessNodeCountIsAtLeast(2)
 			skipped = false
 		})
@@ -76,18 +76,7 @@ var _ = SIGDescribe(framework.WithDisruptive(), "NodeLease", func() {
 		if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 			framework.Failf("Couldn't restore the original node instance group size: %v", err)
 		}
-		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
-		// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
-		// Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs
-		// right before a test that tries to get logs, for example, we may get unlucky and try to use a
-		// closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies
-		// being closed, so we sleep.
-		//
-		// TODO(cjcullen) reduce this sleep (#19314)
-		if framework.ProviderIs("gke") {
-			ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
-			time.Sleep(5 * time.Minute)
-		}
+
 		if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 			framework.Failf("Couldn't restore the original node instance group size: %v", err)
 		}
diff --git a/test/e2e/cloud/gcp/reboot.go b/test/e2e/cloud/gcp/reboot.go
index d16e26cb3ec..6712982969a 100644
--- a/test/e2e/cloud/gcp/reboot.go
+++ b/test/e2e/cloud/gcp/reboot.go
@@ -79,17 +79,6 @@ var _ = SIGDescribe("Reboot", framework.WithDisruptive(), feature.Reboot, func()
 				framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
 			}
 		}
-		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
-		// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated. Most tests
-		// make use of some proxy feature to verify functionality. So, if a reboot test runs right before a test
-		// that tries to get logs, for example, we may get unlucky and try to use a closed tunnel to a node that
-		// was recently rebooted. There's no good way to framework.Poll for proxies being closed, so we sleep.
-		//
-		// TODO(cjcullen) reduce this sleep (#19314)
-		if framework.ProviderIs("gke") {
-			ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
-			time.Sleep(5 * time.Minute)
-		}
 	})
 
 	f = framework.NewDefaultFramework("reboot")
diff --git a/test/e2e/cloud/gcp/resize_nodes.go b/test/e2e/cloud/gcp/resize_nodes.go
index 0d32f93f719..9275bece2cb 100644
--- a/test/e2e/cloud/gcp/resize_nodes.go
+++ b/test/e2e/cloud/gcp/resize_nodes.go
@@ -70,25 +70,14 @@ var _ = SIGDescribe("Nodes", framework.WithDisruptive(), func() {
 	var originalNodeCount int32
 
 	ginkgo.BeforeEach(func() {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		e2eskipper.SkipUnlessNodeCountIsAtLeast(2)
 		ginkgo.DeferCleanup(func(ctx context.Context) {
 			ginkgo.By("restoring the original node instance group size")
 			if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 				framework.Failf("Couldn't restore the original node instance group size: %v", err)
 			}
-			// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
-			// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
-			// Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs
-			// right before a test that tries to get logs, for example, we may get unlucky and try to use a
-			// closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies
-			// being closed, so we sleep.
-			//
-			// TODO(cjcullen) reduce this sleep (#19314)
-			if framework.ProviderIs("gke") {
-				ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
-				time.Sleep(5 * time.Minute)
-			}
+
 			if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 				framework.Failf("Couldn't restore the original node instance group size: %v", err)
 			}
diff --git a/test/e2e/cloud/gcp/restart.go b/test/e2e/cloud/gcp/restart.go
index 5a3679c49a6..8d4d26b12c0 100644
--- a/test/e2e/cloud/gcp/restart.go
+++ b/test/e2e/cloud/gcp/restart.go
@@ -55,7 +55,7 @@ var _ = SIGDescribe("Restart", framework.WithDisruptive(), func() {
 	ginkgo.BeforeEach(func(ctx context.Context) {
 		// This test requires the ability to restart all nodes, so the provider
 		// check must be identical to that call.
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		var err error
 		ps, err = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
 		framework.ExpectNoError(err)
diff --git a/test/e2e/cloud/nodes.go b/test/e2e/cloud/nodes.go
index edf3d2fc391..a2bf40760c8 100644
--- a/test/e2e/cloud/nodes.go
+++ b/test/e2e/cloud/nodes.go
@@ -42,7 +42,7 @@ var _ = SIGDescribe(feature.CloudProvider, framework.WithDisruptive(), "Nodes",
 	ginkgo.BeforeEach(func() {
 		// Only supported in AWS/GCE because those are the only cloud providers
 		// where E2E test are currently running.
-		e2eskipper.SkipUnlessProviderIs("aws", "gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("aws", "gce")
 		c = f.ClientSet
 	})
 
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index 262069829a8..f80c76f013c 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -178,7 +178,7 @@ func setupSuite(ctx context.Context) {
 
 	// Run only on Ginkgo node 1
 	switch framework.TestContext.Provider {
-	case "gce", "gke":
+	case "gce":
 		logClusterImageSources()
 	}
 
diff --git a/test/e2e/framework/ssh/ssh.go b/test/e2e/framework/ssh/ssh.go
index b1f0901f85b..e1931754589 100644
--- a/test/e2e/framework/ssh/ssh.go
+++ b/test/e2e/framework/ssh/ssh.go
@@ -66,7 +66,7 @@ func GetSigner(provider string) (ssh.Signer, error) {
 	// support.
 	keyfile := ""
 	switch provider {
-	case "gce", "gke", "kubemark":
+	case "gce", "kubemark":
 		keyfile = os.Getenv("GCE_SSH_KEY")
 		if keyfile == "" {
 			keyfile = os.Getenv("GCE_SSH_PRIVATE_KEY_FILE")
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index a6dc9b125d3..b727fbe7322 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -135,7 +135,7 @@ const (
 
 var (
 	// ProvidersWithSSH are those providers where each node is accessible with SSH
-	ProvidersWithSSH = []string{"gce", "gke", "aws", "local", "azure"}
+	ProvidersWithSSH = []string{"gce", "aws", "local", "azure"}
 )
 
 // RunID is a unique identifier of the e2e run.
@@ -746,7 +746,7 @@ func GetControlPlaneAddresses(ctx context.Context, c clientset.Interface) []stri
 
 	ips := sets.NewString()
 	switch TestContext.Provider {
-	case "gce", "gke":
+	case "gce":
 		for _, ip := range externalIPs {
 			ips.Insert(ip)
 		}
diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go
index 5dae2b8f4e7..25c90a67444 100644
--- a/test/e2e/kubectl/kubectl.go
+++ b/test/e2e/kubectl/kubectl.go
@@ -556,11 +556,6 @@ var _ = SIGDescribe("Kubectl client", func() {
 		})
 
 		ginkgo.It("should handle in-cluster config", func(ctx context.Context) {
-			// This test does not work for dynamically linked kubectl binaries; only statically linked ones. The
-			// problem happens when the kubectl binary is copied to a pod in the cluster. For dynamically linked
-			// binaries, the necessary libraries are not also copied. For this reason, the test can not be
-			// guaranteed to work with GKE, which sometimes run tests using a dynamically linked kubectl.
-			e2eskipper.SkipIfProviderIs("gke")
 			// TODO: Find a way to download and copy the appropriate kubectl binary, or maybe a multi-arch kubectl image
 			// for now this only works on amd64
 			e2eskipper.SkipUnlessNodeOSArchIs("amd64")
diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go
index 2b20d2ea0f4..c9ea05e2194 100644
--- a/test/e2e/network/dns.go
+++ b/test/e2e/network/dns.go
@@ -78,7 +78,7 @@ var _ = common.SIGDescribe("DNS", func() {
 
 	// Added due to #8512. This is critical for GCE and GKE deployments.
 	ginkgo.It("should provide DNS for the cluster [Provider:GCE]", func(ctx context.Context) {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 
 		namesToResolve := []string{"google.com"}
 		// Windows containers do not have a route to the GCE metadata server by default.
diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go
index fe45f949fc0..9f1b2b2cc0e 100644
--- a/test/e2e/network/service.go
+++ b/test/e2e/network/service.go
@@ -65,7 +65,6 @@ import (
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
-	e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers"
 	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@@ -1210,9 +1209,7 @@ var _ = common.SIGDescribe("Services", func() {
 
 	f.It("should work after restarting apiserver", f.WithDisruptive(), func(ctx context.Context) {
 
-		if !framework.ProviderIs("gke") {
-			e2eskipper.SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(ctx, kubeAPIServerLabelName, cs, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName})
-		}
+		e2eskipper.SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(ctx, kubeAPIServerLabelName, cs, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName})
 
 		// TODO: use the ServiceTestJig here
 		ns := f.Namespace.Name
@@ -4530,15 +4527,6 @@ func validateEndpointsPortsOrFail(ctx context.Context, c clientset.Interface, na
 }
 
 func restartApiserver(ctx context.Context, namespace string, cs clientset.Interface) error {
-	if framework.ProviderIs("gke") {
-		// GKE use a same-version master upgrade to teardown/recreate master.
-		v, err := cs.Discovery().ServerVersion()
-		if err != nil {
-			return err
-		}
-		return e2eproviders.MasterUpgradeGKE(ctx, namespace, v.GitVersion[1:]) // strip leading 'v'
-	}
-
 	return restartComponent(ctx, cs, kubeAPIServerLabelName, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName})
 }
 
diff --git a/test/e2e/node/crictl.go b/test/e2e/node/crictl.go
index 9988b98e3ae..67f85aaac5d 100644
--- a/test/e2e/node/crictl.go
+++ b/test/e2e/node/crictl.go
@@ -35,7 +35,7 @@ var _ = SIGDescribe("crictl", func() {
 
 	ginkgo.BeforeEach(func() {
 		// `crictl` is not available on all cloud providers.
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 	})
 
 	ginkgo.It("should be able to run crictl on the node", func(ctx context.Context) {
diff --git a/test/e2e/node/node_problem_detector.go b/test/e2e/node/node_problem_detector.go
index 19046c2002d..b946203f237 100644
--- a/test/e2e/node/node_problem_detector.go
+++ b/test/e2e/node/node_problem_detector.go
@@ -56,7 +56,7 @@ var _ = SIGDescribe("NodeProblemDetector", nodefeature.NodeProblemDetector, feat
 	ginkgo.BeforeEach(func(ctx context.Context) {
 		e2eskipper.SkipUnlessSSHKeyPresent()
 		e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		e2eskipper.SkipUnlessNodeOSDistroIs("gci", "ubuntu")
 		e2enode.WaitForTotalHealthy(ctx, f.ClientSet, time.Minute)
 	})
diff --git a/test/e2e/storage/csimock/base.go b/test/e2e/storage/csimock/base.go
index dcad86bd06a..2cf5441f8b0 100644
--- a/test/e2e/storage/csimock/base.go
+++ b/test/e2e/storage/csimock/base.go
@@ -584,7 +584,7 @@ func getStorageClass(
 
 func getDefaultPluginName() string {
 	switch {
-	case framework.ProviderIs("gke"), framework.ProviderIs("gce"):
+	case framework.ProviderIs("gce"):
 		return "kubernetes.io/gce-pd"
 	case framework.ProviderIs("aws"):
 		return "kubernetes.io/aws-ebs"
diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go
index 122a5253da6..d2e99b724f3 100644
--- a/test/e2e/storage/drivers/csi.go
+++ b/test/e2e/storage/drivers/csi.go
@@ -906,7 +906,7 @@ func (g *gcePDCSIDriver) GetDriverInfo() *storageframework.DriverInfo {
 }
 
 func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
-	e2eskipper.SkipUnlessProviderIs("gce", "gke")
+	e2eskipper.SkipUnlessProviderIs("gce")
 	if pattern.FsType == "xfs" {
 		e2eskipper.SkipUnlessNodeOSDistroIs("ubuntu", "custom")
 	}
@@ -944,11 +944,6 @@ func (g *gcePDCSIDriver) PrepareTest(ctx context.Context, f *framework.Framework
 		Framework: f,
 	}
 
-	if framework.ProviderIs("gke") {
-		framework.Logf("The csi gce-pd driver is automatically installed in GKE. Skipping driver installation.")
-		return cfg
-	}
-
 	// Check if the cluster is already running gce-pd CSI Driver
 	deploy, err := f.ClientSet.AppsV1().Deployments("gce-pd-csi-driver").Get(ctx, "csi-gce-pd-controller", metav1.GetOptions{})
 	if err == nil && deploy != nil {
diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go
index 753d4e2794f..dfd8ecc6ddf 100644
--- a/test/e2e/storage/drivers/in_tree.go
+++ b/test/e2e/storage/drivers/in_tree.go
@@ -890,7 +890,7 @@ func (g *gcePdDriver) GetDriverInfo() *storageframework.DriverInfo {
 }
 
 func (g *gcePdDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
-	e2eskipper.SkipUnlessProviderIs("gce", "gke")
+	e2eskipper.SkipUnlessProviderIs("gce")
 	for _, tag := range pattern.TestTags {
 		if tag == feature.Windows {
 			e2eskipper.SkipUnlessNodeOSDistroIs("windows")
diff --git a/test/e2e/storage/helpers.go b/test/e2e/storage/helpers.go
index 646d3480953..64a62699213 100644
--- a/test/e2e/storage/helpers.go
+++ b/test/e2e/storage/helpers.go
@@ -67,7 +67,7 @@ func newStorageClass(t testsuites.StorageClassTest, ns string, prefix string) *s
 
 func getDefaultPluginName() string {
 	switch {
-	case framework.ProviderIs("gke"), framework.ProviderIs("gce"):
+	case framework.ProviderIs("gce"):
 		return "kubernetes.io/gce-pd"
 	case framework.ProviderIs("aws"):
 		return "kubernetes.io/aws-ebs"
diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go
index a1bb3f46354..aa4340b394c 100644
--- a/test/e2e/storage/testsuites/base.go
+++ b/test/e2e/storage/testsuites/base.go
@@ -113,7 +113,7 @@ func getVolumeOpsFromMetricsForPlugin(ms testutil.Metrics, pluginName string) op
 }
 
 func getVolumeOpCounts(ctx context.Context, c clientset.Interface, config *rest.Config, pluginName string) opCounts {
-	if !framework.ProviderIs("gce", "gke", "aws") {
+	if !framework.ProviderIs("gce", "aws") {
 		return opCounts{}
 	}
 
diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go
index 7429118f63d..044da8576c3 100644
--- a/test/e2e/storage/volume_metrics.go
+++ b/test/e2e/storage/volume_metrics.go
@@ -69,7 +69,7 @@ var _ = utils.SIGDescribe(framework.WithSerial(), "Volume metrics", func() {
 		// The tests below make various assumptions about the cluster
 		// and the underlying storage driver and therefore don't pass
 		// with other kinds of clusters and drivers.
-		e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
+		e2eskipper.SkipUnlessProviderIs("gce", "aws")
 		e2epv.SkipIfNoDefaultStorageClass(ctx, c)
 		defaultScName, err = e2epv.GetDefaultStorageClassName(ctx, c)
 		framework.ExpectNoError(err)
diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go
index 2cacbb87cc9..b2b40ec9060 100644
--- a/test/e2e/storage/volume_provisioning.go
+++ b/test/e2e/storage/volume_provisioning.go
@@ -77,7 +77,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			// GCE/GKE
 			{
 				Name:           "SSD PD on GCE/GKE",
-				CloudProviders: []string{"gce", "gke"},
+				CloudProviders: []string{"gce"},
 				Timeouts:       f.Timeouts,
 				Provisioner:    "kubernetes.io/gce-pd",
 				Parameters: map[string]string{
@@ -92,8 +92,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 				},
 			},
 			{
-				Name:           "HDD PD on GCE/GKE",
-				CloudProviders: []string{"gce", "gke"},
+				Name:           "HDD PD on GCE",
+				CloudProviders: []string{"gce"},
 				Timeouts:       f.Timeouts,
 				Provisioner:    "kubernetes.io/gce-pd",
 				Parameters: map[string]string{
@@ -273,12 +273,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 	})
 
 	ginkgo.It("should provision storage with non-default reclaim policy Retain", func(ctx context.Context) {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 
 		test := testsuites.StorageClassTest{
 			Client:         c,
-			Name:           "HDD PD on GCE/GKE",
-			CloudProviders: []string{"gce", "gke"},
+			Name:           "HDD PD on GCE",
+			CloudProviders: []string{"gce"},
 			Provisioner:    "kubernetes.io/gce-pd",
 			Timeouts:       f.Timeouts,
 			Parameters: map[string]string{
@@ -322,7 +322,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 		// not being deleted.
 		// NOTE: Polls until no PVs are detected, times out at 5 minutes.
 
-		e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
+		e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "vsphere", "azure")
 
 		const raceAttempts int = 100
 		var residualPVs []*v1.PersistentVolume
@@ -368,7 +368,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 		// volume and changes the reclaim policy to Delete.
 		// PV controller should delete the PV even though the underlying volume
 		// is already deleted.
-		e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
+		e2eskipper.SkipUnlessProviderIs("gce", "aws")
 		ginkgo.By("creating PD")
 		diskName, err := e2epv.CreatePDWithRetry(ctx)
 		framework.ExpectNoError(err)
@@ -400,7 +400,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 					VolumeID: diskName,
 				},
 			}
-		case "gce", "gke":
+		case "gce":
 			pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
 				GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
 					PDName: diskName,
@@ -497,7 +497,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 
 	ginkgo.Describe("DynamicProvisioner Default", func() {
 		f.It("should create and delete default persistent volumes", f.WithSlow(), func(ctx context.Context) {
-			e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
+			e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "vsphere", "azure")
 			e2epv.SkipIfNoDefaultStorageClass(ctx, c)
 
 			ginkgo.By("creating a claim with no annotation")
@@ -521,7 +521,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 
 		// Modifying the default storage class can be disruptive to other tests that depend on it
 		f.It("should be disabled by changing the default annotation", f.WithSerial(), f.WithDisruptive(), func(ctx context.Context) {
-			e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
+			e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "vsphere", "azure")
 			e2epv.SkipIfNoDefaultStorageClass(ctx, c)
 
 			scName, scErr := e2epv.GetDefaultStorageClassName(ctx, c)
@@ -558,7 +558,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 
 		// Modifying the default storage class can be disruptive to other tests that depend on it
 		f.It("should be disabled by removing the default annotation", f.WithSerial(), f.WithDisruptive(), func(ctx context.Context) {
-			e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
+			e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "vsphere", "azure")
 			e2epv.SkipIfNoDefaultStorageClass(ctx, c)
 
 			scName, scErr := e2epv.GetDefaultStorageClassName(ctx, c)
diff --git a/test/e2e/upgrades/storage/persistent_volumes.go b/test/e2e/upgrades/storage/persistent_volumes.go
index 0ee82a19ce0..727a3fef1be 100644
--- a/test/e2e/upgrades/storage/persistent_volumes.go
+++ b/test/e2e/upgrades/storage/persistent_volumes.go
@@ -50,7 +50,7 @@ const (
 func (t *PersistentVolumeUpgradeTest) Setup(ctx context.Context, f *framework.Framework) {
 	var err error
 
-	e2eskipper.SkipUnlessProviderIs("gce", "gke", "openstack", "aws", "vsphere", "azure")
+	e2eskipper.SkipUnlessProviderIs("gce", "openstack", "aws", "vsphere", "azure")
 
 	ns := f.Namespace.Name
 
diff --git a/test/e2e/upgrades/storage/volume_mode.go b/test/e2e/upgrades/storage/volume_mode.go
index 8fe978740e4..ba7f6b1d4af 100644
--- a/test/e2e/upgrades/storage/volume_mode.go
+++ b/test/e2e/upgrades/storage/volume_mode.go
@@ -52,7 +52,7 @@ func (VolumeModeDowngradeTest) Name() string {
 
 // Skip returns true when this test can be skipped.
 func (t *VolumeModeDowngradeTest) Skip(upgCtx upgrades.UpgradeContext) bool {
-	if !framework.ProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") {
+	if !framework.ProviderIs("openstack", "gce", "aws", "vsphere", "azure") {
 		return true
 	}