Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-05 18:24:07 +00:00)
remove gke from e2e

commit aaac13fc6a
parent ab54e442c6
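Every hunk below makes the same kind of change: GKE is dropped from the provider gates in the e2e suites, and GKE-only helpers that become dead code are deleted. For context, a minimal sketch of the gating pattern being narrowed (the spec name and body here are illustrative only, not taken from the diff):

	// Sketch only: how provider gating works in the e2e framework.
	// SkipUnlessProviderIs skips the running spec unless the configured
	// test provider matches one of the given names; this commit removes
	// "gke" from such argument lists throughout the tree.
	ginkgo.It("runs only on supported providers", func(ctx context.Context) {
		e2eskipper.SkipUnlessProviderIs("gce") // previously: "gce", "gke"
		// ... test body ...
	})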
@@ -161,7 +161,7 @@ var _ = SIGDescribe("Deployment", func() {
 		testProportionalScalingDeployment(ctx, f)
 	})
 	ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func(ctx context.Context) {
-		e2eskipper.SkipUnlessProviderIs("aws", "azure", "gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("aws", "azure", "gce")
 		e2eskipper.SkipIfIPv6("aws")
 		nodes, err := e2enode.GetReadySchedulableNodes(ctx, c)
 		framework.ExpectNoError(err)
@@ -73,7 +73,7 @@ var _ = SIGDescribe("ReplicationController", func() {
 
 	ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) {
 		// requires private images
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate)
 		TestReplicationControllerServeImageOrFail(ctx, f, "private", privateimage.GetE2EImage())
 	})
@@ -115,7 +115,7 @@ var _ = SIGDescribe("ReplicaSet", func() {
 
	ginkgo.It("should serve a basic image on each replica with a private image", func(ctx context.Context) {
 		// requires private images
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		privateimage := imageutils.GetConfig(imageutils.AgnhostPrivate)
 		testReplicaSetServeImageOrFail(ctx, f, "private", privateimage.GetE2EImage())
 	})
@@ -60,7 +60,7 @@ var _ = SIGDescribe("Upgrade", feature.Upgrade, func() {
 	testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests)
 
 	ginkgo.BeforeEach(func() {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 	})
 
 	// Create the frameworks here because we can only create them
@@ -103,7 +103,7 @@ var _ = SIGDescribe("Downgrade", feature.Downgrade, func() {
 	testFrameworks := upgrades.CreateUpgradeFrameworks(upgradeTests)
 
 	ginkgo.BeforeEach(func() {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 	})
 
 	ginkgo.Describe("cluster downgrade", func() {
@@ -75,8 +75,6 @@ func controlPlaneUpgrade(ctx context.Context, f *framework.Framework, v string,
 	switch framework.TestContext.Provider {
 	case "gce":
 		return controlPlaneUpgradeGCE(v, extraEnvs)
-	case "gke":
-		return e2eproviders.MasterUpgradeGKE(ctx, f.Namespace.Name, v)
 	default:
 		return fmt.Errorf("controlPlaneUpgrade() is not implemented for provider %s", framework.TestContext.Provider)
 	}
@@ -151,8 +149,6 @@ func nodeUpgrade(ctx context.Context, f *framework.Framework, v string, img stri
 	switch framework.TestContext.Provider {
 	case "gce":
 		err = nodeUpgradeGCE(v, img, extraEnvs)
-	case "gke":
-		err = nodeUpgradeGKE(ctx, f.Namespace.Name, v, img)
 	default:
 		err = fmt.Errorf("nodeUpgrade() is not implemented for provider %s", framework.TestContext.Provider)
 	}
@@ -175,59 +171,6 @@ func nodeUpgradeGCE(rawV, img string, extraEnvs []string) error {
 	return err
 }
 
-func nodeUpgradeGKE(ctx context.Context, namespace string, v string, img string) error {
-	framework.Logf("Upgrading nodes to version %q and image %q", v, img)
-	nps, err := nodePoolsGKE()
-	if err != nil {
-		return err
-	}
-	framework.Logf("Found node pools %v", nps)
-	for _, np := range nps {
-		args := []string{
-			"container",
-			"clusters",
-			fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
-			e2eproviders.LocationParamGKE(),
-			"upgrade",
-			framework.TestContext.CloudConfig.Cluster,
-			fmt.Sprintf("--node-pool=%s", np),
-			fmt.Sprintf("--cluster-version=%s", v),
-			"--quiet",
-		}
-		if len(img) > 0 {
-			args = append(args, fmt.Sprintf("--image-type=%s", img))
-		}
-		_, _, err = framework.RunCmd("gcloud", framework.AppendContainerCommandGroupIfNeeded(args)...)
-
-		if err != nil {
-			return err
-		}
-
-		e2enode.WaitForSSHTunnels(ctx, namespace)
-	}
-	return nil
-}
-
-func nodePoolsGKE() ([]string, error) {
-	args := []string{
-		"container",
-		"node-pools",
-		fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
-		e2eproviders.LocationParamGKE(),
-		"list",
-		fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster),
-		"--format=get(name)",
-	}
-	stdout, _, err := framework.RunCmd("gcloud", framework.AppendContainerCommandGroupIfNeeded(args)...)
-	if err != nil {
-		return nil, err
-	}
-	if len(strings.TrimSpace(stdout)) == 0 {
-		return []string{}, nil
-	}
-	return strings.Fields(stdout), nil
-}
-
 func waitForNodesReadyAfterUpgrade(ctx context.Context, f *framework.Framework) error {
 	// Wait for it to complete and validate nodes are healthy.
 	//
@@ -36,7 +36,7 @@ var _ = SIGDescribe("GKE node pools", feature.GKENodePool, func() {
 	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
 
 	ginkgo.BeforeEach(func() {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 	})
 
 	f.It("should create a cluster with multiple node pools", feature.GKENodePool, func(ctx context.Context) {
@@ -44,7 +44,7 @@ var _ = SIGDescribe("Ports Security Check", feature.KubeletSecurity, func() {
 	var nodeName string
 
 	ginkgo.BeforeEach(func(ctx context.Context) {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		var err error
 		node, err = e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
 		framework.ExpectNoError(err)
@@ -44,7 +44,7 @@ var _ = SIGDescribe(framework.WithDisruptive(), "NodeLease", func() {
 	var group string
 
 	ginkgo.BeforeEach(func(ctx context.Context) {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		c = f.ClientSet
 		ns = f.Namespace.Name
 		systemPods, err := e2epod.GetPodsInNamespace(ctx, c, ns, map[string]string{})
@@ -62,7 +62,7 @@ var _ = SIGDescribe(framework.WithDisruptive(), "NodeLease", func() {
 
 		ginkgo.BeforeEach(func() {
 			skipped = true
-			e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
+			e2eskipper.SkipUnlessProviderIs("gce", "aws")
 			e2eskipper.SkipUnlessNodeCountIsAtLeast(2)
 			skipped = false
 		})
@@ -76,18 +76,7 @@ var _ = SIGDescribe(framework.WithDisruptive(), "NodeLease", func() {
 			if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 				framework.Failf("Couldn't restore the original node instance group size: %v", err)
 			}
-			// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
-			// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
-			// Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs
-			// right before a test that tries to get logs, for example, we may get unlucky and try to use a
-			// closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies
-			// being closed, so we sleep.
-			//
-			// TODO(cjcullen) reduce this sleep (#19314)
-			if framework.ProviderIs("gke") {
-				ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
-				time.Sleep(5 * time.Minute)
-			}
 			if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 				framework.Failf("Couldn't restore the original node instance group size: %v", err)
 			}
 
@@ -79,17 +79,6 @@ var _ = SIGDescribe("Reboot", framework.WithDisruptive(), feature.Reboot, func()
 				framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
 			}
 		}
-		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
-		// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated. Most tests
-		// make use of some proxy feature to verify functionality. So, if a reboot test runs right before a test
-		// that tries to get logs, for example, we may get unlucky and try to use a closed tunnel to a node that
-		// was recently rebooted. There's no good way to framework.Poll for proxies being closed, so we sleep.
-		//
-		// TODO(cjcullen) reduce this sleep (#19314)
-		if framework.ProviderIs("gke") {
-			ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
-			time.Sleep(5 * time.Minute)
-		}
 	})
 
 	f = framework.NewDefaultFramework("reboot")
@@ -70,25 +70,14 @@ var _ = SIGDescribe("Nodes", framework.WithDisruptive(), func() {
 	var originalNodeCount int32
 
 	ginkgo.BeforeEach(func() {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		e2eskipper.SkipUnlessNodeCountIsAtLeast(2)
 		ginkgo.DeferCleanup(func(ctx context.Context) {
 			ginkgo.By("restoring the original node instance group size")
 			if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 				framework.Failf("Couldn't restore the original node instance group size: %v", err)
 			}
-			// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
-			// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
-			// Most tests make use of some proxy feature to verify functionality. So, if a reboot test runs
-			// right before a test that tries to get logs, for example, we may get unlucky and try to use a
-			// closed tunnel to a node that was recently rebooted. There's no good way to framework.Poll for proxies
-			// being closed, so we sleep.
-			//
-			// TODO(cjcullen) reduce this sleep (#19314)
-			if framework.ProviderIs("gke") {
-				ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
-				time.Sleep(5 * time.Minute)
-			}
 			if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 				framework.Failf("Couldn't restore the original node instance group size: %v", err)
 			}
 
@@ -55,7 +55,7 @@ var _ = SIGDescribe("Restart", framework.WithDisruptive(), func() {
 	ginkgo.BeforeEach(func(ctx context.Context) {
 		// This test requires the ability to restart all nodes, so the provider
 		// check must be identical to that call.
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		var err error
 		ps, err = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
 		framework.ExpectNoError(err)
@@ -42,7 +42,7 @@ var _ = SIGDescribe(feature.CloudProvider, framework.WithDisruptive(), "Nodes",
 	ginkgo.BeforeEach(func() {
 		// Only supported in AWS/GCE because those are the only cloud providers
 		// where E2E test are currently running.
-		e2eskipper.SkipUnlessProviderIs("aws", "gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("aws", "gce")
 		c = f.ClientSet
 	})
 
@@ -178,7 +178,7 @@ func setupSuite(ctx context.Context) {
 	// Run only on Ginkgo node 1
 
 	switch framework.TestContext.Provider {
-	case "gce", "gke":
+	case "gce":
 		logClusterImageSources()
 	}
 
@@ -66,7 +66,7 @@ func GetSigner(provider string) (ssh.Signer, error) {
 	// support.
 	keyfile := ""
 	switch provider {
-	case "gce", "gke", "kubemark":
+	case "gce", "kubemark":
 		keyfile = os.Getenv("GCE_SSH_KEY")
 		if keyfile == "" {
 			keyfile = os.Getenv("GCE_SSH_PRIVATE_KEY_FILE")
@@ -135,7 +135,7 @@ const (
 
 var (
 	// ProvidersWithSSH are those providers where each node is accessible with SSH
-	ProvidersWithSSH = []string{"gce", "gke", "aws", "local", "azure"}
+	ProvidersWithSSH = []string{"gce", "aws", "local", "azure"}
 )
 
 // RunID is a unique identifier of the e2e run.
@@ -746,7 +746,7 @@ func GetControlPlaneAddresses(ctx context.Context, c clientset.Interface) []stri
 
 	ips := sets.NewString()
 	switch TestContext.Provider {
-	case "gce", "gke":
+	case "gce":
 		for _, ip := range externalIPs {
 			ips.Insert(ip)
 		}
@@ -556,11 +556,6 @@ var _ = SIGDescribe("Kubectl client", func() {
 		})
 
 		ginkgo.It("should handle in-cluster config", func(ctx context.Context) {
-			// This test does not work for dynamically linked kubectl binaries; only statically linked ones. The
-			// problem happens when the kubectl binary is copied to a pod in the cluster. For dynamically linked
-			// binaries, the necessary libraries are not also copied. For this reason, the test can not be
-			// guaranteed to work with GKE, which sometimes run tests using a dynamically linked kubectl.
-			e2eskipper.SkipIfProviderIs("gke")
 			// TODO: Find a way to download and copy the appropriate kubectl binary, or maybe a multi-arch kubectl image
 			// for now this only works on amd64
 			e2eskipper.SkipUnlessNodeOSArchIs("amd64")
@@ -78,7 +78,7 @@ var _ = common.SIGDescribe("DNS", func() {
 
 	// Added due to #8512. This is critical for GCE and GKE deployments.
 	ginkgo.It("should provide DNS for the cluster [Provider:GCE]", func(ctx context.Context) {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 
 		namesToResolve := []string{"google.com"}
 		// Windows containers do not have a route to the GCE metadata server by default.
@@ -65,7 +65,6 @@ import (
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
-	e2eproviders "k8s.io/kubernetes/test/e2e/framework/providers"
 	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
@@ -1210,9 +1209,7 @@ var _ = common.SIGDescribe("Services", func() {
 
 	f.It("should work after restarting apiserver", f.WithDisruptive(), func(ctx context.Context) {
 
-		if !framework.ProviderIs("gke") {
-			e2eskipper.SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(ctx, kubeAPIServerLabelName, cs, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName})
-		}
+		e2eskipper.SkipUnlessComponentRunsAsPodsAndClientCanDeleteThem(ctx, kubeAPIServerLabelName, cs, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName})
 
 		// TODO: use the ServiceTestJig here
 		ns := f.Namespace.Name
@@ -4530,15 +4527,6 @@ func validateEndpointsPortsOrFail(ctx context.Context, c clientset.Interface, na
 }
 
 func restartApiserver(ctx context.Context, namespace string, cs clientset.Interface) error {
-	if framework.ProviderIs("gke") {
-		// GKE use a same-version master upgrade to teardown/recreate master.
-		v, err := cs.Discovery().ServerVersion()
-		if err != nil {
-			return err
-		}
-		return e2eproviders.MasterUpgradeGKE(ctx, namespace, v.GitVersion[1:]) // strip leading 'v'
-	}
-
 	return restartComponent(ctx, cs, kubeAPIServerLabelName, metav1.NamespaceSystem, map[string]string{clusterComponentKey: kubeAPIServerLabelName})
 }
 
@@ -35,7 +35,7 @@ var _ = SIGDescribe("crictl", func() {
 
 	ginkgo.BeforeEach(func() {
 		// `crictl` is not available on all cloud providers.
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 	})
 
 	ginkgo.It("should be able to run crictl on the node", func(ctx context.Context) {
@@ -56,7 +56,7 @@ var _ = SIGDescribe("NodeProblemDetector", nodefeature.NodeProblemDetector, feat
 	ginkgo.BeforeEach(func(ctx context.Context) {
 		e2eskipper.SkipUnlessSSHKeyPresent()
 		e2eskipper.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 		e2eskipper.SkipUnlessNodeOSDistroIs("gci", "ubuntu")
 		e2enode.WaitForTotalHealthy(ctx, f.ClientSet, time.Minute)
 	})
@@ -584,7 +584,7 @@ func getStorageClass(
 
 func getDefaultPluginName() string {
 	switch {
-	case framework.ProviderIs("gke"), framework.ProviderIs("gce"):
+	case framework.ProviderIs("gce"):
 		return "kubernetes.io/gce-pd"
 	case framework.ProviderIs("aws"):
 		return "kubernetes.io/aws-ebs"
@@ -906,7 +906,7 @@ func (g *gcePDCSIDriver) GetDriverInfo() *storageframework.DriverInfo {
 }
 
 func (g *gcePDCSIDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
-	e2eskipper.SkipUnlessProviderIs("gce", "gke")
+	e2eskipper.SkipUnlessProviderIs("gce")
 	if pattern.FsType == "xfs" {
 		e2eskipper.SkipUnlessNodeOSDistroIs("ubuntu", "custom")
 	}
@@ -944,11 +944,6 @@ func (g *gcePDCSIDriver) PrepareTest(ctx context.Context, f *framework.Framework
 		Framework: f,
 	}
 
-	if framework.ProviderIs("gke") {
-		framework.Logf("The csi gce-pd driver is automatically installed in GKE. Skipping driver installation.")
-		return cfg
-	}
-
 	// Check if the cluster is already running gce-pd CSI Driver
 	deploy, err := f.ClientSet.AppsV1().Deployments("gce-pd-csi-driver").Get(ctx, "csi-gce-pd-controller", metav1.GetOptions{})
 	if err == nil && deploy != nil {
@@ -890,7 +890,7 @@ func (g *gcePdDriver) GetDriverInfo() *storageframework.DriverInfo {
 }
 
 func (g *gcePdDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
-	e2eskipper.SkipUnlessProviderIs("gce", "gke")
+	e2eskipper.SkipUnlessProviderIs("gce")
 	for _, tag := range pattern.TestTags {
 		if tag == feature.Windows {
 			e2eskipper.SkipUnlessNodeOSDistroIs("windows")
@@ -67,7 +67,7 @@ func newStorageClass(t testsuites.StorageClassTest, ns string, prefix string) *s
 
 func getDefaultPluginName() string {
 	switch {
-	case framework.ProviderIs("gke"), framework.ProviderIs("gce"):
+	case framework.ProviderIs("gce"):
 		return "kubernetes.io/gce-pd"
 	case framework.ProviderIs("aws"):
 		return "kubernetes.io/aws-ebs"
@@ -113,7 +113,7 @@ func getVolumeOpsFromMetricsForPlugin(ms testutil.Metrics, pluginName string) op
 }
 
 func getVolumeOpCounts(ctx context.Context, c clientset.Interface, config *rest.Config, pluginName string) opCounts {
-	if !framework.ProviderIs("gce", "gke", "aws") {
+	if !framework.ProviderIs("gce", "aws") {
 		return opCounts{}
 	}
 
@@ -69,7 +69,7 @@ var _ = utils.SIGDescribe(framework.WithSerial(), "Volume metrics", func() {
 		// The tests below make various assumptions about the cluster
 		// and the underlying storage driver and therefore don't pass
 		// with other kinds of clusters and drivers.
-		e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
+		e2eskipper.SkipUnlessProviderIs("gce", "aws")
 		e2epv.SkipIfNoDefaultStorageClass(ctx, c)
 		defaultScName, err = e2epv.GetDefaultStorageClassName(ctx, c)
 		framework.ExpectNoError(err)
@@ -77,7 +77,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 		// GCE/GKE
 		{
 			Name:           "SSD PD on GCE/GKE",
-			CloudProviders: []string{"gce", "gke"},
+			CloudProviders: []string{"gce"},
 			Timeouts:       f.Timeouts,
 			Provisioner:    "kubernetes.io/gce-pd",
 			Parameters: map[string]string{
@@ -92,8 +92,8 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 			},
 		},
 		{
-			Name:           "HDD PD on GCE/GKE",
-			CloudProviders: []string{"gce", "gke"},
+			Name:           "HDD PD on GCE",
+			CloudProviders: []string{"gce"},
 			Timeouts:       f.Timeouts,
 			Provisioner:    "kubernetes.io/gce-pd",
 			Parameters: map[string]string{
@@ -273,12 +273,12 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 	})
 
 	ginkgo.It("should provision storage with non-default reclaim policy Retain", func(ctx context.Context) {
-		e2eskipper.SkipUnlessProviderIs("gce", "gke")
+		e2eskipper.SkipUnlessProviderIs("gce")
 
 		test := testsuites.StorageClassTest{
 			Client:         c,
-			Name:           "HDD PD on GCE/GKE",
-			CloudProviders: []string{"gce", "gke"},
+			Name:           "HDD PD on GCE",
+			CloudProviders: []string{"gce"},
 			Provisioner:    "kubernetes.io/gce-pd",
 			Timeouts:       f.Timeouts,
 			Parameters: map[string]string{
@@ -322,7 +322,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 		// not being deleted.
 		// NOTE: Polls until no PVs are detected, times out at 5 minutes.
 
-		e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
+		e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "vsphere", "azure")
 
 		const raceAttempts int = 100
 		var residualPVs []*v1.PersistentVolume
@@ -368,7 +368,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 		// volume and changes the reclaim policy to Delete.
 		// PV controller should delete the PV even though the underlying volume
 		// is already deleted.
-		e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws")
+		e2eskipper.SkipUnlessProviderIs("gce", "aws")
 		ginkgo.By("creating PD")
 		diskName, err := e2epv.CreatePDWithRetry(ctx)
 		framework.ExpectNoError(err)
@@ -400,7 +400,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 					VolumeID: diskName,
 				},
 			}
-		case "gce", "gke":
+		case "gce":
 			pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
 				GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
 					PDName: diskName,
@@ -497,7 +497,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 
 	ginkgo.Describe("DynamicProvisioner Default", func() {
 		f.It("should create and delete default persistent volumes", f.WithSlow(), func(ctx context.Context) {
-			e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
+			e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "vsphere", "azure")
 			e2epv.SkipIfNoDefaultStorageClass(ctx, c)
 
 			ginkgo.By("creating a claim with no annotation")
@@ -521,7 +521,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 
 		// Modifying the default storage class can be disruptive to other tests that depend on it
 		f.It("should be disabled by changing the default annotation", f.WithSerial(), f.WithDisruptive(), func(ctx context.Context) {
-			e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
+			e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "vsphere", "azure")
 			e2epv.SkipIfNoDefaultStorageClass(ctx, c)
 
 			scName, scErr := e2epv.GetDefaultStorageClassName(ctx, c)
@@ -558,7 +558,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
 
 		// Modifying the default storage class can be disruptive to other tests that depend on it
 		f.It("should be disabled by removing the default annotation", f.WithSerial(), f.WithDisruptive(), func(ctx context.Context) {
-			e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
+			e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "vsphere", "azure")
 			e2epv.SkipIfNoDefaultStorageClass(ctx, c)
 
 			scName, scErr := e2epv.GetDefaultStorageClassName(ctx, c)
@@ -50,7 +50,7 @@ const (
 func (t *PersistentVolumeUpgradeTest) Setup(ctx context.Context, f *framework.Framework) {
 
 	var err error
-	e2eskipper.SkipUnlessProviderIs("gce", "gke", "openstack", "aws", "vsphere", "azure")
+	e2eskipper.SkipUnlessProviderIs("gce", "openstack", "aws", "vsphere", "azure")
 
 	ns := f.Namespace.Name
 
@@ -52,7 +52,7 @@ func (VolumeModeDowngradeTest) Name() string {
 
 // Skip returns true when this test can be skipped.
 func (t *VolumeModeDowngradeTest) Skip(upgCtx upgrades.UpgradeContext) bool {
-	if !framework.ProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") {
+	if !framework.ProviderIs("openstack", "gce", "aws", "vsphere", "azure") {
 		return true
 	}
 