Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #77034 from atoato88/fix-golint-e2e-framework-providers-gce
Fix golint failures of e2e/framework/providers/gce/recreate_node.go
Commit: d2b8f3e145
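
The change below is mechanical: the ginkgo and gomega dot-imports are dropped in favor of a qualified ginkgo import, and bare gomega assertions are replaced with framework.ExpectNoError. A minimal sketch of the target style follows; it is illustrative only, and expectNoError here is a local stand-in for the e2e framework helper, not the real framework code.

```go
// Sketch of the post-fix style: qualified ginkgo calls, no dot-imports.
// expectNoError is a hypothetical stand-in for framework.ExpectNoError.
package recreatesketch

import (
	"fmt"

	"github.com/onsi/ginkgo"
)

// expectNoError fails the running spec when err is non-nil.
func expectNoError(err error) {
	if err != nil {
		ginkgo.Fail(fmt.Sprintf("unexpected error: %v", err))
	}
}

var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
	ginkgo.BeforeEach(func() {
		// Previously: Expect(err).NotTo(HaveOccurred()) via the gomega dot-import.
		var err error
		expectNoError(err)
	})

	ginkgo.It("recreate nodes and ensure they function upon restart", func() {
		// test body elided
	})
})
```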
@@ -609,7 +609,6 @@ test/e2e/autoscaling
 test/e2e/chaosmonkey
 test/e2e/common
 test/e2e/framework
-test/e2e/framework/providers/gce
 test/e2e/lifecycle/bootstrap
 test/e2e/network
 test/e2e/node
@@ -25,7 +25,6 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/utils:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
-        "//vendor/github.com/onsi/gomega:go_default_library",
         "//vendor/google.golang.org/api/compute/v1:go_default_library",
         "//vendor/google.golang.org/api/googleapi:go_default_library",
         "//vendor/k8s.io/utils/exec:go_default_library",
@@ -20,8 +20,7 @@ import (
 	"fmt"
 	"time"
 
-	. "github.com/onsi/ginkgo"
-	. "github.com/onsi/gomega"
+	"github.com/onsi/ginkgo"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
@@ -39,19 +38,19 @@ func nodeNames(nodes []v1.Node) []string {
 	return result
 }
 
-var _ = Describe("Recreate [Feature:Recreate]", func() {
+var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() {
 	f := framework.NewDefaultFramework("recreate")
 	var originalNodes []v1.Node
 	var originalPodNames []string
 	var ps *testutils.PodStore
 	systemNamespace := metav1.NamespaceSystem
-	BeforeEach(func() {
+	ginkgo.BeforeEach(func() {
 		framework.SkipUnlessProviderIs("gce", "gke")
 		var err error
 		numNodes, err := framework.NumberOfRegisteredNodes(f.ClientSet)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)
 		originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
-		Expect(err).NotTo(HaveOccurred())
+		framework.ExpectNoError(err)
 
 		framework.Logf("Got the following nodes before recreate %v", nodeNames(originalNodes))
 
@@ -69,13 +68,13 @@ var _ = Describe("Recreate [Feature:Recreate]", func() {
 
 	})
 
-	AfterEach(func() {
-		if CurrentGinkgoTestDescription().Failed {
+	ginkgo.AfterEach(func() {
+		if ginkgo.CurrentGinkgoTestDescription().Failed {
 			// Make sure that addon/system pods are running, so dump
 			// events for the kube-system namespace on failures
-			By(fmt.Sprintf("Collecting events from namespace %q.", systemNamespace))
+			ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", systemNamespace))
 			events, err := f.ClientSet.CoreV1().Events(systemNamespace).List(metav1.ListOptions{})
-			Expect(err).NotTo(HaveOccurred())
+			framework.ExpectNoError(err)
 
 			for _, e := range events.Items {
 				framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
@@ -86,7 +85,7 @@ var _ = Describe("Recreate [Feature:Recreate]", func() {
 		}
 	})
 
-	It("recreate nodes and ensure they function upon restart", func() {
+	ginkgo.It("recreate nodes and ensure they function upon restart", func() {
 		testRecreate(f.ClientSet, ps, systemNamespace, originalNodes, originalPodNames)
 	})
 })
@@ -104,7 +103,7 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace
 	}
 
 	nodesAfter, err := framework.CheckNodesReady(c, len(nodes), framework.RestartNodeReadyAgainTimeout)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	framework.Logf("Got the following nodes after recreate: %v", nodeNames(nodesAfter))
 
 	if len(nodes) != len(nodesAfter) {
@@ -115,7 +114,7 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace
 	// Make sure the pods from before node recreation are running/completed
 	podCheckStart := time.Now()
 	podNamesAfter, err := framework.WaitForNRestartablePods(ps, len(podNames), framework.RestartPodReadyAgainTimeout)
-	Expect(err).NotTo(HaveOccurred())
+	framework.ExpectNoError(err)
 	remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
 	if !framework.CheckPodsRunningReadyOrSucceeded(c, systemNamespace, podNamesAfter, remaining) {
 		framework.Failf("At least one pod wasn't running and ready after the restart.")
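
With every direct gomega assertion replaced by framework.ExpectNoError, the spec file no longer imports gomega at all, which is what allows the //vendor/github.com/onsi/gomega dependency to come out of the BUILD rule above. A helper of this kind keeps the gomega usage in one place; the sketch below approximates that shape under stated assumptions and is not the framework's actual source.

```go
// Hedged sketch only: an approximation of a helper shaped like
// framework.ExpectNoError, not the real e2e framework implementation.
package frameworksketch

import "github.com/onsi/gomega"

// expectNoError fails the current spec if err is non-nil. The offset of 1
// makes gomega attribute the failure to the caller rather than this helper.
func expectNoError(err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1, err).NotTo(gomega.HaveOccurred(), explain...)
}
```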