From b26b6f0d5c11d7c205d6f002bb1fee9ed3d639e2 Mon Sep 17 00:00:00 2001
From: Jiatong Wang
Date: Tue, 7 May 2019 00:21:44 -0700
Subject: [PATCH] Modify e2e/lifecycle tests to import e2elog.Logf

Signed-off-by: Jiatong Wang
---
 test/e2e/lifecycle/BUILD           |  1 +
 test/e2e/lifecycle/addon_update.go |  7 ++++---
 test/e2e/lifecycle/ha_master.go    |  9 +++++----
 test/e2e/lifecycle/node_lease.go   |  3 ++-
 test/e2e/lifecycle/reboot.go       | 29 +++++++++++++++--------------
 test/e2e/lifecycle/restart.go      |  5 +++--
 6 files changed, 30 insertions(+), 24 deletions(-)

diff --git a/test/e2e/lifecycle/BUILD b/test/e2e/lifecycle/BUILD
index d30c2d39189..ddb52a76fcc 100644
--- a/test/e2e/lifecycle/BUILD
+++ b/test/e2e/lifecycle/BUILD
@@ -35,6 +35,7 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/ginkgowrapper:go_default_library",
         "//test/e2e/framework/lifecycle:go_default_library",
+        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/upgrades:go_default_library",
         "//test/e2e/upgrades/apps:go_default_library",
         "//test/e2e/upgrades/storage:go_default_library",
diff --git a/test/e2e/lifecycle/addon_update.go b/test/e2e/lifecycle/addon_update.go
index a8fb164820d..b52fcc32cdd 100644
--- a/test/e2e/lifecycle/addon_update.go
+++ b/test/e2e/lifecycle/addon_update.go
@@ -29,6 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
@@ -297,7 +298,7 @@ var _ = SIGDescribe("Addon update", func() {
 		sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonEnsureExists, destinationDir, svcAddonEnsureExists))
 		// Delete the "ensure exist class" addon at the end.
 		defer func() {
-			framework.Logf("Cleaning up ensure exist class addon.")
+			e2elog.Logf("Cleaning up ensure exist class addon.")
 			gomega.Expect(f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)).NotTo(gomega.HaveOccurred())
 		}()

@@ -389,7 +390,7 @@ func sshExecAndVerify(client *ssh.Client, cmd string) {
 }

 func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
-	framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
+	e2elog.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
 	session, err := client.NewSession()
 	if err != nil {
 		return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err)
@@ -421,7 +422,7 @@ func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
 }

 func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.FileMode) error {
-	framework.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
+	e2elog.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
 	session, err := sshClient.NewSession()
 	if err != nil {
 		return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err)
diff --git a/test/e2e/lifecycle/ha_master.go b/test/e2e/lifecycle/ha_master.go
index c7588e9b41c..2789917e498 100644
--- a/test/e2e/lifecycle/ha_master.go
+++ b/test/e2e/lifecycle/ha_master.go
@@ -28,10 +28,11 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )

 func addMasterReplica(zone string) error {
-	framework.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
+	e2elog.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "true", "false")
 	if err != nil {
 		return err
@@ -40,7 +41,7 @@ func addMasterReplica(zone string) error {
 }

 func removeMasterReplica(zone string) error {
-	framework.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
+	e2elog.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "false", "false")
 	if err != nil {
 		return err
@@ -49,7 +50,7 @@ func removeMasterReplica(zone string) error {
 }

 func addWorkerNodes(zone string) error {
-	framework.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
+	e2elog.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "false", "true")
 	if err != nil {
 		return err
@@ -58,7 +59,7 @@ func addWorkerNodes(zone string) error {
 }

 func removeWorkerNodes(zone string) error {
-	framework.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
+	e2elog.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "true", "true")
 	if err != nil {
 		return err
diff --git a/test/e2e/lifecycle/node_lease.go b/test/e2e/lifecycle/node_lease.go
index 9e8c0bfbda5..a5ae9d5cf6a 100644
--- a/test/e2e/lifecycle/node_lease.go
+++ b/test/e2e/lifecycle/node_lease.go
@@ -25,6 +25,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
@@ -108,7 +109,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 		pass := true
 		for _, node := range originalNodes.Items {
 			if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
-				framework.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
+				e2elog.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
 				pass = false
 			}
 		}
diff --git a/test/e2e/lifecycle/reboot.go b/test/e2e/lifecycle/reboot.go
index 57ab9ff03a5..c7bc74f56fc 100644
--- a/test/e2e/lifecycle/reboot.go
+++ b/test/e2e/lifecycle/reboot.go
@@ -22,7 +22,7 @@ import (
 	"sync"
 	"time"

-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
@@ -30,6 +30,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	testutils "k8s.io/kubernetes/test/utils"

 	"github.com/onsi/ginkgo"
@@ -71,7 +72,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 			gomega.Expect(err).NotTo(gomega.HaveOccurred())

 			for _, e := range events.Items {
-				framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
+				e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
 			}
 		}
 		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
@@ -135,7 +136,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 	nodelist := framework.GetReadySchedulableNodesOrDie(c)
 	if hook != nil {
 		defer func() {
-			framework.Logf("Executing termination hook on nodes")
+			e2elog.Logf("Executing termination hook on nodes")
 			hook(framework.TestContext.Provider, nodelist)
 		}()
 	}
@@ -162,7 +163,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 		for ix := range nodelist.Items {
 			n := nodelist.Items[ix]
 			if !result[ix] {
-				framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
+				e2elog.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
 			}
 		}
 		framework.Failf("Test failed; at least one node failed to reboot in the time given.")
@@ -176,9 +177,9 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 			prefix = "Retrieving log for the last terminated container"
 		}
 		if err != nil {
-			framework.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
+			e2elog.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
 		} else {
-			framework.Logf("%s %s:\n%s\n", prefix, id, log)
+			e2elog.Logf("%s %s:\n%s\n", prefix, id, log)
 		}
 	}
 	podNameSet := sets.NewString(podNames...)
@@ -192,7 +193,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 		if ok, _ := testutils.PodRunningReady(p); ok {
 			continue
 		}
-		framework.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
+		e2elog.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
 		// Print the log of the containers if pod is not running and ready.
 		for _, container := range p.Status.ContainerStatuses {
 			cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
@@ -221,16 +222,16 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 	ns := metav1.NamespaceSystem
 	ps, err := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
 	if err != nil {
-		framework.Logf("Couldn't initialize pod store: %v", err)
+		e2elog.Logf("Couldn't initialize pod store: %v", err)
 		return false
 	}
 	defer ps.Stop()

 	// Get the node initially.
-	framework.Logf("Getting %s", name)
+	e2elog.Logf("Getting %s", name)
 	node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
 	if err != nil {
-		framework.Logf("Couldn't get node %s", name)
+		e2elog.Logf("Couldn't get node %s", name)
 		return false
 	}

@@ -255,7 +256,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 			podNames = append(podNames, p.ObjectMeta.Name)
 		}
 	}
-	framework.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
+	e2elog.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)

 	// For each pod, we do a sanity check to ensure it's running / healthy
 	// or succeeded now, as that's what we'll be checking later.
@@ -266,7 +267,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {

 	// Reboot the node.
 	if err = framework.IssueSSHCommand(rebootCmd, provider, node); err != nil {
-		framework.Logf("Error while issuing ssh command: %v", err)
+		e2elog.Logf("Error while issuing ssh command: %v", err)
 		return false
 	}

@@ -288,7 +289,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 		return false
 	}

-	framework.Logf("Reboot successful on node %s", name)
+	e2elog.Logf("Reboot successful on node %s", name)
 	return true
 }

@@ -299,7 +300,7 @@ func catLogHook(logPath string) terminationHook {
 		for _, n := range nodes.Items {
 			cmd := fmt.Sprintf("cat %v && rm %v", logPath, logPath)
 			if _, err := framework.IssueSSHCommandWithResult(cmd, provider, &n); err != nil {
-				framework.Logf("Error while issuing ssh command: %v", err)
+				e2elog.Logf("Error while issuing ssh command: %v", err)
 			}
 		}

diff --git a/test/e2e/lifecycle/restart.go b/test/e2e/lifecycle/restart.go
index 261f62fc030..86be21dc4e9 100644
--- a/test/e2e/lifecycle/restart.go
+++ b/test/e2e/lifecycle/restart.go
@@ -25,6 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	testutils "k8s.io/kubernetes/test/utils"

 	"github.com/onsi/ginkgo"
@@ -61,7 +62,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		ginkgo.By("ensuring all nodes are ready")
 		originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		framework.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))
+		e2elog.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))

 		ginkgo.By("ensuring all pods are running and ready")
 		allPods := ps.List()
@@ -91,7 +92,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		ginkgo.By("ensuring all nodes are ready after the restart")
 		nodesAfter, err := framework.CheckNodesReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
 		gomega.Expect(err).NotTo(gomega.HaveOccurred())
-		framework.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))
+		e2elog.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))

 		// Make sure that we have the same number of nodes. We're not checking
 		// that the names match because that's implementation specific.
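
Reviewer note, not part of the patch: the snippet below is a minimal sketch of the call-site pattern these files converge on, assuming only what the diff shows (the e2elog alias for k8s.io/kubernetes/test/e2e/framework/log and its printf-style Logf). The helper logZoneAction is hypothetical, invented for illustration.

// Illustrative sketch only; logZoneAction is a hypothetical helper, not code
// introduced by this patch.
package lifecycle

import (
	// Same alias and import path added across test/e2e/lifecycle in this patch.
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// logZoneAction logs a zone-scoped action through the framework's log
// package, passing the format string straight to Logf.
func logZoneAction(action, zone string) {
	e2elog.Logf("%s, zone: %s", action, zone)
}

Since Logf already accepts a format string and arguments, the fmt.Sprintf wrappers retained in ha_master.go and writeRemoteFile are redundant; the patch leaves them in place, presumably to stay a mechanical framework.Logf to e2elog.Logf substitution.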