From 2301a3b15bac88eee4ef1e86ba3c3f94e313e22f Mon Sep 17 00:00:00 2001
From: carlory
Date: Tue, 27 Aug 2019 09:51:23 +0800
Subject: [PATCH] cleanup test code in lifecycle, servicecatalog and ui package

---
 test/e2e/lifecycle/BUILD               |  1 -
 test/e2e/lifecycle/addon_update.go     |  8 +++----
 test/e2e/lifecycle/bootstrap/BUILD     |  1 -
 test/e2e/lifecycle/bootstrap/util.go   |  7 +++---
 test/e2e/lifecycle/ha_master.go        | 12 +++++------
 test/e2e/lifecycle/kubelet_security.go |  4 ++--
 test/e2e/lifecycle/node_lease.go       | 12 +++++------
 test/e2e/lifecycle/reboot.go           | 30 +++++++++++++-------------
 test/e2e/lifecycle/resize_nodes.go     | 10 ++++-----
 test/e2e/lifecycle/restart.go          | 12 +++++------
 test/e2e/servicecatalog/BUILD          |  1 -
 test/e2e/servicecatalog/podpreset.go   | 21 +++++++++---------
 test/e2e/ui/BUILD                      |  1 -
 test/e2e/ui/dashboard.go               |  9 ++++----
 14 files changed, 61 insertions(+), 68 deletions(-)

diff --git a/test/e2e/lifecycle/BUILD b/test/e2e/lifecycle/BUILD
index dcb3cdbc0df..322ab984734 100644
--- a/test/e2e/lifecycle/BUILD
+++ b/test/e2e/lifecycle/BUILD
@@ -37,7 +37,6 @@ go_library(
         "//test/e2e/framework/ginkgowrapper:go_default_library",
         "//test/e2e/framework/kubelet:go_default_library",
         "//test/e2e/framework/lifecycle:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",
diff --git a/test/e2e/lifecycle/addon_update.go b/test/e2e/lifecycle/addon_update.go
index 3d4c165f94b..77b2d5dafff 100644
--- a/test/e2e/lifecycle/addon_update.go
+++ b/test/e2e/lifecycle/addon_update.go
@@ -29,7 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 
 	"github.com/onsi/ginkgo"
@@ -299,7 +299,7 @@ var _ = SIGDescribe("Addon update", func() {
 		sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonEnsureExists, destinationDir, svcAddonEnsureExists))
 		// Delete the "ensure exist class" addon at the end.
 		defer func() {
-			e2elog.Logf("Cleaning up ensure exist class addon.")
+			framework.Logf("Cleaning up ensure exist class addon.")
 			err := f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)
 			framework.ExpectNoError(err)
 		}()
@@ -392,7 +392,7 @@
 }
 
 func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
-	e2elog.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
+	framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
 	session, err := client.NewSession()
 	if err != nil {
 		return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err)
@@ -424,7 +424,7 @@ func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
 }
 
 func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.FileMode) error {
-	e2elog.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
+	framework.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
 	session, err := sshClient.NewSession()
 	if err != nil {
 		return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err)
diff --git a/test/e2e/lifecycle/bootstrap/BUILD b/test/e2e/lifecycle/bootstrap/BUILD
index f4b4f2c6c1b..28195ccc21d 100644
--- a/test/e2e/lifecycle/bootstrap/BUILD
+++ b/test/e2e/lifecycle/bootstrap/BUILD
@@ -21,7 +21,6 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/lifecycle:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
     ],
diff --git a/test/e2e/lifecycle/bootstrap/util.go b/test/e2e/lifecycle/bootstrap/util.go
index 5bb9229aa34..0f62a7575d1 100644
--- a/test/e2e/lifecycle/bootstrap/util.go
+++ b/test/e2e/lifecycle/bootstrap/util.go
@@ -29,7 +29,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	bootstrapapi "k8s.io/cluster-bootstrap/token/api"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )
 
 func newTokenSecret(tokenID, tokenSecret string) *v1.Secret {
@@ -84,7 +83,7 @@ func WaitforSignedClusterInfoByBootStrapToken(c clientset.Interface, tokenID str
 	return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
 		cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Failf("Failed to get cluster-info configMap: %v", err)
+			framework.Failf("Failed to get cluster-info configMap: %v", err)
 			return false, err
 		}
 		_, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
@@ -100,7 +99,7 @@ func WaitForSignedClusterInfoGetUpdatedByBootstrapToken(c clientset.Interface, t
 	return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
 		cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Failf("Failed to get cluster-info configMap: %v", err)
+			framework.Failf("Failed to get cluster-info configMap: %v", err)
 			return false, err
 		}
 		updated, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
@@ -116,7 +115,7 @@ func WaitForSignedClusterInfoByBootstrapTokenToDisappear(c clientset.Interface,
 	return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
 		cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Failf("Failed to get cluster-info configMap: %v", err)
+			framework.Failf("Failed to get cluster-info configMap: %v", err)
 			return false, err
 		}
 		_, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
diff --git a/test/e2e/lifecycle/ha_master.go b/test/e2e/lifecycle/ha_master.go
index 193f8501007..baa7db67f5e 100644
--- a/test/e2e/lifecycle/ha_master.go
+++ b/test/e2e/lifecycle/ha_master.go
@@ -28,12 +28,12 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
 
 func addMasterReplica(zone string) error {
-	e2elog.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
+	framework.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "true", "false")
 	if err != nil {
 		return err
@@ -42,7 +42,7 @@
 }
 
 func removeMasterReplica(zone string) error {
-	e2elog.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
+	framework.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "false", "false")
 	if err != nil {
 		return err
@@ -51,7 +51,7 @@
 }
 
 func addWorkerNodes(zone string) error {
-	e2elog.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
+	framework.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "false", "true")
 	if err != nil {
 		return err
@@ -60,7 +60,7 @@
 }
 
 func removeWorkerNodes(zone string) error {
-	e2elog.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
+	framework.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "true", "true")
 	if err != nil {
 		return err
@@ -83,7 +83,7 @@ func findRegionForZone(zone string) string {
 	region, err := exec.Command("gcloud", "compute", "zones", "list", zone, "--quiet", "--format=csv[no-heading](region)").Output()
 	framework.ExpectNoError(err)
 	if string(region) == "" {
-		e2elog.Failf("Region not found; zone: %s", zone)
+		framework.Failf("Region not found; zone: %s", zone)
 	}
 	return string(region)
 }
diff --git a/test/e2e/lifecycle/kubelet_security.go b/test/e2e/lifecycle/kubelet_security.go
index 3f29cd94335..3672cba1668 100644
--- a/test/e2e/lifecycle/kubelet_security.go
+++ b/test/e2e/lifecycle/kubelet_security.go
@@ -29,7 +29,7 @@ import (
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
 	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 )
 
@@ -82,7 +82,7 @@ func portClosedTest(f *framework.Framework, pickNode *v1.Node, port int) {
 		conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", addr, port), 1*time.Minute)
 		if err == nil {
 			conn.Close()
-			e2elog.Failf("port %d is not disabled", port)
+			framework.Failf("port %d is not disabled", port)
 		}
 	}
 }
diff --git a/test/e2e/lifecycle/node_lease.go b/test/e2e/lifecycle/node_lease.go
index dcb5560c628..fdc8b7a5595 100644
--- a/test/e2e/lifecycle/node_lease.go
+++ b/test/e2e/lifecycle/node_lease.go
@@ -25,7 +25,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 
@@ -47,7 +47,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 		gomega.Expect(err).To(gomega.BeNil())
 		systemPodsNo = int32(len(systemPods))
 		if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
-			e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
+			framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
 		} else {
 			group = framework.TestContext.CloudConfig.NodeInstanceGroup
 		}
@@ -70,7 +70,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 
 		ginkgo.By("restoring the original node instance group size")
 		if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
-			e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
+			framework.Failf("Couldn't restore the original node instance group size: %v", err)
 		}
 		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
 		// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
@@ -85,11 +85,11 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 			time.Sleep(5 * time.Minute)
 		}
 		if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
-			e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
+			framework.Failf("Couldn't restore the original node instance group size: %v", err)
 		}
 
 		if err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
-			e2elog.Failf("Couldn't restore the original cluster size: %v", err)
+			framework.Failf("Couldn't restore the original cluster size: %v", err)
 		}
 		// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
 		// the cluster is restored to health.
@@ -111,7 +111,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 			pass := true
 			for _, node := range originalNodes.Items {
 				if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
-					e2elog.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
+					framework.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
 					pass = false
 				}
 			}
diff --git a/test/e2e/lifecycle/reboot.go b/test/e2e/lifecycle/reboot.go
index 48ca1d9a0aa..c859da6d4bf 100644
--- a/test/e2e/lifecycle/reboot.go
+++ b/test/e2e/lifecycle/reboot.go
@@ -30,7 +30,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@@ -74,7 +74,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 			framework.ExpectNoError(err)
 
 			for _, e := range events.Items {
-				e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
+				framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
 			}
 		}
 		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
@@ -138,7 +138,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 	nodelist := framework.GetReadySchedulableNodesOrDie(c)
 	if hook != nil {
 		defer func() {
-			e2elog.Logf("Executing termination hook on nodes")
+			framework.Logf("Executing termination hook on nodes")
 			hook(framework.TestContext.Provider, nodelist)
 		}()
 	}
@@ -165,10 +165,10 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 		for ix := range nodelist.Items {
 			n := nodelist.Items[ix]
 			if !result[ix] {
-				e2elog.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
+				framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
 			}
 		}
-		e2elog.Failf("Test failed; at least one node failed to reboot in the time given.")
+		framework.Failf("Test failed; at least one node failed to reboot in the time given.")
 	}
 }
 
@@ -179,9 +179,9 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 			prefix = "Retrieving log for the last terminated container"
 		}
 		if err != nil {
-			e2elog.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
+			framework.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
 		} else {
-			e2elog.Logf("%s %s:\n%s\n", prefix, id, log)
+			framework.Logf("%s %s:\n%s\n", prefix, id, log)
 		}
 	}
 	podNameSet := sets.NewString(podNames...)
@@ -195,7 +195,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 		if ok, _ := testutils.PodRunningReady(p); ok {
 			continue
 		}
-		e2elog.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
+		framework.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
 		// Print the log of the containers if pod is not running and ready.
 		for _, container := range p.Status.ContainerStatuses {
 			cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
@@ -224,16 +224,16 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 	ns := metav1.NamespaceSystem
 	ps, err := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
 	if err != nil {
-		e2elog.Logf("Couldn't initialize pod store: %v", err)
+		framework.Logf("Couldn't initialize pod store: %v", err)
 		return false
 	}
 	defer ps.Stop()
 
 	// Get the node initially.
-	e2elog.Logf("Getting %s", name)
+	framework.Logf("Getting %s", name)
 	node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
 	if err != nil {
-		e2elog.Logf("Couldn't get node %s", name)
+		framework.Logf("Couldn't get node %s", name)
 		return false
 	}
 
@@ -258,7 +258,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 			podNames = append(podNames, p.ObjectMeta.Name)
 		}
 	}
-	e2elog.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
+	framework.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
 
 	// For each pod, we do a sanity check to ensure it's running / healthy
 	// or succeeded now, as that's what we'll be checking later.
@@ -269,7 +269,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 
 	// Reboot the node.
 	if err = e2essh.IssueSSHCommand(rebootCmd, provider, node); err != nil {
-		e2elog.Logf("Error while issuing ssh command: %v", err)
+		framework.Logf("Error while issuing ssh command: %v", err)
 		return false
 	}
 
@@ -291,7 +291,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 		return false
 	}
 
-	e2elog.Logf("Reboot successful on node %s", name)
+	framework.Logf("Reboot successful on node %s", name)
 	return true
 }
 
@@ -302,7 +302,7 @@ func catLogHook(logPath string) terminationHook {
 		for _, n := range nodes.Items {
 			cmd := fmt.Sprintf("cat %v && rm %v", logPath, logPath)
 			if _, err := e2essh.IssueSSHCommandWithResult(cmd, provider, &n); err != nil {
-				e2elog.Logf("Error while issuing ssh command: %v", err)
+				framework.Logf("Error while issuing ssh command: %v", err)
 			}
 		}
 
diff --git a/test/e2e/lifecycle/resize_nodes.go b/test/e2e/lifecycle/resize_nodes.go
index c69c81ee0b4..843f43a57db 100644
--- a/test/e2e/lifecycle/resize_nodes.go
+++ b/test/e2e/lifecycle/resize_nodes.go
@@ -25,7 +25,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 
@@ -56,7 +56,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 			framework.ExpectNoError(err)
 			systemPodsNo = int32(len(systemPods))
 			if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
-				e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
+				framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
 			} else {
 				group = framework.TestContext.CloudConfig.NodeInstanceGroup
 			}
@@ -81,7 +81,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 
 			ginkgo.By("restoring the original node instance group size")
 			if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
-				e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
+				framework.Failf("Couldn't restore the original node instance group size: %v", err)
 			}
 			// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
 			// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
@@ -96,11 +96,11 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 				time.Sleep(5 * time.Minute)
 			}
 			if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
-				e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
+				framework.Failf("Couldn't restore the original node instance group size: %v", err)
 			}
 
 			if err := e2enode.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {
-				e2elog.Failf("Couldn't restore the original cluster size: %v", err)
+				framework.Failf("Couldn't restore the original cluster size: %v", err)
 			}
 			// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
 			// the cluster is restored to health.
diff --git a/test/e2e/lifecycle/restart.go b/test/e2e/lifecycle/restart.go
index 0168282d974..d20cb9b79d4 100644
--- a/test/e2e/lifecycle/restart.go
+++ b/test/e2e/lifecycle/restart.go
@@ -25,7 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -63,7 +63,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		ginkgo.By("ensuring all nodes are ready")
 		originalNodes, err = e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
 		framework.ExpectNoError(err)
-		e2elog.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))
+		framework.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))
 
 		ginkgo.By("ensuring all pods are running and ready")
 		allPods := ps.List()
@@ -75,7 +75,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		}
 		if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
 			printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, originalPodNames, pods)
-			e2elog.Failf("At least one pod wasn't running and ready or succeeded at test start.")
+			framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
 		}
 	})
 
@@ -93,13 +93,13 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		ginkgo.By("ensuring all nodes are ready after the restart")
 		nodesAfter, err := e2enode.CheckReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
 		framework.ExpectNoError(err)
-		e2elog.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))
+		framework.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))
 
 		// Make sure that we have the same number of nodes. We're not checking
 		// that the names match because that's implementation specific.
ginkgo.By("ensuring the same number of nodes exist after the restart") if len(originalNodes) != len(nodesAfter) { - e2elog.Failf("Had %d nodes before nodes were restarted, but now only have %d", + framework.Failf("Had %d nodes before nodes were restarted, but now only have %d", len(originalNodes), len(nodesAfter)) } @@ -114,7 +114,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() { if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) { pods := ps.List() printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, podNamesAfter, pods) - e2elog.Failf("At least one pod wasn't running and ready after the restart.") + framework.Failf("At least one pod wasn't running and ready after the restart.") } }) }) diff --git a/test/e2e/servicecatalog/BUILD b/test/e2e/servicecatalog/BUILD index ed7d6512cf2..c722e5ff61a 100644 --- a/test/e2e/servicecatalog/BUILD +++ b/test/e2e/servicecatalog/BUILD @@ -17,7 +17,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/framework:go_default_library", - "//test/e2e/framework/log:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", ], diff --git a/test/e2e/servicecatalog/podpreset.go b/test/e2e/servicecatalog/podpreset.go index 811dc8e3c08..54e1945d36b 100644 --- a/test/e2e/servicecatalog/podpreset.go +++ b/test/e2e/servicecatalog/podpreset.go @@ -29,7 +29,6 @@ import ( "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" - e2elog "k8s.io/kubernetes/test/e2e/framework/log" "github.com/onsi/ginkgo" imageutils "k8s.io/kubernetes/test/utils/image" @@ -136,10 +135,10 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { select { case event, _ := <-w.ResultChan(): if event.Type != watch.Added { - e2elog.Failf("Failed to observe pod creation: %v", event) + framework.Failf("Failed to observe pod creation: %v", event) } case <-time.After(framework.PodStartTimeout): - e2elog.Failf("Timeout while waiting for pod creation") + framework.Failf("Timeout while waiting for pod creation") } // We need to wait for the pod to be running, otherwise the deletion @@ -153,15 +152,15 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { // check the annotation is there if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; !ok { - e2elog.Failf("Annotation not found in pod annotations: \n%v\n", pod.Annotations) + framework.Failf("Annotation not found in pod annotations: \n%v\n", pod.Annotations) } // verify the env is the same if !reflect.DeepEqual(pip.Spec.Env, pod.Spec.Containers[0].Env) { - e2elog.Failf("env of pod container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.Containers[0].Env) + framework.Failf("env of pod container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.Containers[0].Env) } if !reflect.DeepEqual(pip.Spec.Env, pod.Spec.InitContainers[0].Env) { - e2elog.Failf("env of pod init container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.InitContainers[0].Env) + framework.Failf("env of pod init container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.InitContainers[0].Env) } }) @@ -256,10 +255,10 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { select { case event, _ := <-w.ResultChan(): if 
event.Type != watch.Added { - e2elog.Failf("Failed to observe pod creation: %v", event) + framework.Failf("Failed to observe pod creation: %v", event) } case <-time.After(framework.PodStartTimeout): - e2elog.Failf("Timeout while waiting for pod creation") + framework.Failf("Timeout while waiting for pod creation") } // We need to wait for the pod to be running, otherwise the deletion @@ -273,15 +272,15 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { // check the annotation is not there if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; ok { - e2elog.Failf("Annotation found in pod annotations and should not be: \n%v\n", pod.Annotations) + framework.Failf("Annotation found in pod annotations and should not be: \n%v\n", pod.Annotations) } // verify the env is the same if !reflect.DeepEqual(originalPod.Spec.Containers[0].Env, pod.Spec.Containers[0].Env) { - e2elog.Failf("env of pod container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.Containers[0].Env, pod.Spec.Containers[0].Env) + framework.Failf("env of pod container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.Containers[0].Env, pod.Spec.Containers[0].Env) } if !reflect.DeepEqual(originalPod.Spec.InitContainers[0].Env, pod.Spec.InitContainers[0].Env) { - e2elog.Failf("env of pod init container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.InitContainers[0].Env, pod.Spec.InitContainers[0].Env) + framework.Failf("env of pod init container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.InitContainers[0].Env, pod.Spec.InitContainers[0].Env) } }) diff --git a/test/e2e/ui/BUILD b/test/e2e/ui/BUILD index 7dc395b8017..67696dd9728 100644 --- a/test/e2e/ui/BUILD +++ b/test/e2e/ui/BUILD @@ -14,7 +14,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//test/e2e/framework:go_default_library", - "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/service:go_default_library", "//test/utils:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", diff --git a/test/e2e/ui/dashboard.go b/test/e2e/ui/dashboard.go index dd18b7d0dbe..8d0bb085df0 100644 --- a/test/e2e/ui/dashboard.go +++ b/test/e2e/ui/dashboard.go @@ -26,7 +26,6 @@ import ( utilnet "k8s.io/apimachinery/pkg/util/net" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/kubernetes/test/e2e/framework" - e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2eservice "k8s.io/kubernetes/test/e2e/framework/service" testutils "k8s.io/kubernetes/test/utils" @@ -65,7 +64,7 @@ var _ = SIGDescribe("Kubernetes Dashboard [Feature:Dashboard]", func() { var status int proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get()) if errProxy != nil { - e2elog.Logf("Get services proxy request failed: %v", errProxy) + framework.Logf("Get services proxy request failed: %v", errProxy) } ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) @@ -81,12 +80,12 @@ var _ = SIGDescribe("Kubernetes Dashboard [Feature:Dashboard]", func() { Error() if err != nil { if ctx.Err() != nil { - e2elog.Failf("Request to kubernetes-dashboard failed: %v", err) + framework.Failf("Request to kubernetes-dashboard failed: %v", err) return true, err } - e2elog.Logf("Request to kubernetes-dashboard failed: %v", err) + 
framework.Logf("Request to kubernetes-dashboard failed: %v", err) } else if status != http.StatusOK { - e2elog.Logf("Unexpected status from kubernetes-dashboard: %v", status) + framework.Logf("Unexpected status from kubernetes-dashboard: %v", status) } // Don't return err here as it aborts polling. return status == http.StatusOK, nil