cleanup test code in lifecycle, servicecatalog and ui package

carlory 2019-08-27 09:51:23 +08:00
parent 1afcd7d0c8
commit 2301a3b15b
14 changed files with 61 additions and 68 deletions
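Every hunk below follows the same pattern: the bazel dependency on `//test/e2e/framework/log` is dropped, and calls to the `e2elog` alias (`e2elog.Logf` / `e2elog.Failf`) are replaced by the equivalent helpers exposed directly by the `framework` package. A minimal sketch of the before/after shape of an affected test is shown here; the package and spec names are hypothetical and not part of this commit.

```go
package lifecycle // hypothetical example package, not part of this commit

import (
	"github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
	// Before this commit, affected files also imported the alias package:
	//   e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	// and called e2elog.Logf / e2elog.Failf.
)

// Hypothetical spec illustrating the replacement applied throughout the diff.
var _ = ginkgo.Describe("example cleanup pattern", func() {
	ginkgo.It("logs and fails through the framework package", func() {
		// was: e2elog.Logf(...)
		framework.Logf("informational message from the test")

		if failed := false; failed {
			// was: e2elog.Failf(...)
			framework.Failf("fatal failure: %v", "reason")
		}
	})
})
```

The BUILD file hunks only remove the now-unused dependency; the Go hunks are the mechanical call-site rename.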

View File

@@ -37,7 +37,6 @@ go_library(
         "//test/e2e/framework/ginkgowrapper:go_default_library",
         "//test/e2e/framework/kubelet:go_default_library",
         "//test/e2e/framework/lifecycle:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",

View File

@@ -29,7 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"

 	"github.com/onsi/ginkgo"
@@ -299,7 +299,7 @@ var _ = SIGDescribe("Addon update", func() {
 		sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonEnsureExists, destinationDir, svcAddonEnsureExists))
 		// Delete the "ensure exist class" addon at the end.
 		defer func() {
-			e2elog.Logf("Cleaning up ensure exist class addon.")
+			framework.Logf("Cleaning up ensure exist class addon.")
 			err := f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)
 			framework.ExpectNoError(err)
 		}()
@@ -392,7 +392,7 @@ func sshExecAndVerify(client *ssh.Client, cmd string) {
 }

 func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
-	e2elog.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
+	framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
 	session, err := client.NewSession()
 	if err != nil {
 		return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err)
@@ -424,7 +424,7 @@ func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
 }

 func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.FileMode) error {
-	e2elog.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
+	framework.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
 	session, err := sshClient.NewSession()
 	if err != nil {
 		return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err)

View File

@@ -21,7 +21,6 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/lifecycle:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
     ],

View File

@@ -29,7 +29,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	bootstrapapi "k8s.io/cluster-bootstrap/token/api"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )

 func newTokenSecret(tokenID, tokenSecret string) *v1.Secret {
@@ -84,7 +83,7 @@ func WaitforSignedClusterInfoByBootStrapToken(c clientset.Interface, tokenID str
 	return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
 		cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Failf("Failed to get cluster-info configMap: %v", err)
+			framework.Failf("Failed to get cluster-info configMap: %v", err)
 			return false, err
 		}
 		_, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
@@ -100,7 +99,7 @@ func WaitForSignedClusterInfoGetUpdatedByBootstrapToken(c clientset.Interface, t
 	return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
 		cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Failf("Failed to get cluster-info configMap: %v", err)
+			framework.Failf("Failed to get cluster-info configMap: %v", err)
 			return false, err
 		}
 		updated, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]
@@ -116,7 +115,7 @@ func WaitForSignedClusterInfoByBootstrapTokenToDisappear(c clientset.Interface,
 	return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) {
 		cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Failf("Failed to get cluster-info configMap: %v", err)
+			framework.Failf("Failed to get cluster-info configMap: %v", err)
 			return false, err
 		}
 		_, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID]

View File

@@ -28,12 +28,12 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )

 func addMasterReplica(zone string) error {
-	e2elog.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
+	framework.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "true", "false")
 	if err != nil {
 		return err
@@ -42,7 +42,7 @@ func addMasterReplica(zone string) error {
 }

 func removeMasterReplica(zone string) error {
-	e2elog.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
+	framework.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "false", "false")
 	if err != nil {
 		return err
@@ -51,7 +51,7 @@ func removeMasterReplica(zone string) error {
 }

 func addWorkerNodes(zone string) error {
-	e2elog.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
+	framework.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "false", "true")
 	if err != nil {
 		return err
@@ -60,7 +60,7 @@ func addWorkerNodes(zone string) error {
 }

 func removeWorkerNodes(zone string) error {
-	e2elog.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
+	framework.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
 	_, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "true", "true")
 	if err != nil {
 		return err
@@ -83,7 +83,7 @@ func findRegionForZone(zone string) string {
 	region, err := exec.Command("gcloud", "compute", "zones", "list", zone, "--quiet", "--format=csv[no-heading](region)").Output()
 	framework.ExpectNoError(err)
 	if string(region) == "" {
-		e2elog.Failf("Region not found; zone: %s", zone)
+		framework.Failf("Region not found; zone: %s", zone)
 	}
 	return string(region)
 }

View File

@@ -29,7 +29,7 @@ import (
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"

 	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 )
@@ -82,7 +82,7 @@ func portClosedTest(f *framework.Framework, pickNode *v1.Node, port int) {
 		conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", addr, port), 1*time.Minute)
 		if err == nil {
 			conn.Close()
-			e2elog.Failf("port %d is not disabled", port)
+			framework.Failf("port %d is not disabled", port)
 		}
 	}
 }

View File

@@ -25,7 +25,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -47,7 +47,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 		gomega.Expect(err).To(gomega.BeNil())
 		systemPodsNo = int32(len(systemPods))
 		if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
-			e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
+			framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
 		} else {
 			group = framework.TestContext.CloudConfig.NodeInstanceGroup
 		}
@@ -70,7 +70,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 		ginkgo.By("restoring the original node instance group size")
 		if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
-			e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
+			framework.Failf("Couldn't restore the original node instance group size: %v", err)
 		}
 		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
 		// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
@@ -85,11 +85,11 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 			time.Sleep(5 * time.Minute)
 		}
 		if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
-			e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
+			framework.Failf("Couldn't restore the original node instance group size: %v", err)
 		}
 		if err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil {
-			e2elog.Failf("Couldn't restore the original cluster size: %v", err)
+			framework.Failf("Couldn't restore the original cluster size: %v", err)
 		}
 		// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
 		// the cluster is restored to health.
@@ -111,7 +111,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 			pass := true
 			for _, node := range originalNodes.Items {
 				if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
-					e2elog.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
+					framework.Logf("Try to get lease of node %s, but got error: %v", node.ObjectMeta.Name, err)
 					pass = false
 				}
 			}

View File

@@ -30,7 +30,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@@ -74,7 +74,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 			framework.ExpectNoError(err)
 			for _, e := range events.Items {
-				e2elog.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
+				framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
 			}
 		}
 		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
@@ -138,7 +138,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 	nodelist := framework.GetReadySchedulableNodesOrDie(c)
 	if hook != nil {
 		defer func() {
-			e2elog.Logf("Executing termination hook on nodes")
+			framework.Logf("Executing termination hook on nodes")
 			hook(framework.TestContext.Provider, nodelist)
 		}()
 	}
@@ -165,10 +165,10 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) {
 		for ix := range nodelist.Items {
 			n := nodelist.Items[ix]
 			if !result[ix] {
-				e2elog.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
+				framework.Logf("Node %s failed reboot test.", n.ObjectMeta.Name)
 			}
 		}
-		e2elog.Failf("Test failed; at least one node failed to reboot in the time given.")
+		framework.Failf("Test failed; at least one node failed to reboot in the time given.")
 	}
 }
@@ -179,9 +179,9 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 			prefix = "Retrieving log for the last terminated container"
 		}
 		if err != nil {
-			e2elog.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
+			framework.Logf("%s %s, err: %v:\n%s\n", prefix, id, err, log)
 		} else {
-			e2elog.Logf("%s %s:\n%s\n", prefix, id, log)
+			framework.Logf("%s %s:\n%s\n", prefix, id, log)
 		}
 	}
 	podNameSet := sets.NewString(podNames...)
@@ -195,7 +195,7 @@ func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podName
 		if ok, _ := testutils.PodRunningReady(p); ok {
 			continue
 		}
-		e2elog.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
+		framework.Logf("Status for not ready pod %s/%s: %+v", p.Namespace, p.Name, p.Status)
 		// Print the log of the containers if pod is not running and ready.
 		for _, container := range p.Status.ContainerStatuses {
 			cIdentifer := fmt.Sprintf("%s/%s/%s", p.Namespace, p.Name, container.Name)
@@ -224,16 +224,16 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 	ns := metav1.NamespaceSystem
 	ps, err := testutils.NewPodStore(c, ns, labels.Everything(), fields.OneTermEqualSelector(api.PodHostField, name))
 	if err != nil {
-		e2elog.Logf("Couldn't initialize pod store: %v", err)
+		framework.Logf("Couldn't initialize pod store: %v", err)
 		return false
 	}
 	defer ps.Stop()

 	// Get the node initially.
-	e2elog.Logf("Getting %s", name)
+	framework.Logf("Getting %s", name)
 	node, err := c.CoreV1().Nodes().Get(name, metav1.GetOptions{})
 	if err != nil {
-		e2elog.Logf("Couldn't get node %s", name)
+		framework.Logf("Couldn't get node %s", name)
 		return false
 	}
@@ -258,7 +258,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 			podNames = append(podNames, p.ObjectMeta.Name)
 		}
 	}
-	e2elog.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)
+	framework.Logf("Node %s has %d assigned pods with no liveness probes: %v", name, len(podNames), podNames)

 	// For each pod, we do a sanity check to ensure it's running / healthy
 	// or succeeded now, as that's what we'll be checking later.
@@ -269,7 +269,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {

 	// Reboot the node.
 	if err = e2essh.IssueSSHCommand(rebootCmd, provider, node); err != nil {
-		e2elog.Logf("Error while issuing ssh command: %v", err)
+		framework.Logf("Error while issuing ssh command: %v", err)
 		return false
 	}
@@ -291,7 +291,7 @@ func rebootNode(c clientset.Interface, provider, name, rebootCmd string) bool {
 		return false
 	}

-	e2elog.Logf("Reboot successful on node %s", name)
+	framework.Logf("Reboot successful on node %s", name)
 	return true
 }
@@ -302,7 +302,7 @@ func catLogHook(logPath string) terminationHook {
 		for _, n := range nodes.Items {
 			cmd := fmt.Sprintf("cat %v && rm %v", logPath, logPath)
 			if _, err := e2essh.IssueSSHCommandWithResult(cmd, provider, &n); err != nil {
-				e2elog.Logf("Error while issuing ssh command: %v", err)
+				framework.Logf("Error while issuing ssh command: %v", err)
 			}
 		}

View File

@@ -25,7 +25,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -56,7 +56,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 		framework.ExpectNoError(err)
 		systemPodsNo = int32(len(systemPods))
 		if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
-			e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
+			framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
 		} else {
 			group = framework.TestContext.CloudConfig.NodeInstanceGroup
 		}
@@ -81,7 +81,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 		ginkgo.By("restoring the original node instance group size")
 		if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
-			e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
+			framework.Failf("Couldn't restore the original node instance group size: %v", err)
 		}
 		// In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a
 		// rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated.
@@ -96,11 +96,11 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 			time.Sleep(5 * time.Minute)
 		}
 		if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
-			e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
+			framework.Failf("Couldn't restore the original node instance group size: %v", err)
 		}
 		if err := e2enode.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil {
-			e2elog.Failf("Couldn't restore the original cluster size: %v", err)
+			framework.Failf("Couldn't restore the original cluster size: %v", err)
 		}
 		// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
 		// the cluster is restored to health.

View File

@@ -25,7 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -63,7 +63,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		ginkgo.By("ensuring all nodes are ready")
 		originalNodes, err = e2enode.CheckReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
 		framework.ExpectNoError(err)
-		e2elog.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))
+		framework.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))

 		ginkgo.By("ensuring all pods are running and ready")
 		allPods := ps.List()
@@ -75,7 +75,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		}
 		if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) {
 			printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, originalPodNames, pods)
-			e2elog.Failf("At least one pod wasn't running and ready or succeeded at test start.")
+			framework.Failf("At least one pod wasn't running and ready or succeeded at test start.")
 		}
 	})
@@ -93,13 +93,13 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		ginkgo.By("ensuring all nodes are ready after the restart")
 		nodesAfter, err := e2enode.CheckReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
 		framework.ExpectNoError(err)
-		e2elog.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))
+		framework.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))

 		// Make sure that we have the same number of nodes. We're not checking
 		// that the names match because that's implementation specific.
 		ginkgo.By("ensuring the same number of nodes exist after the restart")
 		if len(originalNodes) != len(nodesAfter) {
-			e2elog.Failf("Had %d nodes before nodes were restarted, but now only have %d",
+			framework.Failf("Had %d nodes before nodes were restarted, but now only have %d",
 				len(originalNodes), len(nodesAfter))
 		}
@@ -114,7 +114,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 		if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) {
 			pods := ps.List()
 			printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, podNamesAfter, pods)
-			e2elog.Failf("At least one pod wasn't running and ready after the restart.")
+			framework.Failf("At least one pod wasn't running and ready after the restart.")
 		}
 	})
})

View File

@@ -17,7 +17,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
     ],

View File

@@ -29,7 +29,6 @@ import (
 	"k8s.io/apimachinery/pkg/watch"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 	"github.com/onsi/ginkgo"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -136,10 +135,10 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
 		select {
 		case event, _ := <-w.ResultChan():
 			if event.Type != watch.Added {
-				e2elog.Failf("Failed to observe pod creation: %v", event)
+				framework.Failf("Failed to observe pod creation: %v", event)
 			}
 		case <-time.After(framework.PodStartTimeout):
-			e2elog.Failf("Timeout while waiting for pod creation")
+			framework.Failf("Timeout while waiting for pod creation")
 		}

 		// We need to wait for the pod to be running, otherwise the deletion
@@ -153,15 +152,15 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {

 		// check the annotation is there
 		if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; !ok {
-			e2elog.Failf("Annotation not found in pod annotations: \n%v\n", pod.Annotations)
+			framework.Failf("Annotation not found in pod annotations: \n%v\n", pod.Annotations)
 		}

 		// verify the env is the same
 		if !reflect.DeepEqual(pip.Spec.Env, pod.Spec.Containers[0].Env) {
-			e2elog.Failf("env of pod container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.Containers[0].Env)
+			framework.Failf("env of pod container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.Containers[0].Env)
 		}
 		if !reflect.DeepEqual(pip.Spec.Env, pod.Spec.InitContainers[0].Env) {
-			e2elog.Failf("env of pod init container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.InitContainers[0].Env)
+			framework.Failf("env of pod init container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.InitContainers[0].Env)
 		}
 	})
@@ -256,10 +255,10 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {
 		select {
 		case event, _ := <-w.ResultChan():
 			if event.Type != watch.Added {
-				e2elog.Failf("Failed to observe pod creation: %v", event)
+				framework.Failf("Failed to observe pod creation: %v", event)
 			}
 		case <-time.After(framework.PodStartTimeout):
-			e2elog.Failf("Timeout while waiting for pod creation")
+			framework.Failf("Timeout while waiting for pod creation")
 		}

 		// We need to wait for the pod to be running, otherwise the deletion
@@ -273,15 +272,15 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() {

 		// check the annotation is not there
 		if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; ok {
-			e2elog.Failf("Annotation found in pod annotations and should not be: \n%v\n", pod.Annotations)
+			framework.Failf("Annotation found in pod annotations and should not be: \n%v\n", pod.Annotations)
 		}

 		// verify the env is the same
 		if !reflect.DeepEqual(originalPod.Spec.Containers[0].Env, pod.Spec.Containers[0].Env) {
-			e2elog.Failf("env of pod container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.Containers[0].Env, pod.Spec.Containers[0].Env)
+			framework.Failf("env of pod container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.Containers[0].Env, pod.Spec.Containers[0].Env)
 		}
 		if !reflect.DeepEqual(originalPod.Spec.InitContainers[0].Env, pod.Spec.InitContainers[0].Env) {
-			e2elog.Failf("env of pod init container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.InitContainers[0].Env, pod.Spec.InitContainers[0].Env)
+			framework.Failf("env of pod init container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.InitContainers[0].Env, pod.Spec.InitContainers[0].Env)
 		}
 	})

View File

@@ -14,7 +14,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/service:go_default_library",
         "//test/utils:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",

View File

@@ -26,7 +26,6 @@ import (
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -65,7 +64,7 @@ var _ = SIGDescribe("Kubernetes Dashboard [Feature:Dashboard]", func() {
 			var status int
 			proxyRequest, errProxy := e2eservice.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
 			if errProxy != nil {
-				e2elog.Logf("Get services proxy request failed: %v", errProxy)
+				framework.Logf("Get services proxy request failed: %v", errProxy)
 			}

 			ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
@@ -81,12 +80,12 @@ var _ = SIGDescribe("Kubernetes Dashboard [Feature:Dashboard]", func() {
 				Error()
 			if err != nil {
 				if ctx.Err() != nil {
-					e2elog.Failf("Request to kubernetes-dashboard failed: %v", err)
+					framework.Failf("Request to kubernetes-dashboard failed: %v", err)
 					return true, err
 				}
-				e2elog.Logf("Request to kubernetes-dashboard failed: %v", err)
+				framework.Logf("Request to kubernetes-dashboard failed: %v", err)
 			} else if status != http.StatusOK {
-				e2elog.Logf("Unexpected status from kubernetes-dashboard: %v", status)
+				framework.Logf("Unexpected status from kubernetes-dashboard: %v", status)
 			}
 			// Don't return err here as it aborts polling.
 			return status == http.StatusOK, nil