diff --git a/test/e2e/autoscaling/BUILD b/test/e2e/autoscaling/BUILD
index 02d88db7e8d..8aa5230d91e 100644
--- a/test/e2e/autoscaling/BUILD
+++ b/test/e2e/autoscaling/BUILD
@@ -40,7 +40,6 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/pv:go_default_library",
diff --git a/test/e2e/autoscaling/cluster_autoscaler_scalability.go b/test/e2e/autoscaling/cluster_autoscaler_scalability.go
index 3895f59e6a8..a3200e5e2c5 100644
--- a/test/e2e/autoscaling/cluster_autoscaler_scalability.go
+++ b/test/e2e/autoscaling/cluster_autoscaler_scalability.go
@@ -29,6 +29,7 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/klog"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -36,7 +37,6 @@ import (

 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
-	"k8s.io/klog"
 )

 const (
diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go
index 8c2db5ad755..00a46bb8bfc 100644
--- a/test/e2e/autoscaling/cluster_size_autoscaling.go
+++ b/test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -41,9 +41,9 @@ import (
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/klog"
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
 	"k8s.io/kubernetes/test/e2e/scheduling"
@@ -52,7 +52,6 @@ import (

 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
-	"k8s.io/klog"
 )

 const (
@@ -211,7 +210,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 		framework.SkipUnlessProviderIs("gke")
 		if gpuType == "" {
-			e2elog.Failf("TEST_GPU_TYPE not defined")
+			framework.Failf("TEST_GPU_TYPE not defined")
 			return
 		}
@@ -238,7 +237,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 		framework.SkipUnlessProviderIs("gke")
 		if gpuType == "" {
-			e2elog.Failf("TEST_GPU_TYPE not defined")
+			framework.Failf("TEST_GPU_TYPE not defined")
 			return
 		}
@@ -268,7 +267,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 		framework.SkipUnlessProviderIs("gke")
 		if gpuType == "" {
-			e2elog.Failf("TEST_GPU_TYPE not defined")
+			framework.Failf("TEST_GPU_TYPE not defined")
 			return
 		}
@@ -297,7 +296,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 	ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() {
 		framework.SkipUnlessProviderIs("gke")
 		if gpuType == "" {
-			e2elog.Failf("TEST_GPU_TYPE not defined")
+			framework.Failf("TEST_GPU_TYPE not defined")
 			return
 		}
@@ -499,7 +498,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		defer func() {
 			errs := e2epv.PVPVCCleanup(c, f.Namespace.Name, pv, pvc)
 			if len(errs) > 0 {
-				e2elog.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
+				framework.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
 			}
 			pv, pvc = nil, nil
 			if diskName != "" {
@@ -921,10 +920,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 		defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
 		time.Sleep(scaleUpTimeout)
 		currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-		e2elog.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
+		framework.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
 		framework.ExpectEqual(len(currentNodes.Items), len(nodes.Items)-nodesToBreakCount)
 		status, err := getClusterwideStatus(c)
-		e2elog.Logf("Clusterwide status: %v", status)
+		framework.Logf("Clusterwide status: %v", status)
 		framework.ExpectNoError(err)
 		framework.ExpectEqual(status, "Unhealthy")
 	}
@@ -1232,7 +1231,7 @@ func getPoolNodes(f *framework.Framework, poolName string) []*v1.Node {
 	nodes := make([]*v1.Node, 0, 1)
 	nodeList, err := e2enode.GetReadyNodesIncludingTainted(f.ClientSet)
 	if err != nil {
-		e2elog.Logf("Unexpected error occurred: %v", err)
+		framework.Logf("Unexpected error occurred: %v", err)
 	}
 	// TODO: write a wrapper for ExpectNoErrorWithOffset()
 	framework.ExpectNoErrorWithOffset(0, err)
@@ -1309,7 +1308,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
 			return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
 		}
 	}
-	e2elog.Failf("Failed to reserve memory within timeout")
+	framework.Failf("Failed to reserve memory within timeout")
 	return nil
 }
@@ -1880,7 +1879,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) {
 			}
 		}
 		if finalErr != nil {
-			e2elog.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr)
+			framework.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr)
 		}
 	}
diff --git a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
index 7721ccd51ff..c1f968f7d2a 100644
--- a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
+++ b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go
@@ -30,7 +30,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/instrumentation/monitoring"

 	"github.com/onsi/ginkgo"
@@ -240,35 +239,35 @@ func (tc *CustomMetricTestCase) Run() {
 	// and uncomment following lines:
 	/*
 		ts, err := google.DefaultTokenSource(oauth2.NoContext)
-		e2elog.Logf("Couldn't get application default credentials, %v", err)
+		framework.Logf("Couldn't get application default credentials, %v", err)
 		if err != nil {
-			e2elog.Failf("Error accessing application default credentials, %v", err)
+			framework.Failf("Error accessing application default credentials, %v", err)
 		}
 		client := oauth2.NewClient(oauth2.NoContext, ts)
 	*/

 	gcmService, err := gcm.New(client)
 	if err != nil {
-		e2elog.Failf("Failed to create gcm service, %v", err)
+		framework.Failf("Failed to create gcm service, %v", err)
 	}

 	// Set up a cluster: create a custom metric and set up k8s-sd adapter
 	err = monitoring.CreateDescriptors(gcmService, projectID)
 	if err != nil {
-		e2elog.Failf("Failed to create metric descriptor: %v", err)
+		framework.Failf("Failed to create metric descriptor: %v", err)
 	}
 	defer monitoring.CleanupDescriptors(gcmService, projectID)

 	err = monitoring.CreateAdapter(monitoring.AdapterDefault)
 	if err != nil {
-		e2elog.Failf("Failed to set up: %v", err)
+		framework.Failf("Failed to set up: %v", err)
 	}
 	defer monitoring.CleanupAdapter(monitoring.AdapterDefault)

 	// Run application that exports the metric
 	err = createDeploymentToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod)
 	if err != nil {
-		e2elog.Failf("Failed to create stackdriver-exporter pod: %v", err)
+		framework.Failf("Failed to create stackdriver-exporter pod: %v", err)
 	}
 	defer cleanupDeploymentsToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod)
@@ -278,7 +277,7 @@ func (tc *CustomMetricTestCase) Run() {
 	// Autoscale the deployment
 	_, err = tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(tc.hpa)
 	if err != nil {
-		e2elog.Failf("Failed to create HPA: %v", err)
+		framework.Failf("Failed to create HPA: %v", err)
 	}
 	defer tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete(tc.hpa.ObjectMeta.Name, &metav1.DeleteOptions{})
@@ -442,13 +441,13 @@ func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, t
 	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
 		deployment, err := cs.AppsV1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Failf("Failed to get replication controller %s: %v", deployment, err)
+			framework.Failf("Failed to get replication controller %s: %v", deployment, err)
 		}
 		replicas := int(deployment.Status.ReadyReplicas)
-		e2elog.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
+		framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
 		return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
 	})
 	if err != nil {
-		e2elog.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas)
+		framework.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas)
 	}
 }
diff --git a/test/e2e/autoscaling/dns_autoscaling.go b/test/e2e/autoscaling/dns_autoscaling.go
index 8fa34a78382..6162b080555 100644
--- a/test/e2e/autoscaling/dns_autoscaling.go
+++ b/test/e2e/autoscaling/dns_autoscaling.go
@@ -29,7 +29,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"

@@ -258,7 +257,7 @@ func getScheduableCores(nodes []v1.Node) int64 {

 	scInt64, scOk := sc.AsInt64()
 	if !scOk {
-		e2elog.Logf("Unable to compute integer values of schedulable cores in the cluster")
+		framework.Logf("Unable to compute integer values of schedulable cores in the cluster")
 		return 0
 	}
 	return scInt64
@@ -276,7 +275,7 @@ func deleteDNSScalingConfigMap(c clientset.Interface) error {
 	if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
 		return err
 	}
-	e2elog.Logf("DNS autoscaling ConfigMap deleted.")
+	framework.Logf("DNS autoscaling ConfigMap deleted.")
 	return nil
 }
@@ -303,7 +302,7 @@ func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) e
 	if err != nil {
 		return err
 	}
-	e2elog.Logf("DNS autoscaling ConfigMap updated.")
+	framework.Logf("DNS autoscaling ConfigMap updated.")
 	return nil
 }
@@ -337,14 +336,14 @@ func deleteDNSAutoscalerPod(c clientset.Interface) error {
 	if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(podName, nil); err != nil {
 		return err
 	}
-	e2elog.Logf("DNS autoscaling pod %v deleted.", podName)
+	framework.Logf("DNS autoscaling pod %v deleted.", podName)
 	return nil
 }

 func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectReplicasFunc, timeout time.Duration) (err error) {
 	var current int
 	var expected int
-	e2elog.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
+	framework.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
 	condition := func() (bool, error) {
 		current, err = getDNSReplicas(c)
 		if err != nil {
@@ -352,7 +351,7 @@ func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectRep
 		}
 		expected = getExpected(c)
 		if current != expected {
-			e2elog.Logf("Replicas not as expected: got %v, expected %v", current, expected)
+			framework.Logf("Replicas not as expected: got %v, expected %v", current, expected)
 			return false, nil
 		}
 		return true, nil
@@ -361,12 +360,12 @@ func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectRep
 	if err = wait.Poll(2*time.Second, timeout, condition); err != nil {
 		return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %v", expected, current, err)
 	}
-	e2elog.Logf("kube-dns reaches expected replicas: %v", expected)
+	framework.Logf("kube-dns reaches expected replicas: %v", expected)
 	return nil
 }

 func waitForDNSConfigMapCreated(c clientset.Interface, timeout time.Duration) (configMap *v1.ConfigMap, err error) {
-	e2elog.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
+	framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
 	condition := func() (bool, error) {
 		configMap, err = fetchDNSScalingConfigMap(c)
 		if err != nil {
diff --git a/test/e2e/upgrades/BUILD b/test/e2e/upgrades/BUILD
index 6a4e11d7165..9a534754f6d 100644
--- a/test/e2e/upgrades/BUILD
+++ b/test/e2e/upgrades/BUILD
@@ -37,7 +37,6 @@ go_library(
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/job:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/service:go_default_library",
         "//test/e2e/framework/statefulset:go_default_library",
         "//test/e2e/framework/testfiles:go_default_library",
diff --git a/test/e2e/upgrades/apparmor.go b/test/e2e/upgrades/apparmor.go
index 21475df460c..ae3dbef19ee 100644
--- a/test/e2e/upgrades/apparmor.go
+++ b/test/e2e/upgrades/apparmor.go
@@ -21,7 +21,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
@@ -80,7 +79,7 @@ func (t *AppArmorUpgradeTest) Test(f *framework.Framework, done <-chan struct{},
 func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) {
 	// rely on the namespace deletion to clean up everything
 	ginkgo.By("Logging container failures")
-	framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
+	framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
 }

 func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
diff --git a/test/e2e/upgrades/apps/BUILD b/test/e2e/upgrades/apps/BUILD
index 0926c210bc7..ae94a7f636e 100644
--- a/test/e2e/upgrades/apps/BUILD
+++ b/test/e2e/upgrades/apps/BUILD
@@ -29,7 +29,6 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/deployment:go_default_library",
         "//test/e2e/framework/job:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/replicaset:go_default_library",
         "//test/e2e/framework/statefulset:go_default_library",
         "//test/e2e/upgrades:go_default_library",
diff --git a/test/e2e/upgrades/apps/daemonsets.go b/test/e2e/upgrades/apps/daemonsets.go
index fbc409cd3ae..53dd1c0a4b0 100644
--- a/test/e2e/upgrades/apps/daemonsets.go
+++ b/test/e2e/upgrades/apps/daemonsets.go
@@ -26,7 +26,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/e2e/upgrades"
 )
@@ -81,7 +80,7 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {
 	ginkgo.By("Creating a DaemonSet")
 	var err error
 	if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(t.daemonSet); err != nil {
-		e2elog.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err)
+		framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err)
 	}

 	ginkgo.By("Waiting for DaemonSet pods to become ready")
@@ -114,7 +113,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework)
 	res, err := checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
 	framework.ExpectNoError(err)
 	if !res {
-		e2elog.Failf("expected DaemonSet pod to be running on all nodes, it was not")
+		framework.Failf("expected DaemonSet pod to be running on all nodes, it was not")
 	}

 	// DaemonSet resource itself should be good
@@ -122,7 +121,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework)
 	res, err = checkDaemonStatus(f, t.daemonSet.Namespace, t.daemonSet.Name)
 	framework.ExpectNoError(err)
 	if !res {
-		e2elog.Failf("expected DaemonSet to be in a good state, it was not")
+		framework.Failf("expected DaemonSet to be in a good state, it was not")
 	}
 }
@@ -135,7 +134,7 @@ func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector m
 	nodeNames := make([]string, 0)
 	for _, node := range nodeList.Items {
 		if len(node.Spec.Taints) != 0 {
-			e2elog.Logf("Ignore taints %v on Node %v for DaemonSet Pod.", node.Spec.Taints, node.Name)
+			framework.Logf("Ignore taints %v on Node %v for DaemonSet Pod.", node.Spec.Taints, node.Name)
 		}
 		// DaemonSet Pods are expected to run on all the nodes in e2e.
 		nodeNames = append(nodeNames, node.Name)
@@ -156,11 +155,11 @@ func checkDaemonPodOnNodes(f *framework.Framework, namespace string, labelSet ma
 	nodesToPodCount := make(map[string]int)
 	for _, pod := range pods {
 		if controller.IsPodActive(&pod) {
-			e2elog.Logf("Pod name: %v\t Node Name: %v", pod.Name, pod.Spec.NodeName)
+			framework.Logf("Pod name: %v\t Node Name: %v", pod.Name, pod.Spec.NodeName)
 			nodesToPodCount[pod.Spec.NodeName]++
 		}
 	}
-	e2elog.Logf("nodesToPodCount: %v", nodesToPodCount)
+	framework.Logf("nodesToPodCount: %v", nodesToPodCount)

 	// Ensure that exactly 1 pod is running on all nodes in nodeNames.
 	for _, nodeName := range nodeNames {
diff --git a/test/e2e/upgrades/cassandra.go b/test/e2e/upgrades/cassandra.go
index 14388835116..e81d7864629 100644
--- a/test/e2e/upgrades/cassandra.go
+++ b/test/e2e/upgrades/cassandra.go
@@ -32,7 +32,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/version"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"
 )
@@ -90,13 +89,13 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
 			return false, nil
 		}
 		if _, err := t.listUsers(); err != nil {
-			e2elog.Logf("Service endpoint is up but isn't responding")
+			framework.Logf("Service endpoint is up but isn't responding")
 			return false, nil
 		}
 		return true, nil
 	})
 	framework.ExpectNoError(err)
-	e2elog.Logf("Service endpoint is up")
+	framework.Logf("Service endpoint is up")

 	ginkgo.By("Adding 2 dummy users")
 	err = t.addUser("Alice")
@@ -177,7 +176,7 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
 	go wait.Until(func() {
 		writeAttempts++
 		if err := t.addUser(fmt.Sprintf("user-%d", writeAttempts)); err != nil {
-			e2elog.Logf("Unable to add user: %v", err)
+			framework.Logf("Unable to add user: %v", err)
 			mu.Lock()
 			errors[err.Error()]++
 			mu.Unlock()
@@ -189,7 +188,7 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
 	wait.Until(func() {
 		users, err := t.listUsers()
 		if err != nil {
-			e2elog.Logf("Could not retrieve users: %v", err)
+			framework.Logf("Could not retrieve users: %v", err)
 			failures++
 			mu.Lock()
 			errors[err.Error()]++
@@ -199,14 +198,14 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
 		success++
 		lastUserCount = len(users)
 	}, 10*time.Millisecond, done)
-	e2elog.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
+	framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
 	gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
 	ratio := float64(success) / float64(success+failures)
-	e2elog.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
+	framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
 	ratio = float64(t.successfulWrites) / float64(writeAttempts)
-	e2elog.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
-	e2elog.Logf("Errors: %v", errors)
+	framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
+	framework.Logf("Errors: %v", errors)
 	// TODO(maisem): tweak this value once we have a few test runs.
 	gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
 }
diff --git a/test/e2e/upgrades/configmaps.go b/test/e2e/upgrades/configmaps.go
index 1ee3fb58fa0..7a9a4aa1514 100644
--- a/test/e2e/upgrades/configmaps.go
+++ b/test/e2e/upgrades/configmaps.go
@@ -21,12 +21,11 @@ import (
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	imageutils "k8s.io/kubernetes/test/utils/image"

 	"github.com/onsi/ginkgo"
-	"k8s.io/apimachinery/pkg/util/uuid"
 )

 // ConfigMapUpgradeTest tests that a ConfigMap is available before and after
@@ -59,7 +58,7 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) {
 	ginkgo.By("Creating a ConfigMap")
 	var err error
 	if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(t.configMap); err != nil {
-		e2elog.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err)
+		framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err)
 	}

 	ginkgo.By("Making sure the ConfigMap is consumable")
diff --git a/test/e2e/upgrades/etcd.go b/test/e2e/upgrades/etcd.go
index 7fd742a1bda..2c44992c3a1 100644
--- a/test/e2e/upgrades/etcd.go
+++ b/test/e2e/upgrades/etcd.go
@@ -32,7 +32,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/version"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"
 )
@@ -85,13 +84,13 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
 			return false, nil
 		}
 		if _, err := t.listUsers(); err != nil {
-			e2elog.Logf("Service endpoint is up but isn't responding")
+			framework.Logf("Service endpoint is up but isn't responding")
 			return false, nil
 		}
 		return true, nil
 	})
 	framework.ExpectNoError(err)
-	e2elog.Logf("Service endpoint is up")
+	framework.Logf("Service endpoint is up")

 	ginkgo.By("Adding 2 dummy users")
 	err = t.addUser("Alice")
@@ -165,7 +164,7 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
 	go wait.Until(func() {
 		writeAttempts++
 		if err := t.addUser(fmt.Sprintf("user-%d", writeAttempts)); err != nil {
-			e2elog.Logf("Unable to add user: %v", err)
+			framework.Logf("Unable to add user: %v", err)
 			mu.Lock()
 			errors[err.Error()]++
 			mu.Unlock()
@@ -177,7 +176,7 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
 	wait.Until(func() {
 		users, err := t.listUsers()
 		if err != nil {
-			e2elog.Logf("Could not retrieve users: %v", err)
+			framework.Logf("Could not retrieve users: %v", err)
 			failures++
 			mu.Lock()
 			errors[err.Error()]++
@@ -187,14 +186,14 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
 		success++
 		lastUserCount = len(users)
 	}, 10*time.Millisecond, done)
-	e2elog.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
+	framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
 	gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
 	ratio := float64(success) / float64(success+failures)
-	e2elog.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
+	framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
 	ratio = float64(t.successfulWrites) / float64(writeAttempts)
-	e2elog.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
-	e2elog.Logf("Errors: %v", errors)
+	framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
+	framework.Logf("Errors: %v", errors)
 	// TODO(maisem): tweak this value once we have a few test runs.
 	gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
 }
diff --git a/test/e2e/upgrades/kube_proxy_migration.go b/test/e2e/upgrades/kube_proxy_migration.go
index 3445a47460d..4a73a41b819 100644
--- a/test/e2e/upgrades/kube_proxy_migration.go
+++ b/test/e2e/upgrades/kube_proxy_migration.go
@@ -27,7 +27,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 	"github.com/onsi/ginkgo"
 )
@@ -110,12 +109,12 @@ func (t *KubeProxyDowngradeTest) Teardown(f *framework.Framework) {
 }

 func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
-	e2elog.Logf("Waiting up to %v for kube-proxy static pods running", defaultTestTimeout)
+	framework.Logf("Waiting up to %v for kube-proxy static pods running", defaultTestTimeout)

 	condition := func() (bool, error) {
 		pods, err := getKubeProxyStaticPods(c)
 		if err != nil {
-			e2elog.Logf("Failed to get kube-proxy static pods: %v", err)
+			framework.Logf("Failed to get kube-proxy static pods: %v", err)
 			return false, nil
 		}

@@ -127,7 +126,7 @@ func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
 			}
 		}
 		if numberkubeProxyPods != numberSchedulableNodes {
-			e2elog.Logf("Expect %v kube-proxy static pods running, got %v running, %v in total", numberSchedulableNodes, numberkubeProxyPods, len(pods.Items))
+			framework.Logf("Expect %v kube-proxy static pods running, got %v running, %v in total", numberSchedulableNodes, numberkubeProxyPods, len(pods.Items))
 			return false, nil
 		}
 		return true, nil
@@ -140,17 +139,17 @@ func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
 }

 func waitForKubeProxyStaticPodsDisappear(c clientset.Interface) error {
-	e2elog.Logf("Waiting up to %v for kube-proxy static pods disappear", defaultTestTimeout)
+	framework.Logf("Waiting up to %v for kube-proxy static pods disappear", defaultTestTimeout)

 	condition := func() (bool, error) {
 		pods, err := getKubeProxyStaticPods(c)
 		if err != nil {
-			e2elog.Logf("Failed to get kube-proxy static pods: %v", err)
+			framework.Logf("Failed to get kube-proxy static pods: %v", err)
 			return false, nil
 		}

 		if len(pods.Items) != 0 {
-			e2elog.Logf("Expect kube-proxy static pods to disappear, got %v pods", len(pods.Items))
+			framework.Logf("Expect kube-proxy static pods to disappear, got %v pods", len(pods.Items))
 			return false, nil
 		}
 		return true, nil
@@ -163,24 +162,24 @@ func waitForKubeProxyStaticPodsDisappear(c clientset.Interface) error {
 }

 func waitForKubeProxyDaemonSetRunning(c clientset.Interface) error {
-	e2elog.Logf("Waiting up to %v for kube-proxy DaemonSet running", defaultTestTimeout)
+	framework.Logf("Waiting up to %v for kube-proxy DaemonSet running", defaultTestTimeout)

 	condition := func() (bool, error) {
 		daemonSets, err := getKubeProxyDaemonSet(c)
 		if err != nil {
-			e2elog.Logf("Failed to get kube-proxy DaemonSet: %v", err)
+			framework.Logf("Failed to get kube-proxy DaemonSet: %v", err)
 			return false, nil
 		}

 		if len(daemonSets.Items) != 1 {
-			e2elog.Logf("Expect only one kube-proxy DaemonSet, got %v", len(daemonSets.Items))
+			framework.Logf("Expect only one kube-proxy DaemonSet, got %v", len(daemonSets.Items))
 			return false, nil
 		}

 		numberSchedulableNodes := len(framework.GetReadySchedulableNodesOrDie(c).Items)
 		numberkubeProxyPods := int(daemonSets.Items[0].Status.NumberAvailable)
 		if numberkubeProxyPods != numberSchedulableNodes {
-			e2elog.Logf("Expect %v kube-proxy DaemonSet pods running, got %v", numberSchedulableNodes, numberkubeProxyPods)
+			framework.Logf("Expect %v kube-proxy DaemonSet pods running, got %v", numberSchedulableNodes, numberkubeProxyPods)
 			return false, nil
 		}
 		return true, nil
@@ -193,17 +192,17 @@ func waitForKubeProxyDaemonSetRunning(c clientset.Interface) error {
 }

 func waitForKubeProxyDaemonSetDisappear(c clientset.Interface) error {
-	e2elog.Logf("Waiting up to %v for kube-proxy DaemonSet disappear", defaultTestTimeout)
+	framework.Logf("Waiting up to %v for kube-proxy DaemonSet disappear", defaultTestTimeout)

 	condition := func() (bool, error) {
 		daemonSets, err := getKubeProxyDaemonSet(c)
 		if err != nil {
-			e2elog.Logf("Failed to get kube-proxy DaemonSet: %v", err)
+			framework.Logf("Failed to get kube-proxy DaemonSet: %v", err)
 			return false, nil
 		}

 		if len(daemonSets.Items) != 0 {
-			e2elog.Logf("Expect kube-proxy DaemonSet to disappear, got %v DaemonSet", len(daemonSets.Items))
+			framework.Logf("Expect kube-proxy DaemonSet to disappear, got %v DaemonSet", len(daemonSets.Items))
 			return false, nil
 		}
 		return true, nil
diff --git a/test/e2e/upgrades/mysql.go b/test/e2e/upgrades/mysql.go
index 945ee198a69..55d4a98e139 100644
--- a/test/e2e/upgrades/mysql.go
+++ b/test/e2e/upgrades/mysql.go
@@ -32,7 +32,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/version"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"
 )
@@ -100,13 +99,13 @@ func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
 			return false, nil
 		}
 		if _, err := t.countNames(); err != nil {
-			e2elog.Logf("Service endpoint is up but isn't responding")
+			framework.Logf("Service endpoint is up but isn't responding")
 			return false, nil
 		}
 		return true, nil
 	})
 	framework.ExpectNoError(err)
-	e2elog.Logf("Service endpoint is up")
+	framework.Logf("Service endpoint is up")

 	ginkgo.By("Adding 2 names to the database")
 	err = t.addName(strconv.Itoa(t.nextWrite))
@@ -128,7 +127,7 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
 	go wait.Until(func() {
 		_, err := t.countNames()
 		if err != nil {
-			e2elog.Logf("Error while trying to read data: %v", err)
+			framework.Logf("Error while trying to read data: %v", err)
 			readFailure++
 		} else {
 			readSuccess++
@@ -138,7 +137,7 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
 	wait.Until(func() {
 		err := t.addName(strconv.Itoa(t.nextWrite))
 		if err != nil {
-			e2elog.Logf("Error while trying to write data: %v", err)
+			framework.Logf("Error while trying to write data: %v", err)
 			writeFailure++
 		} else {
 			writeSuccess++
@@ -146,10 +145,10 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
 	}, framework.Poll, done)

 	t.successfulWrites = writeSuccess
-	e2elog.Logf("Successful reads: %d", readSuccess)
-	e2elog.Logf("Successful writes: %d", writeSuccess)
-	e2elog.Logf("Failed reads: %d", readFailure)
-	e2elog.Logf("Failed writes: %d", writeFailure)
+	framework.Logf("Successful reads: %d", readSuccess)
+	framework.Logf("Successful writes: %d", writeSuccess)
+	framework.Logf("Failed reads: %d", readFailure)
+	framework.Logf("Failed writes: %d", writeFailure)

 	// TODO: Not sure what the ratio defining a successful test run should be. At time of writing the
 	// test, failures only seem to happen when a race condition occurs (read/write starts, doesn't
@@ -158,10 +157,10 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
 	readRatio := float64(readSuccess) / float64(readSuccess+readFailure)
 	writeRatio := float64(writeSuccess) / float64(writeSuccess+writeFailure)
 	if readRatio < 0.75 {
-		e2elog.Failf("Too many failures reading data. Success ratio: %f", readRatio)
+		framework.Failf("Too many failures reading data. Success ratio: %f", readRatio)
 	}
 	if writeRatio < 0.75 {
-		e2elog.Failf("Too many failures writing data. Success ratio: %f", writeRatio)
+		framework.Failf("Too many failures writing data. Success ratio: %f", writeRatio)
 	}
 }
diff --git a/test/e2e/upgrades/secrets.go b/test/e2e/upgrades/secrets.go
index 7d5b2f354a8..bd66763f170 100644
--- a/test/e2e/upgrades/secrets.go
+++ b/test/e2e/upgrades/secrets.go
@@ -23,7 +23,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	imageutils "k8s.io/kubernetes/test/utils/image"

 	"github.com/onsi/ginkgo"
@@ -57,7 +56,7 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) {
 	ginkgo.By("Creating a secret")
 	var err error
 	if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(t.secret); err != nil {
-		e2elog.Failf("unable to create test secret %s: %v", t.secret.Name, err)
+		framework.Failf("unable to create test secret %s: %v", t.secret.Name, err)
 	}

 	ginkgo.By("Making sure the secret is consumable")
diff --git a/test/e2e/upgrades/storage/BUILD b/test/e2e/upgrades/storage/BUILD
index 9d2aca7741d..c3f727b0bfe 100644
--- a/test/e2e/upgrades/storage/BUILD
+++ b/test/e2e/upgrades/storage/BUILD
@@ -18,7 +18,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/pv:go_default_library",
         "//test/e2e/storage/utils:go_default_library",
diff --git a/test/e2e/upgrades/storage/persistent_volumes.go b/test/e2e/upgrades/storage/persistent_volumes.go
index 2f340e5e9b0..443981a134c 100644
--- a/test/e2e/upgrades/storage/persistent_volumes.go
+++ b/test/e2e/upgrades/storage/persistent_volumes.go
@@ -17,15 +17,14 @@ limitations under the License.
 package storage

 import (
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
+	"k8s.io/kubernetes/test/e2e/upgrades"

 	"github.com/onsi/ginkgo"
-	"k8s.io/kubernetes/test/e2e/upgrades"
 )

 // PersistentVolumeUpgradeTest test that a pv is available before and after a cluster upgrade.
@@ -75,7 +74,7 @@ func (t *PersistentVolumeUpgradeTest) Test(f *framework.Framework, done <-chan s
 func (t *PersistentVolumeUpgradeTest) Teardown(f *framework.Framework) {
 	errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, nil, t.pvc)
 	if len(errs) > 0 {
-		e2elog.Failf("Failed to delete 1 or more PVs/PVCs. Errors: %v", utilerrors.NewAggregate(errs))
+		framework.Failf("Failed to delete 1 or more PVs/PVCs. Errors: %v", utilerrors.NewAggregate(errs))
 	}
 }
diff --git a/test/e2e/upgrades/sysctl.go b/test/e2e/upgrades/sysctl.go
index a30199ad858..290b3f8c5f1 100644
--- a/test/e2e/upgrades/sysctl.go
+++ b/test/e2e/upgrades/sysctl.go
@@ -21,7 +21,7 @@ import (

 	"github.com/onsi/ginkgo"

-	v1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"