diff --git a/test/e2e/apps/daemon_restart.go b/test/e2e/apps/daemon_restart.go index 1fcbda516db..2c6c1657635 100644 --- a/test/e2e/apps/daemon_restart.go +++ b/test/e2e/apps/daemon_restart.go @@ -274,7 +274,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { } if len(newKeys.List()) != len(existingKeys.List()) || !newKeys.IsSuperset(existingKeys) { - framework.Failf("RcManager created/deleted pods after restart \n\n %+v", tracker) + e2elog.Failf("RcManager created/deleted pods after restart \n\n %+v", tracker) } }) @@ -312,7 +312,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() { postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector) if postRestarts != preRestarts { framework.DumpNodeDebugInfo(f.ClientSet, badNodes, e2elog.Logf) - framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker) + e2elog.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker) } }) }) diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index da653324c49..2f231f83c5a 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -418,7 +418,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() { case newDS.Spec.Template.Spec.Containers[0].Image: newPods = append(newPods, &pod) default: - framework.Failf("unexpected pod found, image = %s", image) + e2elog.Failf("unexpected pod found, image = %s", image) } } schedulableNodes = framework.GetReadySchedulableNodesOrDie(c) @@ -655,7 +655,7 @@ func canScheduleOnNode(node v1.Node, ds *apps.DaemonSet) bool { nodeInfo.SetNode(&node) fit, _, err := daemon.Predicates(newPod, nodeInfo) if err != nil { - framework.Failf("Can't test DaemonSet predicates for node %s: %v", node.Name, err) + e2elog.Failf("Can't test DaemonSet predicates for node %s: %v", node.Name, err) return false } return fit diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go index 9cd52901909..8be8de310a7 100644 --- a/test/e2e/apps/deployment.go +++ b/test/e2e/apps/deployment.go @@ -222,7 +222,7 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) { } return false, nil }); err != nil { - framework.Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods) + e2elog.Failf("Err : %s\n. 
Failed to remove deployment %s pods : %+v", err, deploymentName, pods) } } @@ -382,14 +382,14 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) { } numPodCreation-- if numPodCreation < 0 { - framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event) + e2elog.Failf("Expect only one pod creation, the second creation event: %#v\n", event) } pod, ok := event.Object.(*v1.Pod) if !ok { - framework.Failf("Expect event Object to be a pod") + e2elog.Failf("Expect event Object to be a pod") } if pod.Spec.Containers[0].Name != RedisImageName { - framework.Failf("Expect the created pod to have container name %s, got pod %#v\n", RedisImageName, pod) + e2elog.Failf("Expect the created pod to have container name %s, got pod %#v\n", RedisImageName, pod) } case <-stopCh: return diff --git a/test/e2e/apps/job.go b/test/e2e/apps/job.go index 3fae9103f46..8c5f1e55a0f 100644 --- a/test/e2e/apps/job.go +++ b/test/e2e/apps/job.go @@ -26,6 +26,7 @@ import ( batchinternal "k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/test/e2e/framework" jobutil "k8s.io/kubernetes/test/e2e/framework/job" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "github.com/onsi/ginkgo" @@ -206,7 +207,7 @@ var _ = SIGDescribe("Job", func() { // updates we need to allow more than backoff+1 // TODO revert this back to above when https://github.com/kubernetes/kubernetes/issues/64787 gets fixed if len(pods.Items) < backoff+1 { - framework.Failf("Not enough pod created expected at least %d, got %#v", backoff+1, pods.Items) + e2elog.Failf("Not enough pod created expected at least %d, got %#v", backoff+1, pods.Items) } for _, pod := range pods.Items { gomega.Expect(pod.Status.Phase).To(gomega.Equal(v1.PodFailed)) diff --git a/test/e2e/apps/network_partition.go b/test/e2e/apps/network_partition.go index b942c47d4cb..f5dccf423f3 100644 --- a/test/e2e/apps/network_partition.go +++ b/test/e2e/apps/network_partition.go @@ -70,7 +70,7 @@ func expectNodeReadiness(isReady bool, newNode chan *v1.Node) { } } if !expected { - framework.Failf("Failed to observe node ready status change to %v", isReady) + e2elog.Failf("Failed to observe node ready status change to %v", isReady) } } @@ -120,7 +120,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { // TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed. 
framework.SkipUnlessProviderIs("gke", "aws") if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 { - framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup) + e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup) } }) @@ -155,12 +155,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { return true }) if len(nodes.Items) <= 0 { - framework.Failf("No eligible node were found: %d", len(nodes.Items)) + e2elog.Failf("No eligible node were found: %d", len(nodes.Items)) } node := nodes.Items[0] podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil { - framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) + e2elog.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) } ginkgo.By("Set up watch on node status") @@ -216,7 +216,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { ginkgo.By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers") expectNodeReadiness(true, newNode) if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil { - framework.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err) + e2elog.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err) } }() @@ -227,7 +227,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition") expectNodeReadiness(false, newNode) if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil { - framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err) + e2elog.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err) } }) }) @@ -276,7 +276,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { - framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) + e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) } // sleep a bit, to allow Watch in NodeController to catch up. 
@@ -343,7 +343,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { - framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) + e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) } }) }) @@ -416,7 +416,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { - framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) + e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) } ginkgo.By("waiting for pods to be running again") @@ -464,7 +464,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { - framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) + e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) } }) }) @@ -498,12 +498,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { return true }) if len(nodes.Items) <= 0 { - framework.Failf("No eligible node were found: %d", len(nodes.Items)) + e2elog.Failf("No eligible node were found: %d", len(nodes.Items)) } node := nodes.Items[0] podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} if err := e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReadyOrSucceeded); err != nil { - framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) + e2elog.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) } pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts) framework.ExpectNoError(err) @@ -609,7 +609,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { return framework.NodeHasTaint(c, node.Name, nodepkg.UnreachableTaintTemplate) })) if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil { - framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err) + e2elog.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err) } sleepTime := maxTolerationTime + 20*time.Second @@ -629,7 +629,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { if pod.DeletionTimestamp == nil { seenRunning = append(seenRunning, namespacedName) if shouldBeTerminating { - framework.Failf("Pod %v should have been deleted but was seen running", namespacedName) + e2elog.Failf("Pod %v should have been deleted but was seen running", namespacedName) } } } @@ -643,7 +643,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() { } } if !running { - framework.Failf("Pod %v was evicted even though it shouldn't", neverEvictedPod) + e2elog.Failf("Pod %v was evicted even though it shouldn't", 
neverEvictedPod) } } }) diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index 00f4af454b7..fad415b64ca 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -167,7 +167,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) if err != nil { - framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) + e2elog.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) } } diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index 2b585c69865..a99c40bf659 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -169,7 +169,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) if err != nil { - framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) + e2elog.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) } } diff --git a/test/e2e/apps/statefulset.go b/test/e2e/apps/statefulset.go index 41668813bbc..f00987effae 100644 --- a/test/e2e/apps/statefulset.go +++ b/test/e2e/apps/statefulset.go @@ -741,7 +741,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name) if err := f.WaitForPodRunning(podName); err != nil { - framework.Failf("Pod %v did not start running: %v", podName, err) + e2elog.Failf("Pod %v did not start running: %v", podName, err) } var initialStatefulPodUID types.UID @@ -767,7 +767,7 @@ var _ = SIGDescribe("StatefulSet", func() { return false, nil }) if err != nil { - framework.Failf("Pod %v expected to be re-created at least once", statefulPodName) + e2elog.Failf("Pod %v expected to be re-created at least once", statefulPodName) } ginkgo.By("Removing pod with conflicting port in namespace " + f.Namespace.Name) @@ -803,7 +803,7 @@ var _ = SIGDescribe("StatefulSet", func() { ginkgo.By("getting scale subresource") scale, err := c.AppsV1().StatefulSets(ns).GetScale(ssName, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed to get scale subresource: %v", err) + e2elog.Failf("Failed to get scale subresource: %v", err) } gomega.Expect(scale.Spec.Replicas).To(gomega.Equal(int32(1))) gomega.Expect(scale.Status.Replicas).To(gomega.Equal(int32(1))) @@ -812,14 +812,14 @@ var _ = SIGDescribe("StatefulSet", func() { scale.Spec.Replicas = 2 scaleResult, err := c.AppsV1().StatefulSets(ns).UpdateScale(ssName, scale) if err != nil { - framework.Failf("Failed to put scale subresource: %v", err) + e2elog.Failf("Failed to put scale subresource: %v", err) } gomega.Expect(scaleResult.Spec.Replicas).To(gomega.Equal(int32(2))) ginkgo.By("verifying the statefulset Spec.Replicas was modified") ss, err = c.AppsV1().StatefulSets(ns).Get(ssName, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed to get statefulset resource: %v", err) + e2elog.Failf("Failed to get statefulset resource: %v", err) } 
gomega.Expect(*(ss.Spec.Replicas)).To(gomega.Equal(int32(2))) }) @@ -880,7 +880,7 @@ func kubectlExecWithRetries(args ...string) (out string) { } e2elog.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out) } - framework.Failf("Failed to execute \"%v\" with retries: %v", args, err) + e2elog.Failf("Failed to execute \"%v\" with retries: %v", args, err) return } @@ -917,7 +917,7 @@ func (c *clusterAppTester) run() { ginkgo.By("Reading value under foo from member with index 2") if err := pollReadWithTimeout(c.statefulPod, 2, "foo", "bar"); err != nil { - framework.Failf("%v", err) + e2elog.Failf("%v", err) } } diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index 8fb2d3f6f75..f0e584017ae 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -408,7 +408,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { } if hasServiceAccountTokenVolume != tc.ExpectTokenVolume { - framework.Failf("%s: expected volume=%v, got %v (%#v)", tc.PodName, tc.ExpectTokenVolume, hasServiceAccountTokenVolume, createdPod) + e2elog.Failf("%s: expected volume=%v, got %v (%#v)", tc.PodName, tc.ExpectTokenVolume, hasServiceAccountTokenVolume, createdPod) } else { e2elog.Logf("pod %s service account token volume mount: %v", tc.PodName, hasServiceAccountTokenVolume) } @@ -427,7 +427,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { "ca.crt": string(cfg.TLSClientConfig.CAData), }, }); err != nil && !apierrors.IsAlreadyExists(err) { - framework.Failf("Unexpected err creating kube-ca-crt: %v", err) + e2elog.Failf("Unexpected err creating kube-ca-crt: %v", err) } tenMin := int64(10 * 60) @@ -493,7 +493,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { e2elog.Logf("created pod") if !e2epod.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) { - framework.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name) + e2elog.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name) } e2elog.Logf("pod is ready") @@ -516,7 +516,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { } return true, nil }); err != nil { - framework.Failf("Unexpected error: %v\n%s", err, logs) + e2elog.Failf("Unexpected error: %v\n%s", err, logs) } }) }) diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 8bd49f0c12f..6bb60a7598e 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -210,7 +210,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 0 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() { framework.SkipUnlessProviderIs("gke") if gpuType == "" { - framework.Failf("TEST_GPU_TYPE not defined") + e2elog.Failf("TEST_GPU_TYPE not defined") return } @@ -237,7 +237,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { ginkgo.It(fmt.Sprintf("Should scale up GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() { framework.SkipUnlessProviderIs("gke") if gpuType == "" { - framework.Failf("TEST_GPU_TYPE not defined") + e2elog.Failf("TEST_GPU_TYPE not defined") return } @@ -267,7 +267,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { ginkgo.It(fmt.Sprintf("Should not scale GPU pool up if pod does not require GPUs [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() { framework.SkipUnlessProviderIs("gke") if gpuType == "" { - 
framework.Failf("TEST_GPU_TYPE not defined") + e2elog.Failf("TEST_GPU_TYPE not defined") return } @@ -296,7 +296,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { ginkgo.It(fmt.Sprintf("Should scale down GPU pool from 1 [GpuType:%s] [Feature:ClusterSizeAutoscalingGpu]", gpuType), func() { framework.SkipUnlessProviderIs("gke") if gpuType == "" { - framework.Failf("TEST_GPU_TYPE not defined") + e2elog.Failf("TEST_GPU_TYPE not defined") return } @@ -498,7 +498,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() { defer func() { errs := framework.PVPVCCleanup(c, f.Namespace.Name, pv, pvc) if len(errs) > 0 { - framework.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) + e2elog.Failf("failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) } pv, pvc = nil, nil if diskName != "" { @@ -1300,7 +1300,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id) } } - framework.Failf("Failed to reserve memory within timeout") + e2elog.Failf("Failed to reserve memory within timeout") return nil } @@ -1871,7 +1871,7 @@ func addKubeSystemPdbs(f *framework.Framework) (func(), error) { } } if finalErr != nil { - framework.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr) + e2elog.Failf("Error during PodDisruptionBudget cleanup: %v", finalErr) } } diff --git a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go index 1727b29c711..c337e64745c 100644 --- a/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go +++ b/test/e2e/autoscaling/custom_metrics_stackdriver_autoscaling.go @@ -242,33 +242,33 @@ func (tc *CustomMetricTestCase) Run() { ts, err := google.DefaultTokenSource(oauth2.NoContext) e2elog.Logf("Couldn't get application default credentials, %v", err) if err != nil { - framework.Failf("Error accessing application default credentials, %v", err) + e2elog.Failf("Error accessing application default credentials, %v", err) } client := oauth2.NewClient(oauth2.NoContext, ts) */ gcmService, err := gcm.New(client) if err != nil { - framework.Failf("Failed to create gcm service, %v", err) + e2elog.Failf("Failed to create gcm service, %v", err) } // Set up a cluster: create a custom metric and set up k8s-sd adapter err = monitoring.CreateDescriptors(gcmService, projectID) if err != nil { - framework.Failf("Failed to create metric descriptor: %v", err) + e2elog.Failf("Failed to create metric descriptor: %v", err) } defer monitoring.CleanupDescriptors(gcmService, projectID) err = monitoring.CreateAdapter(monitoring.AdapterDefault) if err != nil { - framework.Failf("Failed to set up: %v", err) + e2elog.Failf("Failed to set up: %v", err) } defer monitoring.CleanupAdapter(monitoring.AdapterDefault) // Run application that exports the metric err = createDeploymentToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod) if err != nil { - framework.Failf("Failed to create stackdriver-exporter pod: %v", err) + e2elog.Failf("Failed to create stackdriver-exporter pod: %v", err) } defer cleanupDeploymentsToScale(tc.framework, tc.kubeClient, tc.deployment, tc.pod) @@ -278,7 +278,7 @@ func (tc *CustomMetricTestCase) Run() { // Autoscale the deployment _, err = tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Create(tc.hpa) if err != nil { - framework.Failf("Failed to create HPA: %v", err) + 
e2elog.Failf("Failed to create HPA: %v", err) } defer tc.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(tc.framework.Namespace.ObjectMeta.Name).Delete(tc.hpa.ObjectMeta.Name, &metav1.DeleteOptions{}) @@ -442,13 +442,13 @@ func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, t err := wait.PollImmediate(interval, timeout, func() (bool, error) { deployment, err := cs.AppsV1().Deployments(namespace).Get(deploymentName, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed to get replication controller %s: %v", deployment, err) + e2elog.Failf("Failed to get replication controller %s: %v", deployment, err) } replicas := int(deployment.Status.ReadyReplicas) e2elog.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas) return replicas == desiredReplicas, nil // Expected number of replicas found. Exit. }) if err != nil { - framework.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas) + e2elog.Failf("Timeout waiting %v for %v replicas", timeout, desiredReplicas) } } diff --git a/test/e2e/cloud/nodes.go b/test/e2e/cloud/nodes.go index 6364d79fe07..ddb73054fde 100644 --- a/test/e2e/cloud/nodes.go +++ b/test/e2e/cloud/nodes.go @@ -52,7 +52,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() { err := framework.DeleteNodeOnCloudProvider(&nodeToDelete) if err != nil { - framework.Failf("failed to delete node %q, err: %q", nodeToDelete.Name, err) + e2elog.Failf("failed to delete node %q, err: %q", nodeToDelete.Name, err) } newNodes, err := e2enode.CheckReady(c, len(origNodes.Items)-1, 5*time.Minute) @@ -61,9 +61,9 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() { _, err = c.CoreV1().Nodes().Get(nodeToDelete.Name, metav1.GetOptions{}) if err == nil { - framework.Failf("node %q still exists when it should be deleted", nodeToDelete.Name) + e2elog.Failf("node %q still exists when it should be deleted", nodeToDelete.Name) } else if !apierrs.IsNotFound(err) { - framework.Failf("failed to get node %q err: %q", nodeToDelete.Name, err) + e2elog.Failf("failed to get node %q err: %q", nodeToDelete.Name, err) } }) diff --git a/test/e2e/common/autoscaling_utils.go b/test/e2e/common/autoscaling_utils.go index 8ea02875541..647810086f4 100644 --- a/test/e2e/common/autoscaling_utils.go +++ b/test/e2e/common/autoscaling_utils.go @@ -336,25 +336,25 @@ func (rc *ResourceConsumer) GetReplicas() int { replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{}) framework.ExpectNoError(err) if replicationController == nil { - framework.Failf(rcIsNil) + e2elog.Failf(rcIsNil) } return int(replicationController.Status.ReadyReplicas) case KindDeployment: deployment, err := rc.clientSet.AppsV1().Deployments(rc.nsName).Get(rc.name, metav1.GetOptions{}) framework.ExpectNoError(err) if deployment == nil { - framework.Failf(deploymentIsNil) + e2elog.Failf(deploymentIsNil) } return int(deployment.Status.ReadyReplicas) case KindReplicaSet: rs, err := rc.clientSet.AppsV1().ReplicaSets(rc.nsName).Get(rc.name, metav1.GetOptions{}) framework.ExpectNoError(err) if rs == nil { - framework.Failf(rsIsNil) + e2elog.Failf(rsIsNil) } return int(rs.Status.ReadyReplicas) default: - framework.Failf(invalidKind) + e2elog.Failf(invalidKind) } return 0 } @@ -488,7 +488,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st framework.ExpectNoError(replicaset.RunReplicaSet(rsConfig)) break default: - framework.Failf(invalidKind) + 
e2elog.Failf(invalidKind) } ginkgo.By(fmt.Sprintf("Running controller")) diff --git a/test/e2e/common/configmap.go b/test/e2e/common/configmap.go index 28ec14610d6..bb26aa647c7 100644 --- a/test/e2e/common/configmap.go +++ b/test/e2e/common/configmap.go @@ -23,6 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -42,7 +43,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) var err error if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err) } pod := &v1.Pod{ @@ -90,7 +91,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name)) var err error if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err) } pod := &v1.Pod{ diff --git a/test/e2e/common/configmap_volume.go b/test/e2e/common/configmap_volume.go index 072dddb156f..dc909f643dd 100644 --- a/test/e2e/common/configmap_volume.go +++ b/test/e2e/common/configmap_volume.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -133,7 +134,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err) } pod := &v1.Pod{ @@ -220,7 +221,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err) } pod := &v1.Pod{ @@ -338,12 +339,12 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name)) var err error if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err) } ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name)) if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", 
updateConfigMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err) } pod := &v1.Pod{ @@ -458,7 +459,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name)) if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err) } ginkgo.By("waiting to observe update in volume") @@ -486,7 +487,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err) } pod := &v1.Pod{ @@ -594,7 +595,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err) } one := int64(1) @@ -671,7 +672,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item var err error if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err) } one := int64(1) @@ -806,7 +807,7 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err) } //creating a pod with configMap object, but with different key which is not present in configMap object. 
pod := &v1.Pod{ diff --git a/test/e2e/common/container_probe.go b/test/e2e/common/container_probe.go index a0751051bda..585e6fff4bf 100644 --- a/test/e2e/common/container_probe.go +++ b/test/e2e/common/container_probe.go @@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Probing container", func() { e2elog.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime) initialDelay := probeTestInitialDelaySeconds * time.Second if readyTime.Sub(startedTime) < initialDelay { - framework.Failf("Pod became ready before it's %v initial delay", initialDelay) + e2elog.Failf("Pod became ready before it's %v initial delay", initialDelay) } restartCount := getRestartCount(p) @@ -443,7 +443,7 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, e2elog.Logf("Restart count of pod %s/%s is now %d (%v elapsed)", ns, pod.Name, restartCount, time.Since(start)) if restartCount < lastRestartCount { - framework.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d", + e2elog.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d", ns, pod.Name, lastRestartCount, restartCount) } } @@ -459,7 +459,7 @@ func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, // If we expected n restarts (n > 0), fail if we observed < n restarts. if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 && int(observedRestarts) < expectNumRestarts) { - framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d", + e2elog.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d", ns, pod.Name, expectNumRestarts, observedRestarts) } } diff --git a/test/e2e/common/expansion.go b/test/e2e/common/expansion.go index 8ec7707f185..f86878c74e2 100644 --- a/test/e2e/common/expansion.go +++ b/test/e2e/common/expansion.go @@ -482,14 +482,14 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { cmd := "touch /volume_mount/mypath/foo/test.log" _, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd) if err != nil { - framework.Failf("expected to be able to write to subpath") + e2elog.Failf("expected to be able to write to subpath") } ginkgo.By("test for file in mounted path") cmd = "test -f /subpath_mount/test.log" _, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd) if err != nil { - framework.Failf("expected to be able to verify file") + e2elog.Failf("expected to be able to verify file") } ginkgo.By("updating the annotation value") @@ -629,13 +629,13 @@ var _ = framework.KubeDescribe("Variable Expansion", func() { cmd := "test -f /volume_mount/foo/test.log" _, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd) if err != nil { - framework.Failf("expected to be able to verify old file exists") + e2elog.Failf("expected to be able to verify old file exists") } cmd = "test ! 
-f /volume_mount/newsubpath/test.log" _, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd) if err != nil { - framework.Failf("expected to be able to verify new file does not exist") + e2elog.Failf("expected to be able to verify new file does not exist") } }) }) diff --git a/test/e2e/common/kubelet_etc_hosts.go b/test/e2e/common/kubelet_etc_hosts.go index 08798823b0a..215a6f747c2 100644 --- a/test/e2e/common/kubelet_etc_hosts.go +++ b/test/e2e/common/kubelet_etc_hosts.go @@ -25,6 +25,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" ) @@ -136,11 +137,11 @@ func assertManagedStatus( } if expectedIsManaged { - framework.Failf( + e2elog.Failf( "/etc/hosts file should be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q", name, retryCount, etcHostsContent) } else { - framework.Failf( + e2elog.Failf( "/etc/hosts file should no be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q", name, retryCount, etcHostsContent) } diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go index e8d5c02224a..cf9f543b476 100644 --- a/test/e2e/common/pods.go +++ b/test/e2e/common/pods.go @@ -73,7 +73,7 @@ func testHostIP(podClient *framework.PodClient, pod *v1.Pod) { break } if time.Since(t) >= hostIPTimeout { - framework.Failf("Gave up waiting for hostIP of pod %s after %v seconds", + e2elog.Failf("Gave up waiting for hostIP of pod %s after %v seconds", p.Name, time.Since(t).Seconds()) } e2elog.Logf("Retrying to get the hostIP of pod %s", p.Name) @@ -91,19 +91,19 @@ func startPodAndGetBackOffs(podClient *framework.PodClient, pod *v1.Pod, sleepAm ginkgo.By("getting restart delay-0") _, err := getRestartDelay(podClient, podName, containerName) if err != nil { - framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) + e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } ginkgo.By("getting restart delay-1") delay1, err := getRestartDelay(podClient, podName, containerName) if err != nil { - framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) + e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } ginkgo.By("getting restart delay-2") delay2, err := getRestartDelay(podClient, podName, containerName) if err != nil { - framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) + e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } return delay1, delay2 } @@ -265,13 +265,13 @@ var _ = framework.KubeDescribe("Pods", func() { select { case event, _ := <-w.ResultChan(): if event.Type != watch.Added { - framework.Failf("Failed to observe pod creation: %v", event) + e2elog.Failf("Failed to observe pod creation: %v", event) } case <-time.After(framework.PodStartTimeout): - framework.Failf("Timeout while waiting for pod creation") + e2elog.Failf("Timeout while waiting for pod creation") } case <-time.After(10 * time.Second): - framework.Failf("Timeout while waiting to observe pod list") + e2elog.Failf("Timeout while waiting to observe pod list") } // We need to wait for the pod to be running, otherwise the deletion @@ -319,14 +319,14 @@ var _ = framework.KubeDescribe("Pods", func() { deleted = true case watch.Error: e2elog.Logf("received a watch error: %v", event.Object) - 
framework.Failf("watch closed with error") + e2elog.Failf("watch closed with error") } case <-timer: - framework.Failf("timed out waiting for pod deletion") + e2elog.Failf("timed out waiting for pod deletion") } } if !deleted { - framework.Failf("Failed to observe pod deletion") + e2elog.Failf("Failed to observe pod deletion") } gomega.Expect(lastPod.DeletionTimestamp).ToNot(gomega.BeNil()) @@ -574,7 +574,7 @@ var _ = framework.KubeDescribe("Pods", func() { url := req.URL() ws, err := framework.OpenWebSocketForURL(url, config, []string{"channel.k8s.io"}) if err != nil { - framework.Failf("Failed to open websocket to %s: %v", url.String(), err) + e2elog.Failf("Failed to open websocket to %s: %v", url.String(), err) } defer ws.Close() @@ -586,7 +586,7 @@ var _ = framework.KubeDescribe("Pods", func() { if err == io.EOF { break } - framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err) + e2elog.Failf("Failed to read completely from websocket %s: %v", url.String(), err) } if len(msg) == 0 { continue @@ -596,7 +596,7 @@ var _ = framework.KubeDescribe("Pods", func() { // skip an empty message on stream other than stdout continue } else { - framework.Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg) + e2elog.Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg) } } @@ -653,7 +653,7 @@ var _ = framework.KubeDescribe("Pods", func() { ws, err := framework.OpenWebSocketForURL(url, config, []string{"binary.k8s.io"}) if err != nil { - framework.Failf("Failed to open websocket to %s: %v", url.String(), err) + e2elog.Failf("Failed to open websocket to %s: %v", url.String(), err) } defer ws.Close() buf := &bytes.Buffer{} @@ -663,7 +663,7 @@ var _ = framework.KubeDescribe("Pods", func() { if err == io.EOF { break } - framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err) + e2elog.Failf("Failed to read completely from websocket %s: %v", url.String(), err) } if len(strings.TrimSpace(string(msg))) == 0 { continue @@ -671,7 +671,7 @@ var _ = framework.KubeDescribe("Pods", func() { buf.Write(msg) } if buf.String() != "container is alive\n" { - framework.Failf("Unexpected websocket logs:\n%s", buf.String()) + e2elog.Failf("Unexpected websocket logs:\n%s", buf.String()) } }) @@ -708,11 +708,11 @@ var _ = framework.KubeDescribe("Pods", func() { ginkgo.By("get restart delay after image update") delayAfterUpdate, err := getRestartDelay(podClient, podName, containerName) if err != nil { - framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) + e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } if delayAfterUpdate > 2*delay2 || delayAfterUpdate > 2*delay1 { - framework.Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay1, delay2) + e2elog.Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay1, delay2) } }) @@ -748,7 +748,7 @@ var _ = framework.KubeDescribe("Pods", func() { for i := 0; i < 3; i++ { delay1, err = getRestartDelay(podClient, podName, containerName) if err != nil { - framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) + e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } if delay1 < kubelet.MaxContainerBackOff { @@ -757,17 +757,17 @@ var _ = 
framework.KubeDescribe("Pods", func() { } if (delay1 < kubelet.MaxContainerBackOff) || (delay1 > maxBackOffTolerance) { - framework.Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1) + e2elog.Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1) } ginkgo.By("getting restart delay after a capped delay") delay2, err := getRestartDelay(podClient, podName, containerName) if err != nil { - framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) + e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName) } if delay2 < kubelet.MaxContainerBackOff || delay2 > maxBackOffTolerance { // syncloop cumulative drift - framework.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2) + e2elog.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2) } }) diff --git a/test/e2e/common/projected_combined.go b/test/e2e/common/projected_combined.go index 1fbb638f1b3..55d9d0d17ed 100644 --- a/test/e2e/common/projected_combined.go +++ b/test/e2e/common/projected_combined.go @@ -23,6 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -63,11 +64,11 @@ var _ = ginkgo.Describe("[sig-storage] Projected combined", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err) } ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { - framework.Failf("unable to create test secret %s: %v", secret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", secret.Name, err) } pod := projectedAllVolumeBasePod(podName, secretName, configMapName, nil, nil) diff --git a/test/e2e/common/projected_configmap.go b/test/e2e/common/projected_configmap.go index 0492b5a5792..1b54ec345de 100644 --- a/test/e2e/common/projected_configmap.go +++ b/test/e2e/common/projected_configmap.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" @@ -133,7 +134,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { ginkgo.By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name)) var err error if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err) } pod := &v1.Pod{ @@ -248,12 +249,12 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name)) var err error if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil { - 
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err) } ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name)) if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err) } pod := &v1.Pod{ @@ -386,7 +387,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name)) if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err) } ginkgo.By("waiting to observe update in volume") @@ -414,7 +415,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() { ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err) } pod := &v1.Pod{ @@ -521,7 +522,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name)) var err error if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err) } pod := &v1.Pod{ @@ -603,7 +604,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup in var err error if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err) } pod := &v1.Pod{ diff --git a/test/e2e/common/projected_secret.go b/test/e2e/common/projected_secret.go index 416b6c8ce93..62dcc5e93e3 100644 --- a/test/e2e/common/projected_secret.go +++ b/test/e2e/common/projected_secret.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" @@ -95,7 +96,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() { ) if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil { - framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err) + e2elog.Failf("unable to create new namespace %s: %v", namespace2.Name, err) } secret2 := secretForTest(namespace2.Name, secret2Name) @@ -103,7 +104,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() { "this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"), } if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); 
err != nil { - framework.Failf("unable to create test secret %s: %v", secret2.Name, err) + e2elog.Failf("unable to create test secret %s: %v", secret2.Name, err) } doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil) }) @@ -129,7 +130,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { - framework.Failf("unable to create test secret %s: %v", secret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", secret.Name, err) } pod := &v1.Pod{ @@ -256,12 +257,12 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name)) var err error if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil { - framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", deleteSecret.Name, err) } ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name)) if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil { - framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", updateSecret.Name, err) } pod := &v1.Pod{ @@ -394,7 +395,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name)) if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil { - framework.Failf("unable to create test secret %s: %v", createSecret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", createSecret.Name, err) } ginkgo.By("waiting to observe update in volume") @@ -436,7 +437,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) var err error if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { - framework.Failf("unable to create test secret %s: %v", secret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", secret.Name, err) } pod := &v1.Pod{ @@ -514,7 +515,7 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) { ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name)) var err error if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { - framework.Failf("unable to create test secret %s: %v", secret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", secret.Name, err) } pod := &v1.Pod{ diff --git a/test/e2e/common/runtime.go b/test/e2e/common/runtime.go index 82b6e3a9afb..23da0e58581 100644 --- a/test/e2e/common/runtime.go +++ b/test/e2e/common/runtime.go @@ -348,7 +348,7 @@ while true; do sleep 1; done if i < flakeRetry { e2elog.Logf("No.%d attempt failed: %v, retrying...", i, err) } else { - framework.Failf("All %d attempts failed: %v", flakeRetry, err) + e2elog.Failf("All %d attempts failed: %v", flakeRetry, err) } } } diff --git a/test/e2e/common/secrets.go b/test/e2e/common/secrets.go index 33c896e700c..d654f9602af 100644 --- a/test/e2e/common/secrets.go +++ b/test/e2e/common/secrets.go @@ -23,6 +23,7 @@ import ( metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -43,7 +44,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { - framework.Failf("unable to create test secret %s: %v", secret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", secret.Name, err) } pod := &v1.Pod{ @@ -91,7 +92,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() { ginkgo.By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name)) var err error if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { - framework.Failf("unable to create test secret %s: %v", secret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", secret.Name, err) } pod := &v1.Pod{ diff --git a/test/e2e/common/secrets_volume.go b/test/e2e/common/secrets_volume.go index b355812cc58..a64bb01ec01 100644 --- a/test/e2e/common/secrets_volume.go +++ b/test/e2e/common/secrets_volume.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" @@ -100,7 +101,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { ) if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil { - framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err) + e2elog.Failf("unable to create new namespace %s: %v", namespace2.Name, err) } secret2 := secretForTest(namespace2.Name, secret2Name) @@ -108,7 +109,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { "this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"), } if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil { - framework.Failf("unable to create test secret %s: %v", secret2.Name, err) + e2elog.Failf("unable to create test secret %s: %v", secret2.Name, err) } doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil) }) @@ -134,7 +135,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { - framework.Failf("unable to create test secret %s: %v", secret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", secret.Name, err) } pod := &v1.Pod{ @@ -245,12 +246,12 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name)) var err error if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil { - framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", deleteSecret.Name, err) } ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name)) if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil { - framework.Failf("unable to 
create test secret %s: %v", updateSecret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", updateSecret.Name, err) } pod := &v1.Pod{ @@ -359,7 +360,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() { ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name)) if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil { - framework.Failf("unable to create test secret %s: %v", createSecret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", createSecret.Name, err) } ginkgo.By("waiting to observe update in volume") @@ -415,7 +416,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { - framework.Failf("unable to create test secret %s: %v", secret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", secret.Name, err) } pod := &v1.Pod{ @@ -484,7 +485,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) { ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { - framework.Failf("unable to create test secret %s: %v", secret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", secret.Name, err) } pod := &v1.Pod{ @@ -602,7 +603,7 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name)) var err error if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { - framework.Failf("unable to create test secret %s: %v", secret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", secret.Name, err) } //creating a pod with secret object, with the key which is not present in secret object. 
pod := &v1.Pod{ diff --git a/test/e2e/common/security_context.go b/test/e2e/common/security_context.go index 225b274a1b8..9ffc38e7b15 100644 --- a/test/e2e/common/security_context.go +++ b/test/e2e/common/security_context.go @@ -260,12 +260,12 @@ var _ = framework.KubeDescribe("Security Context", func() { podName := createAndWaitUserPod(false) logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { - framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) + e2elog.Failf("GetPodLogs for pod %q failed: %v", podName, err) } e2elog.Logf("Got logs for pod %q: %q", podName, logs) if !strings.Contains(logs, "Operation not permitted") { - framework.Failf("unprivileged container shouldn't be able to create dummy device") + e2elog.Failf("unprivileged container shouldn't be able to create dummy device") } }) }) @@ -312,7 +312,7 @@ var _ = framework.KubeDescribe("Security Context", func() { ginkgo.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]", func() { podName := "alpine-nnp-nil-" + string(uuid.NewUUID()) if err := createAndMatchOutput(podName, "Effective uid: 0", nil, 1000); err != nil { - framework.Failf("Match output for pod %q failed: %v", podName, err) + e2elog.Failf("Match output for pod %q failed: %v", podName, err) } }) @@ -328,7 +328,7 @@ var _ = framework.KubeDescribe("Security Context", func() { podName := "alpine-nnp-false-" + string(uuid.NewUUID()) apeFalse := false if err := createAndMatchOutput(podName, "Effective uid: 1000", &apeFalse, 1000); err != nil { - framework.Failf("Match output for pod %q failed: %v", podName, err) + e2elog.Failf("Match output for pod %q failed: %v", podName, err) } }) @@ -345,7 +345,7 @@ var _ = framework.KubeDescribe("Security Context", func() { podName := "alpine-nnp-true-" + string(uuid.NewUUID()) apeTrue := true if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, 1000); err != nil { - framework.Failf("Match output for pod %q failed: %v", podName, err) + e2elog.Failf("Match output for pod %q failed: %v", podName, err) } }) }) diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go index b8b9a4484eb..2e40d372e9c 100644 --- a/test/e2e/common/util.go +++ b/test/e2e/common/util.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/util/wait" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -100,11 +101,11 @@ func SubstituteImageName(content string) string { contentWithImageName := new(bytes.Buffer) tmpl, err := template.New("imagemanifest").Parse(content) if err != nil { - framework.Failf("Failed Parse the template: %v", err) + e2elog.Failf("Failed Parse the template: %v", err) } err = tmpl.Execute(contentWithImageName, testImages) if err != nil { - framework.Failf("Failed executing template: %v", err) + e2elog.Failf("Failed executing template: %v", err) } return contentWithImageName.String() } diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 54fee752ce8..c0537612882 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -92,11 +92,11 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { metav1.NamespacePublic, }) if err != nil { - framework.Failf("Error deleting orphaned namespaces: %v", err) + e2elog.Failf("Error deleting orphaned namespaces: %v", err) } klog.Infof("Waiting for deletion of the following namespaces: %v", deleted) if err := 
framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil { - framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err) + e2elog.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err) } } @@ -123,7 +123,7 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte { framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem) framework.LogFailedContainers(c, metav1.NamespaceSystem, e2elog.Logf) runKubernetesServiceTestContainer(c, metav1.NamespaceDefault) - framework.Failf("Error waiting for all pods to be running and ready: %v", err) + e2elog.Failf("Error waiting for all pods to be running and ready: %v", err) } if err := framework.WaitForDaemonSets(c, metav1.NamespaceSystem, int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemDaemonsetStartupTimeout); err != nil { diff --git a/test/e2e/examples.go b/test/e2e/examples.go index 24a3baf0b3c..ecc751eecf0 100644 --- a/test/e2e/examples.go +++ b/test/e2e/examples.go @@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() { } wg.Wait() if !passed { - framework.Failf("At least one liveness example failed. See the logs above.") + e2elog.Failf("At least one liveness example failed. See the logs above.") } }) }) diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index b8dc2f4a39f..21a7503cbb4 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -473,7 +473,7 @@ func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) { for i := 0; i < 3; i++ { j.Ingress, err = j.Client.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{}) if err != nil { - framework.Failf("failed to get ingress %s/%s: %v", ns, name, err) + e2elog.Failf("failed to get ingress %s/%s: %v", ns, name, err) } update(j.Ingress) j.Ingress, err = j.runUpdate(j.Ingress) @@ -482,10 +482,10 @@ func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) { return } if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) { - framework.Failf("failed to update ingress %s/%s: %v", ns, name, err) + e2elog.Failf("failed to update ingress %s/%s: %v", ns, name, err) } } - framework.Failf("too many retries updating ingress %s/%s", ns, name) + e2elog.Failf("too many retries updating ingress %s/%s", ns, name) } // AddHTTPS updates the ingress to add this secret for these hosts. 
@@ -543,7 +543,7 @@ func (j *TestJig) GetRootCA(secretName string) (rootCA []byte) {
 	var ok bool
 	rootCA, ok = j.RootCAs[secretName]
 	if !ok {
-		framework.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
+		e2elog.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
 	}
 	return
 }
@@ -675,7 +675,7 @@ func (j *TestJig) pollIngressWithCert(ing *networkingv1beta1.Ingress, address st
 // WaitForIngress returns when it gets the first 200 response
 func (j *TestJig) WaitForIngress(waitForNodePort bool) {
 	if err := j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, framework.LoadBalancerPollTimeout); err != nil {
-		framework.Failf("error in waiting for ingress to get an address: %s", err)
+		e2elog.Failf("error in waiting for ingress to get an address: %s", err)
 	}
 }
@@ -688,7 +688,7 @@ func (j *TestJig) WaitForIngressToStable() {
 		}
 		return true, nil
 	}); err != nil {
-		framework.Failf("error in waiting for ingress to stablize: %v", err)
+		e2elog.Failf("error in waiting for ingress to stablize: %v", err)
 	}
 }
@@ -814,7 +814,7 @@ func (j *TestJig) GetDistinctResponseFromIngress() (sets.String, error) {
 	// Wait for the loadbalancer IP.
 	address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, framework.LoadBalancerPollTimeout)
 	if err != nil {
-		framework.Failf("Ingress failed to acquire an IP address within %v", framework.LoadBalancerPollTimeout)
+		e2elog.Failf("Ingress failed to acquire an IP address within %v", framework.LoadBalancerPollTimeout)
 	}
 	responses := sets.NewString()
 	timeoutClient := &http.Client{Timeout: IngressReqTimeout}
@@ -858,7 +858,7 @@ func (cont *NginxIngressController) Init() {
 	pods, err := cont.Client.CoreV1().Pods(cont.Ns).List(metav1.ListOptions{LabelSelector: sel.String()})
 	framework.ExpectNoError(err)
 	if len(pods.Items) == 0 {
-		framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
+		e2elog.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
 	}
 	cont.pod = &pods.Items[0]
 	cont.externalIP, err = framework.GetHostExternalAddress(cont.Client, cont.pod)
diff --git a/test/e2e/framework/providers/gce/firewall.go b/test/e2e/framework/providers/gce/firewall.go
index 6be2cb243d9..25df218f0f8 100644
--- a/test/e2e/framework/providers/gce/firewall.go
+++ b/test/e2e/framework/providers/gce/firewall.go
@@ -43,7 +43,7 @@ func MakeFirewallNameForLBService(name string) string {
 // ConstructFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
 func ConstructFirewallForLBService(svc *v1.Service, nodeTag string) *compute.Firewall {
 	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
-		framework.Failf("can not construct firewall rule for non-loadbalancer type service")
+		e2elog.Failf("can not construct firewall rule for non-loadbalancer type service")
 	}
 	fw := compute.Firewall{}
 	fw.Name = MakeFirewallNameForLBService(cloudprovider.DefaultLoadBalancerName(svc))
@@ -71,7 +71,7 @@ func MakeHealthCheckFirewallNameForLBService(clusterID, name string, isNodesHeal
 // ConstructHealthCheckFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
 func ConstructHealthCheckFirewallForLBService(clusterID string, svc *v1.Service, nodeTag string, isNodesHealthCheck bool) *compute.Firewall {
 	if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
-		framework.Failf("can not construct firewall rule for non-loadbalancer type service")
+		e2elog.Failf("can not construct firewall rule for non-loadbalancer type service")
 	}
fw := compute.Firewall{} fw.Name = MakeHealthCheckFirewallNameForLBService(clusterID, cloudprovider.DefaultLoadBalancerName(svc), isNodesHealthCheck) diff --git a/test/e2e/framework/providers/gce/gce.go b/test/e2e/framework/providers/gce/gce.go index 65e4862e1ce..e0167d1eda4 100644 --- a/test/e2e/framework/providers/gce/gce.go +++ b/test/e2e/framework/providers/gce/gce.go @@ -262,7 +262,7 @@ func (p *Provider) CleanupServiceResources(c clientset.Interface, loadBalancerNa } return true, nil }); pollErr != nil { - framework.Failf("Failed to cleanup service GCE resources.") + e2elog.Failf("Failed to cleanup service GCE resources.") } } @@ -332,7 +332,7 @@ func GetInstanceTags(cloudConfig framework.CloudConfig, instanceName string) *co res, err := gceCloud.ComputeServices().GA.Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone, instanceName).Do() if err != nil { - framework.Failf("Failed to get instance tags for %v: %v", instanceName, err) + e2elog.Failf("Failed to get instance tags for %v: %v", instanceName, err) } return res.Tags } @@ -346,7 +346,7 @@ func SetInstanceTags(cloudConfig framework.CloudConfig, instanceName, zone strin cloudConfig.ProjectID, zone, instanceName, &compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do() if err != nil { - framework.Failf("failed to set instance tags: %v", err) + e2elog.Failf("failed to set instance tags: %v", err) } e2elog.Logf("Sent request to set tags %v on instance: %v", tags, instanceName) return resTags.Items diff --git a/test/e2e/framework/providers/gce/ingress.go b/test/e2e/framework/providers/gce/ingress.go index 6e4f6aeaf04..06e839b4ae5 100644 --- a/test/e2e/framework/providers/gce/ingress.go +++ b/test/e2e/framework/providers/gce/ingress.go @@ -788,12 +788,12 @@ func (cont *IngressController) CreateStaticIP(name string) string { e2elog.Logf("Failed to delete static ip %v: %v", name, delErr) } } - framework.Failf("Failed to allocate static ip %v: %v", name, err) + e2elog.Failf("Failed to allocate static ip %v: %v", name, err) } ip, err := gceCloud.GetGlobalAddress(name) if err != nil { - framework.Failf("Failed to get newly created static ip %v: %v", name, err) + e2elog.Failf("Failed to get newly created static ip %v: %v", name, err) } cont.staticIPName = ip.Name diff --git a/test/e2e/framework/providers/gce/recreate_node.go b/test/e2e/framework/providers/gce/recreate_node.go index 4bf7ec70bf5..1ac18ab7811 100644 --- a/test/e2e/framework/providers/gce/recreate_node.go +++ b/test/e2e/framework/providers/gce/recreate_node.go @@ -66,7 +66,7 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() { } if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) { - framework.Failf("At least one pod wasn't running and ready or succeeded at test start.") + e2elog.Failf("At least one pod wasn't running and ready or succeeded at test start.") } }) @@ -97,12 +97,12 @@ var _ = ginkgo.Describe("Recreate [Feature:Recreate]", func() { func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace string, nodes []v1.Node, podNames []string) { err := RecreateNodes(c, nodes) if err != nil { - framework.Failf("Test failed; failed to start the restart instance group command.") + e2elog.Failf("Test failed; failed to start the restart instance group command.") } err = WaitForNodeBootIdsToChange(c, nodes, framework.RecreateNodeReadyAgainTimeout) if err != nil { - framework.Failf("Test failed; failed to recreate at least one node in %v.", 
framework.RecreateNodeReadyAgainTimeout) + e2elog.Failf("Test failed; failed to recreate at least one node in %v.", framework.RecreateNodeReadyAgainTimeout) } nodesAfter, err := e2enode.CheckReady(c, len(nodes), framework.RestartNodeReadyAgainTimeout) @@ -110,7 +110,7 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace e2elog.Logf("Got the following nodes after recreate: %v", nodeNames(nodesAfter)) if len(nodes) != len(nodesAfter) { - framework.Failf("Had %d nodes before nodes were recreated, but now only have %d", + e2elog.Failf("Had %d nodes before nodes were recreated, but now only have %d", len(nodes), len(nodesAfter)) } @@ -120,6 +120,6 @@ func testRecreate(c clientset.Interface, ps *testutils.PodStore, systemNamespace framework.ExpectNoError(err) remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart) if !e2epod.CheckPodsRunningReadyOrSucceeded(c, systemNamespace, podNamesAfter, remaining) { - framework.Failf("At least one pod wasn't running and ready after the restart.") + e2elog.Failf("At least one pod wasn't running and ready after the restart.") } } diff --git a/test/e2e/framework/volume/fixtures.go b/test/e2e/framework/volume/fixtures.go index 14109726645..7e66c3f7c81 100644 --- a/test/e2e/framework/volume/fixtures.go +++ b/test/e2e/framework/volume/fixtures.go @@ -247,7 +247,7 @@ func NewRBDServer(cs clientset.Interface, namespace string) (config TestConfig, secret, err := cs.CoreV1().Secrets(config.Namespace).Create(secret) if err != nil { - framework.Failf("Failed to create secrets for Ceph RBD: %v", err) + e2elog.Failf("Failed to create secrets for Ceph RBD: %v", err) } return config, pod, secret, ip @@ -485,7 +485,7 @@ func TestVolumeClient(client clientset.Interface, config TestConfig, fsGroup *in } clientPod, err := podsNamespacer.Create(clientPod) if err != nil { - framework.Failf("Failed to create %s pod: %v", clientPod.Name, err) + e2elog.Failf("Failed to create %s pod: %v", clientPod.Name, err) } framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, clientPod)) diff --git a/test/e2e/gke_local_ssd.go b/test/e2e/gke_local_ssd.go index e1d4d1ce2bd..e343a886a6e 100644 --- a/test/e2e/gke_local_ssd.go +++ b/test/e2e/gke_local_ssd.go @@ -52,7 +52,7 @@ func createNodePoolWithLocalSsds(nodePoolName string) { fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster), "--local-ssd-count=1").CombinedOutput() if err != nil { - framework.Failf("Failed to create node pool %s: Err: %v\n%v", nodePoolName, err, string(out)) + e2elog.Failf("Failed to create node pool %s: Err: %v\n%v", nodePoolName, err, string(out)) } e2elog.Logf("Successfully created node pool %s:\n%v", nodePoolName, string(out)) } diff --git a/test/e2e/gke_node_pools.go b/test/e2e/gke_node_pools.go index 4eb1173d135..d5376cd5f21 100644 --- a/test/e2e/gke_node_pools.go +++ b/test/e2e/gke_node_pools.go @@ -51,21 +51,21 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) { "--num-nodes=2").CombinedOutput() e2elog.Logf("\n%s", string(out)) if err != nil { - framework.Failf("Failed to create node pool %q. Err: %v\n%v", poolName, err, string(out)) + e2elog.Failf("Failed to create node pool %q. Err: %v\n%v", poolName, err, string(out)) } e2elog.Logf("Successfully created node pool %q.", poolName) out, err = exec.Command("gcloud", "container", "node-pools", "list", clusterStr).CombinedOutput() if err != nil { - framework.Failf("Failed to list node pools from cluster %q. 
Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out)) + e2elog.Failf("Failed to list node pools from cluster %q. Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out)) } e2elog.Logf("Node pools:\n%s", string(out)) e2elog.Logf("Checking that 2 nodes have the correct node pool label.") nodeCount := nodesWithPoolLabel(f, poolName) if nodeCount != 2 { - framework.Failf("Wanted 2 nodes with node pool label, got: %v", nodeCount) + e2elog.Failf("Wanted 2 nodes with node pool label, got: %v", nodeCount) } e2elog.Logf("Success, found 2 nodes with correct node pool labels.") @@ -76,21 +76,21 @@ func testCreateDeleteNodePool(f *framework.Framework, poolName string) { "-q").CombinedOutput() e2elog.Logf("\n%s", string(out)) if err != nil { - framework.Failf("Failed to delete node pool %q. Err: %v\n%v", poolName, err, string(out)) + e2elog.Failf("Failed to delete node pool %q. Err: %v\n%v", poolName, err, string(out)) } e2elog.Logf("Successfully deleted node pool %q.", poolName) out, err = exec.Command("gcloud", "container", "node-pools", "list", clusterStr).CombinedOutput() if err != nil { - framework.Failf("\nFailed to list node pools from cluster %q. Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out)) + e2elog.Failf("\nFailed to list node pools from cluster %q. Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out)) } e2elog.Logf("\nNode pools:\n%s", string(out)) e2elog.Logf("Checking that no nodes have the deleted node pool's label.") nodeCount = nodesWithPoolLabel(f, poolName) if nodeCount != 0 { - framework.Failf("Wanted 0 nodes with node pool label, got: %v", nodeCount) + e2elog.Failf("Wanted 0 nodes with node pool label, got: %v", nodeCount) } e2elog.Logf("Success, found no nodes with the deleted node pool's label.") } diff --git a/test/e2e/instrumentation/logging/elasticsearch/utils.go b/test/e2e/instrumentation/logging/elasticsearch/utils.go index be5934f7ed9..274e62e8a0c 100644 --- a/test/e2e/instrumentation/logging/elasticsearch/utils.go +++ b/test/e2e/instrumentation/logging/elasticsearch/utils.go @@ -118,7 +118,7 @@ func (p *esLogProvider) Init() error { return err } if int(statusCode) != 200 { - framework.Failf("Elasticsearch cluster has a bad status: %v", statusCode) + e2elog.Failf("Elasticsearch cluster has a bad status: %v", statusCode) } // Now assume we really are talking to an Elasticsearch instance. diff --git a/test/e2e/instrumentation/logging/generic_soak.go b/test/e2e/instrumentation/logging/generic_soak.go index c2262640e72..e88f3397f9d 100644 --- a/test/e2e/instrumentation/logging/generic_soak.go +++ b/test/e2e/instrumentation/logging/generic_soak.go @@ -123,8 +123,8 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname pods, err := logSoakVerification.WaitFor(totalPods, timeout+largeClusterForgiveness) if err != nil { - framework.Failf("Error in wait... %v", err) + e2elog.Failf("Error in wait... 
%v", err) } else if len(pods) < totalPods { - framework.Failf("Only got %v out of %v", len(pods), totalPods) + e2elog.Failf("Only got %v out of %v", len(pods), totalPods) } } diff --git a/test/e2e/instrumentation/monitoring/cadvisor.go b/test/e2e/instrumentation/monitoring/cadvisor.go index 1af5e4b2a62..460ffbdc728 100644 --- a/test/e2e/instrumentation/monitoring/cadvisor.go +++ b/test/e2e/instrumentation/monitoring/cadvisor.go @@ -75,5 +75,5 @@ func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration) e2elog.Logf("failed to retrieve kubelet stats -\n %v", errors) time.Sleep(cadvisor.SleepDuration) } - framework.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors) + e2elog.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors) } diff --git a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go index ef658765a27..ad6dcebe22c 100644 --- a/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go +++ b/test/e2e/instrumentation/monitoring/custom_metrics_stackdriver.go @@ -58,7 +58,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { kubeClient := f.ClientSet config, err := framework.LoadConfig() if err != nil { - framework.Failf("Failed to load config: %s", err) + e2elog.Failf("Failed to load config: %s", err) } discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(config) cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoveryClient) @@ -73,7 +73,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { kubeClient := f.ClientSet config, err := framework.LoadConfig() if err != nil { - framework.Failf("Failed to load config: %s", err) + e2elog.Failf("Failed to load config: %s", err) } discoveryClient := discovery.NewDiscoveryClientForConfigOrDie(config) cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoveryClient) @@ -88,7 +88,7 @@ var _ = instrumentation.SIGDescribe("Stackdriver Monitoring", func() { kubeClient := f.ClientSet config, err := framework.LoadConfig() if err != nil { - framework.Failf("Failed to load config: %s", err) + e2elog.Failf("Failed to load config: %s", err) } externalMetricsClient := externalclient.NewForConfigOrDie(config) testExternalMetrics(f, kubeClient, externalMetricsClient) @@ -103,32 +103,32 @@ func testCustomMetrics(f *framework.Framework, kubeClient clientset.Interface, c gcmService, err := gcm.New(client) if err != nil { - framework.Failf("Failed to create gcm service, %v", err) + e2elog.Failf("Failed to create gcm service, %v", err) } // Set up a cluster: create a custom metric and set up k8s-sd adapter err = CreateDescriptors(gcmService, projectID) if err != nil { - framework.Failf("Failed to create metric descriptor: %s", err) + e2elog.Failf("Failed to create metric descriptor: %s", err) } defer CleanupDescriptors(gcmService, projectID) err = CreateAdapter(adapterDeployment) if err != nil { - framework.Failf("Failed to set up: %s", err) + e2elog.Failf("Failed to set up: %s", err) } defer CleanupAdapter(adapterDeployment) _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions) if err != nil { - framework.Failf("Failed to create ClusterRoleBindings: %v", err) + e2elog.Failf("Failed to create ClusterRoleBindings: %v", err) } defer kubeClient.RbacV1().ClusterRoleBindings().Delete(HPAPermissions.Name, &metav1.DeleteOptions{}) // Run application that 
exports the metric _, err = createSDExporterPods(f, kubeClient) if err != nil { - framework.Failf("Failed to create stackdriver-exporter pod: %s", err) + e2elog.Failf("Failed to create stackdriver-exporter pod: %s", err) } defer cleanupSDExporterPod(f, kubeClient) @@ -149,33 +149,33 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface, gcmService, err := gcm.New(client) if err != nil { - framework.Failf("Failed to create gcm service, %v", err) + e2elog.Failf("Failed to create gcm service, %v", err) } // Set up a cluster: create a custom metric and set up k8s-sd adapter err = CreateDescriptors(gcmService, projectID) if err != nil { - framework.Failf("Failed to create metric descriptor: %s", err) + e2elog.Failf("Failed to create metric descriptor: %s", err) } defer CleanupDescriptors(gcmService, projectID) // Both deployments - for old and new resource model - expose External Metrics API. err = CreateAdapter(AdapterForOldResourceModel) if err != nil { - framework.Failf("Failed to set up: %s", err) + e2elog.Failf("Failed to set up: %s", err) } defer CleanupAdapter(AdapterForOldResourceModel) _, err = kubeClient.RbacV1().ClusterRoleBindings().Create(HPAPermissions) if err != nil { - framework.Failf("Failed to create ClusterRoleBindings: %v", err) + e2elog.Failf("Failed to create ClusterRoleBindings: %v", err) } defer kubeClient.RbacV1().ClusterRoleBindings().Delete(HPAPermissions.Name, &metav1.DeleteOptions{}) // Run application that exports the metric pod, err := createSDExporterPods(f, kubeClient) if err != nil { - framework.Failf("Failed to create stackdriver-exporter pod: %s", err) + e2elog.Failf("Failed to create stackdriver-exporter pod: %s", err) } defer cleanupSDExporterPod(f, kubeClient) @@ -190,34 +190,34 @@ func testExternalMetrics(f *framework.Framework, kubeClient clientset.Interface, func verifyResponsesFromCustomMetricsAPI(f *framework.Framework, customMetricsClient customclient.CustomMetricsClient, discoveryClient *discovery.DiscoveryClient) { resources, err := discoveryClient.ServerResourcesForGroupVersion("custom.metrics.k8s.io/v1beta1") if err != nil { - framework.Failf("Failed to retrieve a list of supported metrics: %s", err) + e2elog.Failf("Failed to retrieve a list of supported metrics: %s", err) } if !containsResource(resources.APIResources, "*/custom.googleapis.com|"+CustomMetricName) { - framework.Failf("Metric '%s' expected but not received", CustomMetricName) + e2elog.Failf("Metric '%s' expected but not received", CustomMetricName) } if !containsResource(resources.APIResources, "*/custom.googleapis.com|"+UnusedMetricName) { - framework.Failf("Metric '%s' expected but not received", UnusedMetricName) + e2elog.Failf("Metric '%s' expected but not received", UnusedMetricName) } value, err := customMetricsClient.NamespacedMetrics(f.Namespace.Name).GetForObject(schema.GroupKind{Group: "", Kind: "Pod"}, stackdriverExporterPod1, CustomMetricName, labels.NewSelector()) if err != nil { - framework.Failf("Failed query: %s", err) + e2elog.Failf("Failed query: %s", err) } if value.Value.Value() != CustomMetricValue { - framework.Failf("Unexpected metric value for metric %s: expected %v but received %v", CustomMetricName, CustomMetricValue, value.Value) + e2elog.Failf("Unexpected metric value for metric %s: expected %v but received %v", CustomMetricName, CustomMetricValue, value.Value) } filter, err := labels.NewRequirement("name", selection.Equals, []string{stackdriverExporterLabel}) if err != nil { - framework.Failf("Couldn't create a label 
filter") + e2elog.Failf("Couldn't create a label filter") } values, err := customMetricsClient.NamespacedMetrics(f.Namespace.Name).GetForObjects(schema.GroupKind{Group: "", Kind: "Pod"}, labels.NewSelector().Add(*filter), CustomMetricName, labels.NewSelector()) if err != nil { - framework.Failf("Failed query: %s", err) + e2elog.Failf("Failed query: %s", err) } if len(values.Items) != 1 { - framework.Failf("Expected results for exactly 1 pod, but %v results received", len(values.Items)) + e2elog.Failf("Expected results for exactly 1 pod, but %v results received", len(values.Items)) } if values.Items[0].DescribedObject.Name != stackdriverExporterPod1 || values.Items[0].Value.Value() != CustomMetricValue { - framework.Failf("Unexpected metric value for metric %s and pod %s: %v", CustomMetricName, values.Items[0].DescribedObject.Name, values.Items[0].Value.Value()) + e2elog.Failf("Unexpected metric value for metric %s and pod %s: %v", CustomMetricName, values.Items[0].DescribedObject.Name, values.Items[0].Value.Value()) } } @@ -242,16 +242,16 @@ func verifyResponseFromExternalMetricsAPI(f *framework.Framework, externalMetric NamespacedMetrics("dummy"). List("custom.googleapis.com|"+CustomMetricName, labels.NewSelector().Add(*req1, *req2, *req3, *req4, *req5)) if err != nil { - framework.Failf("Failed query: %s", err) + e2elog.Failf("Failed query: %s", err) } if len(values.Items) != 1 { - framework.Failf("Expected exactly one external metric value, but % values received", len(values.Items)) + e2elog.Failf("Expected exactly one external metric value, but % values received", len(values.Items)) } if values.Items[0].MetricName != "custom.googleapis.com|"+CustomMetricName || values.Items[0].Value.Value() != CustomMetricValue || // Check one label just to make sure labels are included values.Items[0].MetricLabels["resource.labels.pod_id"] != string(pod.UID) { - framework.Failf("Unexpected result for metric %s: %v", CustomMetricName, values.Items[0]) + e2elog.Failf("Unexpected result for metric %s: %v", CustomMetricName, values.Items[0]) } } diff --git a/test/e2e/instrumentation/monitoring/prometheus.go b/test/e2e/instrumentation/monitoring/prometheus.go index 2c46ef719ae..1449b4e2214 100644 --- a/test/e2e/instrumentation/monitoring/prometheus.go +++ b/test/e2e/instrumentation/monitoring/prometheus.go @@ -212,7 +212,7 @@ func getInstanceLabelsAvailableForMetric(c clientset.Interface, duration time.Du instanceLabels := make([]string, 0) m, ok := result.(model.Matrix) if !ok { - framework.Failf("Expected matrix response for query '%v', got: %T", query, result) + e2elog.Failf("Expected matrix response for query '%v', got: %T", query, result) return instanceLabels, nil } for _, stream := range m { @@ -373,7 +373,7 @@ func retryUntilSucceeds(validator func() error, timeout time.Duration) { e2elog.Logf(err.Error()) time.Sleep(prometheusSleepBetweenAttempts) } - framework.Failf(err.Error()) + e2elog.Failf(err.Error()) } func getAllNodes(c clientset.Interface) ([]string, error) { diff --git a/test/e2e/instrumentation/monitoring/stackdriver.go b/test/e2e/instrumentation/monitoring/stackdriver.go index 01805628e67..7e67ec6d912 100644 --- a/test/e2e/instrumentation/monitoring/stackdriver.go +++ b/test/e2e/instrumentation/monitoring/stackdriver.go @@ -86,7 +86,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per ts, err := google.DefaultTokenSource(oauth2.NoContext) e2elog.Logf("Couldn't get application default credentials, %v", err) if err != nil { - framework.Failf("Error 
accessing application default credentials, %v", err) + e2elog.Failf("Error accessing application default credentials, %v", err) } client := oauth2.NewClient(oauth2.NoContext, ts) */ diff --git a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go index 8d8e197454e..a25e0a6d4b5 100644 --- a/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go +++ b/test/e2e/instrumentation/monitoring/stackdriver_metadata_agent.go @@ -30,6 +30,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common" ) @@ -68,7 +69,7 @@ func testAgent(f *framework.Framework, kubeClient clientset.Interface) { oauthClient, err := google.DefaultClient(context.Background(), MonitoringScope) if err != nil { - framework.Failf("Failed to create oauth client: %s", err) + e2elog.Failf("Failed to create oauth client: %s", err) } // Create test pod with unique name. @@ -82,22 +83,22 @@ func testAgent(f *framework.Framework, kubeClient clientset.Interface) { resp, err := oauthClient.Get(endpoint) if err != nil { - framework.Failf("Failed to call Stackdriver Metadata API %s", err) + e2elog.Failf("Failed to call Stackdriver Metadata API %s", err) } if resp.StatusCode != 200 { - framework.Failf("Stackdriver Metadata API returned error status: %s", resp.Status) + e2elog.Failf("Stackdriver Metadata API returned error status: %s", resp.Status) } metadataAPIResponse, err := ioutil.ReadAll(resp.Body) if err != nil { - framework.Failf("Failed to read response from Stackdriver Metadata API: %s", err) + e2elog.Failf("Failed to read response from Stackdriver Metadata API: %s", err) } exists, err := verifyPodExists(metadataAPIResponse, uniqueContainerName) if err != nil { - framework.Failf("Failed to process response from Stackdriver Metadata API: %s", err) + e2elog.Failf("Failed to process response from Stackdriver Metadata API: %s", err) } if !exists { - framework.Failf("Missing Metadata for container %q", uniqueContainerName) + e2elog.Failf("Missing Metadata for container %q", uniqueContainerName) } } diff --git a/test/e2e/kubectl/kubectl.go b/test/e2e/kubectl/kubectl.go index 0556e030d8b..d3c20d6b51e 100644 --- a/test/e2e/kubectl/kubectl.go +++ b/test/e2e/kubectl/kubectl.go @@ -185,17 +185,17 @@ var _ = SIGDescribe("Kubectl alpha client", func() { ginkgo.By("verifying the CronJob " + cjName + " was created") sj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed getting CronJob %s: %v", cjName, err) + e2elog.Failf("Failed getting CronJob %s: %v", cjName, err) } if sj.Spec.Schedule != schedule { - framework.Failf("Failed creating a CronJob with correct schedule %s", schedule) + e2elog.Failf("Failed creating a CronJob with correct schedule %s", schedule) } containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers if checkContainersImage(containers, busyboxImage) { - framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers) + e2elog.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers) } if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure { - framework.Failf("Failed creating a CronJob with correct 
restart policy for --restart=OnFailure") + e2elog.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure") } }) }) @@ -233,7 +233,7 @@ var _ = SIGDescribe("Kubectl client", func() { if err != nil || len(pods) < atLeast { // TODO: Generalize integrating debug info into these tests so we always get debug info when we need it framework.DumpAllNamespaceInfo(f.ClientSet, ns) - framework.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err) + e2elog.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err) } } @@ -379,7 +379,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.By("executing a command in the container") execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", "running", "in", "container") if e, a := "running in container", strings.TrimSpace(execOutput); e != a { - framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) + e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) } ginkgo.By("executing a very long command in the container") @@ -395,13 +395,13 @@ var _ = SIGDescribe("Kubectl client", func() { WithStdinData("abcd1234"). ExecOrDie() if e, a := "abcd1234", execOutput; e != a { - framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) + e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) } // pretend that we're a user in an interactive shell r, closer, err := newBlockingReader("echo hi\nexit\n") if err != nil { - framework.Failf("Error creating blocking reader: %v", err) + e2elog.Failf("Error creating blocking reader: %v", err) } // NOTE this is solely for test cleanup! defer closer.Close() @@ -411,7 +411,7 @@ var _ = SIGDescribe("Kubectl client", func() { WithStdinReader(r). ExecOrDie() if e, a := "hi", strings.TrimSpace(execOutput); e != a { - framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) + e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) } }) @@ -419,14 +419,14 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.By("executing a command in the container") execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodResourceName, "echo", "running", "in", "container") if e, a := "running in container", strings.TrimSpace(execOutput); e != a { - framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) + e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a) } }) ginkgo.It("should support exec through an HTTP proxy", func() { // Fail if the variable isn't set if framework.TestContext.Host == "" { - framework.Failf("--host variable must be set to the full URI to the api server on e2e run.") + e2elog.Failf("--host variable must be set to the full URI to the api server on e2e run.") } ginkgo.By("Starting goproxy") @@ -444,7 +444,7 @@ var _ = SIGDescribe("Kubectl client", func() { // Verify we got the normal output captured by the exec server expectedExecOutput := "running in container\n" if output != expectedExecOutput { - framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output) + e2elog.Failf("Unexpected kubectl exec output. 
Wanted %q, got %q", expectedExecOutput, output) } // Verify the proxy server logs saw the connection @@ -452,7 +452,7 @@ var _ = SIGDescribe("Kubectl client", func() { proxyLog := proxyLogs.String() if !strings.Contains(proxyLog, expectedProxyLog) { - framework.Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog) + e2elog.Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog) } } }) @@ -460,7 +460,7 @@ var _ = SIGDescribe("Kubectl client", func() { ginkgo.It("should support exec through kubectl proxy", func() { // Fail if the variable isn't set if framework.TestContext.Host == "" { - framework.Failf("--host variable must be set to the full URI to the api server on e2e run.") + e2elog.Failf("--host variable must be set to the full URI to the api server on e2e run.") } ginkgo.By("Starting kubectl proxy") @@ -479,7 +479,7 @@ var _ = SIGDescribe("Kubectl client", func() { // Verify we got the normal output captured by the exec server expectedExecOutput := "running in container\n" if output != expectedExecOutput { - framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output) + e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output) } }) @@ -541,7 +541,7 @@ var _ = SIGDescribe("Kubectl client", func() { // to loop test. err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) { - framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test") + e2elog.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test") } logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name) gomega.Expect(runOutput).To(gomega.ContainSubstring("abcd1234")) @@ -569,14 +569,14 @@ var _ = SIGDescribe("Kubectl client", func() { runTestPod, _, err = polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g) gomega.Expect(err).To(gomega.BeNil()) if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) { - framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") + e2elog.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") } // NOTE: we cannot guarantee our output showed up in the container logs before stdin was closed, so we have // to loop test. 
err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) { - framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") + e2elog.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3") } logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name) gomega.Expect(logOutput).ToNot(gomega.ContainSubstring("stdin closed")) @@ -595,7 +595,7 @@ var _ = SIGDescribe("Kubectl client", func() { framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+busyboxImage, "--restart=OnFailure", nsFlag, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF") if !e2epod.CheckPodsRunningReady(c, ns, []string{podName}, framework.PodStartTimeout) { - framework.Failf("Pod for run-log-test was not ready") + e2elog.Failf("Pod for run-log-test was not ready") } logOutput := framework.RunKubectlOrDie(nsFlag, "logs", "-f", "run-log-test") @@ -612,10 +612,10 @@ var _ = SIGDescribe("Kubectl client", func() { body, err := curl(localAddr) e2elog.Logf("got: %s", body) if err != nil { - framework.Failf("Failed http.Get of forwarded port (%s): %v", localAddr, err) + e2elog.Failf("Failed http.Get of forwarded port (%s): %v", localAddr, err) } if !strings.Contains(body, nginxDefaultOutput) { - framework.Failf("Container port output missing expected value. Wanted:'%s', got: %s", nginxDefaultOutput, body) + e2elog.Failf("Container port output missing expected value. Wanted:'%s', got: %s", nginxDefaultOutput, body) } }) @@ -754,7 +754,7 @@ metadata: ginkgo.By("validating api versions") output := framework.RunKubectlOrDie("api-versions") if !strings.Contains(output, "v1") { - framework.Failf("No v1 in kubectl api-versions") + e2elog.Failf("No v1 in kubectl api-versions") } }) }) @@ -805,7 +805,7 @@ metadata: ginkgo.By("checking the result") if originalNodePort != currentNodePort { - framework.Failf("port should keep the same") + e2elog.Failf("port should keep the same") } }) @@ -822,7 +822,7 @@ metadata: output := framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json") requiredString := "\"replicas\": 2" if !strings.Contains(output, requiredString) { - framework.Failf("Missing %s in kubectl view-last-applied", requiredString) + e2elog.Failf("Missing %s in kubectl view-last-applied", requiredString) } ginkgo.By("apply file doesn't have replicas") @@ -832,7 +832,7 @@ metadata: output = framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json") requiredString = "\"replicas\": 2" if strings.Contains(output, requiredString) { - framework.Failf("Presenting %s in kubectl view-last-applied", requiredString) + e2elog.Failf("Presenting %s in kubectl view-last-applied", requiredString) } ginkgo.By("scale set replicas to 3") @@ -848,7 +848,7 @@ metadata: requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Nginx)} for _, item := range requiredItems { if !strings.Contains(output, item) { - framework.Failf("Missing %s in kubectl apply", item) + e2elog.Failf("Missing %s in kubectl apply", item) } } }) @@ -887,7 +887,7 @@ metadata: schemaForGVK := func(desiredGVK schema.GroupVersionKind) *openapi_v2.Schema { d, err := f.ClientSet.Discovery().OpenAPISchema() if err != nil { - framework.Failf("%v", err) + e2elog.Failf("%v", err) } if d == nil || d.Definitions == nil { return nil @@ -909,7 +909,7 
+909,7 @@ metadata: ginkgo.By("create CRD with no validation schema") crd, err := crd.CreateTestCRD(f) if err != nil { - framework.Failf("failed to create test CRD: %v", err) + e2elog.Failf("failed to create test CRD: %v", err) } defer crd.CleanUp() @@ -919,7 +919,7 @@ metadata: meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr") randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta) if err := createApplyCustomResource(randomCR, f.Namespace.Name, "test-cr", crd); err != nil { - framework.Failf("%v", err) + e2elog.Failf("%v", err) } }) @@ -928,12 +928,12 @@ metadata: crd, err := crd.CreateTestCRD(f, func(crd *v1beta1.CustomResourceDefinition) { props := &v1beta1.JSONSchemaProps{} if err := yaml.Unmarshal(schemaFoo, props); err != nil { - framework.Failf("failed to unmarshal schema: %v", err) + e2elog.Failf("failed to unmarshal schema: %v", err) } crd.Spec.Validation = &v1beta1.CustomResourceValidation{OpenAPIV3Schema: props} }) if err != nil { - framework.Failf("failed to create test CRD: %v", err) + e2elog.Failf("failed to create test CRD: %v", err) } defer crd.CleanUp() @@ -943,7 +943,7 @@ metadata: meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr") validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta) if err := createApplyCustomResource(validCR, f.Namespace.Name, "test-cr", crd); err != nil { - framework.Failf("%v", err) + e2elog.Failf("%v", err) } }) @@ -952,12 +952,12 @@ metadata: crd, err := crd.CreateTestCRD(f, func(crd *v1beta1.CustomResourceDefinition) { props := &v1beta1.JSONSchemaProps{} if err := yaml.Unmarshal(schemaFoo, props); err != nil { - framework.Failf("failed to unmarshal schema: %v", err) + e2elog.Failf("failed to unmarshal schema: %v", err) } crd.Spec.Validation = &v1beta1.CustomResourceValidation{OpenAPIV3Schema: props} }) if err != nil { - framework.Failf("failed to create test CRD: %v", err) + e2elog.Failf("failed to create test CRD: %v", err) } defer crd.CleanUp() @@ -980,11 +980,11 @@ metadata: validArbitraryCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}],"extraProperty":"arbitrary-value"}}`, meta) if err := createApplyCustomResource(validArbitraryCR, f.Namespace.Name, "test-cr", crd); err != nil { if expectSuccess { - framework.Failf("%v", err) + e2elog.Failf("%v", err) } } else { if !expectSuccess { - framework.Failf("expected error, got none") + e2elog.Failf("expected error, got none") } } }) @@ -1004,7 +1004,7 @@ metadata: requiredItems := []string{"Kubernetes master", "is running at"} for _, item := range requiredItems { if !strings.Contains(output, item) { - framework.Failf("Missing %s in kubectl cluster-info", item) + e2elog.Failf("Missing %s in kubectl cluster-info", item) } } }) @@ -1168,11 +1168,11 @@ metadata: return false, nil } if len(uidToPort) > 1 { - framework.Failf("Too many endpoints found") + e2elog.Failf("Too many endpoints found") } for _, port := range uidToPort { if port[0] != redisPort { - framework.Failf("Wrong endpoint port: %d", port[0]) + e2elog.Failf("Wrong endpoint port: %d", port[0]) } } return true, nil @@ -1183,14 +1183,14 @@ metadata: framework.ExpectNoError(err) if len(service.Spec.Ports) != 1 { - framework.Failf("1 port is expected") + e2elog.Failf("1 port is expected") } port := service.Spec.Ports[0] if port.Port != int32(servicePort) { - framework.Failf("Wrong service port: %d", port.Port) + e2elog.Failf("Wrong service port: %d", port.Port) } if 
port.TargetPort.IntValue() != redisPort { - framework.Failf("Wrong target port: %d", port.TargetPort.IntValue()) + e2elog.Failf("Wrong target port: %d", port.TargetPort.IntValue()) } } @@ -1234,7 +1234,7 @@ metadata: ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue) output := framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag) if !strings.Contains(output, labelValue) { - framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName) + e2elog.Failf("Failed updating label " + labelName + " to the pod " + pausePodName) } ginkgo.By("removing the label " + labelName + " of a pod") @@ -1242,7 +1242,7 @@ metadata: ginkgo.By("verifying the pod doesn't have the label " + labelName) output = framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag) if strings.Contains(output, labelValue) { - framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName) + e2elog.Failf("Failed removing label " + labelName + " of the pod " + pausePodName) } }) }) @@ -1271,7 +1271,7 @@ metadata: podSource := fmt.Sprintf("%s:/root/foo/bar/foo.bar", busyboxPodName) tempDestination, err := ioutil.TempFile(os.TempDir(), "copy-foobar") if err != nil { - framework.Failf("Failed creating temporary destination file: %v", err) + e2elog.Failf("Failed creating temporary destination file: %v", err) } ginkgo.By("specifying a remote filepath " + podSource + " on the pod") @@ -1279,10 +1279,10 @@ metadata: ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name()) localData, err := ioutil.ReadAll(tempDestination) if err != nil { - framework.Failf("Failed reading temporary local file: %v", err) + e2elog.Failf("Failed reading temporary local file: %v", err) } if string(localData) != remoteContents { - framework.Failf("Failed copying remote file contents. Expected %s but got %s", remoteContents, string(localData)) + e2elog.Failf("Failed copying remote file contents. 
Expected %s but got %s", remoteContents, string(localData)) } }) }) @@ -1344,7 +1344,7 @@ metadata: gomega.Expect(len(words)).To(gomega.BeNumerically(">", 1)) if _, err := time.Parse(time.RFC3339Nano, words[0]); err != nil { if _, err := time.Parse(time.RFC3339, words[0]); err != nil { - framework.Failf("expected %q to be RFC3339 or RFC3339Nano", words[0]) + e2elog.Failf("expected %q to be RFC3339 or RFC3339Nano", words[0]) } } @@ -1390,7 +1390,7 @@ metadata: } } if !found { - framework.Failf("Added annotation not found") + e2elog.Failf("Added annotation not found") } }) }) @@ -1407,7 +1407,7 @@ metadata: requiredItems := []string{"Client Version:", "Server Version:", "Major:", "Minor:", "GitCommit:"} for _, item := range requiredItems { if !strings.Contains(version, item) { - framework.Failf("Required item %s not found in %s", item, version) + e2elog.Failf("Required item %s not found in %s", item, version) } } }) @@ -1441,12 +1441,12 @@ metadata: label := labels.SelectorFromSet(labels.Set(map[string]string{"run": name})) podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label) if err != nil { - framework.Failf("Failed getting pod controlled by %s: %v", name, err) + e2elog.Failf("Failed getting pod controlled by %s: %v", name, err) } pods := podlist.Items if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage { framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag) - framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods)) + e2elog.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods)) } }) }) @@ -1475,23 +1475,23 @@ metadata: ginkgo.By("verifying the rc " + rcName + " was created") rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed getting rc %s: %v", rcName, err) + e2elog.Failf("Failed getting rc %s: %v", rcName, err) } containers := rc.Spec.Template.Spec.Containers if checkContainersImage(containers, nginxImage) { - framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage) + e2elog.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage) } ginkgo.By("verifying the pod controlled by rc " + rcName + " was created") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": rcName})) podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label) if err != nil { - framework.Failf("Failed getting pod controlled by rc %s: %v", rcName, err) + e2elog.Failf("Failed getting pod controlled by rc %s: %v", rcName, err) } pods := podlist.Items if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage { framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag) - framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods)) + e2elog.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods)) } ginkgo.By("confirm that you can get logs from an rc") @@ -1500,12 +1500,12 @@ metadata: podNames = append(podNames, pod.Name) } if !e2epod.CheckPodsRunningReady(c, ns, podNames, framework.PodStartTimeout) { - framework.Failf("Pods for rc %s were not ready", rcName) + e2elog.Failf("Pods for rc %s were not ready", rcName) } _, err = framework.RunKubectl("logs", "rc/"+rcName, nsFlag) // a non-nil error is fine as long as we actually found a pod. 
if err != nil && !strings.Contains(err.Error(), " in pod ") { - framework.Failf("Failed getting logs by rc %s: %v", rcName, err) + e2elog.Failf("Failed getting logs by rc %s: %v", rcName, err) } }) }) @@ -1536,11 +1536,11 @@ metadata: ginkgo.By("verifying the rc " + rcName + " was created") rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed getting rc %s: %v", rcName, err) + e2elog.Failf("Failed getting rc %s: %v", rcName, err) } containers := rc.Spec.Template.Spec.Containers if checkContainersImage(containers, nginxImage) { - framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage) + e2elog.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, nginxImage) } framework.WaitForRCToStabilize(c, ns, rcName, framework.PodStartTimeout) @@ -1586,23 +1586,23 @@ metadata: ginkgo.By("verifying the deployment " + dName + " was created") d, err := c.AppsV1().Deployments(ns).Get(dName, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed getting deployment %s: %v", dName, err) + e2elog.Failf("Failed getting deployment %s: %v", dName, err) } containers := d.Spec.Template.Spec.Containers if checkContainersImage(containers, nginxImage) { - framework.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, nginxImage) + e2elog.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, nginxImage) } ginkgo.By("verifying the pod controlled by deployment " + dName + " was created") label := labels.SelectorFromSet(labels.Set(map[string]string{"run": dName})) podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label) if err != nil { - framework.Failf("Failed getting pod controlled by deployment %s: %v", dName, err) + e2elog.Failf("Failed getting pod controlled by deployment %s: %v", dName, err) } pods := podlist.Items if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != nginxImage { framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag) - framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", nginxImage, len(pods)) + e2elog.Failf("Failed creating 1 pod with expected image %s. 
Number of pods = %v", nginxImage, len(pods)) } }) }) @@ -1631,14 +1631,14 @@ metadata: ginkgo.By("verifying the job " + jobName + " was created") job, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed getting job %s: %v", jobName, err) + e2elog.Failf("Failed getting job %s: %v", jobName, err) } containers := job.Spec.Template.Spec.Containers if checkContainersImage(containers, nginxImage) { - framework.Failf("Failed creating job %s for 1 pod with expected image %s: %#v", jobName, nginxImage, containers) + e2elog.Failf("Failed creating job %s for 1 pod with expected image %s: %#v", jobName, nginxImage, containers) } if job.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure { - framework.Failf("Failed creating a job with correct restart policy for --restart=OnFailure") + e2elog.Failf("Failed creating a job with correct restart policy for --restart=OnFailure") } }) }) @@ -1665,17 +1665,17 @@ metadata: ginkgo.By("verifying the CronJob " + cjName + " was created") cj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed getting CronJob %s: %v", cjName, err) + e2elog.Failf("Failed getting CronJob %s: %v", cjName, err) } if cj.Spec.Schedule != schedule { - framework.Failf("Failed creating a CronJob with correct schedule %s", schedule) + e2elog.Failf("Failed creating a CronJob with correct schedule %s", schedule) } containers := cj.Spec.JobTemplate.Spec.Template.Spec.Containers if checkContainersImage(containers, busyboxImage) { - framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers) + e2elog.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers) } if cj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure { - framework.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure") + e2elog.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure") } }) }) @@ -1704,14 +1704,14 @@ metadata: ginkgo.By("verifying the pod " + podName + " was created") pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed getting pod %s: %v", podName, err) + e2elog.Failf("Failed getting pod %s: %v", podName, err) } containers := pod.Spec.Containers if checkContainersImage(containers, nginxImage) { - framework.Failf("Failed creating pod %s with expected image %s", podName, nginxImage) + e2elog.Failf("Failed creating pod %s with expected image %s", podName, nginxImage) } if pod.Spec.RestartPolicy != v1.RestartPolicyNever { - framework.Failf("Failed creating a pod with correct restart policy for --restart=Never") + e2elog.Failf("Failed creating a pod with correct restart policy for --restart=Never") } }) }) @@ -1742,13 +1742,13 @@ metadata: label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName})) err := testutils.WaitForPodsWithLabelRunning(c, ns, label) if err != nil { - framework.Failf("Failed getting pod %s: %v", podName, err) + e2elog.Failf("Failed getting pod %s: %v", podName, err) } ginkgo.By("verifying the pod " + podName + " was created") podJSON := framework.RunKubectlOrDie("get", "pod", podName, nsFlag, "-o", "json") if !strings.Contains(podJSON, podName) { - framework.Failf("Failed to find pod %s in [%s]", podName, podJSON) + e2elog.Failf("Failed to find pod %s in [%s]", podName, podJSON) } ginkgo.By("replace the image in 
the pod") @@ -1758,11 +1758,11 @@ metadata: ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage) pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed getting deployment %s: %v", podName, err) + e2elog.Failf("Failed getting deployment %s: %v", podName, err) } containers := pod.Spec.Containers if checkContainersImage(containers, busyboxImage) { - framework.Failf("Failed creating pod with expected image %s", busyboxImage) + e2elog.Failf("Failed creating pod with expected image %s", busyboxImage) } }) }) @@ -1812,16 +1812,16 @@ metadata: defer framework.TryKill(cmd) } if err != nil { - framework.Failf("Failed to start proxy server: %v", err) + e2elog.Failf("Failed to start proxy server: %v", err) } ginkgo.By("curling proxy /api/ output") localAddr := fmt.Sprintf("http://localhost:%d/api/", port) apiVersions, err := getAPIVersions(localAddr) if err != nil { - framework.Failf("Expected at least one supported apiversion, got error %v", err) + e2elog.Failf("Expected at least one supported apiversion, got error %v", err) } if len(apiVersions.Versions) < 1 { - framework.Failf("Expected at least one supported apiversion, got %v", apiVersions) + e2elog.Failf("Expected at least one supported apiversion, got %v", apiVersions) } }) @@ -1834,7 +1834,7 @@ metadata: ginkgo.By("Starting the proxy") tmpdir, err := ioutil.TempDir("", "kubectl-proxy-unix") if err != nil { - framework.Failf("Failed to create temporary directory: %v", err) + e2elog.Failf("Failed to create temporary directory: %v", err) } path := filepath.Join(tmpdir, "test") defer os.Remove(path) @@ -1842,19 +1842,19 @@ metadata: cmd := framework.KubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path)) stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd) if err != nil { - framework.Failf("Failed to start kubectl command: %v", err) + e2elog.Failf("Failed to start kubectl command: %v", err) } defer stdout.Close() defer stderr.Close() defer framework.TryKill(cmd) buf := make([]byte, 128) if _, err = stdout.Read(buf); err != nil { - framework.Failf("Expected output from kubectl proxy: %v", err) + e2elog.Failf("Expected output from kubectl proxy: %v", err) } ginkgo.By("retrieving proxy /api/ output") _, err = curlUnix("http://unused/api", path) if err != nil { - framework.Failf("Failed get of /api at %s: %v", path, err) + e2elog.Failf("Failed get of /api at %s: %v", path, err) } }) }) @@ -1889,7 +1889,7 @@ metadata: ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key) output = runKubectlRetryOrDie("describe", "node", nodeName) if strings.Contains(output, testTaint.Key) { - framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName) + e2elog.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName) } }) @@ -1956,7 +1956,7 @@ metadata: ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key) output = runKubectlRetryOrDie("describe", "node", nodeName) if strings.Contains(output, testTaint.Key) { - framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName) + e2elog.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName) } }) }) @@ -1972,22 +1972,22 @@ metadata: ginkgo.By("verifying that the quota was created") quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed getting quota %s: %v", quotaName, err) + e2elog.Failf("Failed getting quota %s: 
%v", quotaName, err) } if len(quota.Spec.Scopes) != 0 { - framework.Failf("Expected empty scopes, got %v", quota.Spec.Scopes) + e2elog.Failf("Expected empty scopes, got %v", quota.Spec.Scopes) } if len(quota.Spec.Hard) != 2 { - framework.Failf("Expected two resources, got %v", quota.Spec.Hard) + e2elog.Failf("Expected two resources, got %v", quota.Spec.Hard) } r, found := quota.Spec.Hard[v1.ResourcePods] if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 { - framework.Failf("Expected pods=1000000, got %v", r) + e2elog.Failf("Expected pods=1000000, got %v", r) } r, found = quota.Spec.Hard[v1.ResourceServices] if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 { - framework.Failf("Expected services=1000000, got %v", r) + e2elog.Failf("Expected services=1000000, got %v", r) } }) @@ -2001,21 +2001,21 @@ metadata: ginkgo.By("verifying that the quota was created") quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed getting quota %s: %v", quotaName, err) + e2elog.Failf("Failed getting quota %s: %v", quotaName, err) } if len(quota.Spec.Scopes) != 2 { - framework.Failf("Expected two scopes, got %v", quota.Spec.Scopes) + e2elog.Failf("Expected two scopes, got %v", quota.Spec.Scopes) } scopes := make(map[v1.ResourceQuotaScope]struct{}) for _, scope := range quota.Spec.Scopes { scopes[scope] = struct{}{} } if _, found := scopes[v1.ResourceQuotaScopeBestEffort]; !found { - framework.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes) + e2elog.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes) } if _, found := scopes[v1.ResourceQuotaScopeNotTerminating]; !found { - framework.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes) + e2elog.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes) } }) @@ -2026,7 +2026,7 @@ metadata: ginkgo.By("calling kubectl quota") out, err := framework.RunKubectl("create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo", nsFlag) if err == nil { - framework.Failf("Expected kubectl to fail, but it succeeded: %s", out) + e2elog.Failf("Expected kubectl to fail, but it succeeded: %s", out) } }) }) @@ -2055,7 +2055,7 @@ func checkOutputReturnError(output string, required [][]string) error { func checkOutput(output string, required [][]string) { err := checkOutputReturnError(output, required) if err != nil { - framework.Failf("%v", err) + e2elog.Failf("%v", err) } } @@ -2072,7 +2072,7 @@ func checkKubectlOutputWithRetry(required [][]string, args ...string) { return true, nil }) if pollErr != nil { - framework.Failf("%v", pollErr) + e2elog.Failf("%v", pollErr) } return } @@ -2153,17 +2153,17 @@ func validateGuestbookApp(c clientset.Interface, ns string) { framework.ExpectNoError(err) e2elog.Logf("Waiting for frontend to serve content.") if !waitForGuestbookResponse(c, "get", "", `{"data": ""}`, guestbookStartupTimeout, ns) { - framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds()) + e2elog.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds()) } e2elog.Logf("Trying to add a new entry to the guestbook.") if !waitForGuestbookResponse(c, "set", "TestEntry", `{"message": "Updated"}`, guestbookResponseTimeout, ns) { - framework.Failf("Cannot added new entry in %v seconds.", guestbookResponseTimeout.Seconds()) + e2elog.Failf("Cannot added new entry in %v seconds.", 
guestbookResponseTimeout.Seconds()) } e2elog.Logf("Verifying that added entry can be retrieved.") if !waitForGuestbookResponse(c, "get", "", `{"data": "TestEntry"}`, guestbookResponseTimeout, ns) { - framework.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds()) + e2elog.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds()) } } @@ -2209,7 +2209,7 @@ const applyTestLabel = "kubectl.kubernetes.io/apply-test" func readReplicationControllerFromString(contents string) *v1.ReplicationController { rc := v1.ReplicationController{} if err := yaml.Unmarshal([]byte(contents), &rc); err != nil { - framework.Failf(err.Error()) + e2elog.Failf(err.Error()) } return &rc @@ -2222,7 +2222,7 @@ func modifyReplicationControllerConfiguration(contents string) io.Reader { rc.Spec.Template.Labels[applyTestLabel] = "ADDED" data, err := json.Marshal(rc) if err != nil { - framework.Failf("json marshal failed: %s\n", err) + e2elog.Failf("json marshal failed: %s\n", err) } return bytes.NewReader(data) @@ -2242,7 +2242,7 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select } if rcs == nil || len(rcs.Items) == 0 { - framework.Failf("No replication controllers found") + e2elog.Failf("No replication controllers found") } for _, rc := range rcs.Items { @@ -2253,11 +2253,11 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select func validateReplicationControllerConfiguration(rc v1.ReplicationController) { if rc.Name == "redis-master" { if _, ok := rc.Annotations[v1.LastAppliedConfigAnnotation]; !ok { - framework.Failf("Annotation not found in modified configuration:\n%v\n", rc) + e2elog.Failf("Annotation not found in modified configuration:\n%v\n", rc) } if value, ok := rc.Labels[applyTestLabel]; !ok || value != "ADDED" { - framework.Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc) + e2elog.Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc) } } } @@ -2285,7 +2285,7 @@ func getUDData(jpgExpected string, ns string) func(clientset.Interface, string) if err != nil { if ctx.Err() != nil { - framework.Failf("Failed to retrieve data from container: %v", err) + e2elog.Failf("Failed to retrieve data from container: %v", err) } return err } diff --git a/test/e2e/kubectl/portforward.go b/test/e2e/kubectl/portforward.go index 42345e94a00..eeacd7f707d 100644 --- a/test/e2e/kubectl/portforward.go +++ b/test/e2e/kubectl/portforward.go @@ -175,7 +175,7 @@ func runPortForward(ns, podName string, port int) *portForwardCommand { e2elog.Logf("starting port-forward command and streaming output") portOutput, _, err := framework.StartCmdAndStreamOutput(cmd) if err != nil { - framework.Failf("Failed to start port-forward command: %v", err) + e2elog.Failf("Failed to start port-forward command: %v", err) } buf := make([]byte, 128) @@ -183,17 +183,17 @@ func runPortForward(ns, podName string, port int) *portForwardCommand { var n int e2elog.Logf("reading from `kubectl port-forward` command's stdout") if n, err = portOutput.Read(buf); err != nil { - framework.Failf("Failed to read from kubectl port-forward stdout: %v", err) + e2elog.Failf("Failed to read from kubectl port-forward stdout: %v", err) } portForwardOutput := string(buf[:n]) match := portForwardRegexp.FindStringSubmatch(portForwardOutput) if len(match) != 3 { - framework.Failf("Failed to parse kubectl port-forward output: %s", portForwardOutput) + 
e2elog.Failf("Failed to parse kubectl port-forward output: %s", portForwardOutput) } listenPort, err := strconv.Atoi(match[2]) if err != nil { - framework.Failf("Error converting %s to an int: %v", match[2], err) + e2elog.Failf("Error converting %s to an int: %v", match[2], err) } return &portForwardCommand{ @@ -206,10 +206,10 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { ginkgo.By("Creating the target pod") pod := pfPod("", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { - framework.Failf("Couldn't create pod: %v", err) + e2elog.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodReady(pod.Name); err != nil { - framework.Failf("Pod did not start running: %v", err) + e2elog.Failf("Pod did not start running: %v", err) } ginkgo.By("Running 'kubectl port-forward'") @@ -219,7 +219,7 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { ginkgo.By("Dialing the local port") conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) if err != nil { - framework.Failf("Couldn't connect to port %d: %v", cmd.port, err) + e2elog.Failf("Couldn't connect to port %d: %v", cmd.port, err) } defer func() { ginkgo.By("Closing the connection to the local port") @@ -229,16 +229,16 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) { ginkgo.By("Reading data from the local port") fromServer, err := ioutil.ReadAll(conn) if err != nil { - framework.Failf("Unexpected error reading data from the server: %v", err) + e2elog.Failf("Unexpected error reading data from the server: %v", err) } if e, a := strings.Repeat("x", 100), string(fromServer); e != a { - framework.Failf("Expected %q from server, got %q", e, a) + e2elog.Failf("Expected %q from server, got %q", e, a) } ginkgo.By("Waiting for the target pod to stop running") if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { - framework.Failf("Container did not terminate: %v", err) + e2elog.Failf("Container did not terminate: %v", err) } ginkgo.By("Verifying logs") @@ -254,10 +254,10 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { ginkgo.By("Creating the target pod") pod := pfPod("abc", "1", "1", "1", fmt.Sprintf("%s", bindAddress)) if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { - framework.Failf("Couldn't create pod: %v", err) + e2elog.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodReady(pod.Name); err != nil { - framework.Failf("Pod did not start running: %v", err) + e2elog.Failf("Pod did not start running: %v", err) } ginkgo.By("Running 'kubectl port-forward'") @@ -267,7 +267,7 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { ginkgo.By("Dialing the local port") conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) if err != nil { - framework.Failf("Couldn't connect to port %d: %v", cmd.port, err) + e2elog.Failf("Couldn't connect to port %d: %v", cmd.port, err) } ginkgo.By("Closing the connection to the local port") @@ -275,7 +275,7 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) { ginkgo.By("Waiting for the target pod to stop running") if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { - framework.Failf("Container did not terminate: %v", err) + e2elog.Failf("Container did not terminate: %v", err) } ginkgo.By("Verifying logs") @@ -291,10 +291,10 @@ func 
doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) ginkgo.By("Creating the target pod") pod := pfPod("abc", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { - framework.Failf("Couldn't create pod: %v", err) + e2elog.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodReady(pod.Name); err != nil { - framework.Failf("Pod did not start running: %v", err) + e2elog.Failf("Pod did not start running: %v", err) } ginkgo.By("Running 'kubectl port-forward'") @@ -304,11 +304,11 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) ginkgo.By("Dialing the local port") addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) if err != nil { - framework.Failf("Error resolving tcp addr: %v", err) + e2elog.Failf("Error resolving tcp addr: %v", err) } conn, err := net.DialTCP("tcp", nil, addr) if err != nil { - framework.Failf("Couldn't connect to port %d: %v", cmd.port, err) + e2elog.Failf("Couldn't connect to port %d: %v", cmd.port, err) } defer func() { ginkgo.By("Closing the connection to the local port") @@ -324,16 +324,16 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework) ginkgo.By("Reading data from the local port") fromServer, err := ioutil.ReadAll(conn) if err != nil { - framework.Failf("Unexpected error reading data from the server: %v", err) + e2elog.Failf("Unexpected error reading data from the server: %v", err) } if e, a := strings.Repeat("x", 100), string(fromServer); e != a { - framework.Failf("Expected %q from server, got %q", e, a) + e2elog.Failf("Expected %q from server, got %q", e, a) } ginkgo.By("Waiting for the target pod to stop running") if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { - framework.Failf("Container did not terminate: %v", err) + e2elog.Failf("Container did not terminate: %v", err) } ginkgo.By("Verifying logs") @@ -353,10 +353,10 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { ginkgo.By("Creating the pod") pod := pfPod("def", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { - framework.Failf("Couldn't create pod: %v", err) + e2elog.Failf("Couldn't create pod: %v", err) } if err := f.WaitForPodReady(pod.Name); err != nil { - framework.Failf("Pod did not start running: %v", err) + e2elog.Failf("Pod did not start running: %v", err) } req := f.ClientSet.CoreV1().RESTClient().Get(). 
@@ -369,7 +369,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { url := req.URL() ws, err := framework.OpenWebSocketForURL(url, config, []string{"v4.channel.k8s.io"}) if err != nil { - framework.Failf("Failed to open websocket to %s: %v", url.String(), err) + e2elog.Failf("Failed to open websocket to %s: %v", url.String(), err) } defer ws.Close() @@ -404,7 +404,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) { ginkgo.By("Sending the expected data to the local port") err = wsWrite(ws, 0, []byte("def")) if err != nil { - framework.Failf("Failed to write to websocket %s: %v", url.String(), err) + e2elog.Failf("Failed to write to websocket %s: %v", url.String(), err) } ginkgo.By("Reading data from the local port") diff --git a/test/e2e/lifecycle/bootstrap/BUILD b/test/e2e/lifecycle/bootstrap/BUILD index af60340427f..1b0ba6bd7bb 100644 --- a/test/e2e/lifecycle/bootstrap/BUILD +++ b/test/e2e/lifecycle/bootstrap/BUILD @@ -21,6 +21,7 @@ go_library( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/log:go_default_library", "//test/e2e/lifecycle:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", diff --git a/test/e2e/lifecycle/bootstrap/util.go b/test/e2e/lifecycle/bootstrap/util.go index 0f62a7575d1..5bb9229aa34 100644 --- a/test/e2e/lifecycle/bootstrap/util.go +++ b/test/e2e/lifecycle/bootstrap/util.go @@ -29,6 +29,7 @@ import ( clientset "k8s.io/client-go/kubernetes" bootstrapapi "k8s.io/cluster-bootstrap/token/api" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" ) func newTokenSecret(tokenID, tokenSecret string) *v1.Secret { @@ -83,7 +84,7 @@ func WaitforSignedClusterInfoByBootStrapToken(c clientset.Interface, tokenID str return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) { cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed to get cluster-info configMap: %v", err) + e2elog.Failf("Failed to get cluster-info configMap: %v", err) return false, err } _, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID] @@ -99,7 +100,7 @@ func WaitForSignedClusterInfoGetUpdatedByBootstrapToken(c clientset.Interface, t return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) { cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed to get cluster-info configMap: %v", err) + e2elog.Failf("Failed to get cluster-info configMap: %v", err) return false, err } updated, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID] @@ -115,7 +116,7 @@ func WaitForSignedClusterInfoByBootstrapTokenToDisappear(c clientset.Interface, return wait.Poll(framework.Poll, 2*time.Minute, func() (bool, error) { cfgMap, err := c.CoreV1().ConfigMaps(metav1.NamespacePublic).Get(bootstrapapi.ConfigMapClusterInfo, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed to get cluster-info configMap: %v", err) + e2elog.Failf("Failed to get cluster-info configMap: %v", err) return false, err } _, ok := cfgMap.Data[bootstrapapi.JWSSignatureKeyPrefix+tokenID] diff --git a/test/e2e/lifecycle/ha_master.go b/test/e2e/lifecycle/ha_master.go index 
8e7f5ca1680..00aef5e0fee 100644 --- a/test/e2e/lifecycle/ha_master.go +++ b/test/e2e/lifecycle/ha_master.go @@ -83,7 +83,7 @@ func findRegionForZone(zone string) string { region, err := exec.Command("gcloud", "compute", "zones", "list", zone, "--quiet", "--format=csv[no-heading](region)").Output() framework.ExpectNoError(err) if string(region) == "" { - framework.Failf("Region not found; zone: %s", zone) + e2elog.Failf("Region not found; zone: %s", zone) } return string(region) } diff --git a/test/e2e/lifecycle/kubelet_security.go b/test/e2e/lifecycle/kubelet_security.go index 8abe1fe1b8a..2ad910325ff 100644 --- a/test/e2e/lifecycle/kubelet_security.go +++ b/test/e2e/lifecycle/kubelet_security.go @@ -28,6 +28,7 @@ import ( "github.com/onsi/ginkgo" "github.com/onsi/gomega" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2enode "k8s.io/kubernetes/test/e2e/framework/node" ) @@ -80,7 +81,7 @@ func portClosedTest(f *framework.Framework, pickNode *v1.Node, port int) { conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", addr, port), 1*time.Minute) if err == nil { conn.Close() - framework.Failf("port %d is not disabled", port) + e2elog.Failf("port %d is not disabled", port) } } } diff --git a/test/e2e/lifecycle/node_lease.go b/test/e2e/lifecycle/node_lease.go index db9305bec56..a00014a9527 100644 --- a/test/e2e/lifecycle/node_lease.go +++ b/test/e2e/lifecycle/node_lease.go @@ -47,7 +47,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() { gomega.Expect(err).To(gomega.BeNil()) systemPodsNo = int32(len(systemPods)) if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 { - framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup) + e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup) } else { group = framework.TestContext.CloudConfig.NodeInstanceGroup } @@ -70,7 +70,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() { ginkgo.By("restoring the original node instance group size") if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil { - framework.Failf("Couldn't restore the original node instance group size: %v", err) + e2elog.Failf("Couldn't restore the original node instance group size: %v", err) } // In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a // rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated. @@ -85,11 +85,11 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() { time.Sleep(5 * time.Minute) } if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil { - framework.Failf("Couldn't restore the original node instance group size: %v", err) + e2elog.Failf("Couldn't restore the original node instance group size: %v", err) } if err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute); err != nil { - framework.Failf("Couldn't restore the original cluster size: %v", err) + e2elog.Failf("Couldn't restore the original cluster size: %v", err) } // Many e2e tests assume that the cluster is fully healthy before they start. Wait until // the cluster is restored to health. 
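Every hunk in this series assumes that the helper package k8s.io/kubernetes/test/e2e/framework/log (imported as e2elog) exposes Failf and Logf with the same semantics as the framework functions they replace. The following is only a minimal sketch of what such a wrapper plausibly looks like, assuming it formats the message, writes a timestamped line to the ginkgo writer, and aborts the spec via ginkgo.Fail; the real framework implementation may differ (for example in how it trims caller frames):

// Package log is sketched here purely to illustrate the Failf/Logf call
// sites used throughout this diff; it is an assumption, not framework source.
package log

import (
	"fmt"
	"time"

	"github.com/onsi/ginkgo"
)

// nowStamp returns a timestamp in the style the e2e logs use.
func nowStamp() string {
	return time.Now().Format(time.StampMilli)
}

// Logf prints a timestamped INFO line to the ginkgo writer.
func Logf(format string, args ...interface{}) {
	fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": INFO: "+format+"\n", args...)
}

// Failf logs the failure and aborts the current spec via ginkgo.Fail.
// The callerSkip argument of 1 keeps this helper out of the reported trace.
func Failf(format string, args ...interface{}) {
	msg := fmt.Sprintf(format, args...)
	Logf("FAIL: %s", msg)
	ginkgo.Fail(nowStamp()+": "+msg, 1)
}
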
diff --git a/test/e2e/lifecycle/reboot.go b/test/e2e/lifecycle/reboot.go index dfb6dbf2873..48ca1d9a0aa 100644 --- a/test/e2e/lifecycle/reboot.go +++ b/test/e2e/lifecycle/reboot.go @@ -168,7 +168,7 @@ func testReboot(c clientset.Interface, rebootCmd string, hook terminationHook) { e2elog.Logf("Node %s failed reboot test.", n.ObjectMeta.Name) } } - framework.Failf("Test failed; at least one node failed to reboot in the time given.") + e2elog.Failf("Test failed; at least one node failed to reboot in the time given.") } } diff --git a/test/e2e/lifecycle/resize_nodes.go b/test/e2e/lifecycle/resize_nodes.go index 1c6fe4b896a..0047d88d11f 100644 --- a/test/e2e/lifecycle/resize_nodes.go +++ b/test/e2e/lifecycle/resize_nodes.go @@ -25,6 +25,7 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -55,7 +56,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { framework.ExpectNoError(err) systemPodsNo = int32(len(systemPods)) if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 { - framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup) + e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup) } else { group = framework.TestContext.CloudConfig.NodeInstanceGroup } @@ -80,7 +81,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { ginkgo.By("restoring the original node instance group size") if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil { - framework.Failf("Couldn't restore the original node instance group size: %v", err) + e2elog.Failf("Couldn't restore the original node instance group size: %v", err) } // In GKE, our current tunneling setup has the potential to hold on to a broken tunnel (from a // rebooted/deleted node) for up to 5 minutes before all tunnels are dropped and recreated. @@ -95,11 +96,11 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { time.Sleep(5 * time.Minute) } if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil { - framework.Failf("Couldn't restore the original node instance group size: %v", err) + e2elog.Failf("Couldn't restore the original node instance group size: %v", err) } if err := e2enode.WaitForReadyNodes(c, int(originalNodeCount), 10*time.Minute); err != nil { - framework.Failf("Couldn't restore the original cluster size: %v", err) + e2elog.Failf("Couldn't restore the original cluster size: %v", err) } // Many e2e tests assume that the cluster is fully healthy before they start. Wait until // the cluster is restored to health. 
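Some files in this series (resize_nodes.go above, for instance) add the e2elog import alias in the same patch, while others (restart.go below) only touch call sites because the alias was introduced earlier. The call-site pattern is identical everywhere; the snippet below is an illustrative, self-contained example of a migrated check, not code taken from the diff (the helper name restoreGroupSize is made up for the example):

package lifecycle

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// restoreGroupSize is a hypothetical helper showing the migrated pattern:
// the framework.* resize helpers stay as-is, and only the failure
// reporting moves to the e2elog alias.
func restoreGroupSize(group string, size int32) {
	if err := framework.ResizeGroup(group, size); err != nil {
		e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
	}
	if err := framework.WaitForGroupSize(group, size); err != nil {
		e2elog.Failf("Couldn't restore the original node instance group size: %v", err)
	}
}
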
diff --git a/test/e2e/lifecycle/restart.go b/test/e2e/lifecycle/restart.go index 6286430b6d7..0168282d974 100644 --- a/test/e2e/lifecycle/restart.go +++ b/test/e2e/lifecycle/restart.go @@ -75,7 +75,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() { } if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, originalPodNames, framework.PodReadyBeforeTimeout) { printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, originalPodNames, pods) - framework.Failf("At least one pod wasn't running and ready or succeeded at test start.") + e2elog.Failf("At least one pod wasn't running and ready or succeeded at test start.") } }) @@ -99,7 +99,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() { // that the names match because that's implementation specific. ginkgo.By("ensuring the same number of nodes exist after the restart") if len(originalNodes) != len(nodesAfter) { - framework.Failf("Had %d nodes before nodes were restarted, but now only have %d", + e2elog.Failf("Had %d nodes before nodes were restarted, but now only have %d", len(originalNodes), len(nodesAfter)) } @@ -114,7 +114,7 @@ var _ = SIGDescribe("Restart [Disruptive]", func() { if !e2epod.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) { pods := ps.List() printStatusAndLogsForNotReadyPods(f.ClientSet, systemNamespace, podNamesAfter, pods) - framework.Failf("At least one pod wasn't running and ready after the restart.") + e2elog.Failf("At least one pod wasn't running and ready after the restart.") } }) }) diff --git a/test/e2e/network/dns.go b/test/e2e/network/dns.go index f85f52d2bdc..4c6fe7d0f4f 100644 --- a/test/e2e/network/dns.go +++ b/test/e2e/network/dns.go @@ -392,7 +392,7 @@ var _ = SIGDescribe("DNS", func() { defer func() { e2elog.Logf("Deleting pod %s...", testAgnhostPod.Name) if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testAgnhostPod.Name, metav1.NewDeleteOptions(0)); err != nil { - framework.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err) + e2elog.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err) } }() err = f.WaitForPodRunning(testAgnhostPod.Name) @@ -415,13 +415,13 @@ var _ = SIGDescribe("DNS", func() { ginkgo.By("Verifying customized DNS suffix list is configured on pod...") stdout := runCommand("dns-suffix") if !strings.Contains(stdout, testSearchPath) { - framework.Failf("customized DNS suffix list not found configured in pod, expected to contain: %s, got: %s", testSearchPath, stdout) + e2elog.Failf("customized DNS suffix list not found configured in pod, expected to contain: %s, got: %s", testSearchPath, stdout) } ginkgo.By("Verifying customized DNS server is configured on pod...") stdout = runCommand("dns-server-list") if !strings.Contains(stdout, testServerIP) { - framework.Failf("customized DNS server not found in configured in pod, expected to contain: %s, got: %s", testServerIP, stdout) + e2elog.Failf("customized DNS server not found in configured in pod, expected to contain: %s, got: %s", testServerIP, stdout) } }) @@ -441,7 +441,7 @@ var _ = SIGDescribe("DNS", func() { defer func() { e2elog.Logf("Deleting pod %s...", testServerPod.Name) if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { - framework.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err) + e2elog.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err) } }() err = f.WaitForPodRunning(testServerPod.Name) @@ -473,7 +473,7 
@@ var _ = SIGDescribe("DNS", func() { defer func() { e2elog.Logf("Deleting pod %s...", testUtilsPod.Name) if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil { - framework.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err) + e2elog.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err) } }() err = f.WaitForPodRunning(testUtilsPod.Name) @@ -492,7 +492,7 @@ var _ = SIGDescribe("DNS", func() { }) framework.ExpectNoError(err, "failed to examine resolv,conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err) if !strings.Contains(stdout, "ndots:2") { - framework.Failf("customized DNS options not found in resolv.conf, got: %s", stdout) + e2elog.Failf("customized DNS options not found in resolv.conf, got: %s", stdout) } ginkgo.By("Verifying customized name server and search path are working...") diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go index 08f0d202427..8f5d50af8e0 100644 --- a/test/e2e/network/dns_common.go +++ b/test/e2e/network/dns_common.go @@ -100,7 +100,7 @@ func (t *dnsTestCommon) checkDNSRecordFrom(name string, predicate func([]string) }) if err != nil { - framework.Failf("dig result did not match: %#v after %v", + e2elog.Failf("dig result did not match: %#v after %v", actual, timeout) } } @@ -525,7 +525,7 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client if err != nil { if ctx.Err() != nil { - framework.Failf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err) + e2elog.Failf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err) } else { e2elog.Logf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err) } @@ -553,7 +553,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) }() if _, err := podClient.Create(pod); err != nil { - framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) + e2elog.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) } framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) @@ -561,7 +561,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) ginkgo.By("retrieving the pod") pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) if err != nil { - framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) + e2elog.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) } // Try to find results for each expected name. 
ginkgo.By("looking for the results for each expected name from probers") @@ -581,7 +581,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames podClient.Delete(pod.Name, metav1.NewDeleteOptions(0)) }() if _, err := podClient.Create(pod); err != nil { - framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) + e2elog.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err) } framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) @@ -589,7 +589,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames ginkgo.By("retrieving the pod") pod, err := podClient.Get(pod.Name, metav1.GetOptions{}) if err != nil { - framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) + e2elog.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err) } // Try to find the expected value for each expected name. ginkgo.By("looking for the results for each expected name from probers") diff --git a/test/e2e/network/example_cluster_dns.go b/test/e2e/network/example_cluster_dns.go index f6c27f6f03a..b8ba8de3483 100644 --- a/test/e2e/network/example_cluster_dns.go +++ b/test/e2e/network/example_cluster_dns.go @@ -128,7 +128,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() { pods, err := c.CoreV1().Pods(namespaces[0].Name).List(options) if err != nil || pods == nil || len(pods.Items) == 0 { - framework.Failf("no running pods found") + e2elog.Failf("no running pods found") } podName := pods.Items[0].Name diff --git a/test/e2e/network/firewall.go b/test/e2e/network/firewall.go index 5d4123c9986..1106c0b237e 100644 --- a/test/e2e/network/firewall.go +++ b/test/e2e/network/firewall.go @@ -77,7 +77,7 @@ var _ = SIGDescribe("Firewall rule", func() { gomega.Expect(nodeList).NotTo(gomega.BeNil()) nodesNames := jig.GetNodesNames(framework.MaxNodesForEndpointsTests) if len(nodesNames) <= 0 { - framework.Failf("Expect at least 1 node, got: %v", nodesNames) + e2elog.Failf("Expect at least 1 node, got: %v", nodesNames) } nodesSet := sets.NewString(nodesNames...) 
@@ -177,7 +177,7 @@ var _ = SIGDescribe("Firewall rule", func() { ginkgo.It("should have correct firewall rules for e2e cluster", func() { nodes := framework.GetReadySchedulableNodesOrDie(cs) if len(nodes.Items) <= 0 { - framework.Failf("Expect at least 1 node, got: %v", len(nodes.Items)) + e2elog.Failf("Expect at least 1 node, got: %v", len(nodes.Items)) } ginkgo.By("Checking if e2e firewall rules are correct") @@ -191,7 +191,7 @@ var _ = SIGDescribe("Firewall rule", func() { ginkgo.By("Checking well known ports on master and nodes are not exposed externally") nodeAddrs := e2enode.FirstAddress(nodes, v1.NodeExternalIP) if len(nodeAddrs) == 0 { - framework.Failf("did not find any node addresses") + e2elog.Failf("did not find any node addresses") } masterAddresses := framework.GetAllMasterAddresses(cs) @@ -208,9 +208,9 @@ var _ = SIGDescribe("Firewall rule", func() { func assertNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) { result := framework.PokeHTTP(ip, port, "/", &framework.HTTPPokeParams{Timeout: timeout}) if result.Status == framework.HTTPError { - framework.Failf("Unexpected error checking for reachability of %s:%d: %v", ip, port, result.Error) + e2elog.Failf("Unexpected error checking for reachability of %s:%d: %v", ip, port, result.Error) } if result.Code != 0 { - framework.Failf("Was unexpectedly able to reach %s:%d", ip, port) + e2elog.Failf("Was unexpectedly able to reach %s:%d", ip, port) } } diff --git a/test/e2e/network/ingress.go b/test/e2e/network/ingress.go index 0845e490ef2..c98df6a66c4 100644 --- a/test/e2e/network/ingress.go +++ b/test/e2e/network/ingress.go @@ -201,7 +201,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { if annotations != nil && (annotations[umKey] != "" || annotations[fwKey] != "" || annotations[tpKey] != "" || annotations[fwsKey] != "" || annotations[tpsKey] != "" || annotations[scKey] != "" || annotations[beKey] != "") { - framework.Failf("unexpected annotations. Expected to not have annotations for urlmap, forwarding rule, target proxy, ssl cert and backends, got: %v", annotations) + e2elog.Failf("unexpected annotations. Expected to not have annotations for urlmap, forwarding rule, target proxy, ssl cert and backends, got: %v", annotations) return true, nil } return false, nil @@ -210,26 +210,26 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { // Verify that the controller does not create any other resource except instance group. // TODO(59778): Check GCE resources specific to this ingress instead of listing all resources. 
if len(gceController.ListURLMaps()) != 0 { - framework.Failf("unexpected url maps, expected none, got: %v", gceController.ListURLMaps()) + e2elog.Failf("unexpected url maps, expected none, got: %v", gceController.ListURLMaps()) } if len(gceController.ListGlobalForwardingRules()) != 0 { - framework.Failf("unexpected forwarding rules, expected none, got: %v", gceController.ListGlobalForwardingRules()) + e2elog.Failf("unexpected forwarding rules, expected none, got: %v", gceController.ListGlobalForwardingRules()) } if len(gceController.ListTargetHTTPProxies()) != 0 { - framework.Failf("unexpected target http proxies, expected none, got: %v", gceController.ListTargetHTTPProxies()) + e2elog.Failf("unexpected target http proxies, expected none, got: %v", gceController.ListTargetHTTPProxies()) } if len(gceController.ListTargetHTTPSProxies()) != 0 { - framework.Failf("unexpected target https proxies, expected none, got: %v", gceController.ListTargetHTTPProxies()) + e2elog.Failf("unexpected target https proxies, expected none, got: %v", gceController.ListTargetHTTPProxies()) } if len(gceController.ListSslCertificates()) != 0 { - framework.Failf("unexpected ssl certificates, expected none, got: %v", gceController.ListSslCertificates()) + e2elog.Failf("unexpected ssl certificates, expected none, got: %v", gceController.ListSslCertificates()) } if len(gceController.ListGlobalBackendServices()) != 0 { - framework.Failf("unexpected backend service, expected none, got: %v", gceController.ListGlobalBackendServices()) + e2elog.Failf("unexpected backend service, expected none, got: %v", gceController.ListGlobalBackendServices()) } // Controller does not have a list command for firewall rule. We use get instead. if fw, err := gceController.GetFirewallRuleOrError(); err == nil { - framework.Failf("unexpected nil error in getting firewall rule, expected firewall NotFound, got firewall: %v", fw) + e2elog.Failf("unexpected nil error in getting firewall rule, expected firewall NotFound, got firewall: %v", fw) } // TODO(nikhiljindal): Check the instance group annotation value and verify with a multizone cluster. @@ -662,16 +662,16 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { filePath := filepath.Join(framework.TestContext.OutputDir, "mci.yaml") output, err := framework.RunKubemciWithKubeconfig("remove-clusters", name, "--ingress="+filePath) if err != nil { - framework.Failf("unexpected error in running kubemci remove-clusters command to remove from all clusters: %s", err) + e2elog.Failf("unexpected error in running kubemci remove-clusters command to remove from all clusters: %s", err) } if !strings.Contains(output, "You should use kubemci delete to delete the ingress completely") { - framework.Failf("unexpected output in removing an ingress from all clusters, expected the output to include: You should use kubemci delete to delete the ingress completely, actual output: %s", output) + e2elog.Failf("unexpected output in removing an ingress from all clusters, expected the output to include: You should use kubemci delete to delete the ingress completely, actual output: %s", output) } // Verify that the ingress is still spread to 1 cluster as expected. 
verifyKubemciStatusHas(name, "is spread across 1 cluster") // remove-clusters should succeed with --force=true if _, err := framework.RunKubemciWithKubeconfig("remove-clusters", name, "--ingress="+filePath, "--force=true"); err != nil { - framework.Failf("unexpected error in running kubemci remove-clusters to remove from all clusters with --force=true: %s", err) + e2elog.Failf("unexpected error in running kubemci remove-clusters to remove from all clusters with --force=true: %s", err) } verifyKubemciStatusHas(name, "is spread across 0 cluster") }) @@ -765,10 +765,10 @@ var _ = SIGDescribe("Loadbalancing: L7", func() { func verifyKubemciStatusHas(name, expectedSubStr string) { statusStr, err := framework.RunKubemciCmd("get-status", name) if err != nil { - framework.Failf("unexpected error in running kubemci get-status %s: %s", name, err) + e2elog.Failf("unexpected error in running kubemci get-status %s: %s", name, err) } if !strings.Contains(statusStr, expectedSubStr) { - framework.Failf("expected status to have sub string %s, actual status: %s", expectedSubStr, statusStr) + e2elog.Failf("expected status to have sub string %s, actual status: %s", expectedSubStr, statusStr) } } @@ -843,7 +843,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ defer func() { ginkgo.By("Cleaning up re-encryption ingress, service and deployment") if errs := jig.DeleteTestResource(f.ClientSet, deployCreated, svcCreated, ingCreated); len(errs) > 0 { - framework.Failf("ginkgo.Failed to cleanup re-encryption ingress: %v", errs) + e2elog.Failf("ginkgo.Failed to cleanup re-encryption ingress: %v", errs) } }() framework.ExpectNoError(err, "ginkgo.Failed to create re-encryption ingress") diff --git a/test/e2e/network/ingress_scale.go b/test/e2e/network/ingress_scale.go index bda4be54b62..dbe21759d43 100644 --- a/test/e2e/network/ingress_scale.go +++ b/test/e2e/network/ingress_scale.go @@ -18,6 +18,7 @@ package network import ( "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/network/scale" "github.com/onsi/ginkgo" @@ -44,19 +45,19 @@ var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() { scaleFramework = scale.NewIngressScaleFramework(f.ClientSet, ns, framework.TestContext.CloudConfig) if err := scaleFramework.PrepareScaleTest(); err != nil { - framework.Failf("Unexpected error while preparing ingress scale test: %v", err) + e2elog.Failf("Unexpected error while preparing ingress scale test: %v", err) } }) ginkgo.AfterEach(func() { if errs := scaleFramework.CleanupScaleTest(); len(errs) != 0 { - framework.Failf("Unexpected error while cleaning up ingress scale test: %v", errs) + e2elog.Failf("Unexpected error while cleaning up ingress scale test: %v", errs) } }) ginkgo.It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() { if errs := scaleFramework.RunScaleTest(); len(errs) != 0 { - framework.Failf("Unexpected error while running ingress scale test: %v", errs) + e2elog.Failf("Unexpected error while running ingress scale test: %v", errs) } }) diff --git a/test/e2e/network/network_policy.go b/test/e2e/network/network_policy.go index b50b7479b4e..c06d937cd4c 100644 --- a/test/e2e/network/network_policy.go +++ b/test/e2e/network/network_policy.go @@ -541,7 +541,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se defer func() { ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podName)) if err := 
f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil { - framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) + e2elog.Failf("unable to cleanup pod %v: %v", podClient.Name, err) } }() @@ -555,7 +555,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se // Collect pod logs when we see a failure. logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName)) if logErr != nil { - framework.Failf("Error getting container logs: %s", logErr) + e2elog.Failf("Error getting container logs: %s", logErr) } // Collect current NetworkPolicies applied in the test namespace. @@ -575,7 +575,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String())) } - framework.Failf("Pod %s should be able to connect to service %s, but was not able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podName, service.Name, logs, policies.Items, pods) + e2elog.Failf("Pod %s should be able to connect to service %s, but was not able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podName, service.Name, logs, policies.Items, pods) // Dump debug information for the test namespace. framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name) @@ -588,7 +588,7 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string, defer func() { ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podName)) if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil { - framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err) + e2elog.Failf("unable to cleanup pod %v: %v", podClient.Name, err) } }() @@ -601,7 +601,7 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string, // Collect pod logs when we see a failure. logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, fmt.Sprintf("%s-container", podName)) if logErr != nil { - framework.Failf("Error getting container logs: %s", logErr) + e2elog.Failf("Error getting container logs: %s", logErr) } // Collect current NetworkPolicies applied in the test namespace. @@ -621,7 +621,7 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string, pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String())) } - framework.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t %v\n\n", podName, service.Name, logs, policies.Items, pods) + e2elog.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t %v\n\n", podName, service.Name, logs, policies.Items, pods) // Dump debug information for the test namespace. 
framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name) @@ -712,11 +712,11 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace, func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1.Service) { ginkgo.By("Cleaning up the server.") if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { - framework.Failf("unable to cleanup pod %v: %v", pod.Name, err) + e2elog.Failf("unable to cleanup pod %v: %v", pod.Name, err) } ginkgo.By("Cleaning up the server's service.") if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(service.Name, nil); err != nil { - framework.Failf("unable to cleanup svc %v: %v", service.Name, err) + e2elog.Failf("unable to cleanup svc %v: %v", service.Name, err) } } @@ -756,6 +756,6 @@ func createNetworkClientPod(f *framework.Framework, namespace *v1.Namespace, pod func cleanupNetworkPolicy(f *framework.Framework, policy *networkingv1.NetworkPolicy) { ginkgo.By("Cleaning up the policy.") if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(policy.Name, nil); err != nil { - framework.Failf("unable to cleanup policy %v: %v", policy.Name, err) + e2elog.Failf("unable to cleanup policy %v: %v", policy.Name, err) } } diff --git a/test/e2e/network/networking.go b/test/e2e/network/networking.go index 0821e0bd1b5..8ca3ef0441d 100644 --- a/test/e2e/network/networking.go +++ b/test/e2e/network/networking.go @@ -23,6 +23,7 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "github.com/onsi/ginkgo" ) @@ -38,10 +39,10 @@ var _ = SIGDescribe("Networking", func() { ginkgo.By("Executing a successful http request from the external internet") resp, err := http.Get("http://google.com") if err != nil { - framework.Failf("Unable to connect/talk to the internet: %v", err) + e2elog.Failf("Unable to connect/talk to the internet: %v", err) } if resp.StatusCode != http.StatusOK { - framework.Failf("Unexpected error code, expected 200, got, %v (%v)", resp.StatusCode, resp) + e2elog.Failf("Unexpected error code, expected 200, got, %v (%v)", resp.StatusCode, resp) } }) @@ -79,7 +80,7 @@ var _ = SIGDescribe("Networking", func() { AbsPath(test.path). DoRaw() if err != nil { - framework.Failf("ginkgo.Failed: %v\nBody: %s", err, string(data)) + e2elog.Failf("ginkgo.Failed: %v\nBody: %s", err, string(data)) } } }) @@ -207,13 +208,13 @@ var _ = SIGDescribe("Networking", func() { // Check if number of endpoints returned are exactly one. eps, err := config.GetEndpointsFromTestContainer("http", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort, framework.SessionAffinityChecks) if err != nil { - framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err) + e2elog.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err) } if len(eps) == 0 { - framework.Failf("Unexpected no endpoints return") + e2elog.Failf("Unexpected no endpoints return") } if len(eps) > 1 { - framework.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps) + e2elog.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps) } }) @@ -224,13 +225,13 @@ var _ = SIGDescribe("Networking", func() { // Check if number of endpoints returned are exactly one. 
eps, err := config.GetEndpointsFromTestContainer("udp", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort, framework.SessionAffinityChecks) if err != nil { - framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err) + e2elog.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err) } if len(eps) == 0 { - framework.Failf("Unexpected no endpoints return") + e2elog.Failf("Unexpected no endpoints return") } if len(eps) > 1 { - framework.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps) + e2elog.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps) } }) }) diff --git a/test/e2e/network/networking_perf.go b/test/e2e/network/networking_perf.go index ecf05e83ac1..58831a60887 100644 --- a/test/e2e/network/networking_perf.go +++ b/test/e2e/network/networking_perf.go @@ -87,7 +87,7 @@ func networkingIPerfTest(isIPv6 bool) { ) if err != nil { - framework.Failf("Fatal error waiting for iperf server endpoint : %v", err) + e2elog.Failf("Fatal error waiting for iperf server endpoint : %v", err) } iperfClientPodLabels := f.CreatePodsPerNodeForSimpleApp( @@ -134,9 +134,9 @@ func networkingIPerfTest(isIPv6 bool) { pods, err2 := iperfClusterVerification.WaitFor(expectedCli, iperfTimeout) if err2 != nil { - framework.Failf("Error in wait...") + e2elog.Failf("Error in wait...") } else if len(pods) < expectedCli { - framework.Failf("IPerf restuls : Only got %v out of %v, after waiting %v", len(pods), expectedCli, iperfTimeout) + e2elog.Failf("IPerf restuls : Only got %v out of %v, after waiting %v", len(pods), expectedCli, iperfTimeout) } else { // For each builds up a collection of IPerfRecords iperfClusterVerification.ForEach( @@ -146,7 +146,7 @@ func networkingIPerfTest(isIPv6 bool) { e2elog.Logf(resultS) iperfResults.Add(NewIPerf(resultS)) } else { - framework.Failf("Unexpected error, %v when running forEach on the pods.", err) + e2elog.Failf("Unexpected error, %v when running forEach on the pods.", err) } }) } diff --git a/test/e2e/network/proxy.go b/test/e2e/network/proxy.go index da24b2608a8..63ffddb2232 100644 --- a/test/e2e/network/proxy.go +++ b/test/e2e/network/proxy.go @@ -255,7 +255,7 @@ var _ = SIGDescribe("Proxy", func() { e2elog.Logf("Pod %s has the following error logs: %s", pods[0].Name, body) } - framework.Failf(strings.Join(errs, "\n")) + e2elog.Failf(strings.Join(errs, "\n")) } }) }) diff --git a/test/e2e/network/service.go b/test/e2e/network/service.go index 3dac4c1b20a..ef8295107f5 100644 --- a/test/e2e/network/service.go +++ b/test/e2e/network/service.go @@ -349,7 +349,7 @@ var _ = SIGDescribe("Services", func() { hosts, err := e2essh.NodeSSHHosts(cs) framework.ExpectNoError(err, "failed to find external/internal IPs for every node") if len(hosts) == 0 { - framework.Failf("No ssh-able nodes") + e2elog.Failf("No ssh-able nodes") } host := hosts[0] @@ -374,7 +374,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc3, ns) if svc2IP == svc3IP { - framework.Failf("service IPs conflict: %v", svc2IP) + e2elog.Failf("service IPs conflict: %v", svc2IP) } ginkgo.By("verifying service " + svc2 + " is still up") @@ -407,13 +407,13 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns) if svc1IP == svc2IP { - framework.Failf("VIPs conflict: %v", svc1IP) + e2elog.Failf("VIPs conflict: %v", svc1IP) 
} hosts, err := e2essh.NodeSSHHosts(cs) framework.ExpectNoError(err, "failed to find external/internal IPs for every node") if len(hosts) == 0 { - framework.Failf("No ssh-able nodes") + e2elog.Failf("No ssh-able nodes") } host := hosts[0] @@ -422,7 +422,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.By(fmt.Sprintf("Restarting kube-proxy on %v", host)) if err := framework.RestartKubeProxy(host); err != nil { - framework.Failf("error restarting kube-proxy: %v", err) + e2elog.Failf("error restarting kube-proxy: %v", err) } framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) @@ -434,7 +434,7 @@ var _ = SIGDescribe("Services", func() { sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, framework.TestContext.Provider) if err != nil || result.Code != 0 { e2essh.LogResult(result) - framework.Failf("couldn't remove iptable rules: %v", err) + e2elog.Failf("couldn't remove iptable rules: %v", err) } framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) @@ -459,7 +459,7 @@ var _ = SIGDescribe("Services", func() { hosts, err := e2essh.NodeSSHHosts(cs) framework.ExpectNoError(err, "failed to find external/internal IPs for every node") if len(hosts) == 0 { - framework.Failf("No ssh-able nodes") + e2elog.Failf("No ssh-able nodes") } host := hosts[0] @@ -468,11 +468,11 @@ var _ = SIGDescribe("Services", func() { // Restart apiserver ginkgo.By("Restarting apiserver") if err := framework.RestartApiserver(cs); err != nil { - framework.Failf("error restarting apiserver: %v", err) + e2elog.Failf("error restarting apiserver: %v", err) } ginkgo.By("Waiting for apiserver to come up by polling /healthz") if err := framework.WaitForApiserverUp(cs); err != nil { - framework.Failf("error while waiting for apiserver up: %v", err) + e2elog.Failf("error while waiting for apiserver up: %v", err) } framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) @@ -484,7 +484,7 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns) if svc1IP == svc2IP { - framework.Failf("VIPs conflict: %v", svc1IP) + e2elog.Failf("VIPs conflict: %v", svc1IP) } framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort)) framework.ExpectNoError(framework.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort)) @@ -520,7 +520,7 @@ var _ = SIGDescribe("Services", func() { cmd := fmt.Sprintf(`for i in $(seq 1 300); do if ss -ant46 'sport = :%d' | grep ^LISTEN; then exit 0; fi; sleep 1; done; exit 1`, nodePort) stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd) if err != nil { - framework.Failf("expected node port %d to be in use, stdout: %v. err: %v", nodePort, stdout, err) + e2elog.Failf("expected node port %d to be in use, stdout: %v. 
err: %v", nodePort, stdout, err) } }) @@ -569,7 +569,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("verifying that TCP and UDP use the same port") if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port { - framework.Failf("expected to use the same port for TCP and UDP") + e2elog.Failf("expected to use the same port for TCP and UDP") } svcPort := int(tcpService.Spec.Ports[0].Port) e2elog.Logf("service port (TCP and UDP): %d", svcPort) @@ -655,10 +655,10 @@ var _ = SIGDescribe("Services", func() { tcpService = jig.WaitForLoadBalancerOrFail(ns1, tcpService.Name, loadBalancerCreateTimeout) jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer) if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort { - framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort) + e2elog.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort) } if requestedIP != "" && framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP { - framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + e2elog.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) e2elog.Logf("TCP load balancer: %s", tcpIngressIP) @@ -675,7 +675,7 @@ var _ = SIGDescribe("Services", func() { // Deleting it after it is attached "demotes" it to an // ephemeral IP, which can be auto-released. if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil { - framework.Failf("failed to release static IP %s: %v", staticIPName, err) + e2elog.Failf("failed to release static IP %s: %v", staticIPName, err) } staticIPName = "" } @@ -688,14 +688,14 @@ var _ = SIGDescribe("Services", func() { udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name, loadBalancerCreateTimeout) jig.SanityCheckService(udpService, v1.ServiceTypeLoadBalancer) if int(udpService.Spec.Ports[0].NodePort) != udpNodePort { - framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort) + e2elog.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort) } udpIngressIP = framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) e2elog.Logf("UDP load balancer: %s", udpIngressIP) ginkgo.By("verifying that TCP and UDP use different load balancers") if tcpIngressIP == udpIngressIP { - framework.Failf("Load balancers are not different: %s", framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + e2elog.Failf("Load balancers are not different: %s", framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } } @@ -721,10 +721,10 @@ var _ = SIGDescribe("Services", func() { tcpNodePortOld := tcpNodePort tcpNodePort = int(tcpService.Spec.Ports[0].NodePort) if tcpNodePort == tcpNodePortOld { - framework.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort) + e2elog.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort) } if framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { - framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when 
not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + e2elog.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } e2elog.Logf("TCP node port: %d", tcpNodePort) @@ -738,10 +738,10 @@ var _ = SIGDescribe("Services", func() { udpNodePortOld := udpNodePort udpNodePort = int(udpService.Spec.Ports[0].NodePort) if udpNodePort == udpNodePortOld { - framework.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort) + e2elog.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort) } if loadBalancerSupportsUDP && framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { - framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) + e2elog.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) } e2elog.Logf("UDP node port: %d", udpNodePort) @@ -775,13 +775,13 @@ var _ = SIGDescribe("Services", func() { svcPortOld := svcPort svcPort = int(tcpService.Spec.Ports[0].Port) if svcPort == svcPortOld { - framework.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort) + e2elog.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort) } if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort { - framework.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort) + e2elog.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort) } if framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP { - framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) + e2elog.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])) } ginkgo.By("changing the UDP service's port") @@ -794,13 +794,13 @@ var _ = SIGDescribe("Services", func() { jig.SanityCheckService(udpService, v1.ServiceTypeNodePort) } if int(udpService.Spec.Ports[0].Port) != svcPort { - framework.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port) + e2elog.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port) } if int(udpService.Spec.Ports[0].NodePort) != udpNodePort { - framework.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort) + e2elog.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort) } if loadBalancerSupportsUDP && framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP { - framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) + e2elog.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])) } e2elog.Logf("service port (TCP and UDP): %d", svcPort) @@ -928,11 +928,11 @@ var _ = SIGDescribe("Services", func() { }) jig.SanityCheckService(newService, v1.ServiceTypeNodePort) if len(newService.Spec.Ports) != 2 { - framework.Failf("new service should have 
two Ports") + e2elog.Failf("new service should have two Ports") } for _, port := range newService.Spec.Ports { if port.NodePort == 0 { - framework.Failf("new service failed to allocate NodePort for Port %s", port.Name) + e2elog.Failf("new service failed to allocate NodePort for Port %s", port.Name) } e2elog.Logf("new service allocates NodePort %d for Port %s", port.NodePort, port.Name) @@ -1043,7 +1043,7 @@ var _ = SIGDescribe("Services", func() { defer ginkgo.GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { - framework.Failf("errors in cleanup: %v", errs) + e2elog.Failf("errors in cleanup: %v", errs) } }() @@ -1074,10 +1074,10 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) if len(result.Spec.Ports) != 2 { - framework.Failf("got unexpected len(Spec.Ports) for new service: %v", result) + e2elog.Failf("got unexpected len(Spec.Ports) for new service: %v", result) } if result.Spec.Ports[0].NodePort != result.Spec.Ports[1].NodePort { - framework.Failf("should use same NodePort for new service: %v", result) + e2elog.Failf("should use same NodePort for new service: %v", result) } }) @@ -1093,7 +1093,7 @@ var _ = SIGDescribe("Services", func() { defer ginkgo.GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { - framework.Failf("errors in cleanup: %v", errs) + e2elog.Failf("errors in cleanup: %v", errs) } }() @@ -1104,14 +1104,14 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName1, ns) if result.Spec.Type != v1.ServiceTypeNodePort { - framework.Failf("got unexpected Spec.Type for new service: %v", result) + e2elog.Failf("got unexpected Spec.Type for new service: %v", result) } if len(result.Spec.Ports) != 1 { - framework.Failf("got unexpected len(Spec.Ports) for new service: %v", result) + e2elog.Failf("got unexpected len(Spec.Ports) for new service: %v", result) } port := result.Spec.Ports[0] if port.NodePort == 0 { - framework.Failf("got unexpected Spec.Ports[0].NodePort for new service: %v", result) + e2elog.Failf("got unexpected Spec.Ports[0].NodePort for new service: %v", result) } ginkgo.By("creating service " + serviceName2 + " with conflicting NodePort") @@ -1121,7 +1121,7 @@ var _ = SIGDescribe("Services", func() { service2.Spec.Ports[0].NodePort = port.NodePort result2, err := t.CreateService(service2) if err == nil { - framework.Failf("Created service with conflicting NodePort: %v", result2) + e2elog.Failf("Created service with conflicting NodePort: %v", result2) } expectedErr := fmt.Sprintf("%d.*port is already allocated", port.NodePort) gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr)) @@ -1145,7 +1145,7 @@ var _ = SIGDescribe("Services", func() { defer ginkgo.GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { - framework.Failf("errors in cleanup: %v", errs) + e2elog.Failf("errors in cleanup: %v", errs) } }() @@ -1157,17 +1157,17 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) if service.Spec.Type != v1.ServiceTypeNodePort { - framework.Failf("got unexpected Spec.Type for new service: %v", service) + e2elog.Failf("got unexpected Spec.Type for new service: %v", service) } if len(service.Spec.Ports) != 1 { - framework.Failf("got unexpected len(Spec.Ports) for new service: %v", service) + e2elog.Failf("got unexpected len(Spec.Ports) for new service: %v", service) } port := 
service.Spec.Ports[0] if port.NodePort == 0 { - framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) + e2elog.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) } if !framework.ServiceNodePortRange.Contains(int(port.NodePort)) { - framework.Failf("got unexpected (out-of-range) port for new service: %v", service) + e2elog.Failf("got unexpected (out-of-range) port for new service: %v", service) } outOfRangeNodePort := 0 @@ -1183,7 +1183,7 @@ var _ = SIGDescribe("Services", func() { s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort) }) if err == nil { - framework.Failf("failed to prevent update of service with out-of-range NodePort: %v", result) + e2elog.Failf("failed to prevent update of service with out-of-range NodePort: %v", result) } expectedErr := fmt.Sprintf("%d.*port is not in the valid range", outOfRangeNodePort) gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr)) @@ -1198,7 +1198,7 @@ var _ = SIGDescribe("Services", func() { service.Spec.Ports[0].NodePort = int32(outOfRangeNodePort) service, err = t.CreateService(service) if err == nil { - framework.Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service) + e2elog.Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service) } gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr)) }) @@ -1213,7 +1213,7 @@ var _ = SIGDescribe("Services", func() { defer ginkgo.GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { - framework.Failf("errors in cleanup: %v", errs) + e2elog.Failf("errors in cleanup: %v", errs) } }() @@ -1225,17 +1225,17 @@ var _ = SIGDescribe("Services", func() { framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns) if service.Spec.Type != v1.ServiceTypeNodePort { - framework.Failf("got unexpected Spec.Type for new service: %v", service) + e2elog.Failf("got unexpected Spec.Type for new service: %v", service) } if len(service.Spec.Ports) != 1 { - framework.Failf("got unexpected len(Spec.Ports) for new service: %v", service) + e2elog.Failf("got unexpected len(Spec.Ports) for new service: %v", service) } port := service.Spec.Ports[0] if port.NodePort == 0 { - framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) + e2elog.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service) } if !framework.ServiceNodePortRange.Contains(int(port.NodePort)) { - framework.Failf("got unexpected (out-of-range) port for new service: %v", service) + e2elog.Failf("got unexpected (out-of-range) port for new service: %v", service) } nodePort := port.NodePort @@ -1255,7 +1255,7 @@ var _ = SIGDescribe("Services", func() { } return true, nil }); pollErr != nil { - framework.Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, framework.KubeProxyLagTimeout, stdout) + e2elog.Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, framework.KubeProxyLagTimeout, stdout) } ginkgo.By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort)) @@ -1275,7 +1275,7 @@ var _ = SIGDescribe("Services", func() { defer ginkgo.GinkgoRecover() errs := t.Cleanup() if len(errs) != 0 { - framework.Failf("errors in cleanup: %v", errs) + e2elog.Failf("errors in cleanup: %v", errs) } }() @@ -1347,7 +1347,7 @@ var _ = SIGDescribe("Services", func() { } return true, nil }); pollErr != nil { - framework.Failf("expected 
un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) + e2elog.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) } ginkgo.By("Scaling down replication controller to zero") @@ -1370,7 +1370,7 @@ var _ = SIGDescribe("Services", func() { } return true, nil }); pollErr != nil { - framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) + e2elog.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) } ginkgo.By("Update service to tolerate unready services again") @@ -1390,7 +1390,7 @@ var _ = SIGDescribe("Services", func() { } return true, nil }); pollErr != nil { - framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) + e2elog.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, framework.KubeProxyLagTimeout, stdout) } ginkgo.By("Remove pods immediately") @@ -1550,7 +1550,7 @@ var _ = SIGDescribe("Services", func() { e2elog.Logf("Successful curl; stdout: %v", stdout) return true, nil }); pollErr != nil { - framework.Failf("ginkgo.Failed to hit ILB IP, err: %v", pollErr) + e2elog.Failf("ginkgo.Failed to hit ILB IP, err: %v", pollErr) } ginkgo.By("switching to external type LoadBalancer") @@ -1566,7 +1566,7 @@ var _ = SIGDescribe("Services", func() { lbIngress = &svc.Status.LoadBalancer.Ingress[0] return !isInternalEndpoint(lbIngress), nil }); pollErr != nil { - framework.Failf("Loadbalancer IP not changed to external.") + e2elog.Failf("Loadbalancer IP not changed to external.") } // should have an external IP. jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) @@ -1595,7 +1595,7 @@ var _ = SIGDescribe("Services", func() { lbIngress = &svc.Status.LoadBalancer.Ingress[0] return isInternalEndpoint(lbIngress), nil }); pollErr != nil { - framework.Failf("Loadbalancer IP not changed to internal.") + e2elog.Failf("Loadbalancer IP not changed to internal.") } // should have the given static internal IP. 
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer) @@ -1616,11 +1616,11 @@ var _ = SIGDescribe("Services", func() { framework.SkipUnlessProviderIs("gce") clusterID, err := gce.GetClusterID(cs) if err != nil { - framework.Failf("framework.GetClusterID(cs) = _, %v; want nil", err) + e2elog.Failf("framework.GetClusterID(cs) = _, %v; want nil", err) } gceCloud, err := gce.GetGCECloud() if err != nil { - framework.Failf("framework.GetGCECloud() = _, %v; want nil", err) + e2elog.Failf("framework.GetGCECloud() = _, %v; want nil", err) } namespace := f.Namespace.Name @@ -1647,22 +1647,22 @@ var _ = SIGDescribe("Services", func() { hcName := gcecloud.MakeNodesHealthCheckName(clusterID) hc, err := gceCloud.GetHTTPHealthCheck(hcName) if err != nil { - framework.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err) + e2elog.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err) } gomega.Expect(hc.CheckIntervalSec).To(gomega.Equal(gceHcCheckIntervalSeconds)) ginkgo.By("modify the health check interval") hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1 if err = gceCloud.UpdateHTTPHealthCheck(hc); err != nil { - framework.Failf("gcecloud.UpdateHttpHealthCheck(%#v) = %v; want nil", hc, err) + e2elog.Failf("gcecloud.UpdateHttpHealthCheck(%#v) = %v; want nil", hc, err) } ginkgo.By("restart kube-controller-manager") if err := framework.RestartControllerManager(); err != nil { - framework.Failf("framework.RestartControllerManager() = %v; want nil", err) + e2elog.Failf("framework.RestartControllerManager() = %v; want nil", err) } if err := framework.WaitForControllerManagerUp(); err != nil { - framework.Failf("framework.WaitForControllerManagerUp() = %v; want nil", err) + e2elog.Failf("framework.WaitForControllerManagerUp() = %v; want nil", err) } ginkgo.By("health check should be reconciled") @@ -1676,7 +1676,7 @@ var _ = SIGDescribe("Services", func() { e2elog.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec) return hc.CheckIntervalSec == gceHcCheckIntervalSeconds, nil }); pollErr != nil { - framework.Failf("Health check %q does not reconcile its check interval to %d.", hcName, gceHcCheckIntervalSeconds) + e2elog.Failf("Health check %q does not reconcile its check interval to %d.", hcName, gceHcCheckIntervalSeconds) } }) @@ -1779,7 +1779,7 @@ var _ = SIGDescribe("Services", func() { hosts, err := e2essh.NodeSSHHosts(cs) framework.ExpectNoError(err, "failed to find external/internal IPs for every node") if len(hosts) == 0 { - framework.Failf("No ssh-able nodes") + e2elog.Failf("No ssh-able nodes") } host := hosts[0] @@ -1826,7 +1826,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("creating a service with no endpoints") _, err := jig.CreateServiceWithServicePort(labels, namespace, ports) if err != nil { - framework.Failf("ginkgo.Failed to create service: %v", err) + e2elog.Failf("ginkgo.Failed to create service: %v", err) } nodeName := nodes.Items[0].Name @@ -1884,7 +1884,7 @@ var _ = SIGDescribe("Services", func() { ginkgo.By("Manually add load balancer cleanup finalizer to service") svc.Finalizers = append(svc.Finalizers, "service.kubernetes.io/load-balancer-cleanup") if _, err := cs.CoreV1().Services(svc.Namespace).Update(svc); err != nil { - framework.Failf("Failed to add finalizer to service %s/%s: %v", svc.Namespace, svc.Name, err) + e2elog.Failf("Failed to add finalizer to service %s/%s: %v", svc.Namespace, svc.Name, err) } }) @@ -1925,7 +1925,7 @@ var _ = SIGDescribe("Services", func() { func waitForServiceDeletedWithFinalizer(cs 
clientset.Interface, namespace, name string) { ginkgo.By("Delete service with finalizer") if err := cs.CoreV1().Services(namespace).Delete(name, nil); err != nil { - framework.Failf("Failed to delete service %s/%s", namespace, name) + e2elog.Failf("Failed to delete service %s/%s", namespace, name) } ginkgo.By("Wait for service to disappear") @@ -1941,7 +1941,7 @@ func waitForServiceDeletedWithFinalizer(cs clientset.Interface, namespace, name e2elog.Logf("Service %s/%s still exists with finalizers: %v", namespace, name, svc.Finalizers) return false, nil }); pollErr != nil { - framework.Failf("Failed to wait for service to disappear: %v", pollErr) + e2elog.Failf("Failed to wait for service to disappear: %v", pollErr) } } @@ -1964,7 +1964,7 @@ func waitForServiceUpdatedWithFinalizer(cs clientset.Interface, namespace, name } return true, nil }); pollErr != nil { - framework.Failf("Failed to wait for service to hasFinalizer=%t: %v", hasFinalizer, pollErr) + e2elog.Failf("Failed to wait for service to hasFinalizer=%t: %v", hasFinalizer, pollErr) } } @@ -2007,7 +2007,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc)) healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) if healthCheckNodePort == 0 { - framework.Failf("Service HealthCheck NodePort was not allocated") + e2elog.Failf("Service HealthCheck NodePort was not allocated") } defer func() { jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout) @@ -2032,7 +2032,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { ginkgo.By("checking if Source IP is preserved") if strings.HasPrefix(clientIP, "10.") { - framework.Failf("Source IP was NOT preserved") + e2elog.Failf("Source IP was NOT preserved") } }) @@ -2058,7 +2058,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { clientIP := content.String() e2elog.Logf("ClientIP detected by target pod using NodePort is %s", clientIP) if strings.HasPrefix(clientIP, "10.") { - framework.Failf("Source IP was NOT preserved") + e2elog.Failf("Source IP was NOT preserved") } } }) @@ -2088,7 +2088,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { healthCheckNodePort := int(svc.Spec.HealthCheckNodePort) if healthCheckNodePort == 0 { - framework.Failf("Service HealthCheck NodePort was not allocated") + e2elog.Failf("Service HealthCheck NodePort was not allocated") } ips := e2enode.CollectAddresses(nodes, v1.NodeExternalIP) @@ -2175,7 +2175,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { srcIP = strings.TrimSpace(strings.Split(stdout, ":")[0]) return srcIP == execPod.Status.PodIP, nil }); pollErr != nil { - framework.Failf("Source IP not preserved from %v, expected '%v' got '%v'", podName, execPod.Status.PodIP, srcIP) + e2elog.Failf("Source IP not preserved from %v, expected '%v' got '%v'", podName, execPod.Status.PodIP, srcIP) } }) @@ -2186,7 +2186,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { nodes := jig.GetNodes(framework.MaxNodesForEndpointsTests) if len(nodes.Items) < 2 { - framework.Failf("Need at least 2 nodes to verify source ip from a node without endpoint") + e2elog.Failf("Need at least 2 nodes to verify source ip from a node without endpoint") } svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil) @@ -2205,7 +2205,7 @@ var _ = SIGDescribe("ESIPP [Slow] 
[DisabledForLargeClusters]", func() { svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster }) if svc.Spec.HealthCheckNodePort > 0 { - framework.Failf("Service HealthCheck NodePort still present") + e2elog.Failf("Service HealthCheck NodePort still present") } endpointNodeMap := jig.GetEndpointNodes(svc) @@ -2241,7 +2241,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { return false, nil } if pollErr := wait.PollImmediate(framework.Poll, framework.ServiceTestTimeout, pollfn); pollErr != nil { - framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s", + e2elog.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s", nodeName, healthCheckNodePort, body.String()) } } @@ -2258,7 +2258,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { return false, nil }) if pollErr != nil { - framework.Failf("Source IP WAS preserved even after ESIPP turned off. Got %v, expected a ten-dot cluster ip.", clientIP) + e2elog.Failf("Source IP WAS preserved even after ESIPP turned off. Got %v, expected a ten-dot cluster ip.", clientIP) } // TODO: We need to attempt to create another service with the previously @@ -2283,7 +2283,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() { return false, nil }) if pollErr != nil { - framework.Failf("Source IP (%v) is not the client IP even after ESIPP turned on, expected a public IP.", clientIP) + e2elog.Failf("Source IP (%v) is not the client IP even after ESIPP turned on, expected a public IP.", clientIP) } }) }) @@ -2327,7 +2327,7 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam outputs := strings.Split(strings.TrimSpace(stdout), "=") if len(outputs) != 2 { // ginkgo.Fail the test if output format is unexpected. 
- framework.Failf("exec pod returned unexpected stdout format: [%v]\n", stdout) + e2elog.Failf("exec pod returned unexpected stdout format: [%v]\n", stdout) } return execPod.Status.PodIP, outputs[1] } diff --git a/test/e2e/network/service_latency.go b/test/e2e/network/service_latency.go index 1946e68c344..b54ac0e776c 100644 --- a/test/e2e/network/service_latency.go +++ b/test/e2e/network/service_latency.go @@ -95,7 +95,7 @@ var _ = SIGDescribe("Service endpoints latency", func() { } if n < 2 { failing.Insert("Less than two runs succeeded; aborting.") - framework.Failf(strings.Join(failing.List(), "\n")) + e2elog.Failf(strings.Join(failing.List(), "\n")) } percentile := func(p int) time.Duration { est := n * p / 100 @@ -122,7 +122,7 @@ var _ = SIGDescribe("Service endpoints latency", func() { if failing.Len() > 0 { errList := strings.Join(failing.List(), "\n") helpfulInfo := fmt.Sprintf("\n50, 90, 99 percentiles: %v %v %v", p50, p90, p99) - framework.Failf(errList + helpfulInfo) + e2elog.Failf(errList + helpfulInfo) } }) }) diff --git a/test/e2e/network/util_iperf.go b/test/e2e/network/util_iperf.go index 8f47199de0d..77142060744 100644 --- a/test/e2e/network/util_iperf.go +++ b/test/e2e/network/util_iperf.go @@ -24,7 +24,6 @@ import ( "strconv" "strings" - "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" ) @@ -74,7 +73,7 @@ func NewIPerf(csvLine string) *IPerfResult { csvLine = strings.Trim(csvLine, "\n") slice := StrSlice(strings.Split(csvLine, ",")) if len(slice) != 9 { - framework.Failf("Incorrect fields in the output: %v (%v out of 9)", slice, len(slice)) + e2elog.Failf("Incorrect fields in the output: %v (%v out of 9)", slice, len(slice)) } i := IPerfResult{} i.date = slice.get(0) @@ -103,7 +102,7 @@ func (s StrSlice) get(i int) string { func intOrFail(debugName string, rawValue string) int64 { value, err := strconv.ParseInt(rawValue, 10, 64) if err != nil { - framework.Failf("Failed parsing value %v from the string '%v' as an integer", debugName, rawValue) + e2elog.Failf("Failed parsing value %v from the string '%v' as an integer", debugName, rawValue) } return value } diff --git a/test/e2e/node/crictl.go b/test/e2e/node/crictl.go index 026dd546726..f0f885f86dd 100644 --- a/test/e2e/node/crictl.go +++ b/test/e2e/node/crictl.go @@ -42,7 +42,7 @@ var _ = SIGDescribe("crictl", func() { ginkgo.By("Getting all nodes' SSH-able IP addresses") hosts, err := e2essh.NodeSSHHosts(f.ClientSet) if err != nil { - framework.Failf("Error getting node hostnames: %v", err) + e2elog.Failf("Error getting node hostnames: %v", err) } testCases := []struct { @@ -60,7 +60,7 @@ var _ = SIGDescribe("crictl", func() { result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider) stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr) if err != nil { - framework.Failf("Ran %q on %q, got error %v", testCase.cmd, host, err) + e2elog.Failf("Ran %q on %q, got error %v", testCase.cmd, host, err) } // Log the stdout/stderr output. // TODO: Verify the output. 
diff --git a/test/e2e/node/events.go b/test/e2e/node/events.go index aa7841bf48e..a135804a04c 100644 --- a/test/e2e/node/events.go +++ b/test/e2e/node/events.go @@ -73,7 +73,7 @@ var _ = SIGDescribe("Events", func() { podClient.Delete(pod.Name, nil) }() if _, err := podClient.Create(pod); err != nil { - framework.Failf("Failed to create pod: %v", err) + e2elog.Failf("Failed to create pod: %v", err) } framework.ExpectNoError(f.WaitForPodRunning(pod.Name)) @@ -87,7 +87,7 @@ var _ = SIGDescribe("Events", func() { ginkgo.By("retrieving the pod") podWithUID, err := podClient.Get(pod.Name, metav1.GetOptions{}) if err != nil { - framework.Failf("Failed to get pod: %v", err) + e2elog.Failf("Failed to get pod: %v", err) } e2elog.Logf("%+v\n", podWithUID) var events *v1.EventList diff --git a/test/e2e/node/kubelet_perf.go b/test/e2e/node/kubelet_perf.go index 1ea541e51d6..59dddf1b366 100644 --- a/test/e2e/node/kubelet_perf.go +++ b/test/e2e/node/kubelet_perf.go @@ -152,7 +152,7 @@ func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsageP } } if len(errList) > 0 { - framework.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n")) + e2elog.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n")) } } @@ -186,7 +186,7 @@ func verifyCPULimits(expected framework.ContainersCPUSummary, actual framework.N } } if len(errList) > 0 { - framework.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n")) + e2elog.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n")) } } diff --git a/test/e2e/node/pod_gc.go b/test/e2e/node/pod_gc.go index 73de4edc863..fb15094e5bb 100644 --- a/test/e2e/node/pod_gc.go +++ b/test/e2e/node/pod_gc.go @@ -44,7 +44,7 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]" pod.Status.Phase = v1.PodFailed pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).UpdateStatus(pod) if err != nil { - framework.Failf("err failing pod: %v", err) + e2elog.Failf("err failing pod: %v", err) } count++ @@ -76,7 +76,7 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]" return true, nil }) if pollErr != nil { - framework.Failf("Failed to GC pods within %v, %v pods remaining, error: %v", timeout, len(pods.Items), err) + e2elog.Failf("Failed to GC pods within %v, %v pods remaining, error: %v", timeout, len(pods.Items), err) } }) }) diff --git a/test/e2e/node/pre_stop.go b/test/e2e/node/pre_stop.go index 0f88c801648..7c419986f24 100644 --- a/test/e2e/node/pre_stop.go +++ b/test/e2e/node/pre_stop.go @@ -142,7 +142,7 @@ func testPreStop(c clientset.Interface, ns string) { if err != nil { if ctx.Err() != nil { - framework.Failf("Error validating prestop: %v", err) + e2elog.Failf("Error validating prestop: %v", err) return true, err } ginkgo.By(fmt.Sprintf("Error validating prestop: %v", err)) diff --git a/test/e2e/node/ssh.go b/test/e2e/node/ssh.go index 85668576b48..5126c3e6746 100644 --- a/test/e2e/node/ssh.go +++ b/test/e2e/node/ssh.go @@ -47,7 +47,7 @@ var _ = SIGDescribe("SSH", func() { ginkgo.By("Getting all nodes' SSH-able IP addresses") hosts, err := e2essh.NodeSSHHosts(f.ClientSet) if err != nil { - framework.Failf("Error getting node hostnames: %v", err) + e2elog.Failf("Error getting node hostnames: %v", err) } testCases := []struct { @@ -82,16 +82,16 @@ var _ = SIGDescribe("SSH", func() { result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider) stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr) if 
err != testCase.expectedError { - framework.Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError) + e2elog.Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError) } if testCase.checkStdout && stdout != testCase.expectedStdout { - framework.Failf("Ran %s on %s, got stdout '%s', expected '%s'", testCase.cmd, host, stdout, testCase.expectedStdout) + e2elog.Failf("Ran %s on %s, got stdout '%s', expected '%s'", testCase.cmd, host, stdout, testCase.expectedStdout) } if stderr != testCase.expectedStderr { - framework.Failf("Ran %s on %s, got stderr '%s', expected '%s'", testCase.cmd, host, stderr, testCase.expectedStderr) + e2elog.Failf("Ran %s on %s, got stderr '%s', expected '%s'", testCase.cmd, host, stderr, testCase.expectedStderr) } if result.Code != testCase.expectedCode { - framework.Failf("Ran %s on %s, got exit code %d, expected %d", testCase.cmd, host, result.Code, testCase.expectedCode) + e2elog.Failf("Ran %s on %s, got exit code %d, expected %d", testCase.cmd, host, result.Code, testCase.expectedCode) } // Show stdout, stderr for logging purposes. if len(stdout) > 0 { @@ -106,7 +106,7 @@ var _ = SIGDescribe("SSH", func() { // Quickly test that SSH itself errors correctly. ginkgo.By("SSH'ing to a nonexistent host") if _, err = e2essh.SSH(`echo "hello"`, "i.do.not.exist", framework.TestContext.Provider); err == nil { - framework.Failf("Expected error trying to SSH to nonexistent host.") + e2elog.Failf("Expected error trying to SSH to nonexistent host.") } }) }) diff --git a/test/e2e/scalability/density.go b/test/e2e/scalability/density.go index 330320e2340..e36f9c94ee5 100644 --- a/test/e2e/scalability/density.go +++ b/test/e2e/scalability/density.go @@ -723,7 +723,7 @@ var _ = SIGDescribe("Density", func() { case batch.Kind("Job"): configs[i] = &testutils.JobConfig{RCConfig: *baseConfig} default: - framework.Failf("Unsupported kind: %v", itArg.kind) + e2elog.Failf("Unsupported kind: %v", itArg.kind) } } @@ -787,7 +787,7 @@ var _ = SIGDescribe("Density", func() { if startTime != metav1.NewTime(time.Time{}) { runTimes[p.Name] = startTime } else { - framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name) + e2elog.Failf("Pod %v is reported to be running, but none of its containers is", p.Name) } } } @@ -876,7 +876,7 @@ var _ = SIGDescribe("Density", func() { waitTimeout := 10 * time.Minute for start := time.Now(); len(watchTimes) < watchTimesLen+nodeCount; time.Sleep(10 * time.Second) { if time.Since(start) < waitTimeout { - framework.Failf("Timeout reached waiting for all Pods being observed by the watch.") + e2elog.Failf("Timeout reached waiting for all Pods being observed by the watch.") } } diff --git a/test/e2e/scalability/load.go b/test/e2e/scalability/load.go index ba42032ba49..6acd72bb263 100644 --- a/test/e2e/scalability/load.go +++ b/test/e2e/scalability/load.go @@ -577,7 +577,7 @@ func GenerateConfigsForGroup( case batch.Kind("Job"): config = &testutils.JobConfig{RCConfig: *baseConfig} default: - framework.Failf("Unsupported kind for config creation: %v", kind) + e2elog.Failf("Unsupported kind for config creation: %v", kind) } configs = append(configs, config) } diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go index a595e8787e9..16bc7ca452b 100644 --- a/test/e2e/scheduling/limit_range.go +++ b/test/e2e/scheduling/limit_range.go @@ -75,10 +75,10 @@ var _ = SIGDescribe("LimitRange", func() { select { case event, _ := 
<-w.ResultChan(): if event.Type != watch.Added { - framework.Failf("Failed to observe pod creation: %v", event) + e2elog.Failf("Failed to observe pod creation: %v", event) } case <-time.After(framework.ServiceRespondingTimeout): - framework.Failf("Timeout while waiting for LimitRange creation") + e2elog.Failf("Timeout while waiting for LimitRange creation") } ginkgo.By("Fetching the LimitRange to ensure it has proper values") diff --git a/test/e2e/scheduling/nvidia-gpus.go b/test/e2e/scheduling/nvidia-gpus.go index f1fe190d722..e734a9a75a7 100644 --- a/test/e2e/scheduling/nvidia-gpus.go +++ b/test/e2e/scheduling/nvidia-gpus.go @@ -275,7 +275,7 @@ func VerifyJobNCompletions(f *framework.Framework, completions int32) { } } if successes != completions { - framework.Failf("Only got %v completions. Expected %v completions.", successes, completions) + e2elog.Failf("Only got %v completions. Expected %v completions.", successes, completions) } } diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go index a20bd881ec6..93206baeb9e 100644 --- a/test/e2e/scheduling/preemption.go +++ b/test/e2e/scheduling/preemption.go @@ -430,12 +430,12 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { var err error node, err = cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{}) if err != nil { - framework.Failf("error getting node %q: %v", nodeName, err) + e2elog.Failf("error getting node %q: %v", nodeName, err) } var ok bool nodeHostNameLabel, ok = node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"] if !ok { - framework.Failf("error getting kubernetes.io/hostname label on node %s", nodeName) + e2elog.Failf("error getting kubernetes.io/hostname label on node %s", nodeName) } // update Node API object with a fake resource @@ -581,7 +581,7 @@ var _ = SIGDescribe("PreemptionExecutionPath", func() { for i, got := range rsPodsSeen { expected := maxRSPodsSeen[i] if got > expected { - framework.Failf("pods of ReplicaSet%d have been over-preempted: expect %v pod names, but got %d", i+1, expected, got) + e2elog.Failf("pods of ReplicaSet%d have been over-preempted: expect %v pod names, but got %d", i+1, expected, got) } } }) diff --git a/test/e2e/scheduling/priorities.go b/test/e2e/scheduling/priorities.go index 5c67c345608..0f432f4c503 100644 --- a/test/e2e/scheduling/priorities.go +++ b/test/e2e/scheduling/priorities.go @@ -328,7 +328,7 @@ func computeCPUMemFraction(cs clientset.Interface, node v1.Node, resource *v1.Re totalRequestedMemResource := resource.Requests.Memory().Value() allpods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { - framework.Failf("Expect error of invalid, got : %v", err) + e2elog.Failf("Expect error of invalid, got : %v", err) } for _, pod := range allpods.Items { if pod.Spec.NodeName == node.Name { diff --git a/test/e2e/scheduling/taint_based_evictions.go b/test/e2e/scheduling/taint_based_evictions.go index a06f38b193f..2e4041f541c 100644 --- a/test/e2e/scheduling/taint_based_evictions.go +++ b/test/e2e/scheduling/taint_based_evictions.go @@ -27,6 +27,7 @@ import ( clientset "k8s.io/client-go/kubernetes" schedulerapi "k8s.io/kubernetes/pkg/scheduler/api" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" @@ -119,7 +120,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { nodeSelector := fields.OneTermEqualSelector("metadata.name", nodeName) nodeList, err := 
cs.CoreV1().Nodes().List(metav1.ListOptions{FieldSelector: nodeSelector.String()}) if err != nil || len(nodeList.Items) != 1 { - framework.Failf("expected no err, got %v; expected len(nodes) = 1, got %v", err, len(nodeList.Items)) + e2elog.Failf("expected no err, got %v; expected len(nodes) = 1, got %v", err, len(nodeList.Items)) } node := nodeList.Items[0] @@ -139,7 +140,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { } if ginkgo.CurrentGinkgoTestDescription().Failed { - framework.Failf("Current e2e test has failed, so return from here.") + e2elog.Failf("Current e2e test has failed, so return from here.") return } @@ -156,7 +157,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { ginkgo.By(fmt.Sprintf("Expecting to see node %q becomes NotReady", nodeName)) if !e2enode.WaitForNodeToBeNotReady(cs, nodeName, time.Minute*3) { - framework.Failf("node %q doesn't turn to NotReady after 3 minutes", nodeName) + e2elog.Failf("node %q doesn't turn to NotReady after 3 minutes", nodeName) } ginkgo.By("Expecting to see unreachable=:NoExecute taint is applied") err = framework.WaitForNodeHasTaintOrNot(cs, nodeName, taint, true, time.Second*30) @@ -188,7 +189,7 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() { seconds, err := getTolerationSeconds(livePod1.Spec.Tolerations) framework.ExpectNoError(err) if seconds != 200 { - framework.Failf("expect tolerationSeconds of pod1 is 200, but got %v", seconds) + e2elog.Failf("expect tolerationSeconds of pod1 is 200, but got %v", seconds) } }) }) diff --git a/test/e2e/scheduling/taints.go b/test/e2e/scheduling/taints.go index 9ff3e1f8ee1..128339f20e8 100644 --- a/test/e2e/scheduling/taints.go +++ b/test/e2e/scheduling/taints.go @@ -196,7 +196,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { timeoutChannel := time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C select { case <-timeoutChannel: - framework.Failf("Failed to evict Pod") + e2elog.Failf("Failed to evict Pod") case <-observedDeletions: e2elog.Logf("Noticed Pod eviction. Test successful") } @@ -230,7 +230,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { case <-timeoutChannel: e2elog.Logf("Pod wasn't evicted. Test successful") case <-observedDeletions: - framework.Failf("Pod was evicted despite toleration") + e2elog.Failf("Pod was evicted despite toleration") } }) @@ -263,14 +263,14 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { case <-timeoutChannel: e2elog.Logf("Pod wasn't evicted") case <-observedDeletions: - framework.Failf("Pod was evicted despite toleration") + e2elog.Failf("Pod was evicted despite toleration") return } ginkgo.By("Waiting for Pod to be deleted") timeoutChannel = time.NewTimer(time.Duration(kubeletPodDeletionDelaySeconds+additionalWaitPerDeleteSeconds) * time.Second).C select { case <-timeoutChannel: - framework.Failf("Pod wasn't evicted") + e2elog.Failf("Pod wasn't evicted") case <-observedDeletions: e2elog.Logf("Pod was evicted after toleration time run out. Test successful") return @@ -312,7 +312,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { case <-timeoutChannel: e2elog.Logf("Pod wasn't evicted. 
Proceeding") case <-observedDeletions: - framework.Failf("Pod was evicted despite toleration") + e2elog.Failf("Pod was evicted despite toleration") return } e2elog.Logf("Removing taint from Node") @@ -324,7 +324,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Single Pod [Serial]", func() { case <-timeoutChannel: e2elog.Logf("Pod wasn't evicted. Test successful") case <-observedDeletions: - framework.Failf("Pod was evicted despite toleration") + e2elog.Failf("Pod was evicted despite toleration") } }) }) @@ -383,9 +383,9 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { select { case <-timeoutChannel: if evicted == 0 { - framework.Failf("Failed to evict Pod1.") + e2elog.Failf("Failed to evict Pod1.") } else if evicted == 2 { - framework.Failf("Pod1 is evicted. But unexpected Pod2 also get evicted.") + e2elog.Failf("Pod1 is evicted. But unexpected Pod2 also get evicted.") } return case podName := <-observedDeletions: @@ -393,7 +393,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { if podName == podGroup+"1" { e2elog.Logf("Noticed Pod %q gets evicted.", podName) } else if podName == podGroup+"2" { - framework.Failf("Unexepected Pod %q gets evicted.", podName) + e2elog.Failf("Unexepected Pod %q gets evicted.", podName) return } } @@ -418,7 +418,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { framework.ExpectNoError(err) nodeHostNameLabel, ok := node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"] if !ok { - framework.Failf("error getting kubernetes.io/hostname label on node %s", nodeName) + e2elog.Failf("error getting kubernetes.io/hostname label on node %s", nodeName) } framework.ExpectNoError(err) e2elog.Logf("Pod1 is running on %v. Tainting Node", nodeName) @@ -441,7 +441,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() { for evicted != 2 { select { case <-timeoutChannel: - framework.Failf("Failed to evict all Pods. %d pod(s) is not evicted.", 2-evicted) + e2elog.Failf("Failed to evict all Pods. %d pod(s) is not evicted.", 2-evicted) return case podName := <-observedDeletions: e2elog.Logf("Noticed Pod %q gets evicted.", podName) diff --git a/test/e2e/scheduling/ubernetes_lite_volumes.go b/test/e2e/scheduling/ubernetes_lite_volumes.go index 3655a161c31..5cb3fc109dd 100644 --- a/test/e2e/scheduling/ubernetes_lite_volumes.go +++ b/test/e2e/scheduling/ubernetes_lite_volumes.go @@ -145,7 +145,7 @@ func OnlyAllowNodeZones(f *framework.Framework, zoneCount int, image string) { e2elog.Logf("deleting claim %q/%q", pvc.Namespace, pvc.Name) err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil) if err != nil { - framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err) + e2elog.Failf("Error deleting claim %q. 
Error: %v", pvc.Name, err) } }() } diff --git a/test/e2e/servicecatalog/BUILD b/test/e2e/servicecatalog/BUILD index 981fe75ba4c..186c0b96f5e 100644 --- a/test/e2e/servicecatalog/BUILD +++ b/test/e2e/servicecatalog/BUILD @@ -17,6 +17,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/log:go_default_library", "//test/utils/image:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", diff --git a/test/e2e/servicecatalog/podpreset.go b/test/e2e/servicecatalog/podpreset.go index 4f992aed9bb..ca34af99ad4 100644 --- a/test/e2e/servicecatalog/podpreset.go +++ b/test/e2e/servicecatalog/podpreset.go @@ -29,6 +29,7 @@ import ( "k8s.io/apimachinery/pkg/watch" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "github.com/onsi/ginkgo" "github.com/onsi/gomega" @@ -136,10 +137,10 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { select { case event, _ := <-w.ResultChan(): if event.Type != watch.Added { - framework.Failf("Failed to observe pod creation: %v", event) + e2elog.Failf("Failed to observe pod creation: %v", event) } case <-time.After(framework.PodStartTimeout): - framework.Failf("Timeout while waiting for pod creation") + e2elog.Failf("Timeout while waiting for pod creation") } // We need to wait for the pod to be running, otherwise the deletion @@ -153,15 +154,15 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { // check the annotation is there if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; !ok { - framework.Failf("Annotation not found in pod annotations: \n%v\n", pod.Annotations) + e2elog.Failf("Annotation not found in pod annotations: \n%v\n", pod.Annotations) } // verify the env is the same if !reflect.DeepEqual(pip.Spec.Env, pod.Spec.Containers[0].Env) { - framework.Failf("env of pod container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.Containers[0].Env) + e2elog.Failf("env of pod container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.Containers[0].Env) } if !reflect.DeepEqual(pip.Spec.Env, pod.Spec.InitContainers[0].Env) { - framework.Failf("env of pod init container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.InitContainers[0].Env) + e2elog.Failf("env of pod init container does not match the env of the pip: expected %#v, got: %#v", pip.Spec.Env, pod.Spec.InitContainers[0].Env) } }) @@ -256,10 +257,10 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { select { case event, _ := <-w.ResultChan(): if event.Type != watch.Added { - framework.Failf("Failed to observe pod creation: %v", event) + e2elog.Failf("Failed to observe pod creation: %v", event) } case <-time.After(framework.PodStartTimeout): - framework.Failf("Timeout while waiting for pod creation") + e2elog.Failf("Timeout while waiting for pod creation") } // We need to wait for the pod to be running, otherwise the deletion @@ -273,15 +274,15 @@ var _ = SIGDescribe("[Feature:PodPreset] PodPreset", func() { // check the annotation is not there if _, ok := pod.Annotations["podpreset.admission.kubernetes.io/podpreset-hello"]; ok { - framework.Failf("Annotation found in pod annotations and should not be: \n%v\n", pod.Annotations) + 
e2elog.Failf("Annotation found in pod annotations and should not be: \n%v\n", pod.Annotations) } // verify the env is the same if !reflect.DeepEqual(originalPod.Spec.Containers[0].Env, pod.Spec.Containers[0].Env) { - framework.Failf("env of pod container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.Containers[0].Env, pod.Spec.Containers[0].Env) + e2elog.Failf("env of pod container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.Containers[0].Env, pod.Spec.Containers[0].Env) } if !reflect.DeepEqual(originalPod.Spec.InitContainers[0].Env, pod.Spec.InitContainers[0].Env) { - framework.Failf("env of pod init container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.InitContainers[0].Env, pod.Spec.InitContainers[0].Env) + e2elog.Failf("env of pod init container does not match the env of the original pod: expected %#v, got: %#v", originalPod.Spec.InitContainers[0].Env, pod.Spec.InitContainers[0].Env) } }) diff --git a/test/e2e/storage/csi_mock_volume.go b/test/e2e/storage/csi_mock_volume.go index b340ded27da..205d16c808c 100644 --- a/test/e2e/storage/csi_mock_volume.go +++ b/test/e2e/storage/csi_mock_volume.go @@ -445,7 +445,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { - framework.Failf("error updating pvc size %q", pvc.Name) + e2elog.Failf("error updating pvc size %q", pvc.Name) } if test.expectFailure { err = waitForResizingCondition(pvc, m.cs, csiResizingConditionWait) @@ -536,7 +536,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() { pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { - framework.Failf("error updating pvc size %q", pvc.Name) + e2elog.Failf("error updating pvc size %q", pvc.Name) } ginkgo.By("Waiting for persistent volume resize to finish") diff --git a/test/e2e/storage/drivers/csi.go b/test/e2e/storage/drivers/csi.go index 12537856b2d..06b57b4dfec 100644 --- a/test/e2e/storage/drivers/csi.go +++ b/test/e2e/storage/drivers/csi.go @@ -45,6 +45,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/testsuites" "k8s.io/kubernetes/test/e2e/storage/utils" @@ -159,7 +160,7 @@ func (h *hostpathCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.Per }, h.manifests...) if err != nil { - framework.Failf("deploying %s driver: %v", h.driverInfo.Name, err) + e2elog.Failf("deploying %s driver: %v", h.driverInfo.Name, err) } return config, func() { @@ -304,7 +305,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest }, m.manifests...) if err != nil { - framework.Failf("deploying csi mock driver: %v", err) + e2elog.Failf("deploying csi mock driver: %v", err) } return config, func() { @@ -420,7 +421,7 @@ func (g *gcePDCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTes cleanup, err := f.CreateFromManifests(nil, manifests...) 
if err != nil { - framework.Failf("deploying csi gce-pd driver: %v", err) + e2elog.Failf("deploying csi gce-pd driver: %v", err) } return &testsuites.PerTestConfig{ diff --git a/test/e2e/storage/drivers/in_tree.go b/test/e2e/storage/drivers/in_tree.go index 8b5d21316ac..79845a5c73c 100644 --- a/test/e2e/storage/drivers/in_tree.go +++ b/test/e2e/storage/drivers/in_tree.go @@ -202,7 +202,7 @@ func (n *nfsDriver) CreateVolume(config *testsuites.PerTestConfig, volType testp case testpatterns.DynamicPV: // Do nothing default: - framework.Failf("Unsupported volType:%v is specified", volType) + e2elog.Failf("Unsupported volType:%v is specified", volType) } return nil } @@ -317,14 +317,14 @@ func (v *glusterVolume) DeleteVolume() { err := cs.CoreV1().Endpoints(ns.Name).Delete(name, nil) if err != nil { if !errors.IsNotFound(err) { - framework.Failf("Gluster delete endpoints failed: %v", err) + e2elog.Failf("Gluster delete endpoints failed: %v", err) } e2elog.Logf("Gluster endpoints %q not found, assuming deleted", name) } e2elog.Logf("Deleting Gluster server pod %q...", v.serverPod.Name) err = framework.DeletePodWithWait(f, cs, v.serverPod) if err != nil { - framework.Failf("Gluster server pod delete failed: %v", err) + e2elog.Failf("Gluster server pod delete failed: %v", err) } } @@ -1738,7 +1738,7 @@ func (l *localDriver) CreateVolume(config *testsuites.PerTestConfig, volType tes ltr: l.ltrMgr.Create(node, l.volumeType, nil), } default: - framework.Failf("Unsupported volType: %v is specified", volType) + e2elog.Failf("Unsupported volType: %v is specified", volType) } return nil } @@ -1750,11 +1750,11 @@ func (v *localVolume) DeleteVolume() { func (l *localDriver) nodeAffinityForNode(node *v1.Node) *v1.VolumeNodeAffinity { nodeKey := "kubernetes.io/hostname" if node.Labels == nil { - framework.Failf("Node does not have labels") + e2elog.Failf("Node does not have labels") } nodeValue, found := node.Labels[nodeKey] if !found { - framework.Failf("Node does not have required label %q", nodeKey) + e2elog.Failf("Node does not have required label %q", nodeKey) } return &v1.VolumeNodeAffinity{ Required: &v1.NodeSelector{ diff --git a/test/e2e/storage/empty_dir_wrapper.go b/test/e2e/storage/empty_dir_wrapper.go index a02116503f6..c1f557eb070 100644 --- a/test/e2e/storage/empty_dir_wrapper.go +++ b/test/e2e/storage/empty_dir_wrapper.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/storage/utils" imageutils "k8s.io/kubernetes/test/utils/image" @@ -78,7 +79,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { var err error if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil { - framework.Failf("unable to create test secret %s: %v", secret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", secret.Name, err) } configMapVolumeName := "configmap-volume" @@ -95,7 +96,7 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { } if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil { - framework.Failf("unable to create test configMap %s: %v", configMap.Name, err) + e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err) } pod := &v1.Pod{ @@ -147,15 +148,15 @@ var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() { defer func() { ginkgo.By("Cleaning 
up the secret") if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil { - framework.Failf("unable to delete secret %v: %v", secret.Name, err) + e2elog.Failf("unable to delete secret %v: %v", secret.Name, err) } ginkgo.By("Cleaning up the configmap") if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(configMap.Name, nil); err != nil { - framework.Failf("unable to delete configmap %v: %v", configMap.Name, err) + e2elog.Failf("unable to delete configmap %v: %v", configMap.Name, err) } ginkgo.By("Cleaning up the pod") if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(pod.Name, metav1.NewDeleteOptions(0)); err != nil { - framework.Failf("unable to delete pod %v: %v", pod.Name, err) + e2elog.Failf("unable to delete pod %v: %v", pod.Name, err) } }() }) @@ -253,17 +254,17 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle } if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(gitServerSvc); err != nil { - framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err) + e2elog.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err) } return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() { ginkgo.By("Cleaning up the git server pod") if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(gitServerPod.Name, metav1.NewDeleteOptions(0)); err != nil { - framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err) + e2elog.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err) } ginkgo.By("Cleaning up the git server svc") if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil { - framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err) + e2elog.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err) } } } diff --git a/test/e2e/storage/flexvolume.go b/test/e2e/storage/flexvolume.go index 66b6ab3e3ae..b0b743411a6 100644 --- a/test/e2e/storage/flexvolume.go +++ b/test/e2e/storage/flexvolume.go @@ -29,6 +29,7 @@ import ( apierrs "k8s.io/apimachinery/pkg/api/errors" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2essh "k8s.io/kubernetes/test/e2e/framework/ssh" "k8s.io/kubernetes/test/e2e/framework/testfiles" @@ -118,7 +119,7 @@ func uninstallFlex(c clientset.Interface, node *v1.Node, vendor, driver string) } if host == "" { - framework.Failf("Error getting node ip : %v", err) + e2elog.Failf("Error getting node ip : %v", err) } cmd := fmt.Sprintf("sudo rm -r %s", flexDir) @@ -139,7 +140,7 @@ func sshAndLog(cmd, host string, failOnError bool) { e2essh.LogResult(result) framework.ExpectNoError(err) if result.Code != 0 && failOnError { - framework.Failf("%s returned non-zero, stderr: %s", cmd, result.Stderr) + e2elog.Failf("%s returned non-zero, stderr: %s", cmd, result.Stderr) } } diff --git a/test/e2e/storage/flexvolume_mounted_volume_resize.go b/test/e2e/storage/flexvolume_mounted_volume_resize.go index cb8a24c1ee2..b547b32caa7 100644 --- a/test/e2e/storage/flexvolume_mounted_volume_resize.go +++ b/test/e2e/storage/flexvolume_mounted_volume_resize.go @@ -63,7 +63,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { if len(nodeList.Items) != 0 { nodeName = nodeList.Items[0].Name } else { - framework.Failf("Unable 
to find ready and schedulable Node") + e2elog.Failf("Unable to find ready and schedulable Node") } nodeKey = "mounted_flexvolume_expand" @@ -107,7 +107,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { if c != nil { if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 { - framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) + e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) } pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, "" nodeKeyValueLabel = make(map[string]string) @@ -157,7 +157,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume expand[Slow]", func() { pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { - framework.Failf("error updating pvc size %q", pvc.Name) + e2elog.Failf("error updating pvc size %q", pvc.Name) } ginkgo.By("Waiting for cloudprovider resize to finish") diff --git a/test/e2e/storage/flexvolume_online_resize.go b/test/e2e/storage/flexvolume_online_resize.go index 956865e30ea..196a8f884fd 100644 --- a/test/e2e/storage/flexvolume_online_resize.go +++ b/test/e2e/storage/flexvolume_online_resize.go @@ -60,7 +60,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa nodeList = framework.GetReadySchedulableNodesOrDie(f.ClientSet) if len(nodeList.Items) == 0 { - framework.Failf("unable to find ready and schedulable Node") + e2elog.Failf("unable to find ready and schedulable Node") } nodeName = nodeList.Items[0].Name @@ -106,7 +106,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa if c != nil { if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 { - framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) + e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) } pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, "" nodeKeyValueLabel = make(map[string]string) @@ -161,7 +161,7 @@ var _ = utils.SIGDescribe("Mounted flexvolume volume expand [Slow] [Feature:Expa pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { - framework.Failf("error updating pvc size %q", pvc.Name) + e2elog.Failf("error updating pvc size %q", pvc.Name) } ginkgo.By("Waiting for cloudprovider resize to finish") diff --git a/test/e2e/storage/mounted_volume_resize.go b/test/e2e/storage/mounted_volume_resize.go index 843e8a954be..9c996f2e235 100644 --- a/test/e2e/storage/mounted_volume_resize.go +++ b/test/e2e/storage/mounted_volume_resize.go @@ -62,7 +62,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { if len(nodeList.Items) != 0 { nodeName = nodeList.Items[0].Name } else { - framework.Failf("Unable to find ready and schedulable Node") + e2elog.Failf("Unable to find ready and schedulable Node") } nodeKey = "mounted_volume_expand" @@ -102,7 +102,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { if c != nil { if errs := framework.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 { - framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) + e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. 
Errors: %v", utilerrors.NewAggregate(errs)) } pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, "" nodeKeyValueLabel = make(map[string]string) @@ -134,7 +134,7 @@ var _ = utils.SIGDescribe("Mounted volume expand", func() { pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { - framework.Failf("error updating pvc size %q", pvc.Name) + e2elog.Failf("error updating pvc size %q", pvc.Name) } ginkgo.By("Waiting for cloudprovider resize to finish") diff --git a/test/e2e/storage/nfs_persistent_volume-disruptive.go b/test/e2e/storage/nfs_persistent_volume-disruptive.go index 64b90b4d061..c9ccd4dfdf7 100644 --- a/test/e2e/storage/nfs_persistent_volume-disruptive.go +++ b/test/e2e/storage/nfs_persistent_volume-disruptive.go @@ -157,11 +157,11 @@ var _ = utils.SIGDescribe("NFSPersistentVolumes[Disruptive][Flaky]", func() { // Delete PV and PVCs if errs := framework.PVPVCCleanup(c, ns, pv1, pvc1); len(errs) > 0 { - framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) + e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) } pv1, pvc1 = nil, nil if errs := framework.PVPVCCleanup(c, ns, pv2, pvc2); len(errs) > 0 { - framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) + e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) } pv2, pvc2 = nil, nil diff --git a/test/e2e/storage/pd.go b/test/e2e/storage/pd.go index 07bdafb4bc5..c79d4d07a52 100644 --- a/test/e2e/storage/pd.go +++ b/test/e2e/storage/pd.go @@ -366,7 +366,7 @@ var _ = utils.SIGDescribe("Pod Disks", func() { // if this defer is reached due to an Expect then nested // Expects are lost, so use Failf here if numNodes != origNodeCnt { - framework.Failf("defer: Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt) + e2elog.Failf("defer: Requires current node count (%d) to return to original node count (%d)", numNodes, origNodeCnt) } } }() @@ -520,7 +520,7 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num // escape if not a supported provider if !(framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" || framework.TestContext.Provider == "aws") { - framework.Failf(fmt.Sprintf("func `testPDPod` only supports gce, gke, and aws providers, not %v", framework.TestContext.Provider)) + e2elog.Failf(fmt.Sprintf("func `testPDPod` only supports gce, gke, and aws providers, not %v", framework.TestContext.Provider)) } containers := make([]v1.Container, numContainers) diff --git a/test/e2e/storage/persistent_volumes-gce.go b/test/e2e/storage/persistent_volumes-gce.go index d79d67c44b1..be18b852ada 100644 --- a/test/e2e/storage/persistent_volumes-gce.go +++ b/test/e2e/storage/persistent_volumes-gce.go @@ -109,7 +109,7 @@ var _ = utils.SIGDescribe("PersistentVolumes GCEPD", func() { if c != nil { framework.ExpectNoError(framework.DeletePodWithWait(f, c, clientPod)) if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 { - framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) + e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. 
Errors: %v", utilerrors.NewAggregate(errs)) } clientPod, pv, pvc, node = nil, nil, nil, "" if diskName != "" { diff --git a/test/e2e/storage/persistent_volumes-local.go b/test/e2e/storage/persistent_volumes-local.go index eb5743399db..4b18ed918f9 100644 --- a/test/e2e/storage/persistent_volumes-local.go +++ b/test/e2e/storage/persistent_volumes-local.go @@ -829,7 +829,7 @@ func cleanupLocalPVCsPVs(config *localTestConfig, volumes []*localTestVolume) { ginkgo.By("Cleaning up PVC and PV") errs := framework.PVPVCCleanup(config.client, config.ns, volume.pv, volume.pvc) if len(errs) > 0 { - framework.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs)) + e2elog.Failf("Failed to delete PV and/or PVC: %v", utilerrors.NewAggregate(errs)) } } } @@ -870,11 +870,11 @@ func makeLocalPVConfig(config *localTestConfig, volume *localTestVolume) framewo // TODO: hostname may not be the best option nodeKey := "kubernetes.io/hostname" if volume.ltr.Node.Labels == nil { - framework.Failf("Node does not have labels") + e2elog.Failf("Node does not have labels") } nodeValue, found := volume.ltr.Node.Labels[nodeKey] if !found { - framework.Failf("Node does not have required label %q", nodeKey) + e2elog.Failf("Node does not have required label %q", nodeKey) } pvConfig := framework.PersistentVolumeConfig{ diff --git a/test/e2e/storage/persistent_volumes.go b/test/e2e/storage/persistent_volumes.go index 92009a7fe08..026ccca66cd 100644 --- a/test/e2e/storage/persistent_volumes.go +++ b/test/e2e/storage/persistent_volumes.go @@ -154,7 +154,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { ginkgo.AfterEach(func() { e2elog.Logf("AfterEach: Cleaning up test resources.") if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 { - framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) + e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) } }) @@ -221,7 +221,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { for _, e := range errs { errmsg = append(errmsg, e.Error()) } - framework.Failf("AfterEach: Failed to delete 1 or more PVs/PVCs. Errors: %v", strings.Join(errmsg, "; ")) + e2elog.Failf("AfterEach: Failed to delete 1 or more PVs/PVCs. Errors: %v", strings.Join(errmsg, "; ")) } }) @@ -270,7 +270,7 @@ var _ = utils.SIGDescribe("PersistentVolumes", func() { ginkgo.AfterEach(func() { e2elog.Logf("AfterEach: Cleaning up test resources.") if errs := framework.PVPVCCleanup(c, ns, pv, pvc); len(errs) > 0 { - framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) + e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) } }) diff --git a/test/e2e/storage/pv_protection.go b/test/e2e/storage/pv_protection.go index 820607ed570..b2ae9813889 100644 --- a/test/e2e/storage/pv_protection.go +++ b/test/e2e/storage/pv_protection.go @@ -92,7 +92,7 @@ var _ = utils.SIGDescribe("PV Protection", func() { ginkgo.AfterEach(func() { e2elog.Logf("AfterEach: Cleaning up test resources.") if errs := framework.PVPVCCleanup(client, nameSpace, pv, pvc); len(errs) > 0 { - framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs)) + e2elog.Failf("AfterEach: Failed to delete PVC and/or PV. 
Errors: %v", utilerrors.NewAggregate(errs)) } }) diff --git a/test/e2e/storage/regional_pd.go b/test/e2e/storage/regional_pd.go index 19897bc88c2..7da41614d53 100644 --- a/test/e2e/storage/regional_pd.go +++ b/test/e2e/storage/regional_pd.go @@ -340,11 +340,11 @@ func testRegionalDelayedBinding(c clientset.Interface, ns string, pvcCount int) } pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */) if node == nil { - framework.Failf("unexpected nil node found") + e2elog.Failf("unexpected nil node found") } zone, ok := node.Labels[v1.LabelZoneFailureDomain] if !ok { - framework.Failf("label %s not found on Node", v1.LabelZoneFailureDomain) + e2elog.Failf("label %s not found on Node", v1.LabelZoneFailureDomain) } for _, pv := range pvs { checkZoneFromLabelAndAffinity(pv, zone, false) @@ -400,11 +400,11 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s } pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */) if node == nil { - framework.Failf("unexpected nil node found") + e2elog.Failf("unexpected nil node found") } nodeZone, ok := node.Labels[v1.LabelZoneFailureDomain] if !ok { - framework.Failf("label %s not found on Node", v1.LabelZoneFailureDomain) + e2elog.Failf("label %s not found on Node", v1.LabelZoneFailureDomain) } zoneFound := false for _, zone := range topoZones { @@ -414,7 +414,7 @@ func testRegionalAllowedTopologiesWithDelayedBinding(c clientset.Interface, ns s } } if !zoneFound { - framework.Failf("zones specified in AllowedTopologies: %v does not contain zone of node where PV got provisioned: %s", topoZones, nodeZone) + e2elog.Failf("zones specified in AllowedTopologies: %v does not contain zone of node where PV got provisioned: %s", topoZones, nodeZone) } for _, pv := range pvs { checkZonesFromLabelAndAffinity(pv, sets.NewString(topoZones...), true) diff --git a/test/e2e/storage/testsuites/base.go b/test/e2e/storage/testsuites/base.go index ca9ef9264c2..927aa597899 100644 --- a/test/e2e/storage/testsuites/base.go +++ b/test/e2e/storage/testsuites/base.go @@ -226,7 +226,7 @@ func createGenericVolumeTestResource(driver TestDriver, config *PerTestConfig, p r.volType = fmt.Sprintf("%s-dynamicPV", dInfo.Name) } default: - framework.Failf("genericVolumeTestResource doesn't support: %s", volType) + e2elog.Failf("genericVolumeTestResource doesn't support: %s", volType) } if r.volSource == nil { @@ -246,13 +246,13 @@ func (r *genericVolumeTestResource) cleanupResource() { case testpatterns.PreprovisionedPV: ginkgo.By("Deleting pv and pvc") if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, r.pv, r.pvc); len(errs) != 0 { - framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs)) + e2elog.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs)) } case testpatterns.DynamicPV: ginkgo.By("Deleting pvc") // We only delete the PVC so that PV (and disk) can be cleaned up by dynamic provisioner if r.pv != nil && r.pv.Spec.PersistentVolumeReclaimPolicy != v1.PersistentVolumeReclaimDelete { - framework.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v", + e2elog.Failf("Test framework does not currently support Dynamically Provisioned Persistent Volume %v specified with reclaim policy that isnt %v", r.pv.Name, v1.PersistentVolumeReclaimDelete) } if r.pvc != nil { @@ -264,7 +264,7 @@ func (r 
*genericVolumeTestResource) cleanupResource() { } } default: - framework.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.pvc, r.pv) + e2elog.Failf("Found PVC (%v) or PV (%v) but not running Preprovisioned or Dynamic test pattern", r.pvc, r.pv) } } @@ -601,7 +601,7 @@ func validateMigrationVolumeOpCounts(cs clientset.Interface, pluginName string, for op, count := range newInTreeOps { if count != oldInTreeOps[op] { - framework.Failf("In-tree plugin %v migrated to CSI Driver, however found %v %v metrics for in-tree plugin", pluginName, count-oldInTreeOps[op], op) + e2elog.Failf("In-tree plugin %v migrated to CSI Driver, however found %v %v metrics for in-tree plugin", pluginName, count-oldInTreeOps[op], op) } } // We don't check for migrated metrics because some negative test cases diff --git a/test/e2e/storage/testsuites/driveroperations.go b/test/e2e/storage/testsuites/driveroperations.go index 4d4298e61a7..54be67b64c6 100644 --- a/test/e2e/storage/testsuites/driveroperations.go +++ b/test/e2e/storage/testsuites/driveroperations.go @@ -23,7 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/storage/testpatterns" ) @@ -49,7 +49,7 @@ func CreateVolume(driver TestDriver, config *PerTestConfig, volType testpatterns case testpatterns.DynamicPV: // No need to create volume default: - framework.Failf("Invalid volType specified: %v", volType) + e2elog.Failf("Invalid volType specified: %v", volType) } return nil } diff --git a/test/e2e/storage/testsuites/multivolume.go b/test/e2e/storage/testsuites/multivolume.go index c7508a4865c..ac3a08a96d9 100644 --- a/test/e2e/storage/testsuites/multivolume.go +++ b/test/e2e/storage/testsuites/multivolume.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -446,7 +447,7 @@ func TestConcurrentAccessToSingleVolume(f *framework.Framework, cs clientset.Int // Delete the last pod and remove from slice of pods if len(pods) < 2 { - framework.Failf("Number of pods shouldn't be less than 2, but got %d", len(pods)) + e2elog.Failf("Number of pods shouldn't be less than 2, but got %d", len(pods)) } lastPod := pods[len(pods)-1] framework.ExpectNoError(framework.DeletePodWithWait(f, cs, lastPod)) diff --git a/test/e2e/storage/testsuites/provisioning.go b/test/e2e/storage/testsuites/provisioning.go index c545c85f7c1..25cdc1e405f 100644 --- a/test/e2e/storage/testsuites/provisioning.go +++ b/test/e2e/storage/testsuites/provisioning.go @@ -206,7 +206,7 @@ func (p *provisioningTestSuite) defineTests(driver TestDriver, pattern testpatte sDriver, ok := driver.(SnapshottableTestDriver) if !ok { - framework.Failf("Driver %q has CapDataSource but does not implement SnapshottableTestDriver", dInfo.Name) + e2elog.Failf("Driver %q has CapDataSource but does not implement SnapshottableTestDriver", dInfo.Name) } init() @@ -259,7 +259,7 @@ func (t StorageClassTest) TestDynamicProvisioning() *v1.PersistentVolume { // typically this claim has already been deleted err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) if err != nil && 
!apierrs.IsNotFound(err) { - framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err) + e2elog.Failf("Error deleting claim %q. Error: %v", claim.Name, err) } }() @@ -667,13 +667,13 @@ func prepareDataSourceForProvisioning( e2elog.Logf("deleting snapshot %q/%q", snapshot.GetNamespace(), snapshot.GetName()) err = dynamicClient.Resource(snapshotGVR).Namespace(updatedClaim.Namespace).Delete(snapshot.GetName(), nil) if err != nil && !apierrs.IsNotFound(err) { - framework.Failf("Error deleting snapshot %q. Error: %v", snapshot.GetName(), err) + e2elog.Failf("Error deleting snapshot %q. Error: %v", snapshot.GetName(), err) } e2elog.Logf("deleting initClaim %q/%q", updatedClaim.Namespace, updatedClaim.Name) err = client.CoreV1().PersistentVolumeClaims(updatedClaim.Namespace).Delete(updatedClaim.Name, nil) if err != nil && !apierrs.IsNotFound(err) { - framework.Failf("Error deleting initClaim %q. Error: %v", updatedClaim.Name, err) + e2elog.Failf("Error deleting initClaim %q. Error: %v", updatedClaim.Name, err) } e2elog.Logf("deleting SnapshotClass %s", snapshotClass.GetName()) diff --git a/test/e2e/storage/testsuites/snapshottable.go b/test/e2e/storage/testsuites/snapshottable.go index f5a2be989b3..8bd43bbd888 100644 --- a/test/e2e/storage/testsuites/snapshottable.go +++ b/test/e2e/storage/testsuites/snapshottable.go @@ -138,7 +138,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt // typically this claim has already been deleted err = cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(pvc.Name, nil) if err != nil && !apierrs.IsNotFound(err) { - framework.Failf("Error deleting claim %q. Error: %v", pvc.Name, err) + e2elog.Failf("Error deleting claim %q. Error: %v", pvc.Name, err) } }() err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, cs, pvc.Namespace, pvc.Name, framework.Poll, framework.ClaimProvisionTimeout) @@ -171,7 +171,7 @@ func (s *snapshottableTestSuite) defineTests(driver TestDriver, pattern testpatt // typically this snapshot has already been deleted err = dc.Resource(snapshotGVR).Namespace(snapshot.GetNamespace()).Delete(snapshot.GetName(), nil) if err != nil && !apierrs.IsNotFound(err) { - framework.Failf("Error deleting snapshot %q. Error: %v", pvc.Name, err) + e2elog.Failf("Error deleting snapshot %q. 
Error: %v", pvc.Name, err) } }() err = WaitForSnapshotReady(dc, snapshot.GetNamespace(), snapshot.GetName(), framework.Poll, framework.SnapshotCreateTimeout) diff --git a/test/e2e/storage/testsuites/subpath.go b/test/e2e/storage/testsuites/subpath.go index 981c9bebf22..4d23bef12c1 100644 --- a/test/e2e/storage/testsuites/subpath.go +++ b/test/e2e/storage/testsuites/subpath.go @@ -130,7 +130,7 @@ func (s *subPathTestSuite) defineTests(driver TestDriver, pattern testpatterns.T }, } default: - framework.Failf("SubPath test doesn't support: %s", volType) + e2elog.Failf("SubPath test doesn't support: %s", volType) } subPath := f.Namespace.Name diff --git a/test/e2e/storage/testsuites/volumemode.go b/test/e2e/storage/testsuites/volumemode.go index 6bd3b6ae2d7..dc4ae4562f0 100644 --- a/test/e2e/storage/testsuites/volumemode.go +++ b/test/e2e/storage/testsuites/volumemode.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/storage/testpatterns" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -145,7 +146,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern l.pvc.Spec.VolumeMode = &pattern.VolMode } default: - framework.Failf("Volume mode test doesn't support: %s", pattern.VolType) + e2elog.Failf("Volume mode test doesn't support: %s", pattern.VolType) } } @@ -295,7 +296,7 @@ func (t *volumeModeTestSuite) defineTests(driver TestDriver, pattern testpattern // TODO(mkimuram): Add more tests } default: - framework.Failf("Volume mode test doesn't support volType: %v", pattern.VolType) + e2elog.Failf("Volume mode test doesn't support volType: %v", pattern.VolType) } } diff --git a/test/e2e/storage/utils/local.go b/test/e2e/storage/utils/local.go index 5220ac636dd..b57dff836db 100644 --- a/test/e2e/storage/utils/local.go +++ b/test/e2e/storage/utils/local.go @@ -29,6 +29,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" ) // LocalVolumeType represents type of local volume, e.g. 
tmpfs, directory, @@ -309,11 +310,11 @@ func (l *ltrMgr) Create(node *v1.Node, volumeType LocalVolumeType, parameters ma case LocalVolumeGCELocalSSD: ltr = l.setupLocalVolumeGCELocalSSD(node, parameters) default: - framework.Failf("Failed to create local test resource on node %q, unsupported volume type: %v is specified", node.Name, volumeType) + e2elog.Failf("Failed to create local test resource on node %q, unsupported volume type: %v is specified", node.Name, volumeType) return nil } if ltr == nil { - framework.Failf("Failed to create local test resource on node %q, volume type: %v, parameters: %v", node.Name, volumeType, parameters) + e2elog.Failf("Failed to create local test resource on node %q, volume type: %v, parameters: %v", node.Name, volumeType, parameters) } ltr.VolumeType = volumeType return ltr @@ -338,7 +339,7 @@ func (l *ltrMgr) Remove(ltr *LocalTestResource) { case LocalVolumeGCELocalSSD: l.cleanupLocalVolumeGCELocalSSD(ltr) default: - framework.Failf("Failed to remove local test resource, unsupported volume type: %v is specified", ltr.VolumeType) + e2elog.Failf("Failed to remove local test resource, unsupported volume type: %v is specified", ltr.VolumeType) } return } diff --git a/test/e2e/storage/utils/utils.go b/test/e2e/storage/utils/utils.go index 72fbbc2afe9..6c5fb6e94a5 100644 --- a/test/e2e/storage/utils/utils.go +++ b/test/e2e/storage/utils/utils.go @@ -150,7 +150,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) { if kOp == KStop { if ok := e2enode.WaitForNodeToBeNotReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { - framework.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName) + e2elog.Failf("Node %s failed to enter NotReady state", pod.Spec.NodeName) } } if kOp == KRestart { @@ -170,7 +170,7 @@ func KubeletCommand(kOp KubeletOpt, c clientset.Interface, pod *v1.Pod) { if kOp == KStart || kOp == KRestart { // For kubelet start and restart operations, Wait until Node becomes Ready if ok := e2enode.WaitForNodeToBeReady(c, pod.Spec.NodeName, NodeStateTimeout); !ok { - framework.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName) + e2elog.Failf("Node %s failed to enter Ready state", pod.Spec.NodeName) } } } diff --git a/test/e2e/storage/volume_expand.go b/test/e2e/storage/volume_expand.go index c3828f9baee..3047092e794 100644 --- a/test/e2e/storage/volume_expand.go +++ b/test/e2e/storage/volume_expand.go @@ -135,7 +135,7 @@ var _ = utils.SIGDescribe("Volume expand", func() { pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { - framework.Failf("error updating pvc size %q", pvc.Name) + e2elog.Failf("error updating pvc size %q", pvc.Name) } ginkgo.By("Waiting for cloudprovider resize to finish") @@ -193,7 +193,7 @@ var _ = utils.SIGDescribe("Volume expand", func() { pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { - framework.Failf("error updating pvc size %q", pvc.Name) + e2elog.Failf("error updating pvc size %q", pvc.Name) } ginkgo.By("Waiting for cloudprovider resize to finish") @@ -225,7 +225,7 @@ var _ = utils.SIGDescribe("Volume expand", func() { pvcSize := pvc.Spec.Resources.Requests[v1.ResourceStorage] if pvcSize.Cmp(newSize) != 0 { - framework.Failf("error updating pvc size %q", pvc.Name) + e2elog.Failf("error updating pvc size %q", pvc.Name) } ginkgo.By("Waiting for cloudprovider resize to finish") diff --git a/test/e2e/storage/volume_limits.go b/test/e2e/storage/volume_limits.go index 97c82b25907..97cc7e06c64 100644 --- 
a/test/e2e/storage/volume_limits.go +++ b/test/e2e/storage/volume_limits.go @@ -22,6 +22,7 @@ import ( clientset "k8s.io/client-go/kubernetes" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -39,12 +40,12 @@ var _ = utils.SIGDescribe("Volume limits", func() { ginkgo.It("should verify that all nodes have volume limits", func() { nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) if len(nodeList.Items) == 0 { - framework.Failf("Unable to find ready and schedulable Node") + e2elog.Failf("Unable to find ready and schedulable Node") } for _, node := range nodeList.Items { volumeLimits := getVolumeLimit(&node) if len(volumeLimits) == 0 { - framework.Failf("Expected volume limits to be set") + e2elog.Failf("Expected volume limits to be set") } } }) diff --git a/test/e2e/storage/volume_metrics.go b/test/e2e/storage/volume_metrics.go index ab1be5e8ff3..e99996b92c8 100644 --- a/test/e2e/storage/volume_metrics.go +++ b/test/e2e/storage/volume_metrics.go @@ -58,7 +58,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { framework.SkipUnlessProviderIs("gce", "gke", "aws") defaultScName, err = framework.GetDefaultStorageClassName(c) if err != nil { - framework.Failf(err.Error()) + e2elog.Failf(err.Error()) } test := testsuites.StorageClassTest{ Name: "default", @@ -70,7 +70,7 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { metricsGrabber, err = metrics.NewMetricsGrabber(c, nil, true, false, true, false, false) if err != nil { - framework.Failf("Error creating metrics grabber : %v", err) + e2elog.Failf("Error creating metrics grabber : %v", err) } }) @@ -456,10 +456,10 @@ var _ = utils.SIGDescribe("[Serial] Volume metrics", func() { ginkgo.AfterEach(func() { if err := framework.DeletePersistentVolume(c, pv.Name); err != nil { - framework.Failf("Error deleting pv: %v", err) + e2elog.Failf("Error deleting pv: %v", err) } if err := framework.DeletePersistentVolumeClaim(c, pvc.Name, pvc.Namespace); err != nil { - framework.Failf("Error deleting pvc: %v", err) + e2elog.Failf("Error deleting pvc: %v", err) } // Clear original metric values. 
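An aside on the pattern these hunks repeat: each file gains the import e2elog "k8s.io/kubernetes/test/e2e/framework/log" (where it is not already present) and every framework.Failf call becomes e2elog.Failf with the same format string and arguments. The sketch below illustrates the resulting call shape under that assumption; the ensureDefaultStorageClass helper and its messages are hypothetical and not part of this change — only the e2elog import path and the Failf/Logf signatures are taken from the hunks in this diff.

package example

import (
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// ensureDefaultStorageClass is a made-up helper that shows how failures are
// reported after this change: e2elog.Failf logs the formatted message and
// fails the running test, presumably the same behaviour framework.Failf
// provided, which is why call sites keep their format strings and arguments
// untouched.
func ensureDefaultStorageClass(name string) {
	if name == "" {
		e2elog.Failf("expected a default StorageClass name, got %q", name)
	}
	e2elog.Logf("using StorageClass %q", name)
}

Call sites that only need the logging switch are otherwise unchanged, as the hunks above and below show.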
diff --git a/test/e2e/storage/volume_provisioning.go b/test/e2e/storage/volume_provisioning.go index f7396051525..8c37b12f2cc 100644 --- a/test/e2e/storage/volume_provisioning.go +++ b/test/e2e/storage/volume_provisioning.go @@ -70,28 +70,28 @@ func checkZoneFromLabelAndAffinity(pv *v1.PersistentVolume, zone string, matchZo func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, matchZones bool) { ginkgo.By("checking PV's zone label and node affinity terms match expected zone") if pv == nil { - framework.Failf("nil pv passed") + e2elog.Failf("nil pv passed") } pvLabel, ok := pv.Labels[v1.LabelZoneFailureDomain] if !ok { - framework.Failf("label %s not found on PV", v1.LabelZoneFailureDomain) + e2elog.Failf("label %s not found on PV", v1.LabelZoneFailureDomain) } zonesFromLabel, err := volumehelpers.LabelZonesToSet(pvLabel) if err != nil { - framework.Failf("unable to parse zone labels %s: %v", pvLabel, err) + e2elog.Failf("unable to parse zone labels %s: %v", pvLabel, err) } if matchZones && !zonesFromLabel.Equal(zones) { - framework.Failf("value[s] of %s label for PV: %v does not match expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones) + e2elog.Failf("value[s] of %s label for PV: %v does not match expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones) } if !matchZones && !zonesFromLabel.IsSuperset(zones) { - framework.Failf("value[s] of %s label for PV: %v does not contain expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones) + e2elog.Failf("value[s] of %s label for PV: %v does not contain expected zone[s]: %v", v1.LabelZoneFailureDomain, zonesFromLabel, zones) } if pv.Spec.NodeAffinity == nil { - framework.Failf("node affinity not found in PV spec %v", pv.Spec) + e2elog.Failf("node affinity not found in PV spec %v", pv.Spec) } if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 { - framework.Failf("node selector terms not found in PV spec %v", pv.Spec) + e2elog.Failf("node selector terms not found in PV spec %v", pv.Spec) } for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms { @@ -103,15 +103,15 @@ func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, keyFound = true zonesFromNodeAffinity := sets.NewString(r.Values...) 
if matchZones && !zonesFromNodeAffinity.Equal(zones) { - framework.Failf("zones from NodeAffinity of PV: %v does not equal expected zone[s]: %v", zonesFromNodeAffinity, zones) + e2elog.Failf("zones from NodeAffinity of PV: %v does not equal expected zone[s]: %v", zonesFromNodeAffinity, zones) } if !matchZones && !zonesFromNodeAffinity.IsSuperset(zones) { - framework.Failf("zones from NodeAffinity of PV: %v does not contain expected zone[s]: %v", zonesFromNodeAffinity, zones) + e2elog.Failf("zones from NodeAffinity of PV: %v does not contain expected zone[s]: %v", zonesFromNodeAffinity, zones) } break } if !keyFound { - framework.Failf("label %s not found in term %v", v1.LabelZoneFailureDomain, term) + e2elog.Failf("label %s not found in term %v", v1.LabelZoneFailureDomain, term) } } } @@ -231,14 +231,14 @@ func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTop } pvs, node := test.TestBindingWaitForFirstConsumerMultiPVC(claims, nil /* node selector */, false /* expect unschedulable */) if node == nil { - framework.Failf("unexpected nil node found") + e2elog.Failf("unexpected nil node found") } zone, ok := node.Labels[v1.LabelZoneFailureDomain] if !ok { - framework.Failf("label %s not found on Node", v1.LabelZoneFailureDomain) + e2elog.Failf("label %s not found on Node", v1.LabelZoneFailureDomain) } if specifyAllowedTopology && topoZone != zone { - framework.Failf("zone specified in allowedTopologies: %s does not match zone of node where PV got provisioned: %s", topoZone, zone) + e2elog.Failf("zone specified in allowedTopologies: %s does not match zone of node where PV got provisioned: %s", topoZone, zone) } for _, pv := range pvs { checkZoneFromLabelAndAffinity(pv, zone, true) @@ -621,7 +621,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { for i, pv := range residualPVs { e2elog.Logf("\t%d) %s", i+1, pv.Name) } - framework.Failf("Expected 0 PersistentVolumes remaining. Found %d", len(residualPVs)) + e2elog.Failf("Expected 0 PersistentVolumes remaining. Found %d", len(residualPVs)) } e2elog.Logf("0 PersistentVolumes remain.") }) @@ -778,7 +778,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") scName, scErr := framework.GetDefaultStorageClassName(c) if scErr != nil { - framework.Failf(scErr.Error()) + e2elog.Failf(scErr.Error()) } test := testsuites.StorageClassTest{ Name: "default", @@ -812,7 +812,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") scName, scErr := framework.GetDefaultStorageClassName(c) if scErr != nil { - framework.Failf(scErr.Error()) + e2elog.Failf(scErr.Error()) } test := testsuites.StorageClassTest{ Name: "default", @@ -897,7 +897,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() { e2elog.Logf("deleting claim %q/%q", claim.Namespace, claim.Name) err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil) if err != nil && !apierrs.IsNotFound(err) { - framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err) + e2elog.Failf("Error deleting claim %q. 
Error: %v", claim.Name, err) } }() diff --git a/test/e2e/storage/volumes.go b/test/e2e/storage/volumes.go index d2edf0158f8..09485f7692e 100644 --- a/test/e2e/storage/volumes.go +++ b/test/e2e/storage/volumes.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/volume" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -65,7 +66,7 @@ var _ = utils.SIGDescribe("Volumes", func() { }, } if _, err := cs.CoreV1().ConfigMaps(namespace.Name).Create(configMap); err != nil { - framework.Failf("unable to create test configmap: %v", err) + e2elog.Failf("unable to create test configmap: %v", err) } defer func() { _ = cs.CoreV1().ConfigMaps(namespace.Name).Delete(configMap.Name, nil) diff --git a/test/e2e/storage/vsphere/bootstrap.go b/test/e2e/storage/vsphere/bootstrap.go index 1775ec40c47..39eb39f36f8 100644 --- a/test/e2e/storage/vsphere/bootstrap.go +++ b/test/e2e/storage/vsphere/bootstrap.go @@ -19,6 +19,7 @@ package vsphere import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "sync" ) @@ -42,23 +43,23 @@ func bootstrapOnce() { // 1. Read vSphere conf and get VSphere instances vsphereInstances, err := GetVSphereInstances() if err != nil { - framework.Failf("Failed to bootstrap vSphere with error: %v", err) + e2elog.Failf("Failed to bootstrap vSphere with error: %v", err) } // 2. Get all nodes nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{}) if err != nil { - framework.Failf("Failed to get nodes: %v", err) + e2elog.Failf("Failed to get nodes: %v", err) } TestContext = VSphereContext{NodeMapper: &NodeMapper{}, VSphereInstances: vsphereInstances} // 3. Get Node to VSphere mapping err = TestContext.NodeMapper.GenerateNodeMap(vsphereInstances, *nodeList) if err != nil { - framework.Failf("Failed to bootstrap vSphere with error: %v", err) + e2elog.Failf("Failed to bootstrap vSphere with error: %v", err) } // 4. 
Generate Zone to Datastore mapping err = TestContext.NodeMapper.GenerateZoneToDatastoreMap() if err != nil { - framework.Failf("Failed to generate zone to datastore mapping with error: %v", err) + e2elog.Failf("Failed to generate zone to datastore mapping with error: %v", err) } close(waiting) } diff --git a/test/e2e/storage/vsphere/vsphere_volume_datastore.go b/test/e2e/storage/vsphere/vsphere_volume_datastore.go index e40776767d8..812b82b084b 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_datastore.go +++ b/test/e2e/storage/vsphere/vsphere_volume_datastore.go @@ -26,6 +26,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/storage/utils" ) @@ -59,7 +60,7 @@ var _ = utils.SIGDescribe("Volume Provisioning on Datastore [Feature:vsphere]", scParameters = make(map[string]string) nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) if !(len(nodeList.Items) > 0) { - framework.Failf("Unable to find ready and schedulable Node") + e2elog.Failf("Unable to find ready and schedulable Node") } }) diff --git a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go index 75b6c178aae..99ea427fe97 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go +++ b/test/e2e/storage/vsphere/vsphere_volume_vsan_policy.go @@ -108,7 +108,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp scParameters = make(map[string]string) nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) if !(len(nodeList.Items) > 0) { - framework.Failf("Unable to find ready and schedulable Node") + e2elog.Failf("Unable to find ready and schedulable Node") } masternodes, _ := framework.GetMasterAndWorkerNodesOrDie(client) gomega.Expect(masternodes).NotTo(gomega.BeEmpty()) diff --git a/test/e2e/storage/vsphere/vsphere_zone_support.go b/test/e2e/storage/vsphere/vsphere_zone_support.go index 693c863d3c4..7d7551832e2 100644 --- a/test/e2e/storage/vsphere/vsphere_zone_support.go +++ b/test/e2e/storage/vsphere/vsphere_zone_support.go @@ -111,7 +111,7 @@ var _ = utils.SIGDescribe("Zone Support", func() { zones = make([]string, 0) nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) if !(len(nodeList.Items) > 0) { - framework.Failf("Unable to find ready and schedulable Node") + e2elog.Failf("Unable to find ready and schedulable Node") } }) diff --git a/test/e2e/ui/dashboard.go b/test/e2e/ui/dashboard.go index 58042e05ba3..2bafcc9ef26 100644 --- a/test/e2e/ui/dashboard.go +++ b/test/e2e/ui/dashboard.go @@ -80,7 +80,7 @@ var _ = SIGDescribe("Kubernetes Dashboard", func() { Error() if err != nil { if ctx.Err() != nil { - framework.Failf("Request to kubernetes-dashboard failed: %v", err) + e2elog.Failf("Request to kubernetes-dashboard failed: %v", err) return true, err } e2elog.Logf("Request to kubernetes-dashboard failed: %v", err) diff --git a/test/e2e/upgrades/apps/daemonsets.go b/test/e2e/upgrades/apps/daemonsets.go index 13cc30e7f9d..d216c1b8acc 100644 --- a/test/e2e/upgrades/apps/daemonsets.go +++ b/test/e2e/upgrades/apps/daemonsets.go @@ -80,7 +80,7 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) { ginkgo.By("Creating a DaemonSet") var err error if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(t.daemonSet); err != nil { - framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err) + 
e2elog.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err) } ginkgo.By("Waiting for DaemonSet pods to become ready") @@ -113,7 +113,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework) res, err := checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels) framework.ExpectNoError(err) if !res { - framework.Failf("expected DaemonSet pod to be running on all nodes, it was not") + e2elog.Failf("expected DaemonSet pod to be running on all nodes, it was not") } // DaemonSet resource itself should be good @@ -121,7 +121,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework) res, err = checkDaemonStatus(f, t.daemonSet.Namespace, t.daemonSet.Name) framework.ExpectNoError(err) if !res { - framework.Failf("expected DaemonSet to be in a good state, it was not") + e2elog.Failf("expected DaemonSet to be in a good state, it was not") } } diff --git a/test/e2e/upgrades/configmaps.go b/test/e2e/upgrades/configmaps.go index 7cda4b76fbc..1ee3fb58fa0 100644 --- a/test/e2e/upgrades/configmaps.go +++ b/test/e2e/upgrades/configmaps.go @@ -22,6 +22,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -58,7 +59,7 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) { ginkgo.By("Creating a ConfigMap") var err error if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(t.configMap); err != nil { - framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err) + e2elog.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err) } ginkgo.By("Making sure the ConfigMap is consumable") diff --git a/test/e2e/upgrades/mysql.go b/test/e2e/upgrades/mysql.go index e537a800004..b68d9567448 100644 --- a/test/e2e/upgrades/mysql.go +++ b/test/e2e/upgrades/mysql.go @@ -159,10 +159,10 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up readRatio := float64(readSuccess) / float64(readSuccess+readFailure) writeRatio := float64(writeSuccess) / float64(writeSuccess+writeFailure) if readRatio < 0.75 { - framework.Failf("Too many failures reading data. Success ratio: %f", readRatio) + e2elog.Failf("Too many failures reading data. Success ratio: %f", readRatio) } if writeRatio < 0.75 { - framework.Failf("Too many failures writing data. Success ratio: %f", writeRatio) + e2elog.Failf("Too many failures writing data. 
Success ratio: %f", writeRatio) } } diff --git a/test/e2e/upgrades/secrets.go b/test/e2e/upgrades/secrets.go index bd66763f170..7d5b2f354a8 100644 --- a/test/e2e/upgrades/secrets.go +++ b/test/e2e/upgrades/secrets.go @@ -23,6 +23,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -56,7 +57,7 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) { ginkgo.By("Creating a secret") var err error if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(t.secret); err != nil { - framework.Failf("unable to create test secret %s: %v", t.secret.Name, err) + e2elog.Failf("unable to create test secret %s: %v", t.secret.Name, err) } ginkgo.By("Making sure the secret is consumable") diff --git a/test/e2e/upgrades/storage/BUILD b/test/e2e/upgrades/storage/BUILD index 8044b3d8846..346595d9824 100644 --- a/test/e2e/upgrades/storage/BUILD +++ b/test/e2e/upgrades/storage/BUILD @@ -18,6 +18,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/log:go_default_library", "//test/e2e/framework/volume:go_default_library", "//test/e2e/storage/utils:go_default_library", "//test/e2e/upgrades:go_default_library", diff --git a/test/e2e/upgrades/storage/persistent_volumes.go b/test/e2e/upgrades/storage/persistent_volumes.go index 9d12139680c..177ef577f51 100644 --- a/test/e2e/upgrades/storage/persistent_volumes.go +++ b/test/e2e/upgrades/storage/persistent_volumes.go @@ -20,6 +20,7 @@ import ( "k8s.io/api/core/v1" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework/volume" "github.com/onsi/ginkgo" @@ -90,7 +91,7 @@ func (t *PersistentVolumeUpgradeTest) Teardown(f *framework.Framework) { errs = append(errs, err) } if len(errs) > 0 { - framework.Failf("Failed to delete 1 or more PVs/PVCs and/or the GCE volume. Errors: %v", utilerrors.NewAggregate(errs)) + e2elog.Failf("Failed to delete 1 or more PVs/PVCs and/or the GCE volume. 
Errors: %v", utilerrors.NewAggregate(errs)) } } diff --git a/test/e2e/windows/density.go b/test/e2e/windows/density.go index 466d448007b..77baa3d995b 100644 --- a/test/e2e/windows/density.go +++ b/test/e2e/windows/density.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/tools/cache" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" imageutils "k8s.io/kubernetes/test/utils/image" @@ -118,7 +119,7 @@ func runDensityBatchTest(f *framework.Framework, testArg densityTest) (time.Dura }, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue()) if len(watchTimes) < testArg.podsNr { - framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.") + e2elog.Failf("Timeout reached waiting for all Pods to be observed by the watch.") } // Analyze results diff --git a/test/e2e/windows/gmsa.go b/test/e2e/windows/gmsa.go index 21aef8bd5e4..d2b4df56f23 100644 --- a/test/e2e/windows/gmsa.go +++ b/test/e2e/windows/gmsa.go @@ -24,6 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" imageutils "k8s.io/kubernetes/test/utils/image" "github.com/onsi/ginkgo" @@ -98,12 +99,12 @@ var _ = SIGDescribe("[Feature:Windows] [Feature:WindowsGMSA] GMSA [Slow]", func( }, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue()) if !strings.HasPrefix(output, domain) { - framework.Failf("Expected %q to start with %q", output, domain) + e2elog.Failf("Expected %q to start with %q", output, domain) } expectedSubstr := "The command completed successfully" if !strings.Contains(output, expectedSubstr) { - framework.Failf("Expected %q to contain %q", output, expectedSubstr) + e2elog.Failf("Expected %q to contain %q", output, expectedSubstr) } } diff --git a/test/e2e_kubeadm/BUILD b/test/e2e_kubeadm/BUILD index 7326af2d14b..0cde306e78a 100644 --- a/test/e2e_kubeadm/BUILD +++ b/test/e2e_kubeadm/BUILD @@ -33,6 +33,7 @@ go_test( "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//staging/src/k8s.io/cluster-bootstrap/token/api:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/log:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/ginkgo/config:go_default_library", "//vendor/github.com/onsi/ginkgo/reporters:go_default_library", @@ -78,6 +79,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/log:go_default_library", "//vendor/github.com/onsi/ginkgo:go_default_library", "//vendor/github.com/onsi/gomega:go_default_library", "//vendor/github.com/onsi/gomega/gstruct:go_default_library", diff --git a/test/e2e_kubeadm/kubeadm_config_test.go b/test/e2e_kubeadm/kubeadm_config_test.go index b1b40711776..8bcacada691 100644 --- a/test/e2e_kubeadm/kubeadm_config_test.go +++ b/test/e2e_kubeadm/kubeadm_config_test.go @@ -22,6 +22,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" . "github.com/onsi/ginkgo" . 
"github.com/onsi/gomega" @@ -71,11 +72,11 @@ var _ = KubeadmDescribe("kubeadm-config ConfigMap", func() { // checks that all the control-plane nodes are in the apiEndpoints list for _, cp := range controlPlanes.Items { if _, ok := d[cp.Name]; !ok { - framework.Failf("failed to get apiEndpoints for control-plane %s in %s", cp.Name, kubeadmConfigClusterStatusConfigMapKey) + e2elog.Failf("failed to get apiEndpoints for control-plane %s in %s", cp.Name, kubeadmConfigClusterStatusConfigMapKey) } } } else { - framework.Failf("failed to get apiEndpoints from %s", kubeadmConfigClusterStatusConfigMapKey) + e2elog.Failf("failed to get apiEndpoints from %s", kubeadmConfigClusterStatusConfigMapKey) } }) @@ -111,7 +112,7 @@ func unmarshalYaml(data string) map[interface{}]interface{} { m := make(map[interface{}]interface{}) err := yaml.Unmarshal([]byte(data), &m) if err != nil { - framework.Failf("error parsing %s ConfigMap: %v", kubeadmConfigName, err) + e2elog.Failf("error parsing %s ConfigMap: %v", kubeadmConfigName, err) } return m } diff --git a/test/e2e_kubeadm/kubelet_config_test.go b/test/e2e_kubeadm/kubelet_config_test.go index 7b507bc0776..d246cc06741 100644 --- a/test/e2e_kubeadm/kubelet_config_test.go +++ b/test/e2e_kubeadm/kubelet_config_test.go @@ -23,6 +23,7 @@ import ( rbacv1 "k8s.io/api/rbac/v1" "k8s.io/apimachinery/pkg/util/version" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" @@ -73,7 +74,7 @@ var _ = KubeadmDescribe("kubelet-config ConfigMap", func() { k8sVersionString := m["kubernetesVersion"].(string) k8sVersion, err := version.ParseSemantic(k8sVersionString) if err != nil { - framework.Failf("error reading kubernetesVersion from %s ConfigMap: %v", kubeadmConfigName, err) + e2elog.Failf("error reading kubernetesVersion from %s ConfigMap: %v", kubeadmConfigName, err) } // Computes all the names derived from the kubernetesVersion diff --git a/test/e2e_kubeadm/util.go b/test/e2e_kubeadm/util.go index 19ce05cb322..e991f99cb5d 100644 --- a/test/e2e_kubeadm/util.go +++ b/test/e2e_kubeadm/util.go @@ -24,6 +24,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" "github.com/onsi/gomega" "github.com/onsi/gomega/gstruct" @@ -167,7 +168,7 @@ func ExpectSubjectHasAccessToResource(c clientset.Interface, subjectKind, subjec }, } default: - framework.Failf("invalid subjectKind %s", subjectKind) + e2elog.Failf("invalid subjectKind %s", subjectKind) } s, err := c.AuthorizationV1().SubjectAccessReviews().Create(sar) diff --git a/test/e2e_node/apparmor_test.go b/test/e2e_node/apparmor_test.go index 0e82787240a..bb0c01edc6f 100644 --- a/test/e2e_node/apparmor_test.go +++ b/test/e2e_node/apparmor_test.go @@ -35,6 +35,7 @@ import ( watchtools "k8s.io/client-go/tools/watch" "k8s.io/kubernetes/pkg/security/apparmor" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "github.com/davecgh/go-spew/spew" @@ -59,7 +60,7 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor It("should enforce a profile blocking writes", func() { status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"deny-write") if len(status.ContainerStatuses) == 0 { - framework.Failf("Unexpected pod status: %s", spew.Sdump(status)) + e2elog.Failf("Unexpected pod 
status: %s", spew.Sdump(status)) return } state := status.ContainerStatuses[0].State.Terminated @@ -70,7 +71,7 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor It("should enforce a permissive profile", func() { status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"audit-write") if len(status.ContainerStatuses) == 0 { - framework.Failf("Unexpected pod status: %s", spew.Sdump(status)) + e2elog.Failf("Unexpected pod status: %s", spew.Sdump(status)) return } state := status.ContainerStatuses[0].State.Terminated diff --git a/test/e2e_node/benchmark_util.go b/test/e2e_node/benchmark_util.go index 93e0b3b5973..53edc5f6363 100644 --- a/test/e2e_node/benchmark_util.go +++ b/test/e2e_node/benchmark_util.go @@ -160,22 +160,22 @@ func getTestNodeInfo(f *framework.Framework, testName, testDesc string) map[stri cpu, ok := node.Status.Capacity[v1.ResourceCPU] if !ok { - framework.Failf("Fail to fetch CPU capacity value of test node.") + e2elog.Failf("Fail to fetch CPU capacity value of test node.") } memory, ok := node.Status.Capacity[v1.ResourceMemory] if !ok { - framework.Failf("Fail to fetch Memory capacity value of test node.") + e2elog.Failf("Fail to fetch Memory capacity value of test node.") } cpuValue, ok := cpu.AsInt64() if !ok { - framework.Failf("Fail to fetch CPU capacity value as Int64.") + e2elog.Failf("Fail to fetch CPU capacity value as Int64.") } memoryValue, ok := memory.AsInt64() if !ok { - framework.Failf("Fail to fetch Memory capacity value as Int64.") + e2elog.Failf("Fail to fetch Memory capacity value as Int64.") } image := node.Status.NodeInfo.OSImage diff --git a/test/e2e_node/density_test.go b/test/e2e_node/density_test.go index c386ba7be55..4ffd43f8ca4 100644 --- a/test/e2e_node/density_test.go +++ b/test/e2e_node/density_test.go @@ -359,7 +359,7 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg }, 10*time.Minute, 10*time.Second).Should(BeTrue()) if len(watchTimes) < testArg.podsNr { - framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.") + e2elog.Failf("Timeout reached waiting for all Pods to be observed by the watch.") } // Analyze results diff --git a/test/e2e_node/device_plugin.go b/test/e2e_node/device_plugin.go index 185e1ff9dbb..10370fd35d0 100644 --- a/test/e2e_node/device_plugin.go +++ b/test/e2e_node/device_plugin.go @@ -250,7 +250,7 @@ func ensurePodContainerRestart(f *framework.Framework, podName string, contName var currentCount int32 p, err := f.PodClient().Get(podName, metav1.GetOptions{}) if err != nil || len(p.Status.ContainerStatuses) < 1 { - framework.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err) + e2elog.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err) } initialCount = p.Status.ContainerStatuses[0].RestartCount Eventually(func() bool { @@ -268,7 +268,7 @@ func ensurePodContainerRestart(f *framework.Framework, podName string, contName func parseLog(f *framework.Framework, podName string, contName string, re string) string { logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName) if err != nil { - framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) + e2elog.Failf("GetPodLogs for pod %q failed: %v", podName, err) } e2elog.Logf("got pod logs: %v", logs) diff --git a/test/e2e_node/dockershim_checkpoint_test.go b/test/e2e_node/dockershim_checkpoint_test.go index 435ea6ae12a..60e1ca23690 100644 --- a/test/e2e_node/dockershim_checkpoint_test.go +++ 
b/test/e2e_node/dockershim_checkpoint_test.go @@ -56,7 +56,7 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do runPodCheckpointTest(f, podName, func() { checkpoints := findCheckpoints(podName) if len(checkpoints) == 0 { - framework.Failf("No checkpoint for the pod was found") + e2elog.Failf("No checkpoint for the pod was found") } }) }) @@ -85,7 +85,7 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do runPodCheckpointTest(f, podName, func() { checkpoints := findCheckpoints(podName) if len(checkpoints) == 0 { - framework.Failf("No checkpoint for the pod was found") + e2elog.Failf("No checkpoint for the pod was found") } By("Removing checkpoint of test pod") for _, filename := range checkpoints { @@ -134,7 +134,7 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do By("Corrupt checkpoint file") checkpoints := findCheckpoints(podName) if len(checkpoints) == 0 { - framework.Failf("No checkpoint for the pod was found") + e2elog.Failf("No checkpoint for the pod was found") } for _, file := range checkpoints { f, err := os.OpenFile(file, os.O_WRONLY|os.O_APPEND, 0644) @@ -179,7 +179,7 @@ func runPodCheckpointTest(f *framework.Framework, podName string, twist func()) e2elog.Logf("Checkpoint of %q still exists: %v", podName, checkpoints) return false, nil }); err != nil { - framework.Failf("Failed to observe checkpoint being removed within timeout: %v", err) + e2elog.Failf("Failed to observe checkpoint being removed within timeout: %v", err) } } diff --git a/test/e2e_node/image_id_test.go b/test/e2e_node/image_id_test.go index f1bd0c2aaa0..3e9b5821804 100644 --- a/test/e2e_node/image_id_test.go +++ b/test/e2e_node/image_id_test.go @@ -20,6 +20,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "github.com/davecgh/go-spew/spew" @@ -58,7 +59,7 @@ var _ = framework.KubeDescribe("ImageID [NodeFeature: ImageID]", func() { status := runningPod.Status if len(status.ContainerStatuses) == 0 { - framework.Failf("Unexpected pod status; %s", spew.Sdump(status)) + e2elog.Failf("Unexpected pod status; %s", spew.Sdump(status)) return } diff --git a/test/e2e_node/resource_collector.go b/test/e2e_node/resource_collector.go index f404d1d6e63..7a9928c3462 100644 --- a/test/e2e_node/resource_collector.go +++ b/test/e2e_node/resource_collector.go @@ -96,7 +96,7 @@ func (r *ResourceCollector) Start() { stats.SystemContainerRuntime: runtimeContainer, } } else { - framework.Failf("Failed to get runtime container name in test-e2e-node resource collector.") + e2elog.Failf("Failed to get runtime container name in test-e2e-node resource collector.") } wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) { diff --git a/test/e2e_node/resource_usage_test.go b/test/e2e_node/resource_usage_test.go index 63eb678217b..cd93858509c 100644 --- a/test/e2e_node/resource_usage_test.go +++ b/test/e2e_node/resource_usage_test.go @@ -246,7 +246,7 @@ func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsageP } } if len(errList) > 0 { - framework.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n")) + e2elog.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n")) } } @@ -280,7 +280,7 @@ func verifyCPULimits(expected framework.ContainersCPUSummary, actual framework.N } } if len(errList) > 0 { - 
framework.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n")) + e2elog.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n")) } } diff --git a/test/e2e_node/restart_test.go b/test/e2e_node/restart_test.go index 64e1e2b9fcf..266756ee289 100644 --- a/test/e2e_node/restart_test.go +++ b/test/e2e_node/restart_test.go @@ -91,7 +91,7 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur // startTimeout fit on the node and the node is now saturated. runningPods := waitForPods(f, podCount, startTimeout) if len(runningPods) < minPods { - framework.Failf("Failed to start %d pods, cannot test that restarting container runtime doesn't leak IPs", minPods) + e2elog.Failf("Failed to start %d pods, cannot test that restarting container runtime doesn't leak IPs", minPods) } for i := 0; i < restartCount; i += 1 { @@ -114,7 +114,7 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur return nil }, 1*time.Minute, 2*time.Second).Should(BeNil()) if stdout, err := exec.Command("sudo", "kill", fmt.Sprintf("%d", pid)).CombinedOutput(); err != nil { - framework.Failf("Failed to kill container runtime (pid=%d): %v, stdout: %q", pid, err, string(stdout)) + e2elog.Failf("Failed to kill container runtime (pid=%d): %v, stdout: %q", pid, err, string(stdout)) } // Assume that container runtime will be restarted by systemd/supervisord etc. time.Sleep(20 * time.Second) @@ -123,12 +123,12 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur By("Checking currently Running/Ready pods") postRestartRunningPods := waitForPods(f, len(runningPods), recoverTimeout) if len(postRestartRunningPods) == 0 { - framework.Failf("Failed to start *any* pods after container runtime restart, this might indicate an IP leak") + e2elog.Failf("Failed to start *any* pods after container runtime restart, this might indicate an IP leak") } By("Confirm no containers have terminated") for _, pod := range postRestartRunningPods { if c := testutils.TerminatedContainers(pod); len(c) != 0 { - framework.Failf("Pod %q has failed containers %+v after container runtime restart, this might indicate an IP leak", pod.Name, c) + e2elog.Failf("Pod %q has failed containers %+v after container runtime restart, this might indicate an IP leak", pod.Name, c) } } By(fmt.Sprintf("Container runtime restart test passed with %d pods", len(postRestartRunningPods))) diff --git a/test/e2e_node/runtime_conformance_test.go b/test/e2e_node/runtime_conformance_test.go index 4d3381266f3..c438aa6b50d 100644 --- a/test/e2e_node/runtime_conformance_test.go +++ b/test/e2e_node/runtime_conformance_test.go @@ -145,7 +145,7 @@ var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() { if i < flakeRetry { e2elog.Logf("No.%d attempt failed: %v, retrying...", i, err) } else { - framework.Failf("All %d attempts failed: %v", flakeRetry, err) + e2elog.Failf("All %d attempts failed: %v", flakeRetry, err) } } }) diff --git a/test/e2e_node/security_context_test.go b/test/e2e_node/security_context_test.go index 5fdf648e04f..fb9c50bd8ec 100644 --- a/test/e2e_node/security_context_test.go +++ b/test/e2e_node/security_context_test.go @@ -69,7 +69,7 @@ var _ = framework.KubeDescribe("Security Context", func() { pid1 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top") pid2 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-2", "/bin/pidof", "sleep") if pid1 != "1" || pid2 != "1" { - 
framework.Failf("PIDs of different containers are not all 1: test-container-1=%v, test-container-2=%v", pid1, pid2) + e2elog.Failf("PIDs of different containers are not all 1: test-container-1=%v, test-container-2=%v", pid1, pid2) } }) @@ -110,7 +110,7 @@ var _ = framework.KubeDescribe("Security Context", func() { pid1 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top") pid2 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-2", "/bin/pidof", "top") if pid1 != pid2 { - framework.Failf("PIDs are not the same in different containers: test-container-1=%v, test-container-2=%v", pid1, pid2) + e2elog.Failf("PIDs are not the same in different containers: test-container-1=%v, test-container-2=%v", pid1, pid2) } }) }) @@ -163,18 +163,18 @@ var _ = framework.KubeDescribe("Security Context", func() { createAndWaitHostPidPod(busyboxPodName, true) logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) if err != nil { - framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) + e2elog.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) } pids := strings.TrimSpace(logs) e2elog.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName) if pids == "" { - framework.Failf("nginx's pid should be seen by hostpid containers") + e2elog.Failf("nginx's pid should be seen by hostpid containers") } pidSets := sets.NewString(strings.Split(pids, " ")...) if !pidSets.Has(nginxPid) { - framework.Failf("nginx's pid should be seen by hostpid containers") + e2elog.Failf("nginx's pid should be seen by hostpid containers") } }) @@ -183,14 +183,14 @@ var _ = framework.KubeDescribe("Security Context", func() { createAndWaitHostPidPod(busyboxPodName, false) logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) if err != nil { - framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) + e2elog.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) } pids := strings.TrimSpace(logs) e2elog.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName) pidSets := sets.NewString(strings.Split(pids, " ")...) 
if pidSets.Has(nginxPid) { - framework.Failf("nginx's pid should not be seen by non-hostpid containers") + e2elog.Failf("nginx's pid should not be seen by non-hostpid containers") } }) }) @@ -228,7 +228,7 @@ var _ = framework.KubeDescribe("Security Context", func() { BeforeEach(func() { output, err := exec.Command("sh", "-c", "ipcmk -M 1048576 | awk '{print $NF}'").Output() if err != nil { - framework.Failf("Failed to create the shared memory on the host: %v", err) + e2elog.Failf("Failed to create the shared memory on the host: %v", err) } hostSharedMemoryID = strings.TrimSpace(string(output)) e2elog.Logf("Got host shared memory ID %q", hostSharedMemoryID) @@ -239,13 +239,13 @@ var _ = framework.KubeDescribe("Security Context", func() { createAndWaitHostIPCPod(ipcutilsPodName, true) logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName) if err != nil { - framework.Failf("GetPodLogs for pod %q failed: %v", ipcutilsPodName, err) + e2elog.Failf("GetPodLogs for pod %q failed: %v", ipcutilsPodName, err) } podSharedMemoryIDs := strings.TrimSpace(logs) e2elog.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, ipcutilsPodName) if !strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) { - framework.Failf("hostIPC container should show shared memory IDs on host") + e2elog.Failf("hostIPC container should show shared memory IDs on host") } }) @@ -254,13 +254,13 @@ var _ = framework.KubeDescribe("Security Context", func() { createAndWaitHostIPCPod(ipcutilsPodName, false) logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName) if err != nil { - framework.Failf("GetPodLogs for pod %q failed: %v", ipcutilsPodName, err) + e2elog.Failf("GetPodLogs for pod %q failed: %v", ipcutilsPodName, err) } podSharedMemoryIDs := strings.TrimSpace(logs) e2elog.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, ipcutilsPodName) if strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) { - framework.Failf("non-hostIPC container should not show shared memory IDs on host") + e2elog.Failf("non-hostIPC container should not show shared memory IDs on host") } }) @@ -268,7 +268,7 @@ var _ = framework.KubeDescribe("Security Context", func() { if hostSharedMemoryID != "" { _, err := exec.Command("sh", "-c", fmt.Sprintf("ipcrm -m %q", hostSharedMemoryID)).Output() if err != nil { - framework.Failf("Failed to remove shared memory %q on the host: %v", hostSharedMemoryID, err) + e2elog.Failf("Failed to remove shared memory %q on the host: %v", hostSharedMemoryID, err) } } }) @@ -310,7 +310,7 @@ var _ = framework.KubeDescribe("Security Context", func() { BeforeEach(func() { l, err = net.Listen("tcp", ":0") if err != nil { - framework.Failf("Failed to open a new tcp port: %v", err) + e2elog.Failf("Failed to open a new tcp port: %v", err) } addr := strings.Split(l.Addr().String(), ":") listeningPort = addr[len(addr)-1] @@ -322,12 +322,12 @@ var _ = framework.KubeDescribe("Security Context", func() { createAndWaitHostNetworkPod(busyboxPodName, true) logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) if err != nil { - framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) + e2elog.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) } e2elog.Logf("Got logs for pod %q: %q", busyboxPodName, logs) if !strings.Contains(logs, listeningPort) { - framework.Failf("host-networked container should listening on same port as host") + e2elog.Failf("host-networked 
container should listening on same port as host") } }) @@ -336,12 +336,12 @@ var _ = framework.KubeDescribe("Security Context", func() { createAndWaitHostNetworkPod(busyboxPodName, false) logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName) if err != nil { - framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) + e2elog.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err) } e2elog.Logf("Got logs for pod %q: %q", busyboxPodName, logs) if strings.Contains(logs, listeningPort) { - framework.Failf("non-hostnetworked container shouldn't show the same port as host") + e2elog.Failf("non-hostnetworked container shouldn't show the same port as host") } }) @@ -388,12 +388,12 @@ var _ = framework.KubeDescribe("Security Context", func() { podName := createAndWaitUserPod(true) logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { - framework.Failf("GetPodLogs for pod %q failed: %v", podName, err) + e2elog.Failf("GetPodLogs for pod %q failed: %v", podName, err) } e2elog.Logf("Got logs for pod %q: %q", podName, logs) if strings.Contains(logs, "Operation not permitted") { - framework.Failf("privileged container should be able to create dummy device") + e2elog.Failf("privileged container should be able to create dummy device") } }) }) diff --git a/test/e2e_node/summary_test.go b/test/e2e_node/summary_test.go index e50d91d2bde..7e31fd2c747 100644 --- a/test/e2e_node/summary_test.go +++ b/test/e2e_node/summary_test.go @@ -391,7 +391,7 @@ func summaryObjectID(element interface{}) string { case stats.UserDefinedMetric: return el.Name default: - framework.Failf("Unknown type: %T", el) + e2elog.Failf("Unknown type: %T", el) return "???" } } diff --git a/test/utils/crd/BUILD b/test/utils/crd/BUILD index 4ed4991479e..e74b54755ba 100644 --- a/test/utils/crd/BUILD +++ b/test/utils/crd/BUILD @@ -13,6 +13,7 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", "//staging/src/k8s.io/client-go/dynamic:go_default_library", "//test/e2e/framework:go_default_library", + "//test/e2e/framework/log:go_default_library", ], ) diff --git a/test/utils/crd/crd_util.go b/test/utils/crd/crd_util.go index 42ecdf1139a..2137eb2d87c 100644 --- a/test/utils/crd/crd_util.go +++ b/test/utils/crd/crd_util.go @@ -26,6 +26,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/dynamic" "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" ) // CleanCrdFn declares the clean up function needed to remove the CRD @@ -52,17 +53,17 @@ func CreateMultiVersionTestCRD(f *framework.Framework, group string, opts ...Opt // Creating a custom resource definition for use by assorted tests. 
config, err := framework.LoadConfig() if err != nil { - framework.Failf("failed to load config: %v", err) + e2elog.Failf("failed to load config: %v", err) return nil, err } apiExtensionClient, err := crdclientset.NewForConfig(config) if err != nil { - framework.Failf("failed to initialize apiExtensionClient: %v", err) + e2elog.Failf("failed to initialize apiExtensionClient: %v", err) return nil, err } dynamicClient, err := dynamic.NewForConfig(config) if err != nil { - framework.Failf("failed to initialize dynamic client: %v", err) + e2elog.Failf("failed to initialize dynamic client: %v", err) return nil, err } @@ -89,7 +90,7 @@ func CreateMultiVersionTestCRD(f *framework.Framework, group string, opts ...Opt //create CRD and waits for the resource to be recognized and available. crd, err = fixtures.CreateNewCustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient) if err != nil { - framework.Failf("failed to create CustomResourceDefinition: %v", err) + e2elog.Failf("failed to create CustomResourceDefinition: %v", err) return nil, err } @@ -107,7 +108,7 @@ func CreateMultiVersionTestCRD(f *framework.Framework, group string, opts ...Opt testcrd.CleanUp = func() error { err := fixtures.DeleteCustomResourceDefinition(crd, apiExtensionClient) if err != nil { - framework.Failf("failed to delete CustomResourceDefinition(%s): %v", name, err) + e2elog.Failf("failed to delete CustomResourceDefinition(%s): %v", name, err) } return err }
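
Every hunk above applies the same mechanical substitution: the file imports k8s.io/kubernetes/test/e2e/framework/log under the e2elog alias (where it is not already imported) and each framework.Failf call becomes e2elog.Failf with its arguments carried over unchanged. A minimal sketch of what a converted call site looks like, using a hypothetical test file rather than one taken from this patch:

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"

	"github.com/onsi/ginkgo"
)

var _ = framework.KubeDescribe("Example", func() {
	ginkgo.It("reports failures through the log helper", func() {
		got, want := 1, 2
		if got != want {
			// Before this kind of change the call would have been:
			//   framework.Failf("unexpected value: got %d, want %d", got, want)
			// Failf keeps the same printf-style signature in the log package,
			// so the arguments are passed through untouched.
			e2elog.Failf("unexpected value: got %d, want %d", got, want)
		}
	})
})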