use log func in test/e2e/network

hwdef 2019-09-13 12:44:29 +08:00
parent 16cde44e24
commit af16366001
18 changed files with 295 additions and 312 deletions
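The change is mechanical throughout: each call through the aliased log subpackage (e2elog.Logf, e2elog.Failf) becomes the equivalent helper on the framework package itself (framework.Logf, framework.Failf), and the now-unused e2elog import is dropped from every file and from the matching Bazel go_default_library deps. A minimal before/after sketch of the call-site pattern follows; it is illustrative only, and the pod name is a placeholder rather than a line from this commit:

package network

import (
    "k8s.io/kubernetes/test/e2e/framework"
    e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// before shows the old spelling: logging through the aliased subpackage.
func before() {
    e2elog.Logf("Created pod %v", "placeholder-pod")
}

// after shows the new spelling: the same helper exposed by the framework
// package, which lets each file drop its e2elog import (and the BUILD
// dependency on //test/e2e/framework/log).
func after() {
    framework.Logf("Created pod %v", "placeholder-pod")
}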

@@ -65,7 +65,6 @@ go_library(
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/endpoints:go_default_library",
"//test/e2e/framework/ingress:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/providers/gce:go_default_library",

@@ -25,7 +25,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
"github.com/onsi/ginkgo"
@@ -406,11 +405,11 @@ var _ = SIGDescribe("DNS", func() {
}
testAgnhostPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testAgnhostPod)
framework.ExpectNoError(err, "failed to create pod: %s", testAgnhostPod.Name)
e2elog.Logf("Created pod %v", testAgnhostPod)
framework.Logf("Created pod %v", testAgnhostPod)
defer func() {
e2elog.Logf("Deleting pod %s...", testAgnhostPod.Name)
framework.Logf("Deleting pod %s...", testAgnhostPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testAgnhostPod.Name, metav1.NewDeleteOptions(0)); err != nil {
e2elog.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err)
framework.Failf("ginkgo.Failed to delete pod %s: %v", testAgnhostPod.Name, err)
}
}()
err = f.WaitForPodRunning(testAgnhostPod.Name)
@@ -433,13 +432,13 @@ var _ = SIGDescribe("DNS", func() {
ginkgo.By("Verifying customized DNS suffix list is configured on pod...")
stdout := runCommand("dns-suffix")
if !strings.Contains(stdout, testSearchPath) {
e2elog.Failf("customized DNS suffix list not found configured in pod, expected to contain: %s, got: %s", testSearchPath, stdout)
framework.Failf("customized DNS suffix list not found configured in pod, expected to contain: %s, got: %s", testSearchPath, stdout)
}
ginkgo.By("Verifying customized DNS server is configured on pod...")
stdout = runCommand("dns-server-list")
if !strings.Contains(stdout, testServerIP) {
e2elog.Failf("customized DNS server not found in configured in pod, expected to contain: %s, got: %s", testServerIP, stdout)
framework.Failf("customized DNS server not found in configured in pod, expected to contain: %s, got: %s", testServerIP, stdout)
}
})
@@ -455,11 +454,11 @@ var _ = SIGDescribe("DNS", func() {
})
testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testServerPod)
framework.ExpectNoError(err, "failed to create pod: %s", testServerPod.Name)
e2elog.Logf("Created pod %v", testServerPod)
framework.Logf("Created pod %v", testServerPod)
defer func() {
e2elog.Logf("Deleting pod %s...", testServerPod.Name)
framework.Logf("Deleting pod %s...", testServerPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
e2elog.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err)
framework.Failf("ginkgo.Failed to delete pod %s: %v", testServerPod.Name, err)
}
}()
err = f.WaitForPodRunning(testServerPod.Name)
@@ -469,7 +468,7 @@ var _ = SIGDescribe("DNS", func() {
testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(testServerPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "failed to get pod %v", testServerPod.Name)
testServerIP := testServerPod.Status.PodIP
e2elog.Logf("testServerIP is %s", testServerIP)
framework.Logf("testServerIP is %s", testServerIP)
ginkgo.By("Creating a pod with dnsPolicy=None and customized dnsConfig...")
testUtilsPod := generateDNSUtilsPod()
@@ -487,11 +486,11 @@ var _ = SIGDescribe("DNS", func() {
}
testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod)
framework.ExpectNoError(err, "failed to create pod: %s", testUtilsPod.Name)
e2elog.Logf("Created pod %v", testUtilsPod)
framework.Logf("Created pod %v", testUtilsPod)
defer func() {
e2elog.Logf("Deleting pod %s...", testUtilsPod.Name)
framework.Logf("Deleting pod %s...", testUtilsPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil {
e2elog.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err)
framework.Failf("ginkgo.Failed to delete pod %s: %v", testUtilsPod.Name, err)
}
}()
err = f.WaitForPodRunning(testUtilsPod.Name)
@@ -510,7 +509,7 @@ var _ = SIGDescribe("DNS", func() {
})
framework.ExpectNoError(err, "failed to examine resolv,conf file on pod, stdout: %v, stderr: %v, err: %v", stdout, stderr, err)
if !strings.Contains(stdout, "ndots:2") {
e2elog.Failf("customized DNS options not found in resolv.conf, got: %s", stdout)
framework.Failf("customized DNS options not found in resolv.conf, got: %s", stdout)
}
ginkgo.By("Verifying customized name server and search path are working...")
@@ -529,12 +528,12 @@ var _ = SIGDescribe("DNS", func() {
CaptureStderr: true,
})
if err != nil {
e2elog.Logf("ginkgo.Failed to execute dig command, stdout:%v, stderr: %v, err: %v", stdout, stderr, err)
framework.Logf("ginkgo.Failed to execute dig command, stdout:%v, stderr: %v, err: %v", stdout, stderr, err)
return false, nil
}
res := strings.Split(stdout, "\n")
if len(res) != 1 || res[0] != testInjectedIP {
e2elog.Logf("Expect command `%v` to return %s, got: %v", cmd, testInjectedIP, res)
framework.Logf("Expect command `%v` to return %s, got: %v", cmd, testInjectedIP, res)
return false, nil
}
return true, nil

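The two spellings are interchangeable because, at this point in the tree, both the framework package and its log subpackage carry equivalent helpers: a timestamped line to the ginkgo writer for Logf, plus a ginkgo.Fail for Failf. A rough sketch of that shape, reconstructed from memory of the era's test/e2e/framework code rather than quoted from this diff:

package framework

import (
    "fmt"
    "time"

    "github.com/onsi/ginkgo"
)

// nowStamp returns the millisecond-resolution timestamp that prefixes
// every line these helpers emit.
func nowStamp() string {
    return time.Now().Format(time.StampMilli)
}

// Logf writes a timestamped INFO line to the ginkgo writer.
func Logf(format string, args ...interface{}) {
    fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": INFO: "+format+"\n", args...)
}

// Failf logs a timestamped FAIL line, then aborts the running spec.
func Failf(format string, args ...interface{}) {
    msg := fmt.Sprintf(format, args...)
    fmt.Fprintf(ginkgo.GinkgoWriter, "%s: FAIL: %s\n", nowStamp(), msg)
    ginkgo.Fail(nowStamp()+": "+msg, 1)
}

Because the behavior is identical, the rewrite here and in the remaining files is safe to apply file by file with no functional change.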
@@ -33,7 +33,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
@@ -73,7 +72,7 @@ func (t *dnsTestCommon) init() {
gomega.Expect(len(pods.Items)).Should(gomega.BeNumerically(">=", 1))
t.dnsPod = &pods.Items[0]
e2elog.Logf("Using DNS pod: %v", t.dnsPod.Name)
framework.Logf("Using DNS pod: %v", t.dnsPod.Name)
if strings.Contains(t.dnsPod.Name, "coredns") {
t.name = "coredns"
@@ -101,7 +100,7 @@ func (t *dnsTestCommon) checkDNSRecordFrom(name string, predicate func([]string)
})
if err != nil {
e2elog.Failf("dig result did not match: %#v after %v",
framework.Failf("dig result did not match: %#v after %v",
actual, timeout)
}
}
@@ -134,7 +133,7 @@ func (t *dnsTestCommon) runDig(dnsName, target string) []string {
CaptureStderr: true,
})
e2elog.Logf("Running dig: %v, stdout: %q, stderr: %q, err: %v",
framework.Logf("Running dig: %v, stdout: %q, stderr: %q, err: %v",
cmd, stdout, stderr, err)
if stdout == "" {
@@ -226,7 +225,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
var err error
t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.utilPod)
framework.ExpectNoError(err, "failed to create pod: %v", t.utilPod)
e2elog.Logf("Created pod %v", t.utilPod)
framework.Logf("Created pod %v", t.utilPod)
err = t.f.WaitForPodRunning(t.utilPod.Name)
framework.ExpectNoError(err, "pod failed to start running: %v", t.utilPod)
@@ -252,13 +251,13 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(t.utilService)
framework.ExpectNoError(err, "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name)
e2elog.Logf("Created service %v", t.utilService)
framework.Logf("Created service %v", t.utilService)
}
func (t *dnsTestCommon) deleteUtilPod() {
podClient := t.c.CoreV1().Pods(t.f.Namespace.Name)
if err := podClient.Delete(t.utilPod.Name, metav1.NewDeleteOptions(0)); err != nil {
e2elog.Logf("Delete of pod %v/%v failed: %v",
framework.Logf("Delete of pod %v/%v failed: %v",
t.utilPod.Namespace, t.utilPod.Name, err)
}
}
@@ -318,7 +317,7 @@ func (t *dnsTestCommon) createDNSPodFromObj(pod *v1.Pod) {
var err error
t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod)
framework.ExpectNoError(err, "failed to create pod: %v", t.dnsServerPod)
e2elog.Logf("Created pod %v", t.dnsServerPod)
framework.Logf("Created pod %v", t.dnsServerPod)
err = t.f.WaitForPodRunning(t.dnsServerPod.Name)
framework.ExpectNoError(err, "pod failed to start running: %v", t.dnsServerPod)
@@ -373,7 +372,7 @@ func (t *dnsTestCommon) createDNSServerWithPtrRecord(isIPv6 bool) {
func (t *dnsTestCommon) deleteDNSServerPod() {
podClient := t.c.CoreV1().Pods(t.f.Namespace.Name)
if err := podClient.Delete(t.dnsServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
e2elog.Logf("Delete of pod %v/%v failed: %v",
framework.Logf("Delete of pod %v/%v failed: %v",
t.utilPod.Namespace, t.dnsServerPod.Name, err)
}
}
@@ -495,7 +494,7 @@ func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookup
if len(ptrLookupIP) > 0 {
ptrLookup, err := dnsutil.ReverseAddr(ptrLookupIP)
if err != nil {
e2elog.Failf("Unable to obtain reverse IP address record from IP %s: %v", ptrLookupIP, err)
framework.Failf("Unable to obtain reverse IP address record from IP %s: %v", ptrLookupIP, err)
}
ptrRecByUDPFileName := fmt.Sprintf("%s_udp@PTR", ptrLookupIP)
ptrRecByTCPFileName := fmt.Sprintf("%s_tcp@PTR", ptrLookupIP)
@@ -542,20 +541,20 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client
if err != nil {
if ctx.Err() != nil {
e2elog.Failf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err)
framework.Failf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err)
} else {
e2elog.Logf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err)
framework.Logf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err)
}
failed = append(failed, fileName)
} else if check && strings.TrimSpace(string(contents)) != expected {
e2elog.Logf("File %s from pod %s/%s contains '%s' instead of '%s'", fileName, pod.Namespace, pod.Name, string(contents), expected)
framework.Logf("File %s from pod %s/%s contains '%s' instead of '%s'", fileName, pod.Namespace, pod.Name, string(contents), expected)
failed = append(failed, fileName)
}
}
if len(failed) == 0 {
return true, nil
}
e2elog.Logf("Lookups using %s/%s failed for: %v\n", pod.Namespace, pod.Name, failed)
framework.Logf("Lookups using %s/%s failed for: %v\n", pod.Namespace, pod.Name, failed)
return false, nil
}))
framework.ExpectEqual(len(failed), 0)
@@ -570,7 +569,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(pod); err != nil {
e2elog.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
@@ -578,7 +577,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
ginkgo.By("retrieving the pod")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
e2elog.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
// Try to find results for each expected name.
ginkgo.By("looking for the results for each expected name from probers")
@@ -586,7 +585,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)
// TODO: probe from the host, too.
e2elog.Logf("DNS probes using %s/%s succeeded\n", pod.Namespace, pod.Name)
framework.Logf("DNS probes using %s/%s succeeded\n", pod.Namespace, pod.Name)
}
func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
@@ -598,7 +597,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(pod); err != nil {
e2elog.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
framework.Failf("ginkgo.Failed to create pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
@@ -606,13 +605,13 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
ginkgo.By("retrieving the pod")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
e2elog.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
framework.Failf("ginkgo.Failed to get pod %s/%s: %v", pod.Namespace, pod.Name, err)
}
// Try to find the expected value for each expected name.
ginkgo.By("looking for the results for each expected name from probers")
assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value)
e2elog.Logf("DNS probes using %s succeeded\n", pod.Name)
framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}
func reverseArray(arr []string) []string {

@@ -27,7 +27,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/workqueue"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
testutils "k8s.io/kubernetes/test/utils"
@@ -68,7 +67,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
defer ginkgo.GinkgoRecover()
framework.ExpectNoError(testutils.CreateServiceWithRetries(f.ClientSet, services[i].Namespace, services[i]))
}
e2elog.Logf("Creating %v test services", maxServicesPerCluster)
framework.Logf("Creating %v test services", maxServicesPerCluster)
workqueue.ParallelizeUntil(context.TODO(), parallelCreateServiceWorkers, len(services), createService)
dnsTest := dnsTestCommon{
f: f,
@@ -77,7 +76,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
}
dnsTest.createUtilPodLabel("e2e-dns-scale-records")
defer dnsTest.deleteUtilPod()
e2elog.Logf("Querying %v%% of service records", checkServicePercent*100)
framework.Logf("Querying %v%% of service records", checkServicePercent*100)
for i := 0; i < len(services); i++ {
if i%(1/checkServicePercent) != 0 {
continue
@@ -86,7 +85,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
svc, err := f.ClientSet.CoreV1().Services(s.Namespace).Get(s.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
qname := fmt.Sprintf("%v.%v.svc.%v", s.Name, s.Namespace, framework.TestContext.ClusterDNSDomain)
e2elog.Logf("Querying %v expecting %v", qname, svc.Spec.ClusterIP)
framework.Logf("Querying %v expecting %v", qname, svc.Spec.ClusterIP)
dnsTest.checkDNSRecordFrom(
qname,
func(actual []string) bool {

@@ -29,7 +29,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -126,7 +125,7 @@ var _ = SIGDescribe("[Feature:IPv6DualStackAlphaFeature] [LinuxOnly]", func() {
gomega.Expect(nodeList).NotTo(gomega.BeNil())
if len(nodeList.Items) < 1 {
e2elog.Failf("Expect at least 1 node, got %v", len(nodeList.Items))
framework.Failf("Expect at least 1 node, got %v", len(nodeList.Items))
}
replicas := int32(len(nodeList.Items))
@@ -216,10 +215,10 @@ func assertNetworkConnectivity(f *framework.Framework, serverPods v1.PodList, cl
var serverIPs []string
for _, pod := range serverPods.Items {
if pod.Status.PodIPs == nil || len(pod.Status.PodIPs) != 2 {
e2elog.Failf("PodIPs list not expected value, got %v", pod.Status.PodIPs)
framework.Failf("PodIPs list not expected value, got %v", pod.Status.PodIPs)
}
if isIPv4(pod.Status.PodIPs[0].IP) == isIPv4(pod.Status.PodIPs[1].IP) {
e2elog.Failf("PodIPs should belong to different families, got %v", pod.Status.PodIPs)
framework.Failf("PodIPs should belong to different families, got %v", pod.Status.PodIPs)
}
serverIPs = append(serverIPs, pod.Status.PodIPs[0].IP, pod.Status.PodIPs[1].IP)
}

@@ -31,7 +31,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
)
@@ -109,7 +108,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
framework.ExpectNoError(err, "failed to list pods in namespace: %s", ns.Name)
err = e2epod.PodsResponding(c, ns.Name, backendPodName, false, pods)
framework.ExpectNoError(err, "waiting for all pods to respond")
e2elog.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
framework.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
err = e2eservice.WaitForServiceResponding(c, ns.Name, backendSvcName)
framework.ExpectNoError(err, "waiting for the service to respond")
@@ -128,7 +127,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
pods, err := c.CoreV1().Pods(namespaces[0].Name).List(options)
if err != nil || pods == nil || len(pods.Items) == 0 {
e2elog.Failf("no running pods found")
framework.Failf("no running pods found")
}
podName := pods.Items[0].Name

@@ -26,7 +26,6 @@ import (
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
@@ -71,14 +70,14 @@ var _ = SIGDescribe("Firewall rule", func() {
ginkgo.By("Getting cluster ID")
clusterID, err := gce.GetClusterID(cs)
framework.ExpectNoError(err)
e2elog.Logf("Got cluster ID: %v", clusterID)
framework.Logf("Got cluster ID: %v", clusterID)
jig := e2eservice.NewTestJig(cs, serviceName)
nodeList := jig.GetNodes(e2eservice.MaxNodesForEndpointsTests)
gomega.Expect(nodeList).NotTo(gomega.BeNil())
nodesNames := jig.GetNodesNames(e2eservice.MaxNodesForEndpointsTests)
if len(nodesNames) <= 0 {
e2elog.Failf("Expect at least 1 node, got: %v", nodesNames)
framework.Failf("Expect at least 1 node, got: %v", nodesNames)
}
nodesSet := sets.NewString(nodesNames...)
@@ -136,7 +135,7 @@ var _ = SIGDescribe("Firewall rule", func() {
for i, nodeName := range nodesNames {
podName := fmt.Sprintf("netexec%v", i)
e2elog.Logf("Creating netexec pod %q on node %v in namespace %q", podName, nodeName, ns)
framework.Logf("Creating netexec pod %q on node %v in namespace %q", podName, nodeName, ns)
pod := f.NewAgnhostPod(podName,
"netexec",
fmt.Sprintf("--http-port=%d", firewallTestHTTPPort),
@@ -147,10 +146,10 @@ var _ = SIGDescribe("Firewall rule", func() {
_, err := cs.CoreV1().Pods(ns).Create(pod)
framework.ExpectNoError(err)
framework.ExpectNoError(f.WaitForPodRunning(podName))
e2elog.Logf("Netexec pod %q in namespace %q running", podName, ns)
framework.Logf("Netexec pod %q in namespace %q running", podName, ns)
defer func() {
e2elog.Logf("Cleaning up the netexec pod: %v", podName)
framework.Logf("Cleaning up the netexec pod: %v", podName)
err = cs.CoreV1().Pods(ns).Delete(podName, nil)
framework.ExpectNoError(err)
}()
@@ -191,7 +190,7 @@ var _ = SIGDescribe("Firewall rule", func() {
ginkgo.It("should have correct firewall rules for e2e cluster", func() {
nodes := framework.GetReadySchedulableNodesOrDie(cs)
if len(nodes.Items) <= 0 {
e2elog.Failf("Expect at least 1 node, got: %v", len(nodes.Items))
framework.Failf("Expect at least 1 node, got: %v", len(nodes.Items))
}
ginkgo.By("Checking if e2e firewall rules are correct")
@@ -205,7 +204,7 @@ var _ = SIGDescribe("Firewall rule", func() {
ginkgo.By("Checking well known ports on master and nodes are not exposed externally")
nodeAddrs := e2enode.FirstAddress(nodes, v1.NodeExternalIP)
if len(nodeAddrs) == 0 {
e2elog.Failf("did not find any node addresses")
framework.Failf("did not find any node addresses")
}
masterAddresses := framework.GetAllMasterAddresses(cs)
@@ -222,9 +221,9 @@ var _ = SIGDescribe("Firewall rule", func() {
func assertNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) {
result := framework.PokeHTTP(ip, port, "/", &framework.HTTPPokeParams{Timeout: timeout})
if result.Status == framework.HTTPError {
e2elog.Failf("Unexpected error checking for reachability of %s:%d: %v", ip, port, result.Error)
framework.Failf("Unexpected error checking for reachability of %s:%d: %v", ip, port, result.Error)
}
if result.Code != 0 {
e2elog.Failf("Was unexpectedly able to reach %s:%d", ip, port)
framework.Failf("Was unexpectedly able to reach %s:%d", ip, port)
}
}
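The helper above reads: poke the address once with a bounded timeout, treat a transport-level error (framework.HTTPError) as a failure of the check itself, and treat any HTTP response at all (a non-zero status code) as unexpected reachability. An illustrative call in the same package, using a placeholder documentation IP rather than anything from this commit:

package network

import "time"

// exampleFirewallCheck is illustrative only: assert that a port the
// firewall is expected to block does not answer within 10 seconds.
// 203.0.113.10 is a placeholder documentation address.
func exampleFirewallCheck() {
    assertNotReachableHTTPTimeout("203.0.113.10", 8080, 10*time.Second)
}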

@@ -37,7 +37,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
"k8s.io/kubernetes/test/e2e/framework/ingress"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
@@ -176,7 +175,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
framework.ExpectNoError(err)
annotations := ing.Annotations
if annotations == nil || annotations[instanceGroupAnnotation] == "" {
e2elog.Logf("Waiting for ingress to get %s annotation. Found annotations: %v", instanceGroupAnnotation, annotations)
framework.Logf("Waiting for ingress to get %s annotation. Found annotations: %v", instanceGroupAnnotation, annotations)
return false, nil
}
return true, nil
@@ -201,7 +200,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
if annotations != nil && (annotations[umKey] != "" || annotations[fwKey] != "" ||
annotations[tpKey] != "" || annotations[fwsKey] != "" || annotations[tpsKey] != "" ||
annotations[scKey] != "" || annotations[beKey] != "") {
e2elog.Failf("unexpected annotations. Expected to not have annotations for urlmap, forwarding rule, target proxy, ssl cert and backends, got: %v", annotations)
framework.Failf("unexpected annotations. Expected to not have annotations for urlmap, forwarding rule, target proxy, ssl cert and backends, got: %v", annotations)
return true, nil
}
return false, nil
@@ -210,26 +209,26 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
// Verify that the controller does not create any other resource except instance group.
// TODO(59778): Check GCE resources specific to this ingress instead of listing all resources.
if len(gceController.ListURLMaps()) != 0 {
e2elog.Failf("unexpected url maps, expected none, got: %v", gceController.ListURLMaps())
framework.Failf("unexpected url maps, expected none, got: %v", gceController.ListURLMaps())
}
if len(gceController.ListGlobalForwardingRules()) != 0 {
e2elog.Failf("unexpected forwarding rules, expected none, got: %v", gceController.ListGlobalForwardingRules())
framework.Failf("unexpected forwarding rules, expected none, got: %v", gceController.ListGlobalForwardingRules())
}
if len(gceController.ListTargetHTTPProxies()) != 0 {
e2elog.Failf("unexpected target http proxies, expected none, got: %v", gceController.ListTargetHTTPProxies())
framework.Failf("unexpected target http proxies, expected none, got: %v", gceController.ListTargetHTTPProxies())
}
if len(gceController.ListTargetHTTPSProxies()) != 0 {
e2elog.Failf("unexpected target https proxies, expected none, got: %v", gceController.ListTargetHTTPSProxies())
framework.Failf("unexpected target https proxies, expected none, got: %v", gceController.ListTargetHTTPSProxies())
}
if len(gceController.ListSslCertificates()) != 0 {
e2elog.Failf("unexpected ssl certificates, expected none, got: %v", gceController.ListSslCertificates())
framework.Failf("unexpected ssl certificates, expected none, got: %v", gceController.ListSslCertificates())
}
if len(gceController.ListGlobalBackendServices()) != 0 {
e2elog.Failf("unexpected backend service, expected none, got: %v", gceController.ListGlobalBackendServices())
framework.Failf("unexpected backend service, expected none, got: %v", gceController.ListGlobalBackendServices())
}
// Controller does not have a list command for firewall rule. We use get instead.
if fw, err := gceController.GetFirewallRuleOrError(); err == nil {
e2elog.Failf("unexpected nil error in getting firewall rule, expected firewall NotFound, got firewall: %v", fw)
framework.Failf("unexpected nil error in getting firewall rule, expected firewall NotFound, got firewall: %v", fw)
}
// TODO(nikhiljindal): Check the instance group annotation value and verify with a multizone cluster.
@@ -304,7 +303,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
}
err = wait.Poll(5*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)); err != nil {
e2elog.Logf("ginkgo.Failed to verify IG backend service: %v", err)
framework.Logf("ginkgo.Failed to verify IG backend service: %v", err)
return false, nil
}
return true, nil
@@ -322,7 +321,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
}
err = wait.Poll(5*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)); err != nil {
e2elog.Logf("ginkgo.Failed to verify NEG backend service: %v", err)
framework.Logf("ginkgo.Failed to verify NEG backend service: %v", err)
return false, nil
}
return true, nil
@@ -360,7 +359,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
if err != nil {
return false, nil
}
e2elog.Logf("Expecting %d backends, got %d", num, res.Len())
framework.Logf("Expecting %d backends, got %d", num, res.Len())
return res.Len() == num, nil
})
framework.ExpectNoError(err)
@@ -431,11 +430,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
if res.Len() == replicas {
return true, nil
}
e2elog.Logf("Expecting %d different responses, but got %d.", replicas, res.Len())
framework.Logf("Expecting %d different responses, but got %d.", replicas, res.Len())
return false, nil
}
e2elog.Logf("Waiting for rolling update to finished. Keep sending traffic.")
framework.Logf("Waiting for rolling update to finished. Keep sending traffic.")
return false, nil
})
framework.ExpectNoError(err)
@@ -461,30 +460,30 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
v, ok := svc.Annotations[ingress.NEGStatusAnnotation]
if !ok {
// Wait for NEG sync loop to find NEGs
e2elog.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations)
framework.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations)
return false, nil
}
err = json.Unmarshal([]byte(v), &status)
if err != nil {
e2elog.Logf("Error in parsing Expose NEG annotation: %v", err)
framework.Logf("Error in parsing Expose NEG annotation: %v", err)
return false, nil
}
e2elog.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v)
framework.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v)
// Expect 2 NEGs to be created based on the test setup (neg-exposed)
if len(status.NetworkEndpointGroups) != 2 {
e2elog.Logf("Expected 2 NEGs, got %d", len(status.NetworkEndpointGroups))
framework.Logf("Expected 2 NEGs, got %d", len(status.NetworkEndpointGroups))
return false, nil
}
for _, port := range expectedKeys {
if _, ok := status.NetworkEndpointGroups[port]; !ok {
e2elog.Logf("Expected ServicePort key %v, but does not exist", port)
framework.Logf("Expected ServicePort key %v, but does not exist", port)
}
}
if len(status.NetworkEndpointGroups) != len(expectedKeys) {
e2elog.Logf("Expected length of %+v to equal length of %+v, but does not", status.NetworkEndpointGroups, expectedKeys)
framework.Logf("Expected length of %+v to equal length of %+v, but does not", status.NetworkEndpointGroups, expectedKeys)
}
gceCloud, err := gce.GetGCECloud()
@@ -493,7 +492,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
framework.ExpectNoError(err)
if len(networkEndpoints) != num {
e2elog.Logf("Expect number of endpoints to be %d, but got %d", num, len(networkEndpoints))
framework.Logf("Expect number of endpoints to be %d, but got %d", num, len(networkEndpoints))
return false, nil
}
}
@@ -662,16 +661,16 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
filePath := filepath.Join(framework.TestContext.OutputDir, "mci.yaml")
output, err := framework.RunKubemciWithKubeconfig("remove-clusters", name, "--ingress="+filePath)
if err != nil {
e2elog.Failf("unexpected error in running kubemci remove-clusters command to remove from all clusters: %s", err)
framework.Failf("unexpected error in running kubemci remove-clusters command to remove from all clusters: %s", err)
}
if !strings.Contains(output, "You should use kubemci delete to delete the ingress completely") {
e2elog.Failf("unexpected output in removing an ingress from all clusters, expected the output to include: You should use kubemci delete to delete the ingress completely, actual output: %s", output)
framework.Failf("unexpected output in removing an ingress from all clusters, expected the output to include: You should use kubemci delete to delete the ingress completely, actual output: %s", output)
}
// Verify that the ingress is still spread to 1 cluster as expected.
verifyKubemciStatusHas(name, "is spread across 1 cluster")
// remove-clusters should succeed with --force=true
if _, err := framework.RunKubemciWithKubeconfig("remove-clusters", name, "--ingress="+filePath, "--force=true"); err != nil {
e2elog.Failf("unexpected error in running kubemci remove-clusters to remove from all clusters with --force=true: %s", err)
framework.Failf("unexpected error in running kubemci remove-clusters to remove from all clusters with --force=true: %s", err)
}
verifyKubemciStatusHas(name, "is spread across 0 cluster")
})
@@ -725,7 +724,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
if framework.ProviderIs("gce", "gke") {
framework.ExpectNoError(gce.GcloudComputeResourceCreate("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID, "--allow", "tcp:80,tcp:443", "--network", framework.TestContext.CloudConfig.Network))
} else {
e2elog.Logf("WARNING: Not running on GCE/GKE, cannot create firewall rules for :80, :443. Assuming traffic can reach the external ips of all nodes in cluster on those ports.")
framework.Logf("WARNING: Not running on GCE/GKE, cannot create firewall rules for :80, :443. Assuming traffic can reach the external ips of all nodes in cluster on those ports.")
}
nginxController.Init()
@@ -765,10 +764,10 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
func verifyKubemciStatusHas(name, expectedSubStr string) {
statusStr, err := framework.RunKubemciCmd("get-status", name)
if err != nil {
e2elog.Failf("unexpected error in running kubemci get-status %s: %s", name, err)
framework.Failf("unexpected error in running kubemci get-status %s: %s", name, err)
}
if !strings.Contains(statusStr, expectedSubStr) {
e2elog.Failf("expected status to have sub string %s, actual status: %s", expectedSubStr, statusStr)
framework.Failf("expected status to have sub string %s, actual status: %s", expectedSubStr, statusStr)
}
}
@@ -790,7 +789,7 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat
ginkgo.By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName))
err := wait.Poll(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerCleanupTimeout, func() (bool, error) {
if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) {
e2elog.Logf("ginkgo.Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err)
framework.Logf("ginkgo.Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err)
return false, nil
}
return true, nil
@@ -843,7 +842,7 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ
defer func() {
ginkgo.By("Cleaning up re-encryption ingress, service and deployment")
if errs := jig.DeleteTestResource(f.ClientSet, deployCreated, svcCreated, ingCreated); len(errs) > 0 {
e2elog.Failf("ginkgo.Failed to cleanup re-encryption ingress: %v", errs)
framework.Failf("ginkgo.Failed to cleanup re-encryption ingress: %v", errs)
}
}()
framework.ExpectNoError(err, "ginkgo.Failed to create re-encryption ingress")
@@ -857,13 +856,13 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ
err = wait.PollImmediate(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "")
if err != nil {
e2elog.Logf("SimpleGET failed: %v", err)
framework.Logf("SimpleGET failed: %v", err)
return false, nil
}
if !strings.Contains(resp, "request_scheme=https") {
return false, fmt.Errorf("request wasn't served by HTTPS, response body: %s", resp)
}
e2elog.Logf("Poll succeeded, request was served by HTTPS")
framework.Logf("Poll succeeded, request was served by HTTPS")
return true, nil
})
framework.ExpectNoError(err, "ginkgo.Failed to verify backside re-encryption ingress")
@@ -880,7 +879,7 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
if negs == 0 {
err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false))
if err != nil {
e2elog.Logf("ginkgo.Failed to validate IG backend service: %v", err)
framework.Logf("ginkgo.Failed to validate IG backend service: %v", err)
return false, nil
}
return true, nil
@@ -889,19 +888,19 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
var status ingress.NegStatus
v, ok := svc.Annotations[ingress.NEGStatusAnnotation]
if !ok {
e2elog.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations)
framework.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations)
return false, nil
}
err = json.Unmarshal([]byte(v), &status)
if err != nil {
e2elog.Logf("Error in parsing Expose NEG annotation: %v", err)
framework.Logf("Error in parsing Expose NEG annotation: %v", err)
return false, nil
}
e2elog.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v)
framework.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v)
if len(status.NetworkEndpointGroups) != negs {
e2elog.Logf("Expected %d NEGs, got %d", negs, len(status.NetworkEndpointGroups))
framework.Logf("Expected %d NEGs, got %d", negs, len(status.NetworkEndpointGroups))
return false, nil
}
@@ -911,14 +910,14 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
framework.ExpectNoError(err)
if len(networkEndpoints) != 1 {
e2elog.Logf("Expect NEG %s to exist, but got %d", neg, len(networkEndpoints))
framework.Logf("Expect NEG %s to exist, but got %d", neg, len(networkEndpoints))
return false, nil
}
}
err = gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
if err != nil {
e2elog.Logf("ginkgo.Failed to validate NEG backend service: %v", err)
framework.Logf("ginkgo.Failed to validate NEG backend service: %v", err)
return false, nil
}
return true, nil

@@ -18,7 +18,6 @@ package network
import (
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/network/scale"
"github.com/onsi/ginkgo"
@@ -45,19 +44,19 @@ var _ = SIGDescribe("Loadbalancing: L7 Scalability", func() {
scaleFramework = scale.NewIngressScaleFramework(f.ClientSet, ns, framework.TestContext.CloudConfig)
if err := scaleFramework.PrepareScaleTest(); err != nil {
e2elog.Failf("Unexpected error while preparing ingress scale test: %v", err)
framework.Failf("Unexpected error while preparing ingress scale test: %v", err)
}
})
ginkgo.AfterEach(func() {
if errs := scaleFramework.CleanupScaleTest(); len(errs) != 0 {
e2elog.Failf("Unexpected error while cleaning up ingress scale test: %v", errs)
framework.Failf("Unexpected error while cleaning up ingress scale test: %v", errs)
}
})
ginkgo.It("Creating and updating ingresses should happen promptly with small/medium/large amount of ingresses", func() {
if errs := scaleFramework.RunScaleTest(); len(errs) != 0 {
e2elog.Failf("Unexpected error while running ingress scale test: %v", errs)
framework.Failf("Unexpected error while running ingress scale test: %v", errs)
}
})

@@ -28,7 +28,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
@@ -213,7 +212,7 @@ var _ = SIGDescribe("Network", func() {
const epsilonSeconds = 60
const expectedTimeoutSeconds = 60 * 60
e2elog.Logf("conntrack entry timeout was: %v, expected: %v",
framework.Logf("conntrack entry timeout was: %v, expected: %v",
timeoutSeconds, expectedTimeoutSeconds)
gomega.Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should(

@@ -24,7 +24,6 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -164,7 +163,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() {
framework.ExpectNoError(err)
// Create Server with Service in NS-B
e2elog.Logf("Waiting for server to come up.")
framework.Logf("Waiting for server to come up.")
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, podServer)
framework.ExpectNoError(err)
@@ -389,7 +388,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() {
framework.ExpectNoError(err, "Error occurred while creating namespace-b.")
// Wait for Server in namespaces-a to be ready
e2elog.Logf("Waiting for server to come up.")
framework.Logf("Waiting for server to come up.")
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, podServer)
framework.ExpectNoError(err, "Error occurred while waiting for pod status in namespace: Running.")
@@ -856,7 +855,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() {
defer func() {
ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name))
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(podClient.Name, nil); err != nil {
e2elog.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
}
}()
checkNoConnectivity(f, f.Namespace, podClient, service)
@@ -882,7 +881,7 @@ var _ = SIGDescribe("NetworkPolicy [LinuxOnly]", func() {
nsBpodServerB, nsBserviceB = createServerPodAndService(f, nsB, "ns-b-server-b", []int{80})
// Wait for Server with Service in NS-A to be ready
e2elog.Logf("Waiting for servers to come up.")
framework.Logf("Waiting for servers to come up.")
err = e2epod.WaitForPodRunningInNamespace(f.ClientSet, podServer)
framework.ExpectNoError(err, "Error occurred while waiting for pod status in namespace: Running.")
@@ -1389,7 +1388,7 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se
defer func() {
ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name))
if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil {
e2elog.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
}
}()
checkConnectivity(f, ns, podClient, service)
@@ -1401,36 +1400,36 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string,
defer func() {
ginkgo.By(fmt.Sprintf("Cleaning up the pod %s", podClient.Name))
if err := f.ClientSet.CoreV1().Pods(ns.Name).Delete(podClient.Name, nil); err != nil {
e2elog.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
framework.Failf("unable to cleanup pod %v: %v", podClient.Name, err)
}
}()
checkNoConnectivity(f, ns, podClient, service)
}
func checkConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1.Pod, service *v1.Service) {
e2elog.Logf("Waiting for %s to complete.", podClient.Name)
framework.Logf("Waiting for %s to complete.", podClient.Name)
err := e2epod.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podClient.Name, ns.Name)
framework.ExpectNoError(err, "Pod did not finish as expected.")
e2elog.Logf("Waiting for %s to complete.", podClient.Name)
framework.Logf("Waiting for %s to complete.", podClient.Name)
err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
if err != nil {
// Collect pod logs when we see a failure.
logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podClient.Name, fmt.Sprintf("%s-container", podClient.Name))
if logErr != nil {
e2elog.Failf("Error getting container logs: %s", logErr)
framework.Failf("Error getting container logs: %s", logErr)
}
// Collect current NetworkPolicies applied in the test namespace.
policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
e2elog.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
framework.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
}
// Collect the list of pods running in the test namespace.
podsInNS, err := e2epod.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
if err != nil {
e2elog.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
framework.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
}
pods := []string{}
@@ -1438,7 +1437,7 @@ func checkConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1.P
pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String()))
}
e2elog.Failf("Pod %s should be able to connect to service %s, but was not able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podClient.Name, service.Name, logs, policies.Items, pods)
framework.Failf("Pod %s should be able to connect to service %s, but was not able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t%v\n\n", podClient.Name, service.Name, logs, policies.Items, pods)
// Dump debug information for the test namespace.
framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
@@ -1446,7 +1445,7 @@ func checkConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1.P
}
func checkNoConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1.Pod, service *v1.Service) {
e2elog.Logf("Waiting for %s to complete.", podClient.Name)
framework.Logf("Waiting for %s to complete.", podClient.Name)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
// We expect an error here since it's a cannot connect test.
@@ -1455,19 +1454,19 @@ func checkNoConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1
// Collect pod logs when we see a failure.
logs, logErr := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podClient.Name, fmt.Sprintf("%s-container", podClient.Name))
if logErr != nil {
e2elog.Failf("Error getting container logs: %s", logErr)
framework.Failf("Error getting container logs: %s", logErr)
}
// Collect current NetworkPolicies applied in the test namespace.
policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
e2elog.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
framework.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
}
// Collect the list of pods running in the test namespace.
podsInNS, err := e2epod.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
if err != nil {
e2elog.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
framework.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
}
pods := []string{}
@@ -1475,7 +1474,7 @@ func checkNoConnectivity(f *framework.Framework, ns *v1.Namespace, podClient *v1
pods = append(pods, fmt.Sprintf("Pod: %s, Status: %s\n", p.Name, p.Status.String()))
}
e2elog.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t %v\n\n", podClient.Name, service.Name, logs, policies.Items, pods)
framework.Failf("Pod %s should not be able to connect to service %s, but was able to connect.\nPod logs:\n%s\n\n Current NetworkPolicies:\n\t%v\n\n Pods:\n\t %v\n\n", podClient.Name, service.Name, logs, policies.Items, pods)
// Dump debug information for the test namespace.
framework.DumpDebugInfo(f.ClientSet, f.Namespace.Name)
@@ -1543,7 +1542,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
},
})
framework.ExpectNoError(err)
e2elog.Logf("Created pod %v", pod.ObjectMeta.Name)
framework.Logf("Created pod %v", pod.ObjectMeta.Name)
svcName := fmt.Sprintf("svc-%s", podName)
ginkgo.By(fmt.Sprintf("Creating a service %s for pod %s in namespace %s", svcName, podName, namespace.Name))
@@ -1559,7 +1558,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
},
})
framework.ExpectNoError(err)
e2elog.Logf("Created service %s", svc.Name)
framework.Logf("Created service %s", svc.Name)
return pod, svc
}
@@ -1567,11 +1566,11 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
func cleanupServerPodAndService(f *framework.Framework, pod *v1.Pod, service *v1.Service) {
ginkgo.By("Cleaning up the server.")
if err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
e2elog.Failf("unable to cleanup pod %v: %v", pod.Name, err)
framework.Failf("unable to cleanup pod %v: %v", pod.Name, err)
}
ginkgo.By("Cleaning up the server's service.")
if err := f.ClientSet.CoreV1().Services(service.Namespace).Delete(service.Name, nil); err != nil {
e2elog.Failf("unable to cleanup svc %v: %v", service.Name, err)
framework.Failf("unable to cleanup svc %v: %v", service.Name, err)
}
}
@@ -1631,6 +1630,6 @@ func updateNetworkClientPodLabel(f *framework.Framework, namespace *v1.Namespace
func cleanupNetworkPolicy(f *framework.Framework, policy *networkingv1.NetworkPolicy) {
ginkgo.By("Cleaning up the policy.")
if err := f.ClientSet.NetworkingV1().NetworkPolicies(policy.Namespace).Delete(policy.Name, nil); err != nil {
e2elog.Failf("unable to cleanup policy %v: %v", policy.Name, err)
framework.Failf("unable to cleanup policy %v: %v", policy.Name, err)
}
}
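One pattern recurs throughout these files: cleanup happens in deferred closures that fail the spec if deletion errors. Distilled into a helper for illustration (deleteOrFail is a hypothetical name, not part of this commit):

package network

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
    "k8s.io/kubernetes/test/e2e/framework"
)

// deleteOrFail deletes the named pod immediately and fails the spec on
// error, mirroring the deferred cleanup blocks in the hunks above.
func deleteOrFail(c clientset.Interface, ns, name string) {
    if err := c.CoreV1().Pods(ns).Delete(name, metav1.NewDeleteOptions(0)); err != nil {
        framework.Failf("unable to cleanup pod %v: %v", name, err)
    }
}

A call site would then shrink to: defer deleteOrFail(f.ClientSet, ns.Name, pod.Name).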

@@ -29,7 +29,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
gcecloud "k8s.io/legacy-cloud-providers/gce"
@@ -54,7 +53,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
e2eservice.DescribeSvc(f.Namespace.Name)
}
for _, lb := range serviceLBNames {
e2elog.Logf("cleaning gce resource for %s", lb)
framework.Logf("cleaning gce resource for %s", lb)
framework.TestContext.CloudConfig.Provider.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
}
//reset serviceLBNames
@@ -112,12 +111,12 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
if requestedAddrName != "" {
// Release GCE static address - this is not kube-managed and will not be automatically released.
if err := gceCloud.DeleteRegionAddress(requestedAddrName, gceCloud.Region()); err != nil {
e2elog.Logf("failed to release static IP address %q: %v", requestedAddrName, err)
framework.Logf("failed to release static IP address %q: %v", requestedAddrName, err)
}
}
}()
framework.ExpectNoError(err)
e2elog.Logf("Allocated static IP to be used by the load balancer: %q", requestedIP)
framework.Logf("Allocated static IP to be used by the load balancer: %q", requestedIP)
ginkgo.By("updating the Service to use the standard tier with a requested IP")
svc = jig.UpdateServiceOrFail(ns, svc.Name, func(svc *v1.Service) {

@@ -23,7 +23,6 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
)
@@ -39,10 +38,10 @@ var _ = SIGDescribe("Networking", func() {
ginkgo.By("Executing a successful http request from the external internet")
resp, err := http.Get("http://google.com")
if err != nil {
e2elog.Failf("Unable to connect/talk to the internet: %v", err)
framework.Failf("Unable to connect/talk to the internet: %v", err)
}
if resp.StatusCode != http.StatusOK {
e2elog.Failf("Unexpected error code, expected 200, got, %v (%v)", resp.StatusCode, resp)
framework.Failf("Unexpected error code, expected 200, got, %v (%v)", resp.StatusCode, resp)
}
})
@@ -82,7 +81,7 @@ var _ = SIGDescribe("Networking", func() {
AbsPath(test.path).
DoRaw()
if err != nil {
e2elog.Failf("ginkgo.Failed: %v\nBody: %s", err, string(data))
framework.Failf("ginkgo.Failed: %v\nBody: %s", err, string(data))
}
}
})
@@ -209,13 +208,13 @@ var _ = SIGDescribe("Networking", func() {
// Check if number of endpoints returned are exactly one.
eps, err := config.GetEndpointsFromTestContainer("http", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterHTTPPort, framework.SessionAffinityChecks)
if err != nil {
e2elog.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err)
framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err)
}
if len(eps) == 0 {
e2elog.Failf("Unexpected no endpoints return")
framework.Failf("Unexpected no endpoints return")
}
if len(eps) > 1 {
e2elog.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps)
framework.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps)
}
})
@@ -226,13 +225,13 @@ var _ = SIGDescribe("Networking", func() {
// Check if number of endpoints returned are exactly one.
eps, err := config.GetEndpointsFromTestContainer("udp", config.SessionAffinityService.Spec.ClusterIP, framework.ClusterUDPPort, framework.SessionAffinityChecks)
if err != nil {
e2elog.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err)
framework.Failf("ginkgo.Failed to get endpoints from test container, error: %v", err)
}
if len(eps) == 0 {
e2elog.Failf("Unexpected no endpoints return")
framework.Failf("Unexpected no endpoints return")
}
if len(eps) > 1 {
e2elog.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps)
framework.Failf("Unexpected endpoints return: %v, expect 1 endpoints", eps)
}
})
})

@@ -25,7 +25,6 @@ import (
"github.com/onsi/ginkgo"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -86,7 +85,7 @@ func networkingIPerfTest(isIPv6 bool) {
)
if err != nil {
e2elog.Failf("Fatal error waiting for iperf server endpoint : %v", err)
framework.Failf("Fatal error waiting for iperf server endpoint : %v", err)
}
iperfClientPodLabels := f.CreatePodsPerNodeForSimpleApp(
@@ -110,8 +109,8 @@ func networkingIPerfTest(isIPv6 bool) {
numClient,
)
e2elog.Logf("Reading all perf results to stdout.")
e2elog.Logf("date,cli,cliPort,server,serverPort,id,interval,transferBits,bandwidthBits")
framework.Logf("Reading all perf results to stdout.")
framework.Logf("date,cli,cliPort,server,serverPort,id,interval,transferBits,bandwidthBits")
// Calculate expected number of clients based on total nodes.
expectedCli := func() int {
@@ -133,19 +132,19 @@ func networkingIPerfTest(isIPv6 bool) {
pods, err2 := iperfClusterVerification.WaitFor(expectedCli, iperfTimeout)
if err2 != nil {
e2elog.Failf("Error in wait...")
framework.Failf("Error in wait...")
} else if len(pods) < expectedCli {
e2elog.Failf("IPerf restuls : Only got %v out of %v, after waiting %v", len(pods), expectedCli, iperfTimeout)
framework.Failf("IPerf restuls : Only got %v out of %v, after waiting %v", len(pods), expectedCli, iperfTimeout)
} else {
// For each builds up a collection of IPerfRecords
iperfClusterVerification.ForEach(
func(p v1.Pod) {
resultS, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "iperf-client", "0-", 1*time.Second)
if err == nil {
e2elog.Logf(resultS)
framework.Logf(resultS)
iperfResults.Add(NewIPerf(resultS))
} else {
e2elog.Failf("Unexpected error, %v when running forEach on the pods.", err)
framework.Failf("Unexpected error, %v when running forEach on the pods.", err)
}
})
}
@@ -154,7 +153,7 @@ func networkingIPerfTest(isIPv6 bool) {
fmt.Println("[end] Node,Bandwidth CSV")
for ipClient, bandwidth := range iperfResults.BandwidthMap {
e2elog.Logf("%v had bandwidth %v. Ratio to expected (%v) was %f", ipClient, bandwidth, expectedBandwidth, float64(bandwidth)/float64(expectedBandwidth))
framework.Logf("%v had bandwidth %v. Ratio to expected (%v) was %f", ipClient, bandwidth, expectedBandwidth, float64(bandwidth)/float64(expectedBandwidth))
}
})
}

@@ -33,7 +33,6 @@ import (
"k8s.io/apimachinery/pkg/util/net"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -211,7 +210,7 @@ var _ = SIGDescribe("Proxy", func() {
errs = append(errs, s)
}
d := time.Since(start)
e2elog.Logf("setup took %v, starting test cases", d)
framework.Logf("setup took %v, starting test cases", d)
numberTestCases := len(expectations)
totalAttempts := numberTestCases * proxyAttempts
ginkgo.By(fmt.Sprintf("running %v cases, %v attempts per case, %v total attempts", numberTestCases, proxyAttempts, totalAttempts))
@@ -250,12 +249,12 @@ var _ = SIGDescribe("Proxy", func() {
if len(errs) != 0 {
body, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).GetLogs(pods[0].Name, &v1.PodLogOptions{}).Do().Raw()
if err != nil {
e2elog.Logf("Error getting logs for pod %s: %v", pods[0].Name, err)
framework.Logf("Error getting logs for pod %s: %v", pods[0].Name, err)
} else {
e2elog.Logf("Pod %s has the following error logs: %s", pods[0].Name, body)
framework.Logf("Pod %s has the following error logs: %s", pods[0].Name, body)
}
e2elog.Failf(strings.Join(errs, "\n"))
framework.Failf(strings.Join(errs, "\n"))
}
})
})
@@ -272,9 +271,9 @@ func doProxy(f *framework.Framework, path string, i int) (body []byte, statusCod
body, err = f.ClientSet.CoreV1().RESTClient().Get().AbsPath(path).Do().StatusCode(&statusCode).Raw()
d = time.Since(start)
if len(body) > 0 {
e2elog.Logf("(%v) %v: %s (%v; %v)", i, path, truncate(body, maxDisplayBodyLen), statusCode, d)
framework.Logf("(%v) %v: %s (%v; %v)", i, path, truncate(body, maxDisplayBodyLen), statusCode, d)
} else {
e2elog.Logf("%v: %s (%v; %v)", path, "no body", statusCode, d)
framework.Logf("%v: %s (%v; %v)", path, "no body", statusCode, d)
}
return
}
@@ -306,7 +305,7 @@ func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) {
for i := 0; i < proxyAttempts; i++ {
_, status, d, err := doProxy(f, prefix+node+nodeDest, i)
if status == http.StatusServiceUnavailable {
e2elog.Logf("ginkgo.Failed proxying node logs due to service unavailable: %v", err)
framework.Logf("ginkgo.Failed proxying node logs due to service unavailable: %v", err)
time.Sleep(time.Second)
serviceUnavailableErrors++
} else {
@@ -316,7 +315,7 @@ func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) {
}
}
if serviceUnavailableErrors > 0 {
e2elog.Logf("error: %d requests to proxy node logs failed", serviceUnavailableErrors)
framework.Logf("error: %d requests to proxy node logs failed", serviceUnavailableErrors)
}
maxFailures := int(math.Floor(0.1 * float64(proxyAttempts)))
gomega.Expect(serviceUnavailableErrors).To(gomega.BeNumerically("<", maxFailures))
@@ -329,12 +328,12 @@ func waitForEndpoint(c clientset.Interface, ns, name string) error {
for t := time.Now(); time.Since(t) < registerTimeout; time.Sleep(framework.Poll) {
endpoint, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{})
if errors.IsNotFound(err) {
e2elog.Logf("Endpoint %s/%s is not ready yet", ns, name)
framework.Logf("Endpoint %s/%s is not ready yet", ns, name)
continue
}
framework.ExpectNoError(err, "Failed to get endpoints for %s/%s", ns, name)
if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 {
e2elog.Logf("Endpoint %s/%s is not ready yet", ns, name)
framework.Logf("Endpoint %s/%s is not ready yet", ns, name)
continue
}
return nil

@@ -40,7 +40,6 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
@ -98,7 +97,7 @@ var _ = SIGDescribe("Services", func() {
e2eservice.DescribeSvc(f.Namespace.Name)
}
for _, lb := range serviceLBNames {
e2elog.Logf("cleaning load balancer resource for %s", lb)
framework.Logf("cleaning load balancer resource for %s", lb)
e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
}
//reset serviceLBNames
@ -276,7 +275,7 @@ var _ = SIGDescribe("Services", func() {
framework.Skipf("The test doesn't work with kube-proxy in userspace mode")
}
} else {
e2elog.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err)
framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err)
}
serviceName := "sourceip-test"
@ -288,12 +287,12 @@ var _ = SIGDescribe("Services", func() {
tcpService := jig.CreateTCPServiceWithPort(ns, nil, int32(servicePort))
jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP)
defer func() {
e2elog.Logf("Cleaning up the sourceip test service")
framework.Logf("Cleaning up the sourceip test service")
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}()
serviceIP := tcpService.Spec.ClusterIP
e2elog.Logf("sourceip-test cluster ip: %s", serviceIP)
framework.Logf("sourceip-test cluster ip: %s", serviceIP)
ginkgo.By("Picking 2 Nodes to test whether source IP is preserved or not")
nodes := jig.GetNodes(2)
@ -310,7 +309,7 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err)
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
defer func() {
e2elog.Logf("Cleaning up the echo server pod")
framework.Logf("Cleaning up the echo server pod")
err := cs.CoreV1().Pods(ns).Delete(serverPodName, nil)
framework.ExpectNoError(err, "failed to delete pod: %s on node", serverPodName)
}()
@ -323,7 +322,7 @@ var _ = SIGDescribe("Services", func() {
deployment := createPausePodDeployment(cs, "pause-pod", ns, nodeCounts)
defer func() {
e2elog.Logf("Deleting deployment")
framework.Logf("Deleting deployment")
err = cs.AppsV1().Deployments(ns).Delete(deployment.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name)
}()
@ -399,7 +398,7 @@ var _ = SIGDescribe("Services", func() {
hosts, err := e2essh.NodeSSHHosts(cs)
framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
if len(hosts) == 0 {
e2elog.Failf("No ssh-able nodes")
framework.Failf("No ssh-able nodes")
}
host := hosts[0]
@ -424,7 +423,7 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc3, ns)
if svc2IP == svc3IP {
e2elog.Failf("service IPs conflict: %v", svc2IP)
framework.Failf("service IPs conflict: %v", svc2IP)
}
ginkgo.By("verifying service " + svc2 + " is still up")
@ -458,13 +457,13 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns)
if svc1IP == svc2IP {
e2elog.Failf("VIPs conflict: %v", svc1IP)
framework.Failf("VIPs conflict: %v", svc1IP)
}
hosts, err := e2essh.NodeSSHHosts(cs)
framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
if len(hosts) == 0 {
e2elog.Failf("No ssh-able nodes")
framework.Failf("No ssh-able nodes")
}
host := hosts[0]
@ -473,7 +472,7 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By(fmt.Sprintf("Restarting kube-proxy on %v", host))
if err := framework.RestartKubeProxy(host); err != nil {
e2elog.Failf("error restarting kube-proxy: %v", err)
framework.Failf("error restarting kube-proxy: %v", err)
}
framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
@ -485,7 +484,7 @@ var _ = SIGDescribe("Services", func() {
sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, framework.TestContext.Provider)
if err != nil || result.Code != 0 {
e2essh.LogResult(result)
e2elog.Failf("couldn't remove iptable rules: %v", err)
framework.Failf("couldn't remove iptable rules: %v", err)
}
framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
@ -511,7 +510,7 @@ var _ = SIGDescribe("Services", func() {
hosts, err := e2essh.NodeSSHHosts(cs)
framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
if len(hosts) == 0 {
e2elog.Failf("No ssh-able nodes")
framework.Failf("No ssh-able nodes")
}
host := hosts[0]
@ -520,11 +519,11 @@ var _ = SIGDescribe("Services", func() {
// Restart apiserver
ginkgo.By("Restarting apiserver")
if err := framework.RestartApiserver(cs); err != nil {
e2elog.Failf("error restarting apiserver: %v", err)
framework.Failf("error restarting apiserver: %v", err)
}
ginkgo.By("Waiting for apiserver to come up by polling /healthz")
if err := framework.WaitForApiserverUp(cs); err != nil {
e2elog.Failf("error while waiting for apiserver up: %v", err)
framework.Failf("error while waiting for apiserver up: %v", err)
}
framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
@ -536,7 +535,7 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to create replication controller with service: %s in the namespace: %s", svc2, ns)
if svc1IP == svc2IP {
e2elog.Failf("VIPs conflict: %v", svc1IP)
framework.Failf("VIPs conflict: %v", svc1IP)
}
framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
framework.ExpectNoError(e2eservice.VerifyServeHostnameServiceUp(cs, ns, host, podNames2, svc2IP, servicePort))
@ -588,18 +587,18 @@ var _ = SIGDescribe("Services", func() {
serviceName := "mutability-test"
ns1 := f.Namespace.Name // LB1 in ns1 on TCP
e2elog.Logf("namespace for TCP test: %s", ns1)
framework.Logf("namespace for TCP test: %s", ns1)
ginkgo.By("creating a second namespace")
namespacePtr, err := f.CreateNamespace("services", nil)
framework.ExpectNoError(err, "failed to create namespace")
ns2 := namespacePtr.Name // LB2 in ns2 on UDP
e2elog.Logf("namespace for UDP test: %s", ns2)
framework.Logf("namespace for UDP test: %s", ns2)
jig := e2eservice.NewTestJig(cs, serviceName)
nodeIP, err := e2enode.PickIP(jig.Client) // for later
if err != nil {
e2elog.Logf("Unexpected error occurred: %v", err)
framework.Logf("Unexpected error occurred: %v", err)
}
// TODO: write a wrapper for ExpectNoErrorWithOffset()
framework.ExpectNoErrorWithOffset(0, err)
@ -617,10 +616,10 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By("verifying that TCP and UDP use the same port")
if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port {
e2elog.Failf("expected to use the same port for TCP and UDP")
framework.Failf("expected to use the same port for TCP and UDP")
}
svcPort := int(tcpService.Spec.Ports[0].Port)
e2elog.Logf("service port (TCP and UDP): %d", svcPort)
framework.Logf("service port (TCP and UDP): %d", svcPort)
ginkgo.By("creating a pod to be part of the TCP service " + serviceName)
jig.RunOrFail(ns1, nil)
@ -636,7 +635,7 @@ var _ = SIGDescribe("Services", func() {
})
jig.SanityCheckService(tcpService, v1.ServiceTypeNodePort)
tcpNodePort := int(tcpService.Spec.Ports[0].NodePort)
e2elog.Logf("TCP node port: %d", tcpNodePort)
framework.Logf("TCP node port: %d", tcpNodePort)
ginkgo.By("changing the UDP service to type=NodePort")
udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) {
@ -644,7 +643,7 @@ var _ = SIGDescribe("Services", func() {
})
jig.SanityCheckService(udpService, v1.ServiceTypeNodePort)
udpNodePort := int(udpService.Spec.Ports[0].NodePort)
e2elog.Logf("UDP node port: %d", udpNodePort)
framework.Logf("UDP node port: %d", udpNodePort)
ginkgo.By("hitting the TCP service's NodePort")
e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
@ -669,7 +668,7 @@ var _ = SIGDescribe("Services", func() {
if staticIPName != "" {
// Release GCE static IP - this is not kube-managed and will not be automatically released.
if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
e2elog.Logf("failed to release static IP %s: %v", staticIPName, err)
framework.Logf("failed to release static IP %s: %v", staticIPName, err)
}
}
}()
@ -678,7 +677,7 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to get region address: %s", staticIPName)
requestedIP = reservedAddr.Address
e2elog.Logf("Allocated static load balancer IP: %s", requestedIP)
framework.Logf("Allocated static load balancer IP: %s", requestedIP)
}
ginkgo.By("changing the TCP service to type=LoadBalancer")
@ -703,13 +702,13 @@ var _ = SIGDescribe("Services", func() {
tcpService = jig.WaitForLoadBalancerOrFail(ns1, tcpService.Name, loadBalancerCreateTimeout)
jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
e2elog.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort)
framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort)
}
if requestedIP != "" && e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != requestedIP {
e2elog.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
}
tcpIngressIP := e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
e2elog.Logf("TCP load balancer: %s", tcpIngressIP)
framework.Logf("TCP load balancer: %s", tcpIngressIP)
if framework.ProviderIs("gce", "gke") {
// Do this as early as possible, which overrides the `defer` above.
@ -723,7 +722,7 @@ var _ = SIGDescribe("Services", func() {
// Deleting it after it is attached "demotes" it to an
// ephemeral IP, which can be auto-released.
if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
e2elog.Failf("failed to release static IP %s: %v", staticIPName, err)
framework.Failf("failed to release static IP %s: %v", staticIPName, err)
}
staticIPName = ""
}
@ -736,14 +735,14 @@ var _ = SIGDescribe("Services", func() {
udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name, loadBalancerCreateTimeout)
jig.SanityCheckService(udpService, v1.ServiceTypeLoadBalancer)
if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
e2elog.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
}
udpIngressIP = e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
e2elog.Logf("UDP load balancer: %s", udpIngressIP)
framework.Logf("UDP load balancer: %s", udpIngressIP)
ginkgo.By("verifying that TCP and UDP use different load balancers")
if tcpIngressIP == udpIngressIP {
e2elog.Failf("Load balancers are not different: %s", e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
framework.Failf("Load balancers are not different: %s", e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
}
}
@ -769,12 +768,12 @@ var _ = SIGDescribe("Services", func() {
tcpNodePortOld := tcpNodePort
tcpNodePort = int(tcpService.Spec.Ports[0].NodePort)
if tcpNodePort == tcpNodePortOld {
e2elog.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort)
framework.Failf("TCP Spec.Ports[0].NodePort (%d) did not change", tcpNodePort)
}
if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
e2elog.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
}
e2elog.Logf("TCP node port: %d", tcpNodePort)
framework.Logf("TCP node port: %d", tcpNodePort)
ginkgo.By("changing the UDP service's NodePort")
udpService = jig.ChangeServiceNodePortOrFail(ns2, udpService.Name, udpNodePort)
@ -786,12 +785,12 @@ var _ = SIGDescribe("Services", func() {
udpNodePortOld := udpNodePort
udpNodePort = int(udpService.Spec.Ports[0].NodePort)
if udpNodePort == udpNodePortOld {
e2elog.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort)
framework.Failf("UDP Spec.Ports[0].NodePort (%d) did not change", udpNodePort)
}
if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
e2elog.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
}
e2elog.Logf("UDP node port: %d", udpNodePort)
framework.Logf("UDP node port: %d", udpNodePort)
ginkgo.By("hitting the TCP service's new NodePort")
e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
@ -823,13 +822,13 @@ var _ = SIGDescribe("Services", func() {
svcPortOld := svcPort
svcPort = int(tcpService.Spec.Ports[0].Port)
if svcPort == svcPortOld {
e2elog.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort)
framework.Failf("TCP Spec.Ports[0].Port (%d) did not change", svcPort)
}
if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
e2elog.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort)
framework.Failf("TCP Spec.Ports[0].NodePort (%d) changed", tcpService.Spec.Ports[0].NodePort)
}
if e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
e2elog.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, e2eservice.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
}
ginkgo.By("changing the UDP service's port")
@ -842,16 +841,16 @@ var _ = SIGDescribe("Services", func() {
jig.SanityCheckService(udpService, v1.ServiceTypeNodePort)
}
if int(udpService.Spec.Ports[0].Port) != svcPort {
e2elog.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port)
framework.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port)
}
if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
e2elog.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort)
framework.Failf("UDP Spec.Ports[0].NodePort (%d) changed", udpService.Spec.Ports[0].NodePort)
}
if loadBalancerSupportsUDP && e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
e2elog.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, e2eservice.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
}
e2elog.Logf("service port (TCP and UDP): %d", svcPort)
framework.Logf("service port (TCP and UDP): %d", svcPort)
ginkgo.By("hitting the TCP service's NodePort")
e2eservice.TestReachableHTTP(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout)
@ -954,13 +953,13 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns)
tcpService := jig.CreateTCPServiceOrFail(ns, nil)
defer func() {
e2elog.Logf("Cleaning up the updating NodePorts test service")
framework.Logf("Cleaning up the updating NodePorts test service")
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
framework.ExpectNoError(err, "failed to delete service: %s in namespace: %s", serviceName, ns)
}()
jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP)
svcPort := int(tcpService.Spec.Ports[0].Port)
e2elog.Logf("service port TCP: %d", svcPort)
framework.Logf("service port TCP: %d", svcPort)
// Change the services to NodePort and add a UDP port.
ginkgo.By("changing the TCP service to type=NodePort and add a UDP port")
@ -981,14 +980,14 @@ var _ = SIGDescribe("Services", func() {
})
jig.SanityCheckService(newService, v1.ServiceTypeNodePort)
if len(newService.Spec.Ports) != 2 {
e2elog.Failf("new service should have two Ports")
framework.Failf("new service should have two Ports")
}
for _, port := range newService.Spec.Ports {
if port.NodePort == 0 {
e2elog.Failf("new service failed to allocate NodePort for Port %s", port.Name)
framework.Failf("new service failed to allocate NodePort for Port %s", port.Name)
}
e2elog.Logf("new service allocates NodePort %d for Port %s", port.NodePort, port.Name)
framework.Logf("new service allocates NodePort %d for Port %s", port.NodePort, port.Name)
}
})
@ -1007,7 +1006,7 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns)
externalNameService := jig.CreateExternalNameServiceOrFail(ns, nil)
defer func() {
e2elog.Logf("Cleaning up the ExternalName to ClusterIP test service")
framework.Logf("Cleaning up the ExternalName to ClusterIP test service")
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
}()
@ -1043,7 +1042,7 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns)
externalNameService := jig.CreateExternalNameServiceOrFail(ns, nil)
defer func() {
e2elog.Logf("Cleaning up the ExternalName to NodePort test service")
framework.Logf("Cleaning up the ExternalName to NodePort test service")
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
}()
@ -1078,7 +1077,7 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By("creating a service " + serviceName + " with the type=ClusterIP in namespace " + ns)
clusterIPService := jig.CreateTCPServiceOrFail(ns, nil)
defer func() {
e2elog.Logf("Cleaning up the ClusterIP to ExternalName test service")
framework.Logf("Cleaning up the ClusterIP to ExternalName test service")
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
}()
@ -1118,7 +1117,7 @@ var _ = SIGDescribe("Services", func() {
svc.Spec.Type = v1.ServiceTypeNodePort
})
defer func() {
e2elog.Logf("Cleaning up the NodePort to ExternalName test service")
framework.Logf("Cleaning up the NodePort to ExternalName test service")
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
framework.ExpectNoError(err, "failed to delete service %s in namespace %s", serviceName, ns)
}()
@ -1158,7 +1157,7 @@ var _ = SIGDescribe("Services", func() {
defer ginkgo.GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
e2elog.Failf("errors in cleanup: %v", errs)
framework.Failf("errors in cleanup: %v", errs)
}
}()
@ -1189,10 +1188,10 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
if len(result.Spec.Ports) != 2 {
e2elog.Failf("got unexpected len(Spec.Ports) for new service: %v", result)
framework.Failf("got unexpected len(Spec.Ports) for new service: %v", result)
}
if result.Spec.Ports[0].NodePort != result.Spec.Ports[1].NodePort {
e2elog.Failf("should use same NodePort for new service: %v", result)
framework.Failf("should use same NodePort for new service: %v", result)
}
})
@ -1208,7 +1207,7 @@ var _ = SIGDescribe("Services", func() {
defer ginkgo.GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
e2elog.Failf("errors in cleanup: %v", errs)
framework.Failf("errors in cleanup: %v", errs)
}
}()
@ -1219,14 +1218,14 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName1, ns)
if result.Spec.Type != v1.ServiceTypeNodePort {
e2elog.Failf("got unexpected Spec.Type for new service: %v", result)
framework.Failf("got unexpected Spec.Type for new service: %v", result)
}
if len(result.Spec.Ports) != 1 {
e2elog.Failf("got unexpected len(Spec.Ports) for new service: %v", result)
framework.Failf("got unexpected len(Spec.Ports) for new service: %v", result)
}
port := result.Spec.Ports[0]
if port.NodePort == 0 {
e2elog.Failf("got unexpected Spec.Ports[0].NodePort for new service: %v", result)
framework.Failf("got unexpected Spec.Ports[0].NodePort for new service: %v", result)
}
ginkgo.By("creating service " + serviceName2 + " with conflicting NodePort")
@ -1236,7 +1235,7 @@ var _ = SIGDescribe("Services", func() {
service2.Spec.Ports[0].NodePort = port.NodePort
result2, err := t.CreateService(service2)
if err == nil {
e2elog.Failf("Created service with conflicting NodePort: %v", result2)
framework.Failf("Created service with conflicting NodePort: %v", result2)
}
expectedErr := fmt.Sprintf("%d.*port is already allocated", port.NodePort)
gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr))
@ -1260,7 +1259,7 @@ var _ = SIGDescribe("Services", func() {
defer ginkgo.GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
e2elog.Failf("errors in cleanup: %v", errs)
framework.Failf("errors in cleanup: %v", errs)
}
}()
@ -1272,17 +1271,17 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
if service.Spec.Type != v1.ServiceTypeNodePort {
e2elog.Failf("got unexpected Spec.Type for new service: %v", service)
framework.Failf("got unexpected Spec.Type for new service: %v", service)
}
if len(service.Spec.Ports) != 1 {
e2elog.Failf("got unexpected len(Spec.Ports) for new service: %v", service)
framework.Failf("got unexpected len(Spec.Ports) for new service: %v", service)
}
port := service.Spec.Ports[0]
if port.NodePort == 0 {
e2elog.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
}
if !e2eservice.NodePortRange.Contains(int(port.NodePort)) {
e2elog.Failf("got unexpected (out-of-range) port for new service: %v", service)
framework.Failf("got unexpected (out-of-range) port for new service: %v", service)
}
outOfRangeNodePort := 0
@ -1297,7 +1296,7 @@ var _ = SIGDescribe("Services", func() {
s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort)
})
if err == nil {
e2elog.Failf("failed to prevent update of service with out-of-range NodePort: %v", result)
framework.Failf("failed to prevent update of service with out-of-range NodePort: %v", result)
}
expectedErr := fmt.Sprintf("%d.*port is not in the valid range", outOfRangeNodePort)
gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr))
@ -1312,7 +1311,7 @@ var _ = SIGDescribe("Services", func() {
service.Spec.Ports[0].NodePort = int32(outOfRangeNodePort)
service, err = t.CreateService(service)
if err == nil {
e2elog.Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service)
framework.Failf("failed to prevent create of service with out-of-range NodePort (%d): %v", outOfRangeNodePort, service)
}
gomega.Expect(fmt.Sprintf("%v", err)).To(gomega.MatchRegexp(expectedErr))
})
@ -1327,7 +1326,7 @@ var _ = SIGDescribe("Services", func() {
defer ginkgo.GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
e2elog.Failf("errors in cleanup: %v", errs)
framework.Failf("errors in cleanup: %v", errs)
}
}()
@ -1339,17 +1338,17 @@ var _ = SIGDescribe("Services", func() {
framework.ExpectNoError(err, "failed to create service: %s in namespace: %s", serviceName, ns)
if service.Spec.Type != v1.ServiceTypeNodePort {
e2elog.Failf("got unexpected Spec.Type for new service: %v", service)
framework.Failf("got unexpected Spec.Type for new service: %v", service)
}
if len(service.Spec.Ports) != 1 {
e2elog.Failf("got unexpected len(Spec.Ports) for new service: %v", service)
framework.Failf("got unexpected len(Spec.Ports) for new service: %v", service)
}
port := service.Spec.Ports[0]
if port.NodePort == 0 {
e2elog.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
framework.Failf("got unexpected Spec.Ports[0].nodePort for new service: %v", service)
}
if !e2eservice.NodePortRange.Contains(int(port.NodePort)) {
e2elog.Failf("got unexpected (out-of-range) port for new service: %v", service)
framework.Failf("got unexpected (out-of-range) port for new service: %v", service)
}
nodePort := port.NodePort
@ -1364,12 +1363,12 @@ var _ = SIGDescribe("Services", func() {
var err error
stdout, err = framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
if err != nil {
e2elog.Logf("expected node port (%d) to not be in use, stdout: %v", nodePort, stdout)
framework.Logf("expected node port (%d) to not be in use, stdout: %v", nodePort, stdout)
return false, nil
}
return true, nil
}); pollErr != nil {
e2elog.Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, e2eservice.KubeProxyLagTimeout, stdout)
framework.Failf("expected node port (%d) to not be in use in %v, stdout: %v", nodePort, e2eservice.KubeProxyLagTimeout, stdout)
}
ginkgo.By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort))
@ -1389,7 +1388,7 @@ var _ = SIGDescribe("Services", func() {
defer ginkgo.GinkgoRecover()
errs := t.Cleanup()
if len(errs) != 0 {
e2elog.Failf("errors in cleanup: %v", errs)
framework.Failf("errors in cleanup: %v", errs)
}
}()
@ -1457,12 +1456,12 @@ var _ = SIGDescribe("Services", func() {
var err error
stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd)
if err != nil {
e2elog.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
return false, nil
}
return true, nil
}); pollErr != nil {
e2elog.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, e2eservice.KubeProxyLagTimeout, stdout)
framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, e2eservice.KubeProxyLagTimeout, stdout)
}
ginkgo.By("Scaling down replication controller to zero")
@ -1480,12 +1479,12 @@ var _ = SIGDescribe("Services", func() {
var err error
stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd)
if err != nil {
e2elog.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
return false, nil
}
return true, nil
}); pollErr != nil {
e2elog.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, e2eservice.KubeProxyLagTimeout, stdout)
framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, e2eservice.KubeProxyLagTimeout, stdout)
}
ginkgo.By("Update service to tolerate unready services again")
@ -1500,12 +1499,12 @@ var _ = SIGDescribe("Services", func() {
var err error
stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd)
if err != nil {
e2elog.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
return false, nil
}
return true, nil
}); pollErr != nil {
e2elog.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, e2eservice.KubeProxyLagTimeout, stdout)
framework.Failf("expected un-ready endpoint for Service %v within %v, stdout: %v", t.Name, e2eservice.KubeProxyLagTimeout, stdout)
}
ginkgo.By("Remove pods immediately")
@ -1514,13 +1513,13 @@ var _ = SIGDescribe("Services", func() {
podClient := t.Client.CoreV1().Pods(f.Namespace.Name)
pods, err := podClient.List(options)
if err != nil {
e2elog.Logf("warning: error retrieving pods: %s", err)
framework.Logf("warning: error retrieving pods: %s", err)
} else {
for _, pod := range pods.Items {
var gracePeriodSeconds int64 = 0
err := podClient.Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds})
if err != nil {
e2elog.Logf("warning: error force deleting pod '%s': %s", pod.Name, err)
framework.Logf("warning: error force deleting pod '%s': %s", pod.Name, err)
}
}
}
@ -1653,35 +1652,35 @@ var _ = SIGDescribe("Services", func() {
// ILBs are not accessible from the test orchestrator, so it's necessary to use
// a pod to test the service.
ginkgo.By("hitting the internal load balancer from pod")
e2elog.Logf("creating pod with host network")
framework.Logf("creating pod with host network")
hostExec := e2epod.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec")
e2elog.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName)
framework.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName)
tcpIngressIP := e2eservice.GetIngressPoint(lbIngress)
if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
cmd := fmt.Sprintf(`curl -m 5 'http://%v:%v/echo?msg=hello'`, tcpIngressIP, svcPort)
stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
if err != nil {
e2elog.Logf("error curling; stdout: %v. err: %v", stdout, err)
framework.Logf("error curling; stdout: %v. err: %v", stdout, err)
return false, nil
}
if !strings.Contains(stdout, "hello") {
e2elog.Logf("Expected output to contain 'hello', got %q; retrying...", stdout)
framework.Logf("Expected output to contain 'hello', got %q; retrying...", stdout)
return false, nil
}
e2elog.Logf("Successful curl; stdout: %v", stdout)
framework.Logf("Successful curl; stdout: %v", stdout)
return true, nil
}); pollErr != nil {
e2elog.Failf("ginkgo.Failed to hit ILB IP, err: %v", pollErr)
framework.Failf("ginkgo.Failed to hit ILB IP, err: %v", pollErr)
}
ginkgo.By("switching to external type LoadBalancer")
svc = jig.UpdateServiceOrFail(namespace, serviceName, func(svc *v1.Service) {
disableILB(svc)
})
e2elog.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName)
framework.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName)
if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
svc, err := jig.Client.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
@ -1690,14 +1689,14 @@ var _ = SIGDescribe("Services", func() {
lbIngress = &svc.Status.LoadBalancer.Ingress[0]
return !isInternalEndpoint(lbIngress), nil
}); pollErr != nil {
e2elog.Failf("Loadbalancer IP not changed to external.")
framework.Failf("Loadbalancer IP not changed to external.")
}
// should have an external IP.
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
gomega.Expect(isInternalEndpoint(lbIngress)).To(gomega.BeFalse())
ginkgo.By("hitting the external load balancer")
e2elog.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName)
framework.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName)
tcpIngressIP = e2eservice.GetIngressPoint(lbIngress)
e2eservice.TestReachableHTTP(tcpIngressIP, svcPort, e2eservice.LoadBalancerLagTimeoutDefault)
@ -1710,7 +1709,7 @@ var _ = SIGDescribe("Services", func() {
svc.Spec.LoadBalancerIP = internalStaticIP
enableILB(svc)
})
e2elog.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName)
framework.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName)
if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
svc, err := jig.Client.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil {
@ -1719,7 +1718,7 @@ var _ = SIGDescribe("Services", func() {
lbIngress = &svc.Status.LoadBalancer.Ingress[0]
return isInternalEndpoint(lbIngress), nil
}); pollErr != nil {
e2elog.Failf("Loadbalancer IP not changed to internal.")
framework.Failf("Loadbalancer IP not changed to internal.")
}
// should have the given static internal IP.
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
@ -1739,11 +1738,11 @@ var _ = SIGDescribe("Services", func() {
clusterID, err := gce.GetClusterID(cs)
if err != nil {
e2elog.Failf("framework.GetClusterID(cs) = _, %v; want nil", err)
framework.Failf("framework.GetClusterID(cs) = _, %v; want nil", err)
}
gceCloud, err := gce.GetGCECloud()
if err != nil {
e2elog.Failf("framework.GetGCECloud() = _, %v; want nil", err)
framework.Failf("framework.GetGCECloud() = _, %v; want nil", err)
}
namespace := f.Namespace.Name
@ -1766,22 +1765,22 @@ var _ = SIGDescribe("Services", func() {
hcName := gcecloud.MakeNodesHealthCheckName(clusterID)
hc, err := gceCloud.GetHTTPHealthCheck(hcName)
if err != nil {
e2elog.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err)
framework.Failf("gceCloud.GetHttpHealthCheck(%q) = _, %v; want nil", hcName, err)
}
framework.ExpectEqual(hc.CheckIntervalSec, gceHcCheckIntervalSeconds)
ginkgo.By("modify the health check interval")
hc.CheckIntervalSec = gceHcCheckIntervalSeconds - 1
if err = gceCloud.UpdateHTTPHealthCheck(hc); err != nil {
e2elog.Failf("gcecloud.UpdateHttpHealthCheck(%#v) = %v; want nil", hc, err)
framework.Failf("gcecloud.UpdateHttpHealthCheck(%#v) = %v; want nil", hc, err)
}
ginkgo.By("restart kube-controller-manager")
if err := framework.RestartControllerManager(); err != nil {
e2elog.Failf("framework.RestartControllerManager() = %v; want nil", err)
framework.Failf("framework.RestartControllerManager() = %v; want nil", err)
}
if err := framework.WaitForControllerManagerUp(); err != nil {
e2elog.Failf("framework.WaitForControllerManagerUp() = %v; want nil", err)
framework.Failf("framework.WaitForControllerManagerUp() = %v; want nil", err)
}
ginkgo.By("health check should be reconciled")
@ -1789,13 +1788,13 @@ var _ = SIGDescribe("Services", func() {
if pollErr := wait.PollImmediate(pollInterval, e2eservice.LoadBalancerCreateTimeoutDefault, func() (bool, error) {
hc, err := gceCloud.GetHTTPHealthCheck(hcName)
if err != nil {
e2elog.Logf("ginkgo.Failed to get HttpHealthCheck(%q): %v", hcName, err)
framework.Logf("ginkgo.Failed to get HttpHealthCheck(%q): %v", hcName, err)
return false, err
}
e2elog.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec)
framework.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec)
return hc.CheckIntervalSec == gceHcCheckIntervalSeconds, nil
}); pollErr != nil {
e2elog.Failf("Health check %q does not reconcile its check interval to %d.", hcName, gceHcCheckIntervalSeconds)
framework.Failf("Health check %q does not reconcile its check interval to %d.", hcName, gceHcCheckIntervalSeconds)
}
})
@ -1898,7 +1897,7 @@ var _ = SIGDescribe("Services", func() {
hosts, err := e2essh.NodeSSHHosts(cs)
framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
if len(hosts) == 0 {
e2elog.Failf("No ssh-able nodes")
framework.Failf("No ssh-able nodes")
}
host := hosts[0]
@ -1960,7 +1959,7 @@ var _ = SIGDescribe("Services", func() {
hosts, err := e2essh.NodeSSHHosts(cs)
framework.ExpectNoError(err, "failed to find external/internal IPs for every node")
if len(hosts) == 0 {
e2elog.Failf("No ssh-able nodes")
framework.Failf("No ssh-able nodes")
}
host := hosts[0]
@ -2007,7 +2006,7 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By("creating a service with no endpoints")
_, err := jig.CreateServiceWithServicePort(labels, namespace, ports)
if err != nil {
e2elog.Failf("ginkgo.Failed to create service: %v", err)
framework.Failf("ginkgo.Failed to create service: %v", err)
}
nodeName := nodes.Items[0].Name
@ -2019,7 +2018,7 @@ var _ = SIGDescribe("Services", func() {
})
serviceAddress := net.JoinHostPort(serviceName, strconv.Itoa(port))
e2elog.Logf("waiting up to %v to connect to %v", e2eservice.KubeProxyEndpointLagTimeout, serviceAddress)
framework.Logf("waiting up to %v to connect to %v", e2eservice.KubeProxyEndpointLagTimeout, serviceAddress)
cmd := fmt.Sprintf("/agnhost connect --timeout=3s %s", serviceAddress)
ginkgo.By(fmt.Sprintf("hitting service %v from pod %v on node %v", serviceAddress, podName, nodeName))
@ -2029,10 +2028,10 @@ var _ = SIGDescribe("Services", func() {
if err != nil {
if strings.Contains(err.Error(), expectedErr) {
e2elog.Logf("error contained '%s', as expected: %s", expectedErr, err.Error())
framework.Logf("error contained '%s', as expected: %s", expectedErr, err.Error())
return true, nil
}
e2elog.Logf("error didn't contain '%s', keep trying: %s", expectedErr, err.Error())
framework.Logf("error didn't contain '%s', keep trying: %s", expectedErr, err.Error())
return false, nil
}
return true, errors.New("expected connect call to fail")
@ -2099,7 +2098,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
e2eservice.DescribeSvc(f.Namespace.Name)
}
for _, lb := range serviceLBNames {
e2elog.Logf("cleaning load balancer resource for %s", lb)
framework.Logf("cleaning load balancer resource for %s", lb)
e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
}
//reset serviceLBNames
@ -2115,7 +2114,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
serviceLBNames = append(serviceLBNames, cloudprovider.DefaultLoadBalancerName(svc))
healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
if healthCheckNodePort == 0 {
e2elog.Failf("Service HealthCheck NodePort was not allocated")
framework.Failf("Service HealthCheck NodePort was not allocated")
}
defer func() {
jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
@ -2136,11 +2135,11 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
ginkgo.By("reading clientIP using the TCP service's service port via its external VIP")
content := e2eservice.GetHTTPContent(ingressIP, svcTCPPort, e2eservice.KubeProxyLagTimeout, "/clientip")
clientIP := content.String()
e2elog.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP)
framework.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP)
ginkgo.By("checking if Source IP is preserved")
if strings.HasPrefix(clientIP, "10.") {
e2elog.Failf("Source IP was NOT preserved")
framework.Failf("Source IP was NOT preserved")
}
})
@ -2164,9 +2163,9 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
ginkgo.By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v%v%v", nodeName, nodeIP, tcpNodePort, path))
content := e2eservice.GetHTTPContent(nodeIP, tcpNodePort, e2eservice.KubeProxyLagTimeout, path)
clientIP := content.String()
e2elog.Logf("ClientIP detected by target pod using NodePort is %s", clientIP)
framework.Logf("ClientIP detected by target pod using NodePort is %s", clientIP)
if strings.HasPrefix(clientIP, "10.") {
e2elog.Failf("Source IP was NOT preserved")
framework.Failf("Source IP was NOT preserved")
}
}
})
@ -2196,7 +2195,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
healthCheckNodePort := int(svc.Spec.HealthCheckNodePort)
if healthCheckNodePort == 0 {
e2elog.Failf("Service HealthCheck NodePort was not allocated")
framework.Failf("Service HealthCheck NodePort was not allocated")
}
ips := e2enode.CollectAddresses(nodes, v1.NodeExternalIP)
@ -2229,7 +2228,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
expectedSuccess := nodes.Items[n].Name == endpointNodeName
port := strconv.Itoa(healthCheckNodePort)
ipPort := net.JoinHostPort(publicIP, port)
e2elog.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess)
framework.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess)
err := e2eservice.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, e2eservice.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)
framework.ExpectNoError(err)
}
@ -2261,7 +2260,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
framework.ExpectNoError(e2edeploy.WaitForDeploymentComplete(cs, deployment), "Failed to complete pause pod deployment")
defer func() {
e2elog.Logf("Deleting deployment")
framework.Logf("Deleting deployment")
err = cs.AppsV1().Deployments(namespace).Delete(deployment.Name, &metav1.DeleteOptions{})
framework.ExpectNoError(err, "Failed to delete deployment %s", deployment.Name)
}()
@ -2275,7 +2274,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
framework.ExpectNoError(err, "Error in listing pods associated with pause pod deployments")
pausePod := pausePods.Items[0]
e2elog.Logf("Waiting up to %v curl %v", e2eservice.KubeProxyLagTimeout, path)
framework.Logf("Waiting up to %v curl %v", e2eservice.KubeProxyLagTimeout, path)
cmd := fmt.Sprintf(`curl -q -s --connect-timeout 30 %v`, path)
var srcIP string
@ -2283,13 +2282,13 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
if pollErr := wait.PollImmediate(framework.Poll, e2eservice.LoadBalancerCreateTimeoutDefault, func() (bool, error) {
stdout, err := framework.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd)
if err != nil {
e2elog.Logf("got err: %v, retry until timeout", err)
framework.Logf("got err: %v, retry until timeout", err)
return false, nil
}
srcIP = strings.TrimSpace(strings.Split(stdout, ":")[0])
return srcIP == pausePod.Status.PodIP, nil
}); pollErr != nil {
e2elog.Failf("Source IP not preserved from %v, expected '%v' got '%v'", pausePod.Name, pausePod.Status.PodIP, srcIP)
framework.Failf("Source IP not preserved from %v, expected '%v' got '%v'", pausePod.Name, pausePod.Status.PodIP, srcIP)
}
})
@ -2300,7 +2299,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
nodes := jig.GetNodes(e2eservice.MaxNodesForEndpointsTests)
if len(nodes.Items) < 2 {
e2elog.Failf("Need at least 2 nodes to verify source ip from a node without endpoint")
framework.Failf("Need at least 2 nodes to verify source ip from a node without endpoint")
}
svc := jig.CreateOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true, nil)
@ -2319,7 +2318,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
})
if svc.Spec.HealthCheckNodePort > 0 {
e2elog.Failf("Service HealthCheck NodePort still present")
framework.Failf("Service HealthCheck NodePort still present")
}
endpointNodeMap := jig.GetEndpointNodes(svc)
@ -2355,7 +2354,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
return false, nil
}
if pollErr := wait.PollImmediate(framework.Poll, e2eservice.TestTimeout, pollfn); pollErr != nil {
e2elog.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s",
framework.Failf("Kube-proxy still exposing health check on node %v:%v, after ESIPP was turned off. body %s",
nodeName, healthCheckNodePort, body.String())
}
}
@ -2372,7 +2371,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
return false, nil
})
if pollErr != nil {
e2elog.Failf("Source IP WAS preserved even after ESIPP turned off. Got %v, expected a ten-dot cluster ip.", clientIP)
framework.Failf("Source IP WAS preserved even after ESIPP turned off. Got %v, expected a ten-dot cluster ip.", clientIP)
}
// TODO: We need to attempt to create another service with the previously
@ -2397,7 +2396,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
return false, nil
})
if pollErr != nil {
e2elog.Failf("Source IP (%v) is not the client IP even after ESIPP turned on, expected a public IP.", clientIP)
framework.Failf("Source IP (%v) is not the client IP even after ESIPP turned on, expected a public IP.", clientIP)
}
})
})
@ -2407,17 +2406,17 @@ func execSourceipTest(pausePod v1.Pod, serviceAddress string) (string, string) {
var stdout string
timeout := 2 * time.Minute
e2elog.Logf("Waiting up to %v to get response from %s", timeout, serviceAddress)
framework.Logf("Waiting up to %v to get response from %s", timeout, serviceAddress)
cmd := fmt.Sprintf(`curl -q -s --connect-timeout 30 %s/clientip`, serviceAddress)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
stdout, err = framework.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd)
if err != nil {
e2elog.Logf("got err: %v, retry until timeout", err)
framework.Logf("got err: %v, retry until timeout", err)
continue
}
// Need to check output because it might be empty in case of error.
if strings.TrimSpace(stdout) == "" {
e2elog.Logf("got empty stdout, retry until timeout")
framework.Logf("got empty stdout, retry until timeout")
continue
}
break
@ -2429,7 +2428,7 @@ func execSourceipTest(pausePod v1.Pod, serviceAddress string) (string, string) {
host, _, err := net.SplitHostPort(stdout)
if err != nil {
// Fail the test if output format is unexpected.
e2elog.Failf("exec pod returned unexpected stdout: [%v]\n", stdout)
framework.Failf("exec pod returned unexpected stdout: [%v]\n", stdout)
}
return pausePod.Status.PodIP, host
}
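execSourceipTest above returns the pause pod's own IP together with the client IP the service echoed back; source-IP preservation holds when the two match. A hypothetical call site:

// Sketch only: pausePod and serviceAddress come from the surrounding test.
podIP, echoedIP := execSourceipTest(pausePod, serviceAddress)
if podIP != echoedIP {
	framework.Failf("source IP not preserved: pod IP %s, echoed client IP %s", podIP, echoedIP)
}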
@ -2474,7 +2473,7 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
execPod := e2epod.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil)
defer func() {
e2elog.Logf("Cleaning up the exec pod")
framework.Logf("Cleaning up the exec pod")
err := cs.CoreV1().Pods(ns).Delete(execPod.Name, nil)
framework.ExpectNoError(err, "failed to delete pod: %s in namespace: %s", execPod.Name, ns)
}()
@ -2520,10 +2519,10 @@ func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework,
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
defer func() {
podNodePairs, err := e2enode.PodNodePairs(cs, ns)
e2elog.Logf("[pod,node] pairs: %+v; err: %v", podNodePairs, err)
framework.Logf("[pod,node] pairs: %+v; err: %v", podNodePairs, err)
e2eservice.StopServeHostnameService(cs, ns, serviceName)
lb := cloudprovider.DefaultLoadBalancerName(svc)
e2elog.Logf("cleaning load balancer resource for %s", lb)
framework.Logf("cleaning load balancer resource for %s", lb)
e2eservice.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
}()
ingressIP := e2eservice.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])

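A pattern that recurs throughout the service hunks above: run a command from a helper pod inside wait.PollImmediate, log and retry on transient errors, and only Failf once the timeout expires. A minimal sketch, assuming ns, podName, and cmd are set up as in the tests above:

if pollErr := wait.PollImmediate(framework.Poll, e2eservice.KubeProxyLagTimeout, func() (bool, error) {
	stdout, err := framework.RunHostCmd(ns, podName, cmd)
	if err != nil {
		framework.Logf("transient error, retrying: %v", err)
		return false, nil // returning nil keeps the poll going
	}
	return strings.Contains(stdout, "hello"), nil // done when the expected output appears
}); pollErr != nil {
	framework.Failf("condition not met within %v: %v", e2eservice.KubeProxyLagTimeout, pollErr)
}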
@ -31,7 +31,6 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -95,7 +94,7 @@ var _ = SIGDescribe("Service endpoints latency", func() {
}
if n < 2 {
failing.Insert("Less than two runs succeeded; aborting.")
e2elog.Failf(strings.Join(failing.List(), "\n"))
framework.Failf(strings.Join(failing.List(), "\n"))
}
percentile := func(p int) time.Duration {
est := n * p / 100
@ -104,14 +103,14 @@ var _ = SIGDescribe("Service endpoints latency", func() {
}
return dSorted[est]
}
e2elog.Logf("Latencies: %v", dSorted)
framework.Logf("Latencies: %v", dSorted)
p50 := percentile(50)
p90 := percentile(90)
p99 := percentile(99)
e2elog.Logf("50 %%ile: %v", p50)
e2elog.Logf("90 %%ile: %v", p90)
e2elog.Logf("99 %%ile: %v", p99)
e2elog.Logf("Total sample count: %v", len(dSorted))
framework.Logf("50 %%ile: %v", p50)
framework.Logf("90 %%ile: %v", p90)
framework.Logf("99 %%ile: %v", p99)
framework.Logf("Total sample count: %v", len(dSorted))
if p50 > limitMedian {
failing.Insert("Median latency should be less than " + limitMedian.String())
@ -122,7 +121,7 @@ var _ = SIGDescribe("Service endpoints latency", func() {
if failing.Len() > 0 {
errList := strings.Join(failing.List(), "\n")
helpfulInfo := fmt.Sprintf("\n50, 90, 99 percentiles: %v %v %v", p50, p90, p99)
e2elog.Failf(errList + helpfulInfo)
framework.Failf(errList + helpfulInfo)
}
})
})
@ -176,14 +175,14 @@ func runServiceLatencies(f *framework.Framework, inParallel, total int, acceptab
for i := 0; i < total; i++ {
select {
case e := <-errs:
e2elog.Logf("Got error: %v", e)
framework.Logf("Got error: %v", e)
errCount++
case d := <-durations:
output = append(output, d)
}
}
if errCount != 0 {
e2elog.Logf("Got %d errors out of %d tries", errCount, total)
framework.Logf("Got %d errors out of %d tries", errCount, total)
errRatio := float32(errCount) / float32(total)
if errRatio > acceptableFailureRatio {
return output, fmt.Errorf("error ratio %g is higher than the acceptable ratio %g", errRatio, acceptableFailureRatio)
@ -346,13 +345,13 @@ func singleServiceLatency(f *framework.Framework, name string, q *endpointQuerie
if err != nil {
return 0, err
}
e2elog.Logf("Created: %v", gotSvc.Name)
framework.Logf("Created: %v", gotSvc.Name)
if e := q.request(gotSvc.Name); e == nil {
return 0, fmt.Errorf("Never got a result for endpoint %v", gotSvc.Name)
}
stopTime := time.Now()
d := stopTime.Sub(startTime)
e2elog.Logf("Got endpoints: %v [%v]", gotSvc.Name, d)
framework.Logf("Got endpoints: %v [%v]", gotSvc.Name, d)
return d, nil
}

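The percentile closure above indexes into the sorted latency slice: with n = 200 samples, percentile(90) reads dSorted[180]. A standalone sketch of the same lookup, assuming dSorted is sorted ascending:

// percentileOf mirrors the closure in the latency test (illustrative name).
func percentileOf(dSorted []time.Duration, p int) time.Duration {
	est := len(dSorted) * p / 100
	if est >= len(dSorted) { // clamp to the last sample; the test's own bounds guard is elided in the hunk above
		est = len(dSorted) - 1
	}
	return dSorted[est]
}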
@ -24,7 +24,7 @@ import (
"strconv"
"strings"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework"
)
// IPerfResults is a struct that stores some IPerfResult
@ -57,7 +57,7 @@ func (i *IPerfResults) Add(ipr *IPerfResult) {
// ToTSV exports an easily readable tab delimited format of all IPerfResults.
func (i *IPerfResults) ToTSV() string {
if len(i.BandwidthMap) < 1 {
e2elog.Logf("Warning: no data in bandwidth map")
framework.Logf("Warning: no data in bandwidth map")
}
var buffer bytes.Buffer
@ -73,7 +73,7 @@ func NewIPerf(csvLine string) *IPerfResult {
csvLine = strings.Trim(csvLine, "\n")
slice := StrSlice(strings.Split(csvLine, ","))
if len(slice) != 9 {
e2elog.Failf("Incorrect fields in the output: %v (%v out of 9)", slice, len(slice))
framework.Failf("Incorrect fields in the output: %v (%v out of 9)", slice, len(slice))
}
i := IPerfResult{}
i.date = slice.get(0)
@ -102,7 +102,7 @@ func (s StrSlice) get(i int) string {
func intOrFail(debugName string, rawValue string) int64 {
value, err := strconv.ParseInt(rawValue, 10, 64)
if err != nil {
e2elog.Failf("Failed parsing value %v from the string '%v' as an integer", debugName, rawValue)
framework.Failf("Failed parsing value %v from the string '%v' as an integer", debugName, rawValue)
}
return value
}
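A hypothetical use of the parser above. The nine comma-separated fields follow iperf's CSV report layout (timestamp, source/destination endpoints, transfer id, interval, bytes transferred, bandwidth); every literal value below is made up:

// Sketch only: feeds one fabricated iperf CSV line through the helpers above.
results := &IPerfResults{}
results.Add(NewIPerf("20190913120000,10.0.0.1,60000,10.0.0.2,5001,3,0.0-10.0,1250000000,1000000000"))
fmt.Print(results.ToTSV())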