Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-22 19:31:44 +00:00)
Move auth and network tests to use framework/log
This is part of the transition to using framework/log instead of the Logf inside the framework package. It will help reduce import size and avoid import cycles when importing the framework or its subpackages.
parent a6e5cb266e
commit 0c62f751b6
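The mechanical change repeated in every hunk below: each test file adds an aliased import of the framework/log subpackage and swaps framework.Logf for e2elog.Logf. A minimal sketch of the pattern, assuming a test file in one of the touched packages (the logSinkCreated helper is hypothetical, for illustration only):

    package auth

    import (
        // Previously, tests imported the whole framework package just to log:
        //   "k8s.io/kubernetes/test/e2e/framework"  ->  framework.Logf(...)
        // Now logging needs only the small log subpackage, imported under
        // the e2elog alias used throughout this commit.
        e2elog "k8s.io/kubernetes/test/e2e/framework/log"
    )

    // logSinkCreated is a hypothetical helper showing the new call style;
    // e2elog.Logf keeps the same Printf-style signature as framework.Logf.
    func logSinkCreated() {
        e2elog.Logf("created audit sink")
    }

Calls to other framework helpers (framework.Failf, framework.ExpectNoError, and so on) are unchanged, so files that still use them keep the framework import alongside the new e2elog import.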
@@ -55,6 +55,7 @@ go_library(
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/auth:go_default_library",
 "//test/e2e/framework/job:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/utils:go_default_library",
 "//test/utils/image:go_default_library",
 "//vendor/github.com/evanphx/json-patch:go_default_library",
@@ -36,6 +36,7 @@ import (
 restclient "k8s.io/client-go/rest"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/auth"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -738,9 +739,9 @@ func expectEvents(f *framework.Framework, expectedEvents []utils.AuditEvent) {
 defer stream.Close()
 missingReport, err := utils.CheckAuditLines(stream, expectedEvents, auditv1.SchemeGroupVersion)
 if err != nil {
-framework.Logf("Failed to observe audit events: %v", err)
+e2elog.Logf("Failed to observe audit events: %v", err)
 } else if len(missingReport.MissingEvents) > 0 {
-framework.Logf(missingReport.String())
+e2elog.Logf(missingReport.String())
 }
 return len(missingReport.MissingEvents) == 0, nil
 })
@@ -36,6 +36,7 @@ import (
 restclient "k8s.io/client-go/rest"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/auth"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -113,14 +114,14 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
 err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
 p, err := f.ClientSet.CoreV1().Pods(namespace).Get("audit-proxy", metav1.GetOptions{})
 if errors.IsNotFound(err) {
-framework.Logf("waiting for audit-proxy pod to be present")
+e2elog.Logf("waiting for audit-proxy pod to be present")
 return false, nil
 } else if err != nil {
 return false, err
 }
 podIP = p.Status.PodIP
 if podIP == "" {
-framework.Logf("waiting for audit-proxy pod IP to be ready")
+e2elog.Logf("waiting for audit-proxy pod IP to be ready")
 return false, nil
 }
 return true, nil
@@ -153,17 +154,17 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {

 _, err = f.ClientSet.AuditregistrationV1alpha1().AuditSinks().Create(&sink)
 framework.ExpectNoError(err, "failed to create audit sink")
-framework.Logf("created audit sink")
+e2elog.Logf("created audit sink")

 // check that we are receiving logs in the proxy
 err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
 logs, err := framework.GetPodLogs(f.ClientSet, namespace, "audit-proxy", "proxy")
 if err != nil {
-framework.Logf("waiting for audit-proxy pod logs to be available")
+e2elog.Logf("waiting for audit-proxy pod logs to be available")
 return false, nil
 }
 if logs == "" {
-framework.Logf("waiting for audit-proxy pod logs to be non-empty")
+e2elog.Logf("waiting for audit-proxy pod logs to be non-empty")
 return false, nil
 }
 return true, nil
@@ -369,9 +370,9 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
 reader := strings.NewReader(logs)
 missingReport, err := utils.CheckAuditLines(reader, expectedEvents, auditv1.SchemeGroupVersion)
 if err != nil {
-framework.Logf("Failed to observe audit events: %v", err)
+e2elog.Logf("Failed to observe audit events: %v", err)
 } else if len(missingReport.MissingEvents) > 0 {
-framework.Logf(missingReport.String())
+e2elog.Logf(missingReport.String())
 }
 return len(missingReport.MissingEvents) == 0, nil
 })
@@ -28,6 +28,7 @@ import (
 v1beta1client "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
 "k8s.io/client-go/util/cert"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/utils"

 . "github.com/onsi/ginkgo"
@@ -66,13 +67,13 @@ var _ = SIGDescribe("Certificates API", func() {
 }
 csrs := f.ClientSet.CertificatesV1beta1().CertificateSigningRequests()

-framework.Logf("creating CSR")
+e2elog.Logf("creating CSR")
 csr, err = csrs.Create(csr)
 framework.ExpectNoError(err)

 csrName := csr.Name

-framework.Logf("approving CSR")
+e2elog.Logf("approving CSR")
 framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) {
 csr.Status.Conditions = []v1beta1.CertificateSigningRequestCondition{
 {
@@ -84,27 +85,27 @@ var _ = SIGDescribe("Certificates API", func() {
 csr, err = csrs.UpdateApproval(csr)
 if err != nil {
 csr, _ = csrs.Get(csrName, metav1.GetOptions{})
-framework.Logf("err updating approval: %v", err)
+e2elog.Logf("err updating approval: %v", err)
 return false, nil
 }
 return true, nil
 }))

-framework.Logf("waiting for CSR to be signed")
+e2elog.Logf("waiting for CSR to be signed")
 framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) {
 csr, err = csrs.Get(csrName, metav1.GetOptions{})
 if err != nil {
-framework.Logf("error getting csr: %v", err)
+e2elog.Logf("error getting csr: %v", err)
 return false, nil
 }
 if len(csr.Status.Certificate) == 0 {
-framework.Logf("csr not signed yet")
+e2elog.Logf("csr not signed yet")
 return false, nil
 }
 return true, nil
 }))

-framework.Logf("testing the client")
+e2elog.Logf("testing the client")
 rcfg, err := framework.LoadConfig()
 framework.ExpectNoError(err)
@@ -20,13 +20,14 @@ import (
 "fmt"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 restclient "k8s.io/client-go/rest"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"

 . "github.com/onsi/ginkgo"
@@ -156,7 +157,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
 err = wait.Poll(itv, dur, func() (bool, error) {
 _, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
 if err != nil {
-framework.Logf("Failed to get secret %v, err: %v", secret.Name, err)
+e2elog.Logf("Failed to get secret %v, err: %v", secret.Name, err)
 return false, nil
 }
 return true, nil
@@ -22,7 +22,7 @@ import (
 "time"

 authenticationv1 "k8s.io/api/authentication/v1"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 apierrors "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/sets"
@@ -30,6 +30,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"

 . "github.com/onsi/ginkgo"
@@ -48,19 +49,19 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 By("waiting for a single token reference")
 sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
 if apierrors.IsNotFound(err) {
-framework.Logf("default service account was not found")
+e2elog.Logf("default service account was not found")
 return false, nil
 }
 if err != nil {
-framework.Logf("error getting default service account: %v", err)
+e2elog.Logf("error getting default service account: %v", err)
 return false, err
 }
 switch len(sa.Secrets) {
 case 0:
-framework.Logf("default service account has no secret references")
+e2elog.Logf("default service account has no secret references")
 return false, nil
 case 1:
-framework.Logf("default service account has a single secret reference")
+e2elog.Logf("default service account has a single secret reference")
 secrets = sa.Secrets
 return true, nil
 default:
@@ -86,19 +87,19 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 By("waiting for a new token reference")
 sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
 if err != nil {
-framework.Logf("error getting default service account: %v", err)
+e2elog.Logf("error getting default service account: %v", err)
 return false, err
 }
 switch len(sa.Secrets) {
 case 0:
-framework.Logf("default service account has no secret references")
+e2elog.Logf("default service account has no secret references")
 return false, nil
 case 1:
 if sa.Secrets[0] == secrets[0] {
-framework.Logf("default service account still has the deleted secret reference")
+e2elog.Logf("default service account still has the deleted secret reference")
 return false, nil
 }
-framework.Logf("default service account has a new single secret reference")
+e2elog.Logf("default service account has a new single secret reference")
 secrets = sa.Secrets
 return true, nil
 default:
@@ -130,15 +131,15 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 By("waiting for a new token to be created and added")
 sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
 if err != nil {
-framework.Logf("error getting default service account: %v", err)
+e2elog.Logf("error getting default service account: %v", err)
 return false, err
 }
 switch len(sa.Secrets) {
 case 0:
-framework.Logf("default service account has no secret references")
+e2elog.Logf("default service account has no secret references")
 return false, nil
 case 1:
-framework.Logf("default service account has a new single secret reference")
+e2elog.Logf("default service account has a new single secret reference")
 secrets = sa.Secrets
 return true, nil
 default:
@@ -176,21 +177,21 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 By("getting the auto-created API token")
 sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("mount-test", metav1.GetOptions{})
 if apierrors.IsNotFound(err) {
-framework.Logf("mount-test service account was not found")
+e2elog.Logf("mount-test service account was not found")
 return false, nil
 }
 if err != nil {
-framework.Logf("error getting mount-test service account: %v", err)
+e2elog.Logf("error getting mount-test service account: %v", err)
 return false, err
 }
 if len(sa.Secrets) == 0 {
-framework.Logf("mount-test service account has no secret references")
+e2elog.Logf("mount-test service account has no secret references")
 return false, nil
 }
 for _, secretRef := range sa.Secrets {
 secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{})
 if err != nil {
-framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
+e2elog.Logf("Error getting secret %s: %v", secretRef.Name, err)
 continue
 }
 if secret.Type == v1.SecretTypeServiceAccountToken {
@@ -199,7 +200,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 }
 }

-framework.Logf("default service account has no secret references to valid service account tokens")
+e2elog.Logf("default service account has no secret references to valid service account tokens")
 return false, nil
 }))
@@ -287,21 +288,21 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 By("getting the auto-created API token")
 sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(mountSA.Name, metav1.GetOptions{})
 if apierrors.IsNotFound(err) {
-framework.Logf("mount service account was not found")
+e2elog.Logf("mount service account was not found")
 return false, nil
 }
 if err != nil {
-framework.Logf("error getting mount service account: %v", err)
+e2elog.Logf("error getting mount service account: %v", err)
 return false, err
 }
 if len(sa.Secrets) == 0 {
-framework.Logf("mount service account has no secret references")
+e2elog.Logf("mount service account has no secret references")
 return false, nil
 }
 for _, secretRef := range sa.Secrets {
 secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{})
 if err != nil {
-framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
+e2elog.Logf("Error getting secret %s: %v", secretRef.Name, err)
 continue
 }
 if secret.Type == v1.SecretTypeServiceAccountToken {
@@ -309,7 +310,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 }
 }

-framework.Logf("default service account has no secret references to valid service account tokens")
+e2elog.Logf("default service account has no secret references to valid service account tokens")
 return false, nil
 }))
@@ -391,7 +392,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 }
 createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 framework.ExpectNoError(err)
-framework.Logf("created pod %s", tc.PodName)
+e2elog.Logf("created pod %s", tc.PodName)

 hasServiceAccountTokenVolume := false
 for _, c := range createdPod.Spec.Containers {
@@ -405,7 +406,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 if hasServiceAccountTokenVolume != tc.ExpectTokenVolume {
 framework.Failf("%s: expected volume=%v, got %v (%#v)", tc.PodName, tc.ExpectTokenVolume, hasServiceAccountTokenVolume, createdPod)
 } else {
-framework.Logf("pod %s service account token volume mount: %v", tc.PodName, hasServiceAccountTokenVolume)
+e2elog.Logf("pod %s service account token volume mount: %v", tc.PodName, hasServiceAccountTokenVolume)
 }
 }
 })
@@ -61,6 +61,7 @@ go_library(
 "//test/e2e/framework/auth:go_default_library",
 "//test/e2e/framework/endpoints:go_default_library",
 "//test/e2e/framework/ingress:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/providers/gce:go_default_library",
 "//test/e2e/network/scale:go_default_library",
 "//test/images/net/nat:go_default_library",
@@ -21,10 +21,11 @@ import (
 "strings"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
@@ -339,9 +340,9 @@ var _ = SIGDescribe("DNS", func() {
 })
 testServerPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testServerPod)
 Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testServerPod.Name)
-framework.Logf("Created pod %v", testServerPod)
+e2elog.Logf("Created pod %v", testServerPod)
 defer func() {
-framework.Logf("Deleting pod %s...", testServerPod.Name)
+e2elog.Logf("Deleting pod %s...", testServerPod.Name)
 if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
 framework.Failf("Failed to delete pod %s: %v", testServerPod.Name, err)
 }
@@ -352,7 +353,7 @@ var _ = SIGDescribe("DNS", func() {
 testServerPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(testServerPod.Name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred(), "failed to get pod %v", testServerPod.Name)
 testServerIP := testServerPod.Status.PodIP
-framework.Logf("testServerIP is %s", testServerIP)
+e2elog.Logf("testServerIP is %s", testServerIP)

 By("Creating a pod with dnsPolicy=None and customized dnsConfig...")
 testUtilsPod := generateDNSUtilsPod()
@@ -370,9 +371,9 @@ var _ = SIGDescribe("DNS", func() {
 }
 testUtilsPod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod)
 Expect(err).NotTo(HaveOccurred(), "failed to create pod: %s", testUtilsPod.Name)
-framework.Logf("Created pod %v", testUtilsPod)
+e2elog.Logf("Created pod %v", testUtilsPod)
 defer func() {
-framework.Logf("Deleting pod %s...", testUtilsPod.Name)
+e2elog.Logf("Deleting pod %s...", testUtilsPod.Name)
 if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil {
 framework.Failf("Failed to delete pod %s: %v", testUtilsPod.Name, err)
 }
@@ -411,12 +412,12 @@ var _ = SIGDescribe("DNS", func() {
 CaptureStderr: true,
 })
 if err != nil {
-framework.Logf("Failed to execute dig command, stdout:%v, stderr: %v, err: %v", stdout, stderr, err)
+e2elog.Logf("Failed to execute dig command, stdout:%v, stderr: %v, err: %v", stdout, stderr, err)
 return false, nil
 }
 res := strings.Split(stdout, "\n")
 if len(res) != 1 || res[0] != testInjectedIP {
-framework.Logf("Expect command `%v` to return %s, got: %v", cmd, testInjectedIP, res)
+e2elog.Logf("Expect command `%v` to return %s, got: %v", cmd, testInjectedIP, res)
 return false, nil
 }
 return true, nil
@@ -22,7 +22,7 @@ import (
 "strings"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 "k8s.io/apimachinery/pkg/fields"
@@ -32,6 +32,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"

 . "github.com/onsi/ginkgo"
@@ -71,7 +72,7 @@ func (t *dnsTestCommon) init() {
 Expect(len(pods.Items)).Should(BeNumerically(">=", 1))

 t.dnsPod = &pods.Items[0]
-framework.Logf("Using DNS pod: %v", t.dnsPod.Name)
+e2elog.Logf("Using DNS pod: %v", t.dnsPod.Name)

 if strings.Contains(t.dnsPod.Name, "coredns") {
 t.name = "coredns"
@@ -132,7 +133,7 @@ func (t *dnsTestCommon) runDig(dnsName, target string) []string {
 CaptureStderr: true,
 })

-framework.Logf("Running dig: %v, stdout: %q, stderr: %q, err: %v",
+e2elog.Logf("Running dig: %v, stdout: %q, stderr: %q, err: %v",
 cmd, stdout, stderr, err)

 if stdout == "" {
@@ -224,7 +225,7 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {
 var err error
 t.utilPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.utilPod)
 Expect(err).NotTo(HaveOccurred(), "failed to create pod: %v", t.utilPod)
-framework.Logf("Created pod %v", t.utilPod)
+e2elog.Logf("Created pod %v", t.utilPod)
 Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(HaveOccurred(), "pod failed to start running: %v", t.utilPod)

 t.utilService = &v1.Service{
@@ -249,13 +250,13 @@ func (t *dnsTestCommon) createUtilPodLabel(baseName string) {

 t.utilService, err = t.c.CoreV1().Services(t.f.Namespace.Name).Create(t.utilService)
 Expect(err).NotTo(HaveOccurred(), "failed to create service: %s/%s", t.f.Namespace.Name, t.utilService.ObjectMeta.Name)
-framework.Logf("Created service %v", t.utilService)
+e2elog.Logf("Created service %v", t.utilService)
 }

 func (t *dnsTestCommon) deleteUtilPod() {
 podClient := t.c.CoreV1().Pods(t.f.Namespace.Name)
 if err := podClient.Delete(t.utilPod.Name, metav1.NewDeleteOptions(0)); err != nil {
-framework.Logf("Delete of pod %v/%v failed: %v",
+e2elog.Logf("Delete of pod %v/%v failed: %v",
 t.utilPod.Namespace, t.utilPod.Name, err)
 }
 }
@@ -315,7 +316,7 @@ func (t *dnsTestCommon) createDNSPodFromObj(pod *v1.Pod) {
 var err error
 t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Create(t.dnsServerPod)
 Expect(err).NotTo(HaveOccurred(), "failed to create pod: %v", t.dnsServerPod)
-framework.Logf("Created pod %v", t.dnsServerPod)
+e2elog.Logf("Created pod %v", t.dnsServerPod)
 Expect(t.f.WaitForPodRunning(t.dnsServerPod.Name)).NotTo(HaveOccurred(), "pod failed to start running: %v", t.dnsServerPod)

 t.dnsServerPod, err = t.c.CoreV1().Pods(t.f.Namespace.Name).Get(
@@ -369,7 +370,7 @@ func (t *dnsTestCommon) createDNSServerWithPtrRecord(isIPv6 bool) {
 func (t *dnsTestCommon) deleteDNSServerPod() {
 podClient := t.c.CoreV1().Pods(t.f.Namespace.Name)
 if err := podClient.Delete(t.dnsServerPod.Name, metav1.NewDeleteOptions(0)); err != nil {
-framework.Logf("Delete of pod %v/%v failed: %v",
+e2elog.Logf("Delete of pod %v/%v failed: %v",
 t.utilPod.Namespace, t.dnsServerPod.Name, err)
 }
 }
@@ -524,18 +525,18 @@ func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client
 if ctx.Err() != nil {
 framework.Failf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err)
 } else {
-framework.Logf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err)
+e2elog.Logf("Unable to read %s from pod %s/%s: %v", fileName, pod.Namespace, pod.Name, err)
 }
 failed = append(failed, fileName)
 } else if check && strings.TrimSpace(string(contents)) != expected {
-framework.Logf("File %s from pod %s/%s contains '%s' instead of '%s'", fileName, pod.Namespace, pod.Name, string(contents), expected)
+e2elog.Logf("File %s from pod %s/%s contains '%s' instead of '%s'", fileName, pod.Namespace, pod.Name, string(contents), expected)
 failed = append(failed, fileName)
 }
 }
 if len(failed) == 0 {
 return true, nil
 }
-framework.Logf("Lookups using %s/%s failed for: %v\n", pod.Namespace, pod.Name, failed)
+e2elog.Logf("Lookups using %s/%s failed for: %v\n", pod.Namespace, pod.Name, failed)
 return false, nil
 }))
 Expect(len(failed)).To(Equal(0))
@@ -566,7 +567,7 @@ func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string)

 // TODO: probe from the host, too.

-framework.Logf("DNS probes using %s/%s succeeded\n", pod.Namespace, pod.Name)
+e2elog.Logf("DNS probes using %s/%s succeeded\n", pod.Namespace, pod.Name)
 }

 func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
@@ -592,7 +593,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames
 By("looking for the results for each expected name from probers")
 assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value)

-framework.Logf("DNS probes using %s succeeded\n", pod.Name)
+e2elog.Logf("DNS probes using %s succeeded\n", pod.Name)
 }

 func reverseArray(arr []string) []string {
@@ -22,11 +22,12 @@ import (
 "strconv"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/client-go/util/workqueue"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 testutils "k8s.io/kubernetes/test/utils"

 . "github.com/onsi/ginkgo"
@@ -66,7 +67,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
 defer GinkgoRecover()
 framework.ExpectNoError(testutils.CreateServiceWithRetries(f.ClientSet, services[i].Namespace, services[i]))
 }
-framework.Logf("Creating %v test services", maxServicesPerCluster)
+e2elog.Logf("Creating %v test services", maxServicesPerCluster)
 workqueue.ParallelizeUntil(context.TODO(), parallelCreateServiceWorkers, len(services), createService)
 dnsTest := dnsTestCommon{
 f: f,
@@ -75,7 +76,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
 }
 dnsTest.createUtilPodLabel("e2e-dns-scale-records")
 defer dnsTest.deleteUtilPod()
-framework.Logf("Querying %v%% of service records", checkServicePercent*100)
+e2elog.Logf("Querying %v%% of service records", checkServicePercent*100)
 for i := 0; i < len(services); i++ {
 if i%(1/checkServicePercent) != 0 {
 continue
@@ -84,7 +85,7 @@ var _ = SIGDescribe("[Feature:PerformanceDNS][Serial]", func() {
 svc, err := f.ClientSet.CoreV1().Services(s.Namespace).Get(s.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)
 qname := fmt.Sprintf("%v.%v.svc.%v", s.Name, s.Namespace, framework.TestContext.ClusterDNSDomain)
-framework.Logf("Querying %v expecting %v", qname, svc.Spec.ClusterIP)
+e2elog.Logf("Querying %v expecting %v", qname, svc.Spec.ClusterIP)
 dnsTest.checkDNSRecordFrom(
 qname,
 func(actual []string) bool {
@@ -24,12 +24,13 @@ import (
 "strings"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 clientset "k8s.io/client-go/kubernetes"
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
@@ -108,7 +109,7 @@ var _ = SIGDescribe("ClusterDns [Feature:Example]", func() {
 Expect(err).NotTo(HaveOccurred(), "failed to list pods in namespace: %s", ns.Name)
 err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
 Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond")
-framework.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
+e2elog.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)

 err = framework.ServiceResponding(c, ns.Name, backendSvcName)
 Expect(err).NotTo(HaveOccurred(), "waiting for the service to respond")
@@ -20,13 +20,14 @@ import (
 "fmt"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/util/sets"
 clientset "k8s.io/client-go/kubernetes"
 cloudprovider "k8s.io/cloud-provider"
 gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 "k8s.io/kubernetes/pkg/master/ports"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/providers/gce"

 . "github.com/onsi/ginkgo"
@@ -68,7 +69,7 @@ var _ = SIGDescribe("Firewall rule", func() {
 By("Getting cluster ID")
 clusterID, err := gce.GetClusterID(cs)
 Expect(err).NotTo(HaveOccurred())
-framework.Logf("Got cluster ID: %v", clusterID)
+e2elog.Logf("Got cluster ID: %v", clusterID)

 jig := framework.NewServiceTestJig(cs, serviceName)
 nodeList := jig.GetNodes(framework.MaxNodesForEndpointsTests)
@@ -130,7 +131,7 @@ var _ = SIGDescribe("Firewall rule", func() {
 podName := fmt.Sprintf("netexec%v", i)
 jig.LaunchNetexecPodOnNode(f, nodeName, podName, firewallTestHTTPPort, firewallTestUDPPort, true)
 defer func() {
-framework.Logf("Cleaning up the netexec pod: %v", podName)
+e2elog.Logf("Cleaning up the netexec pod: %v", podName)
 Expect(cs.CoreV1().Pods(ns).Delete(podName, nil)).NotTo(HaveOccurred())
 }()
 }
@@ -37,6 +37,7 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/auth"
 "k8s.io/kubernetes/test/e2e/framework/ingress"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/providers/gce"

 . "github.com/onsi/ginkgo"
@@ -174,7 +175,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 framework.ExpectNoError(err)
 annotations := ing.Annotations
 if annotations == nil || annotations[instanceGroupAnnotation] == "" {
-framework.Logf("Waiting for ingress to get %s annotation. Found annotations: %v", instanceGroupAnnotation, annotations)
+e2elog.Logf("Waiting for ingress to get %s annotation. Found annotations: %v", instanceGroupAnnotation, annotations)
 return false, nil
 }
 return true, nil
@@ -299,7 +300,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 }
 err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
 if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)); err != nil {
-framework.Logf("Failed to verify IG backend service: %v", err)
+e2elog.Logf("Failed to verify IG backend service: %v", err)
 return false, nil
 }
 return true, nil
@@ -317,7 +318,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 }
 err = wait.Poll(5*time.Second, framework.LoadBalancerPollTimeout, func() (bool, error) {
 if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)); err != nil {
-framework.Logf("Failed to verify NEG backend service: %v", err)
+e2elog.Logf("Failed to verify NEG backend service: %v", err)
 return false, nil
 }
 return true, nil
@@ -354,7 +355,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 if err != nil {
 return false, nil
 }
-framework.Logf("Expecting %d backends, got %d", num, res.Len())
+e2elog.Logf("Expecting %d backends, got %d", num, res.Len())
 return res.Len() == num, nil
 })
 Expect(err).NotTo(HaveOccurred())
@@ -423,11 +424,11 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 if res.Len() == replicas {
 return true, nil
 }
-framework.Logf("Expecting %d different responses, but got %d.", replicas, res.Len())
+e2elog.Logf("Expecting %d different responses, but got %d.", replicas, res.Len())
 return false, nil

 } else {
-framework.Logf("Waiting for rolling update to finished. Keep sending traffic.")
+e2elog.Logf("Waiting for rolling update to finished. Keep sending traffic.")
 return false, nil
 }
 })
@@ -454,30 +455,30 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 v, ok := svc.Annotations[ingress.NEGStatusAnnotation]
 if !ok {
 // Wait for NEG sync loop to find NEGs
-framework.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations)
+e2elog.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations)
 return false, nil
 }
 err = json.Unmarshal([]byte(v), &status)
 if err != nil {
-framework.Logf("Error in parsing Expose NEG annotation: %v", err)
+e2elog.Logf("Error in parsing Expose NEG annotation: %v", err)
 return false, nil
 }
-framework.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v)
+e2elog.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v)

 // Expect 2 NEGs to be created based on the test setup (neg-exposed)
 if len(status.NetworkEndpointGroups) != 2 {
-framework.Logf("Expected 2 NEGs, got %d", len(status.NetworkEndpointGroups))
+e2elog.Logf("Expected 2 NEGs, got %d", len(status.NetworkEndpointGroups))
 return false, nil
 }

 for _, port := range expectedKeys {
 if _, ok := status.NetworkEndpointGroups[port]; !ok {
-framework.Logf("Expected ServicePort key %v, but does not exist", port)
+e2elog.Logf("Expected ServicePort key %v, but does not exist", port)
 }
 }

 if len(status.NetworkEndpointGroups) != len(expectedKeys) {
-framework.Logf("Expected length of %+v to equal length of %+v, but does not", status.NetworkEndpointGroups, expectedKeys)
+e2elog.Logf("Expected length of %+v to equal length of %+v, but does not", status.NetworkEndpointGroups, expectedKeys)
 }

 gceCloud, err := gce.GetGCECloud()
@@ -486,7 +487,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
 Expect(err).NotTo(HaveOccurred())
 if len(networkEndpoints) != num {
-framework.Logf("Expect number of endpoints to be %d, but got %d", num, len(networkEndpoints))
+e2elog.Logf("Expect number of endpoints to be %d, but got %d", num, len(networkEndpoints))
 return false, nil
 }
 }
@@ -716,7 +717,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
 if framework.ProviderIs("gce", "gke") {
 framework.ExpectNoError(gce.GcloudComputeResourceCreate("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID, "--allow", "tcp:80,tcp:443", "--network", framework.TestContext.CloudConfig.Network))
 } else {
-framework.Logf("WARNING: Not running on GCE/GKE, cannot create firewall rules for :80, :443. Assuming traffic can reach the external ips of all nodes in cluster on those ports.")
+e2elog.Logf("WARNING: Not running on GCE/GKE, cannot create firewall rules for :80, :443. Assuming traffic can reach the external ips of all nodes in cluster on those ports.")
 }

 nginxController.Init()
@@ -781,7 +782,7 @@ func executePresharedCertTest(f *framework.Framework, jig *ingress.TestJig, stat
 By(fmt.Sprintf("Deleting ssl certificate %q on GCE", preSharedCertName))
 err := wait.Poll(framework.LoadBalancerPollInterval, framework.LoadBalancerCleanupTimeout, func() (bool, error) {
 if err := gceCloud.DeleteSslCertificate(preSharedCertName); err != nil && !errors.IsNotFound(err) {
-framework.Logf("Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err)
+e2elog.Logf("Failed to delete ssl certificate %q: %v. Retrying...", preSharedCertName, err)
 return false, nil
 }
 return true, nil
@@ -848,13 +849,13 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *ingress.TestJ
 err = wait.PollImmediate(framework.LoadBalancerPollInterval, framework.LoadBalancerPollTimeout, func() (bool, error) {
 resp, err := framework.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "")
 if err != nil {
-framework.Logf("SimpleGET failed: %v", err)
+e2elog.Logf("SimpleGET failed: %v", err)
 return false, nil
 }
 if !strings.Contains(resp, "request_scheme=https") {
 return false, fmt.Errorf("request wasn't served by HTTPS, response body: %s", resp)
 }
-framework.Logf("Poll succeeded, request was served by HTTPS")
+e2elog.Logf("Poll succeeded, request was served by HTTPS")
 return true, nil
 })
 Expect(err).NotTo(HaveOccurred(), "Failed to verify backside re-encryption ingress")
@@ -871,7 +872,7 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
 if negs == 0 {
 err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false))
 if err != nil {
-framework.Logf("Failed to validate IG backend service: %v", err)
+e2elog.Logf("Failed to validate IG backend service: %v", err)
 return false, nil
 }
 return true, nil
@@ -880,19 +881,19 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
 var status ingress.NegStatus
 v, ok := svc.Annotations[ingress.NEGStatusAnnotation]
 if !ok {
-framework.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations)
+e2elog.Logf("Waiting for %v, got: %+v", ingress.NEGStatusAnnotation, svc.Annotations)
 return false, nil
 }

 err = json.Unmarshal([]byte(v), &status)
 if err != nil {
-framework.Logf("Error in parsing Expose NEG annotation: %v", err)
+e2elog.Logf("Error in parsing Expose NEG annotation: %v", err)
 return false, nil
 }
-framework.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v)
+e2elog.Logf("Got %v: %v", ingress.NEGStatusAnnotation, v)

 if len(status.NetworkEndpointGroups) != negs {
-framework.Logf("Expected %d NEGs, got %d", negs, len(status.NetworkEndpointGroups))
+e2elog.Logf("Expected %d NEGs, got %d", negs, len(status.NetworkEndpointGroups))
 return false, nil
 }

@@ -902,14 +903,14 @@ func detectNegAnnotation(f *framework.Framework, jig *ingress.TestJig, gceContro
 networkEndpoints, err := gceCloud.ListNetworkEndpoints(neg, gceController.Cloud.Zone, false)
 Expect(err).NotTo(HaveOccurred())
 if len(networkEndpoints) != 1 {
-framework.Logf("Expect NEG %s to exist, but got %d", neg, len(networkEndpoints))
+e2elog.Logf("Expect NEG %s to exist, but got %d", neg, len(networkEndpoints))
 return false, nil
 }
 }

 err = gceController.BackendServiceUsingNEG(jig.GetServicePorts(false))
 if err != nil {
-framework.Logf("Failed to validate NEG backend service: %v", err)
+e2elog.Logf("Failed to validate NEG backend service: %v", err)
 return false, nil
 }
 return true, nil
@@ -28,6 +28,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/images/net/nat"
 imageutils "k8s.io/kubernetes/test/utils/image"

@@ -209,7 +210,7 @@ var _ = SIGDescribe("Network", func() {
 const epsilonSeconds = 60
 const expectedTimeoutSeconds = 60 * 60

-framework.Logf("conntrack entry timeout was: %v, expected: %v",
+e2elog.Logf("conntrack entry timeout was: %v, expected: %v",
 timeoutSeconds, expectedTimeoutSeconds)

 Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should(
@@ -22,6 +22,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/intstr"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"

 "fmt"
@@ -134,7 +135,7 @@ var _ = SIGDescribe("NetworkPolicy", func() {
 Expect(err).NotTo(HaveOccurred())

 // Create Server with Service in NS-B
-framework.Logf("Waiting for server to come up.")
+e2elog.Logf("Waiting for server to come up.")
 err = framework.WaitForPodRunningInNamespace(f.ClientSet, podServer)
 Expect(err).NotTo(HaveOccurred())

@@ -365,11 +366,11 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se
 }
 }()

-framework.Logf("Waiting for %s to complete.", podClient.Name)
+e2elog.Logf("Waiting for %s to complete.", podClient.Name)
 err := framework.WaitForPodNoLongerRunningInNamespace(f.ClientSet, podClient.Name, ns.Name)
 Expect(err).NotTo(HaveOccurred(), "Pod did not finish as expected.")

-framework.Logf("Waiting for %s to complete.", podClient.Name)
+e2elog.Logf("Waiting for %s to complete.", podClient.Name)
 err = framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)
 if err != nil {
 // Collect pod logs when we see a failure.
@@ -381,13 +382,13 @@ func testCanConnect(f *framework.Framework, ns *v1.Namespace, podName string, se
 // Collect current NetworkPolicies applied in the test namespace.
 policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(metav1.ListOptions{})
 if err != nil {
-framework.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
+e2elog.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
 }

 // Collect the list of pods running in the test namespace.
 podsInNS, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
 if err != nil {
-framework.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
+e2elog.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
 }

 pods := []string{}
@@ -412,7 +413,7 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string,
 }
 }()

-framework.Logf("Waiting for %s to complete.", podClient.Name)
+e2elog.Logf("Waiting for %s to complete.", podClient.Name)
 err := framework.WaitForPodSuccessInNamespace(f.ClientSet, podClient.Name, ns.Name)

 // We expect an error here since it's a cannot connect test.
@@ -427,13 +428,13 @@ func testCannotConnect(f *framework.Framework, ns *v1.Namespace, podName string,
 // Collect current NetworkPolicies applied in the test namespace.
 policies, err := f.ClientSet.NetworkingV1().NetworkPolicies(f.Namespace.Name).List(metav1.ListOptions{})
 if err != nil {
-framework.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
+e2elog.Logf("error getting current NetworkPolicies for %s namespace: %s", f.Namespace.Name, err)
 }

 // Collect the list of pods running in the test namespace.
 podsInNS, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, map[string]string{})
 if err != nil {
-framework.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
+e2elog.Logf("error getting pods for %s namespace: %s", f.Namespace.Name, err)
 }

 pods := []string{}
@@ -508,7 +509,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
 },
 })
 Expect(err).NotTo(HaveOccurred())
-framework.Logf("Created pod %v", pod.ObjectMeta.Name)
+e2elog.Logf("Created pod %v", pod.ObjectMeta.Name)

 svcName := fmt.Sprintf("svc-%s", podName)
 By(fmt.Sprintf("Creating a service %s for pod %s in namespace %s", svcName, podName, namespace.Name))
@@ -524,7 +525,7 @@ func createServerPodAndService(f *framework.Framework, namespace *v1.Namespace,
 },
 })
 Expect(err).NotTo(HaveOccurred())
-framework.Logf("Created service %s", svc.Name)
+e2elog.Logf("Created service %s", svc.Name)

 return pod, svc
 }
@@ -30,6 +30,7 @@ import (
 cloudprovider "k8s.io/cloud-provider"
 gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/providers/gce"

 . "github.com/onsi/ginkgo"
@@ -53,7 +54,7 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
 framework.DescribeSvc(f.Namespace.Name)
 }
 for _, lb := range serviceLBNames {
-framework.Logf("cleaning gce resource for %s", lb)
+e2elog.Logf("cleaning gce resource for %s", lb)
 framework.TestContext.CloudConfig.Provider.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
 }
 //reset serviceLBNames
@@ -111,12 +112,12 @@ var _ = SIGDescribe("Services [Feature:GCEAlphaFeature][Slow]", func() {
 if requestedAddrName != "" {
 // Release GCE static address - this is not kube-managed and will not be automatically released.
 if err := gceCloud.DeleteRegionAddress(requestedAddrName, gceCloud.Region()); err != nil {
-framework.Logf("failed to release static IP address %q: %v", requestedAddrName, err)
+e2elog.Logf("failed to release static IP address %q: %v", requestedAddrName, err)
 }
 }
 }()
 Expect(err).NotTo(HaveOccurred())
-framework.Logf("Allocated static IP to be used by the load balancer: %q", requestedIP)
+e2elog.Logf("Allocated static IP to be used by the load balancer: %q", requestedIP)

 By("updating the Service to use the standard tier with a requested IP")
 svc = jig.UpdateServiceOrFail(ns, svc.Name, func(svc *v1.Service) {
@@ -24,8 +24,9 @@ import (

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )

@@ -110,8 +111,8 @@ func networkingIPerfTest(isIPv6 bool) {
 numClient,
 )

-framework.Logf("Reading all perf results to stdout.")
-framework.Logf("date,cli,cliPort,server,serverPort,id,interval,transferBits,bandwidthBits")
+e2elog.Logf("Reading all perf results to stdout.")
+e2elog.Logf("date,cli,cliPort,server,serverPort,id,interval,transferBits,bandwidthBits")

 // Calculate expected number of clients based on total nodes.
 expectedCli := func() int {
@@ -142,7 +143,7 @@ func networkingIPerfTest(isIPv6 bool) {
 func(p v1.Pod) {
 resultS, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "iperf-client", "0-", 1*time.Second)
 if err == nil {
-framework.Logf(resultS)
+e2elog.Logf(resultS)
 iperfResults.Add(NewIPerf(resultS))
 } else {
 framework.Failf("Unexpected error, %v when running forEach on the pods.", err)
@@ -154,7 +155,7 @@ func networkingIPerfTest(isIPv6 bool) {
 fmt.Println("[end] Node,Bandwidth CSV")

 for ipClient, bandwidth := range iperfResults.BandwidthMap {
-framework.Logf("%v had bandwidth %v. Ratio to expected (%v) was %f", ipClient, bandwidth, expectedBandwidth, float64(bandwidth)/float64(expectedBandwidth))
+e2elog.Logf("%v had bandwidth %v. Ratio to expected (%v) was %f", ipClient, bandwidth, expectedBandwidth, float64(bandwidth)/float64(expectedBandwidth))
 }
 })
 }
@@ -34,6 +34,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 "k8s.io/kubernetes/test/e2e/framework/endpoints"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"

@@ -208,7 +209,7 @@ var _ = SIGDescribe("Proxy", func() {
 errs = append(errs, s)
 }
 d := time.Since(start)
-framework.Logf("setup took %v, starting test cases", d)
+e2elog.Logf("setup took %v, starting test cases", d)
 numberTestCases := len(expectations)
 totalAttempts := numberTestCases * proxyAttempts
 By(fmt.Sprintf("running %v cases, %v attempts per case, %v total attempts", numberTestCases, proxyAttempts, totalAttempts))
@@ -247,9 +248,9 @@ var _ = SIGDescribe("Proxy", func() {
 if len(errs) != 0 {
 body, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).GetLogs(pods[0].Name, &v1.PodLogOptions{}).Do().Raw()
 if err != nil {
-framework.Logf("Error getting logs for pod %s: %v", pods[0].Name, err)
+e2elog.Logf("Error getting logs for pod %s: %v", pods[0].Name, err)
 } else {
-framework.Logf("Pod %s has the following error logs: %s", pods[0].Name, body)
+e2elog.Logf("Pod %s has the following error logs: %s", pods[0].Name, body)
 }

 framework.Failf(strings.Join(errs, "\n"))
@@ -269,9 +270,9 @@ func doProxy(f *framework.Framework, path string, i int) (body []byte, statusCod
 body, err = f.ClientSet.CoreV1().RESTClient().Get().AbsPath(path).Do().StatusCode(&statusCode).Raw()
 d = time.Since(start)
 if len(body) > 0 {
-framework.Logf("(%v) %v: %s (%v; %v)", i, path, truncate(body, maxDisplayBodyLen), statusCode, d)
+e2elog.Logf("(%v) %v: %s (%v; %v)", i, path, truncate(body, maxDisplayBodyLen), statusCode, d)
 } else {
-framework.Logf("%v: %s (%v; %v)", path, "no body", statusCode, d)
+e2elog.Logf("%v: %s (%v; %v)", path, "no body", statusCode, d)
 }
 return
 }
@@ -303,7 +304,7 @@ func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) {
 for i := 0; i < proxyAttempts; i++ {
 _, status, d, err := doProxy(f, prefix+node+nodeDest, i)
 if status == http.StatusServiceUnavailable {
-framework.Logf("Failed proxying node logs due to service unavailable: %v", err)
+e2elog.Logf("Failed proxying node logs due to service unavailable: %v", err)
 time.Sleep(time.Second)
 serviceUnavailableErrors++
 } else {
@@ -313,7 +314,7 @@ func nodeProxyTest(f *framework.Framework, prefix, nodeDest string) {
 }
 }
 if serviceUnavailableErrors > 0 {
-framework.Logf("error: %d requests to proxy node logs failed", serviceUnavailableErrors)
+e2elog.Logf("error: %d requests to proxy node logs failed", serviceUnavailableErrors)
 }
 maxFailures := int(math.Floor(0.1 * float64(proxyAttempts)))
 Expect(serviceUnavailableErrors).To(BeNumerically("<", maxFailures))
|
@ -38,6 +38,7 @@ import (
|
||||
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
|
||||
"k8s.io/kubernetes/pkg/controller/endpoint"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
@ -90,7 +91,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
framework.DescribeSvc(f.Namespace.Name)
|
||||
}
|
||||
for _, lb := range serviceLBNames {
|
||||
framework.Logf("cleaning load balancer resource for %s", lb)
|
||||
e2elog.Logf("cleaning load balancer resource for %s", lb)
|
||||
framework.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
|
||||
}
|
||||
//reset serviceLBNames
|
||||
@ -256,7 +257,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
framework.Skipf("The test doesn't work with kube-proxy in userspace mode")
|
||||
}
|
||||
} else {
|
||||
framework.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err)
|
||||
e2elog.Logf("Couldn't detect KubeProxy mode - test failure may be expected: %v", err)
|
||||
}
|
||||
|
||||
serviceName := "sourceip-test"
|
||||
@ -268,12 +269,12 @@ var _ = SIGDescribe("Services", func() {
|
||||
tcpService := jig.CreateTCPServiceWithPort(ns, nil, int32(servicePort))
|
||||
jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP)
|
||||
defer func() {
|
||||
framework.Logf("Cleaning up the sourceip test service")
|
||||
e2elog.Logf("Cleaning up the sourceip test service")
|
||||
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns)
|
||||
}()
|
||||
serviceIp := tcpService.Spec.ClusterIP
|
||||
framework.Logf("sourceip-test cluster ip: %s", serviceIp)
|
||||
e2elog.Logf("sourceip-test cluster ip: %s", serviceIp)
|
||||
|
||||
By("Picking multiple nodes")
|
||||
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
|
||||
@ -289,7 +290,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
serverPodName := "echoserver-sourceip"
|
||||
jig.LaunchEchoserverPodOnNode(f, node1.Name, serverPodName)
|
||||
defer func() {
|
||||
framework.Logf("Cleaning up the echo server pod")
|
||||
e2elog.Logf("Cleaning up the echo server pod")
|
||||
err := cs.CoreV1().Pods(ns).Delete(serverPodName, nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s on node: %s", serverPodName, node1.Name)
|
||||
}()
|
||||
@@ -521,13 +522,13 @@ var _ = SIGDescribe("Services", func() {

 serviceName := "mutability-test"
 ns1 := f.Namespace.Name // LB1 in ns1 on TCP
-framework.Logf("namespace for TCP test: %s", ns1)
+e2elog.Logf("namespace for TCP test: %s", ns1)

 By("creating a second namespace")
 namespacePtr, err := f.CreateNamespace("services", nil)
 Expect(err).NotTo(HaveOccurred(), "failed to create namespace")
 ns2 := namespacePtr.Name // LB2 in ns2 on UDP
-framework.Logf("namespace for UDP test: %s", ns2)
+e2elog.Logf("namespace for UDP test: %s", ns2)

 jig := framework.NewServiceTestJig(cs, serviceName)
 nodeIP := framework.PickNodeIP(jig.Client) // for later
@@ -548,7 +549,7 @@ var _ = SIGDescribe("Services", func() {
 framework.Failf("expected to use the same port for TCP and UDP")
 }
 svcPort := int(tcpService.Spec.Ports[0].Port)
-framework.Logf("service port (TCP and UDP): %d", svcPort)
+e2elog.Logf("service port (TCP and UDP): %d", svcPort)

 By("creating a pod to be part of the TCP service " + serviceName)
 jig.RunOrFail(ns1, nil)
@@ -564,7 +565,7 @@ var _ = SIGDescribe("Services", func() {
 })
 jig.SanityCheckService(tcpService, v1.ServiceTypeNodePort)
 tcpNodePort := int(tcpService.Spec.Ports[0].NodePort)
-framework.Logf("TCP node port: %d", tcpNodePort)
+e2elog.Logf("TCP node port: %d", tcpNodePort)

 By("changing the UDP service to type=NodePort")
 udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) {
@@ -572,7 +573,7 @@ var _ = SIGDescribe("Services", func() {
 })
 jig.SanityCheckService(udpService, v1.ServiceTypeNodePort)
 udpNodePort := int(udpService.Spec.Ports[0].NodePort)
-framework.Logf("UDP node port: %d", udpNodePort)
+e2elog.Logf("UDP node port: %d", udpNodePort)

 By("hitting the TCP service's NodePort")
 jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout)
@@ -597,7 +598,7 @@ var _ = SIGDescribe("Services", func() {
 if staticIPName != "" {
 // Release GCE static IP - this is not kube-managed and will not be automatically released.
 if err := gceCloud.DeleteRegionAddress(staticIPName, gceCloud.Region()); err != nil {
-framework.Logf("failed to release static IP %s: %v", staticIPName, err)
+e2elog.Logf("failed to release static IP %s: %v", staticIPName, err)
 }
 }
 }()
@@ -606,7 +607,7 @@ var _ = SIGDescribe("Services", func() {
 Expect(err).NotTo(HaveOccurred(), "failed to get region address: %s", staticIPName)

 requestedIP = reservedAddr.Address
-framework.Logf("Allocated static load balancer IP: %s", requestedIP)
+e2elog.Logf("Allocated static load balancer IP: %s", requestedIP)
 }

 By("changing the TCP service to type=LoadBalancer")
@ -637,7 +638,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
framework.Failf("unexpected TCP Status.LoadBalancer.Ingress (expected %s, got %s)", requestedIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
|
||||
}
|
||||
tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
|
||||
framework.Logf("TCP load balancer: %s", tcpIngressIP)
|
||||
e2elog.Logf("TCP load balancer: %s", tcpIngressIP)
|
||||
|
||||
if framework.ProviderIs("gce", "gke") {
|
||||
// Do this as early as possible, which overrides the `defer` above.
|
||||
@ -667,7 +668,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
|
||||
}
|
||||
udpIngressIP = framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
|
||||
framework.Logf("UDP load balancer: %s", udpIngressIP)
|
||||
e2elog.Logf("UDP load balancer: %s", udpIngressIP)
|
||||
|
||||
By("verifying that TCP and UDP use different load balancers")
|
||||
if tcpIngressIP == udpIngressIP {
|
||||
@ -702,7 +703,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
if framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
|
||||
framework.Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
|
||||
}
|
||||
framework.Logf("TCP node port: %d", tcpNodePort)
|
||||
e2elog.Logf("TCP node port: %d", tcpNodePort)
|
||||
|
||||
By("changing the UDP service's NodePort")
|
||||
udpService = jig.ChangeServiceNodePortOrFail(ns2, udpService.Name, udpNodePort)
|
||||
@ -719,7 +720,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
if loadBalancerSupportsUDP && framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]) != udpIngressIP {
|
||||
framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
|
||||
}
|
||||
framework.Logf("UDP node port: %d", udpNodePort)
|
||||
e2elog.Logf("UDP node port: %d", udpNodePort)
|
||||
|
||||
By("hitting the TCP service's new NodePort")
|
||||
jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout)
|
||||
@ -779,7 +780,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
framework.Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, framework.GetIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
|
||||
}
|
||||
|
||||
framework.Logf("service port (TCP and UDP): %d", svcPort)
|
||||
e2elog.Logf("service port (TCP and UDP): %d", svcPort)
|
||||
|
||||
By("hitting the TCP service's NodePort")
|
||||
jig.TestReachableHTTP(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout)
|
||||
@ -876,13 +877,13 @@ var _ = SIGDescribe("Services", func() {
|
||||
By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns)
|
||||
tcpService := jig.CreateTCPServiceOrFail(ns, nil)
|
||||
defer func() {
|
||||
framework.Logf("Cleaning up the updating NodePorts test service")
|
||||
e2elog.Logf("Cleaning up the updating NodePorts test service")
|
||||
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete service: %s in namespace: %s", serviceName, ns)
|
||||
}()
|
||||
jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP)
|
||||
svcPort := int(tcpService.Spec.Ports[0].Port)
|
||||
framework.Logf("service port TCP: %d", svcPort)
|
||||
e2elog.Logf("service port TCP: %d", svcPort)
|
||||
|
||||
// Change the services to NodePort and add a UDP port.
|
||||
|
||||
@ -911,7 +912,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
framework.Failf("new service failed to allocate NodePort for Port %s", port.Name)
|
||||
}
|
||||
|
||||
framework.Logf("new service allocates NodePort %d for Port %s", port.NodePort, port.Name)
|
||||
e2elog.Logf("new service allocates NodePort %d for Port %s", port.NodePort, port.Name)
|
||||
}
|
||||
})
|
||||
|
||||
@ -923,7 +924,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns)
|
||||
externalNameService := jig.CreateExternalNameServiceOrFail(ns, nil)
|
||||
defer func() {
|
||||
framework.Logf("Cleaning up the ExternalName to ClusterIP test service")
|
||||
e2elog.Logf("Cleaning up the ExternalName to ClusterIP test service")
|
||||
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns)
|
||||
}()
|
||||
@ -947,7 +948,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
By("creating a service " + serviceName + " with the type=ExternalName in namespace " + ns)
|
||||
externalNameService := jig.CreateExternalNameServiceOrFail(ns, nil)
|
||||
defer func() {
|
||||
framework.Logf("Cleaning up the ExternalName to NodePort test service")
|
||||
e2elog.Logf("Cleaning up the ExternalName to NodePort test service")
|
||||
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns)
|
||||
}()
|
||||
@ -971,7 +972,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
By("creating a service " + serviceName + " with the type=ClusterIP in namespace " + ns)
|
||||
clusterIPService := jig.CreateTCPServiceOrFail(ns, nil)
|
||||
defer func() {
|
||||
framework.Logf("Cleaning up the ClusterIP to ExternalName test service")
|
||||
e2elog.Logf("Cleaning up the ClusterIP to ExternalName test service")
|
||||
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns)
|
||||
}()
|
||||
@ -995,7 +996,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
svc.Spec.Type = v1.ServiceTypeNodePort
|
||||
})
|
||||
defer func() {
|
||||
framework.Logf("Cleaning up the NodePort to ExternalName test service")
|
||||
e2elog.Logf("Cleaning up the NodePort to ExternalName test service")
|
||||
err := cs.CoreV1().Services(ns).Delete(serviceName, nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete service %s in namespace %s", serviceName, ns)
|
||||
}()
|
||||
@ -1226,7 +1227,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
var err error
|
||||
stdout, err = framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
|
||||
if err != nil {
|
||||
framework.Logf("expected node port (%d) to not be in use, stdout: %v", nodePort, stdout)
|
||||
e2elog.Logf("expected node port (%d) to not be in use, stdout: %v", nodePort, stdout)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
@ -1318,7 +1319,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
var err error
|
||||
stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd)
|
||||
if err != nil {
|
||||
framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
|
||||
e2elog.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
@ -1341,7 +1342,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
var err error
|
||||
stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd)
|
||||
if err != nil {
|
||||
framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
|
||||
e2elog.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
@ -1361,7 +1362,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
var err error
|
||||
stdout, err = framework.RunHostCmd(f.Namespace.Name, execPodName, cmd)
|
||||
if err != nil {
|
||||
framework.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
|
||||
e2elog.Logf("expected un-ready endpoint for Service %v, stdout: %v, err %v", t.Name, stdout, err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
@ -1375,13 +1376,13 @@ var _ = SIGDescribe("Services", func() {
|
||||
podClient := t.Client.CoreV1().Pods(f.Namespace.Name)
|
||||
pods, err := podClient.List(options)
|
||||
if err != nil {
|
||||
framework.Logf("warning: error retrieving pods: %s", err)
|
||||
e2elog.Logf("warning: error retrieving pods: %s", err)
|
||||
} else {
|
||||
for _, pod := range pods.Items {
|
||||
var gracePeriodSeconds int64 = 0
|
||||
err := podClient.Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gracePeriodSeconds})
|
||||
if err != nil {
|
||||
framework.Logf("warning: error force deleting pod '%s': %s", pod.Name, err)
|
||||
e2elog.Logf("warning: error force deleting pod '%s': %s", pod.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1504,25 +1505,25 @@ var _ = SIGDescribe("Services", func() {
|
||||
// ILBs are not accessible from the test orchestrator, so it's necessary to use
|
||||
// a pod to test the service.
|
||||
By("hitting the internal load balancer from pod")
|
||||
framework.Logf("creating pod with host network")
|
||||
e2elog.Logf("creating pod with host network")
|
||||
hostExec := framework.LaunchHostExecPod(f.ClientSet, f.Namespace.Name, "ilb-host-exec")
|
||||
|
||||
framework.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName)
|
||||
e2elog.Logf("Waiting up to %v for service %q's internal LB to respond to requests", createTimeout, serviceName)
|
||||
tcpIngressIP := framework.GetIngressPoint(lbIngress)
|
||||
if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
|
||||
cmd := fmt.Sprintf(`curl -m 5 'http://%v:%v/echo?msg=hello'`, tcpIngressIP, svcPort)
|
||||
stdout, err := framework.RunHostCmd(hostExec.Namespace, hostExec.Name, cmd)
|
||||
if err != nil {
|
||||
framework.Logf("error curling; stdout: %v. err: %v", stdout, err)
|
||||
e2elog.Logf("error curling; stdout: %v. err: %v", stdout, err)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if !strings.Contains(stdout, "hello") {
|
||||
framework.Logf("Expected output to contain 'hello', got %q; retrying...", stdout)
|
||||
e2elog.Logf("Expected output to contain 'hello', got %q; retrying...", stdout)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
framework.Logf("Successful curl; stdout: %v", stdout)
|
||||
e2elog.Logf("Successful curl; stdout: %v", stdout)
|
||||
return true, nil
|
||||
}); pollErr != nil {
|
||||
framework.Failf("Failed to hit ILB IP, err: %v", pollErr)
|
||||
@ -1532,7 +1533,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
svc = jig.UpdateServiceOrFail(namespace, serviceName, func(svc *v1.Service) {
|
||||
disableILB(svc)
|
||||
})
|
||||
framework.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName)
|
||||
e2elog.Logf("Waiting up to %v for service %q to have an external LoadBalancer", createTimeout, serviceName)
|
||||
if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
|
||||
svc, err := jig.Client.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@ -1548,7 +1549,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
Expect(isInternalEndpoint(lbIngress)).To(BeFalse())
|
||||
|
||||
By("hitting the external load balancer")
|
||||
framework.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName)
|
||||
e2elog.Logf("Waiting up to %v for service %q's external LB to respond to requests", createTimeout, serviceName)
|
||||
tcpIngressIP = framework.GetIngressPoint(lbIngress)
|
||||
jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault)
|
||||
|
||||
@ -1561,7 +1562,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
svc.Spec.LoadBalancerIP = internalStaticIP
|
||||
enableILB(svc)
|
||||
})
|
||||
framework.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName)
|
||||
e2elog.Logf("Waiting up to %v for service %q to have an internal LoadBalancer", createTimeout, serviceName)
|
||||
if pollErr := wait.PollImmediate(pollInterval, createTimeout, func() (bool, error) {
|
||||
svc, err := jig.Client.CoreV1().Services(namespace).Get(serviceName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@ -1644,10 +1645,10 @@ var _ = SIGDescribe("Services", func() {
|
||||
if pollErr := wait.PollImmediate(pollInterval, framework.LoadBalancerCreateTimeoutDefault, func() (bool, error) {
|
||||
hc, err := gceCloud.GetHTTPHealthCheck(hcName)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get HttpHealthCheck(%q): %v", hcName, err)
|
||||
e2elog.Logf("Failed to get HttpHealthCheck(%q): %v", hcName, err)
|
||||
return false, err
|
||||
}
|
||||
framework.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec)
|
||||
e2elog.Logf("hc.CheckIntervalSec = %v", hc.CheckIntervalSec)
|
||||
return hc.CheckIntervalSec == gceHcCheckIntervalSeconds, nil
|
||||
}); pollErr != nil {
|
||||
framework.Failf("Health check %q does not reconcile its check interval to %d.", hcName, gceHcCheckIntervalSeconds)
|
||||
@ -1814,7 +1815,7 @@ var _ = SIGDescribe("Services", func() {
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
serviceAddress := net.JoinHostPort(serviceName, strconv.Itoa(port))
|
||||
framework.Logf("waiting up to %v wget %v", framework.KubeProxyEndpointLagTimeout, serviceAddress)
|
||||
e2elog.Logf("waiting up to %v wget %v", framework.KubeProxyEndpointLagTimeout, serviceAddress)
|
||||
cmd := fmt.Sprintf(`wget -T 3 -qO- %v`, serviceAddress)
|
||||
|
||||
By(fmt.Sprintf("hitting service %v from pod %v on node %v", serviceAddress, podName, nodeName))
|
||||
@ -1824,10 +1825,10 @@ var _ = SIGDescribe("Services", func() {
|
||||
|
||||
if err != nil {
|
||||
if strings.Contains(strings.ToLower(err.Error()), expectedErr) {
|
||||
framework.Logf("error contained '%s', as expected: %s", expectedErr, err.Error())
|
||||
e2elog.Logf("error contained '%s', as expected: %s", expectedErr, err.Error())
|
||||
return true, nil
|
||||
} else {
|
||||
framework.Logf("error didn't contain '%s', keep trying: %s", expectedErr, err.Error())
|
||||
e2elog.Logf("error didn't contain '%s', keep trying: %s", expectedErr, err.Error())
|
||||
return false, nil
|
||||
}
|
||||
} else {
|
||||
@ -1863,7 +1864,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
|
||||
framework.DescribeSvc(f.Namespace.Name)
|
||||
}
|
||||
for _, lb := range serviceLBNames {
|
||||
framework.Logf("cleaning load balancer resource for %s", lb)
|
||||
e2elog.Logf("cleaning load balancer resource for %s", lb)
|
||||
framework.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
|
||||
}
|
||||
//reset serviceLBNames
|
||||
@ -1898,7 +1899,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
|
||||
By("reading clientIP using the TCP service's service port via its external VIP")
|
||||
content := jig.GetHTTPContent(ingressIP, svcTCPPort, framework.KubeProxyLagTimeout, "/clientip")
|
||||
clientIP := content.String()
|
||||
framework.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP)
|
||||
e2elog.Logf("ClientIP detected by target pod using VIP:SvcPort is %s", clientIP)
|
||||
|
||||
By("checking if Source IP is preserved")
|
||||
if strings.HasPrefix(clientIP, "10.") {
|
||||
@ -1925,7 +1926,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
|
||||
By(fmt.Sprintf("reading clientIP using the TCP service's NodePort, on node %v: %v%v%v", nodeName, nodeIP, tcpNodePort, path))
|
||||
content := jig.GetHTTPContent(nodeIP, tcpNodePort, framework.KubeProxyLagTimeout, path)
|
||||
clientIP := content.String()
|
||||
framework.Logf("ClientIP detected by target pod using NodePort is %s", clientIP)
|
||||
e2elog.Logf("ClientIP detected by target pod using NodePort is %s", clientIP)
|
||||
if strings.HasPrefix(clientIP, "10.") {
|
||||
framework.Failf("Source IP was NOT preserved")
|
||||
}
|
||||
@ -1989,7 +1990,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
|
||||
expectedSuccess := nodes.Items[n].Name == endpointNodeName
|
||||
port := strconv.Itoa(healthCheckNodePort)
|
||||
ipPort := net.JoinHostPort(publicIP, port)
|
||||
framework.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess)
|
||||
e2elog.Logf("Health checking %s, http://%s%s, expectedSuccess %v", nodes.Items[n].Name, ipPort, path, expectedSuccess)
|
||||
Expect(jig.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, framework.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)).NotTo(HaveOccurred())
|
||||
}
|
||||
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName))
|
||||
@ -2027,7 +2028,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
|
||||
execPod, err := f.ClientSet.CoreV1().Pods(namespace).Get(execPodName, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
framework.Logf("Waiting up to %v wget %v", framework.KubeProxyLagTimeout, path)
|
||||
e2elog.Logf("Waiting up to %v wget %v", framework.KubeProxyLagTimeout, path)
|
||||
cmd := fmt.Sprintf(`wget -T 30 -qO- %v`, path)
|
||||
|
||||
var srcIP string
|
||||
@ -2035,7 +2036,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
|
||||
if pollErr := wait.PollImmediate(framework.Poll, framework.LoadBalancerCreateTimeoutDefault, func() (bool, error) {
|
||||
stdout, err := framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
|
||||
if err != nil {
|
||||
framework.Logf("got err: %v, retry until timeout", err)
|
||||
e2elog.Logf("got err: %v, retry until timeout", err)
|
||||
return false, nil
|
||||
}
|
||||
srcIP = strings.TrimSpace(strings.Split(stdout, ":")[0])
|
||||
@ -2154,12 +2155,12 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
|
||||
})
|
||||
|
||||
func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeName, serviceIP string, servicePort int) (string, string) {
|
||||
framework.Logf("Creating an exec pod on node %v", nodeName)
|
||||
e2elog.Logf("Creating an exec pod on node %v", nodeName)
|
||||
execPodName := framework.CreateExecPodOrFail(f.ClientSet, ns, fmt.Sprintf("execpod-sourceip-%s", nodeName), func(pod *v1.Pod) {
|
||||
pod.Spec.NodeName = nodeName
|
||||
})
|
||||
defer func() {
|
||||
framework.Logf("Cleaning up the exec pod")
|
||||
e2elog.Logf("Cleaning up the exec pod")
|
||||
err := c.CoreV1().Pods(ns).Delete(execPodName, nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s", execPodName)
|
||||
}()
|
||||
@ -2169,17 +2170,17 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam
|
||||
var stdout string
|
||||
serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
|
||||
timeout := 2 * time.Minute
|
||||
framework.Logf("Waiting up to %v wget %s", timeout, serviceIPPort)
|
||||
e2elog.Logf("Waiting up to %v wget %s", timeout, serviceIPPort)
|
||||
cmd := fmt.Sprintf(`wget -T 30 -qO- %s | grep client_address`, serviceIPPort)
|
||||
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
|
||||
stdout, err = framework.RunHostCmd(execPod.Namespace, execPod.Name, cmd)
|
||||
if err != nil {
|
||||
framework.Logf("got err: %v, retry until timeout", err)
|
||||
e2elog.Logf("got err: %v, retry until timeout", err)
|
||||
continue
|
||||
}
|
||||
// Need to check output because wget -q might omit the error.
|
||||
if strings.TrimSpace(stdout) == "" {
|
||||
framework.Logf("got empty stdout, retry until timeout")
|
||||
e2elog.Logf("got empty stdout, retry until timeout")
|
||||
continue
|
||||
}
|
||||
break
|
||||
@ -2237,7 +2238,7 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
|
||||
|
||||
execPodName := framework.CreateExecPodOrFail(cs, ns, "execpod-affinity", nil)
|
||||
defer func() {
|
||||
framework.Logf("Cleaning up the exec pod")
|
||||
e2elog.Logf("Cleaning up the exec pod")
|
||||
err := cs.CoreV1().Pods(ns).Delete(execPodName, nil)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete pod: %s in namespace: %s", execPodName, ns)
|
||||
}()
|
||||
@ -2283,10 +2284,10 @@ func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework,
|
||||
jig.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
|
||||
defer func() {
|
||||
podNodePairs, err := framework.PodNodePairs(cs, ns)
|
||||
framework.Logf("[pod,node] pairs: %+v; err: %v", podNodePairs, err)
|
||||
e2elog.Logf("[pod,node] pairs: %+v; err: %v", podNodePairs, err)
|
||||
framework.StopServeHostnameService(cs, ns, serviceName)
|
||||
lb := cloudprovider.DefaultLoadBalancerName(svc)
|
||||
framework.Logf("cleaning load balancer resource for %s", lb)
|
||||
e2elog.Logf("cleaning load balancer resource for %s", lb)
|
||||
framework.CleanupServiceResources(cs, lb, framework.TestContext.CloudConfig.Region, framework.TestContext.CloudConfig.Zone)
|
||||
}()
|
||||
ingressIP := framework.GetIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
|
||||
|
@ -22,7 +22,7 @@ import (
"strings"
"time"

"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
@ -31,6 +31,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"

@ -103,14 +104,14 @@ var _ = SIGDescribe("Service endpoints latency", func() {
}
return dSorted[est]
}
framework.Logf("Latencies: %v", dSorted)
e2elog.Logf("Latencies: %v", dSorted)
p50 := percentile(50)
p90 := percentile(90)
p99 := percentile(99)
framework.Logf("50 %%ile: %v", p50)
framework.Logf("90 %%ile: %v", p90)
framework.Logf("99 %%ile: %v", p99)
framework.Logf("Total sample count: %v", len(dSorted))
e2elog.Logf("50 %%ile: %v", p50)
e2elog.Logf("90 %%ile: %v", p90)
e2elog.Logf("99 %%ile: %v", p99)
e2elog.Logf("Total sample count: %v", len(dSorted))

if p50 > limitMedian {
failing.Insert("Median latency should be less than " + limitMedian.String())
@ -175,14 +176,14 @@ func runServiceLatencies(f *framework.Framework, inParallel, total int, acceptab
for i := 0; i < total; i++ {
select {
case e := <-errs:
framework.Logf("Got error: %v", e)
e2elog.Logf("Got error: %v", e)
errCount += 1
case d := <-durations:
output = append(output, d)
}
}
if errCount != 0 {
framework.Logf("Got %d errors out of %d tries", errCount, total)
e2elog.Logf("Got %d errors out of %d tries", errCount, total)
errRatio := float32(errCount) / float32(total)
if errRatio > acceptableFailureRatio {
return output, fmt.Errorf("error ratio %g is higher than the acceptable ratio %g", errRatio, acceptableFailureRatio)
@ -345,13 +346,13 @@ func singleServiceLatency(f *framework.Framework, name string, q *endpointQuerie
if err != nil {
return 0, err
}
framework.Logf("Created: %v", gotSvc.Name)
e2elog.Logf("Created: %v", gotSvc.Name)

if e := q.request(gotSvc.Name); e == nil {
return 0, fmt.Errorf("Never got a result for endpoint %v", gotSvc.Name)
}
stopTime := time.Now()
d := stopTime.Sub(startTime)
framework.Logf("Got endpoints: %v [%v]", gotSvc.Name, d)
e2elog.Logf("Got endpoints: %v [%v]", gotSvc.Name, d)
return d, nil
}

@ -25,6 +25,7 @@ import (
"strings"

"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

type IPerfResults struct {
@ -56,7 +57,7 @@ func (i *IPerfResults) Add(ipr *IPerfResult) {
// ToTSV exports an easily readable tab delimited format of all IPerfResults.
func (i *IPerfResults) ToTSV() string {
if len(i.BandwidthMap) < 1 {
framework.Logf("Warning: no data in bandwidth map")
e2elog.Logf("Warning: no data in bandwidth map")
}

var buffer bytes.Buffer