use log funcs of core framework in the auth, kubectl and windows pkg

carlory 2019-08-27 11:42:47 +08:00
parent ba07527278
commit 5bbedd39c0
17 changed files with 247 additions and 264 deletions
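
The rewrite is mechanical throughout: every call to the standalone e2e log helpers (e2elog.Logf / e2elog.Failf, imported from k8s.io/kubernetes/test/e2e/framework/log) is redirected to the identically named helpers exposed by the core framework package, after which the e2elog import and its BUILD dependency can be dropped. A minimal sketch of the before/after pattern, assuming a test file inside the Kubernetes tree where these packages resolve; the helper function and its arguments are hypothetical:

package example

import (
	// was: e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	"k8s.io/kubernetes/test/e2e/framework" // the core framework package also exposes Logf and Failf
)

// waitNote is a hypothetical helper showing the one-for-one substitution.
func waitNote(podName string, ready bool) {
	if !ready {
		// was: e2elog.Failf("pod %q never became ready", podName)
		framework.Failf("pod %q never became ready", podName)
		return
	}
	// was: e2elog.Logf("pod %q is ready", podName)
	framework.Logf("pod %q is ready", podName)
}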

View File

@@ -56,7 +56,6 @@ go_library(
         "//test/e2e/framework/auth:go_default_library",
         "//test/e2e/framework/deployment:go_default_library",
         "//test/e2e/framework/job:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/psp:go_default_library",

View File

@@ -37,7 +37,6 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/auth"
 	e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -740,9 +739,9 @@ func expectEvents(f *framework.Framework, expectedEvents []utils.AuditEvent) {
 		defer stream.Close()
 		missingReport, err := utils.CheckAuditLines(stream, expectedEvents, auditv1.SchemeGroupVersion)
 		if err != nil {
-			e2elog.Logf("Failed to observe audit events: %v", err)
+			framework.Logf("Failed to observe audit events: %v", err)
 		} else if len(missingReport.MissingEvents) > 0 {
-			e2elog.Logf(missingReport.String())
+			framework.Logf(missingReport.String())
 		}
 		return len(missingReport.MissingEvents) == 0, nil
 	})

View File

@@ -36,7 +36,6 @@ import (
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/auth"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -116,14 +115,14 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
 		err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
 			p, err := f.ClientSet.CoreV1().Pods(namespace).Get("audit-proxy", metav1.GetOptions{})
 			if errors.IsNotFound(err) {
-				e2elog.Logf("waiting for audit-proxy pod to be present")
+				framework.Logf("waiting for audit-proxy pod to be present")
 				return false, nil
 			} else if err != nil {
 				return false, err
 			}
 			podIP = p.Status.PodIP
 			if podIP == "" {
-				e2elog.Logf("waiting for audit-proxy pod IP to be ready")
+				framework.Logf("waiting for audit-proxy pod IP to be ready")
 				return false, nil
 			}
 			return true, nil
@@ -156,17 +155,17 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
 		_, err = f.ClientSet.AuditregistrationV1alpha1().AuditSinks().Create(&sink)
 		framework.ExpectNoError(err, "failed to create audit sink")
-		e2elog.Logf("created audit sink")
+		framework.Logf("created audit sink")
 		// check that we are receiving logs in the proxy
 		err = wait.Poll(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
 			logs, err := e2epod.GetPodLogs(f.ClientSet, namespace, "audit-proxy", "proxy")
 			if err != nil {
-				e2elog.Logf("waiting for audit-proxy pod logs to be available")
+				framework.Logf("waiting for audit-proxy pod logs to be available")
 				return false, nil
 			}
 			if logs == "" {
-				e2elog.Logf("waiting for audit-proxy pod logs to be non-empty")
+				framework.Logf("waiting for audit-proxy pod logs to be non-empty")
 				return false, nil
 			}
 			return true, nil
@@ -372,9 +371,9 @@ var _ = SIGDescribe("[Feature:DynamicAudit]", func() {
 			reader := strings.NewReader(logs)
 			missingReport, err := utils.CheckAuditLines(reader, expectedEvents, auditv1.SchemeGroupVersion)
 			if err != nil {
-				e2elog.Logf("Failed to observe audit events: %v", err)
+				framework.Logf("Failed to observe audit events: %v", err)
 			} else if len(missingReport.MissingEvents) > 0 {
-				e2elog.Logf(missingReport.String())
+				framework.Logf(missingReport.String())
 			}
 			return len(missingReport.MissingEvents) == 0, nil
 		})

View File

@@ -20,15 +20,15 @@ import (
 	"crypto/x509"
 	"crypto/x509/pkix"
 	"encoding/pem"
+	"time"
 	certificatesv1beta1 "k8s.io/api/certificates/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	v1beta1client "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
 	"k8s.io/client-go/util/cert"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	"k8s.io/kubernetes/test/utils"
-	"time"
 	"github.com/onsi/ginkgo"
 )
@@ -66,13 +66,13 @@ var _ = SIGDescribe("Certificates API", func() {
 		}
 		csrs := f.ClientSet.CertificatesV1beta1().CertificateSigningRequests()
-		e2elog.Logf("creating CSR")
+		framework.Logf("creating CSR")
 		csr, err = csrs.Create(csr)
 		framework.ExpectNoError(err)
 		csrName := csr.Name
-		e2elog.Logf("approving CSR")
+		framework.Logf("approving CSR")
 		framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) {
 			csr.Status.Conditions = []certificatesv1beta1.CertificateSigningRequestCondition{
 				{
@@ -84,27 +84,27 @@ var _ = SIGDescribe("Certificates API", func() {
 			csr, err = csrs.UpdateApproval(csr)
 			if err != nil {
 				csr, _ = csrs.Get(csrName, metav1.GetOptions{})
-				e2elog.Logf("err updating approval: %v", err)
+				framework.Logf("err updating approval: %v", err)
 				return false, nil
 			}
 			return true, nil
 		}))
-		e2elog.Logf("waiting for CSR to be signed")
+		framework.Logf("waiting for CSR to be signed")
 		framework.ExpectNoError(wait.Poll(5*time.Second, time.Minute, func() (bool, error) {
 			csr, err = csrs.Get(csrName, metav1.GetOptions{})
 			if err != nil {
-				e2elog.Logf("error getting csr: %v", err)
+				framework.Logf("error getting csr: %v", err)
 				return false, nil
 			}
 			if len(csr.Status.Certificate) == 0 {
-				e2elog.Logf("csr not signed yet")
+				framework.Logf("csr not signed yet")
 				return false, nil
 			}
 			return true, nil
 		}))
-		e2elog.Logf("testing the client")
+		framework.Logf("testing the client")
 		rcfg, err := framework.LoadConfig()
 		framework.ExpectNoError(err)

View File

@@ -27,7 +27,6 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	"github.com/onsi/ginkgo"
@@ -156,7 +155,7 @@ var _ = SIGDescribe("[Feature:NodeAuthorizer]", func() {
 		err = wait.Poll(itv, dur, func() (bool, error) {
 			_, err = c.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
 			if err != nil {
-				e2elog.Logf("Failed to get secret %v, err: %v", secret.Name, err)
+				framework.Logf("Failed to get secret %v, err: %v", secret.Name, err)
 				return false, nil
 			}
 			return true, nil

View File

@@ -32,7 +32,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/plugin/pkg/admission/serviceaccount"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -51,19 +50,19 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 			ginkgo.By("waiting for a single token reference")
 			sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
 			if apierrors.IsNotFound(err) {
-				e2elog.Logf("default service account was not found")
+				framework.Logf("default service account was not found")
 				return false, nil
 			}
 			if err != nil {
-				e2elog.Logf("error getting default service account: %v", err)
+				framework.Logf("error getting default service account: %v", err)
 				return false, err
 			}
 			switch len(sa.Secrets) {
 			case 0:
-				e2elog.Logf("default service account has no secret references")
+				framework.Logf("default service account has no secret references")
 				return false, nil
 			case 1:
-				e2elog.Logf("default service account has a single secret reference")
+				framework.Logf("default service account has a single secret reference")
 				secrets = sa.Secrets
 				return true, nil
 			default:
@@ -89,19 +88,19 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 			ginkgo.By("waiting for a new token reference")
 			sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
 			if err != nil {
-				e2elog.Logf("error getting default service account: %v", err)
+				framework.Logf("error getting default service account: %v", err)
 				return false, err
 			}
 			switch len(sa.Secrets) {
 			case 0:
-				e2elog.Logf("default service account has no secret references")
+				framework.Logf("default service account has no secret references")
 				return false, nil
 			case 1:
 				if sa.Secrets[0] == secrets[0] {
-					e2elog.Logf("default service account still has the deleted secret reference")
+					framework.Logf("default service account still has the deleted secret reference")
 					return false, nil
 				}
-				e2elog.Logf("default service account has a new single secret reference")
+				framework.Logf("default service account has a new single secret reference")
 				secrets = sa.Secrets
 				return true, nil
 			default:
@@ -133,15 +132,15 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 			ginkgo.By("waiting for a new token to be created and added")
 			sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("default", metav1.GetOptions{})
 			if err != nil {
-				e2elog.Logf("error getting default service account: %v", err)
+				framework.Logf("error getting default service account: %v", err)
 				return false, err
 			}
 			switch len(sa.Secrets) {
 			case 0:
-				e2elog.Logf("default service account has no secret references")
+				framework.Logf("default service account has no secret references")
 				return false, nil
 			case 1:
-				e2elog.Logf("default service account has a new single secret reference")
+				framework.Logf("default service account has a new single secret reference")
 				secrets = sa.Secrets
 				return true, nil
 			default:
@@ -179,21 +178,21 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 			ginkgo.By("getting the auto-created API token")
 			sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get("mount-test", metav1.GetOptions{})
 			if apierrors.IsNotFound(err) {
-				e2elog.Logf("mount-test service account was not found")
+				framework.Logf("mount-test service account was not found")
 				return false, nil
 			}
 			if err != nil {
-				e2elog.Logf("error getting mount-test service account: %v", err)
+				framework.Logf("error getting mount-test service account: %v", err)
 				return false, err
 			}
 			if len(sa.Secrets) == 0 {
-				e2elog.Logf("mount-test service account has no secret references")
+				framework.Logf("mount-test service account has no secret references")
 				return false, nil
 			}
 			for _, secretRef := range sa.Secrets {
 				secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{})
 				if err != nil {
-					e2elog.Logf("Error getting secret %s: %v", secretRef.Name, err)
+					framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
 					continue
 				}
 				if secret.Type == v1.SecretTypeServiceAccountToken {
@@ -202,7 +201,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 				}
 			}
-			e2elog.Logf("default service account has no secret references to valid service account tokens")
+			framework.Logf("default service account has no secret references to valid service account tokens")
 			return false, nil
 		}))
@@ -290,21 +289,21 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 			ginkgo.By("getting the auto-created API token")
 			sa, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Get(mountSA.Name, metav1.GetOptions{})
 			if apierrors.IsNotFound(err) {
-				e2elog.Logf("mount service account was not found")
+				framework.Logf("mount service account was not found")
 				return false, nil
 			}
 			if err != nil {
-				e2elog.Logf("error getting mount service account: %v", err)
+				framework.Logf("error getting mount service account: %v", err)
 				return false, err
 			}
 			if len(sa.Secrets) == 0 {
-				e2elog.Logf("mount service account has no secret references")
+				framework.Logf("mount service account has no secret references")
 				return false, nil
 			}
 			for _, secretRef := range sa.Secrets {
 				secret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secretRef.Name, metav1.GetOptions{})
 				if err != nil {
-					e2elog.Logf("Error getting secret %s: %v", secretRef.Name, err)
+					framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
 					continue
 				}
 				if secret.Type == v1.SecretTypeServiceAccountToken {
@@ -312,7 +311,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 				}
 			}
-			e2elog.Logf("default service account has no secret references to valid service account tokens")
+			framework.Logf("default service account has no secret references to valid service account tokens")
 			return false, nil
 		}))
@@ -394,7 +393,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 			}
 			createdPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 			framework.ExpectNoError(err)
-			e2elog.Logf("created pod %s", tc.PodName)
+			framework.Logf("created pod %s", tc.PodName)
 			hasServiceAccountTokenVolume := false
 			for _, c := range createdPod.Spec.Containers {
@@ -406,9 +405,9 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 			}
 			if hasServiceAccountTokenVolume != tc.ExpectTokenVolume {
-				e2elog.Failf("%s: expected volume=%v, got %v (%#v)", tc.PodName, tc.ExpectTokenVolume, hasServiceAccountTokenVolume, createdPod)
+				framework.Failf("%s: expected volume=%v, got %v (%#v)", tc.PodName, tc.ExpectTokenVolume, hasServiceAccountTokenVolume, createdPod)
 			} else {
-				e2elog.Logf("pod %s service account token volume mount: %v", tc.PodName, hasServiceAccountTokenVolume)
+				framework.Logf("pod %s service account token volume mount: %v", tc.PodName, hasServiceAccountTokenVolume)
 			}
 		}
 	})
@@ -425,7 +424,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 				"ca.crt": string(cfg.TLSClientConfig.CAData),
 			},
 		}); err != nil && !apierrors.IsAlreadyExists(err) {
-			e2elog.Failf("Unexpected err creating kube-ca-crt: %v", err)
+			framework.Failf("Unexpected err creating kube-ca-crt: %v", err)
 		}
 		tenMin := int64(10 * 60)
@@ -490,19 +489,19 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectNoError(err)
-		e2elog.Logf("created pod")
+		framework.Logf("created pod")
 		if !e2epod.CheckPodsRunningReady(f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) {
-			e2elog.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name)
+			framework.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name)
 		}
-		e2elog.Logf("pod is ready")
+		framework.Logf("pod is ready")
 		var logs string
 		if err := wait.Poll(1*time.Minute, 20*time.Minute, func() (done bool, err error) {
-			e2elog.Logf("polling logs")
+			framework.Logf("polling logs")
 			logs, err = e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, "inclusterclient", "inclusterclient")
 			if err != nil {
-				e2elog.Logf("Error pulling logs: %v", err)
+				framework.Logf("Error pulling logs: %v", err)
 				return false, nil
 			}
 			tokenCount, err := parseInClusterClientLogs(logs)
@@ -510,12 +509,12 @@ var _ = SIGDescribe("ServiceAccounts", func() {
 				return false, fmt.Errorf("inclusterclient reported an error: %v", err)
 			}
 			if tokenCount < 2 {
-				e2elog.Logf("Retrying. Still waiting to see more unique tokens: got=%d, want=2", tokenCount)
+				framework.Logf("Retrying. Still waiting to see more unique tokens: got=%d, want=2", tokenCount)
 				return false, nil
 			}
 			return true, nil
 		}); err != nil {
-			e2elog.Failf("Unexpected error: %v\n%s", err, logs)
+			framework.Failf("Unexpected error: %v\n%s", err, logs)
 		}
 	})
 })

View File

@@ -35,7 +35,6 @@ go_library(
         "//test/e2e/framework/auth:go_default_library",
        "//test/e2e/framework/endpoints:go_default_library",
         "//test/e2e/framework/job:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/service:go_default_library",
         "//test/e2e/framework/testfiles:go_default_library",

View File

@@ -41,9 +41,6 @@ import (
 	"github.com/elazarl/goproxy"
 	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
-	uexec "k8s.io/utils/exec"
-	"k8s.io/utils/pointer"
-	"sigs.k8s.io/yaml"
 	v1 "k8s.io/api/core/v1"
 	rbacv1 "k8s.io/api/rbac/v1"
@@ -59,25 +56,26 @@ import (
 	"k8s.io/apiserver/pkg/authentication/serviceaccount"
 	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
 	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubectl/pkg/polymorphichelpers"
 	"k8s.io/kubernetes/pkg/controller"
 	commonutils "k8s.io/kubernetes/test/e2e/common"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/auth"
 	e2eendpoints "k8s.io/kubernetes/test/e2e/framework/endpoints"
 	jobutil "k8s.io/kubernetes/test/e2e/framework/job"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"
 	"k8s.io/kubernetes/test/e2e/scheduling"
 	testutils "k8s.io/kubernetes/test/utils"
 	"k8s.io/kubernetes/test/utils/crd"
+	imageutils "k8s.io/kubernetes/test/utils/image"
+	uexec "k8s.io/utils/exec"
+	"k8s.io/utils/pointer"
+	"sigs.k8s.io/yaml"
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
-	"k8s.io/kubectl/pkg/polymorphichelpers"
-	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 const (
@@ -149,7 +147,7 @@ func runKubectlRetryOrDie(args ...string) string {
 	}
 	// Expect no errors to be present after retries are finished
 	// Copied from framework #ExecOrDie
-	e2elog.Logf("stdout: %q", output)
+	framework.Logf("stdout: %q", output)
 	framework.ExpectNoError(err)
 	return output
 }
@@ -188,17 +186,17 @@ var _ = SIGDescribe("Kubectl alpha client", func() {
 			ginkgo.By("verifying the CronJob " + cjName + " was created")
 			sj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{})
 			if err != nil {
-				e2elog.Failf("Failed getting CronJob %s: %v", cjName, err)
+				framework.Failf("Failed getting CronJob %s: %v", cjName, err)
 			}
 			if sj.Spec.Schedule != schedule {
-				e2elog.Failf("Failed creating a CronJob with correct schedule %s", schedule)
+				framework.Failf("Failed creating a CronJob with correct schedule %s", schedule)
 			}
 			containers := sj.Spec.JobTemplate.Spec.Template.Spec.Containers
 			if checkContainersImage(containers, busyboxImage) {
-				e2elog.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
+				framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
 			}
 			if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
-				e2elog.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure")
+				framework.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure")
 			}
 		})
 	})
@@ -236,14 +234,14 @@ var _ = SIGDescribe("Kubectl client", func() {
 		if err != nil || len(pods) < atLeast {
 			// TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
 			framework.DumpAllNamespaceInfo(f.ClientSet, ns)
-			e2elog.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err)
+			framework.Failf("Verified %d of %d pods , error: %v", len(pods), atLeast, err)
 		}
 	}
 	debugDiscovery := func() {
 		home := os.Getenv("HOME")
 		if len(home) == 0 {
-			e2elog.Logf("no $HOME envvar set")
+			framework.Logf("no $HOME envvar set")
 			return
 		}
@@ -259,17 +257,17 @@ var _ = SIGDescribe("Kubectl client", func() {
 			if len(parts) != 3 || parts[1] != "v1" || parts[2] != "serverresources.json" {
 				return nil
 			}
-			e2elog.Logf("%s modified at %s (current time: %s)", path, info.ModTime(), time.Now())
+			framework.Logf("%s modified at %s (current time: %s)", path, info.ModTime(), time.Now())
 			data, readError := ioutil.ReadFile(path)
 			if readError != nil {
-				e2elog.Logf("%s error: %v", path, readError)
+				framework.Logf("%s error: %v", path, readError)
 			} else {
-				e2elog.Logf("%s content: %s", path, string(data))
+				framework.Logf("%s content: %s", path, string(data))
 			}
 			return nil
 		})
-		e2elog.Logf("scanned %s for discovery docs: %v", home, err)
+		framework.Logf("scanned %s for discovery docs: %v", home, err)
 	}
 	ginkgo.Describe("Update Demo", func() {
@@ -357,7 +355,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 		})
 		ginkgo.By("creating all guestbook components")
 		forEachGBFile(func(contents string) {
-			e2elog.Logf(contents)
+			framework.Logf(contents)
 			framework.RunKubectlOrDieInput(contents, "create", "-f", "-", fmt.Sprintf("--namespace=%v", ns))
 		})
@@ -382,7 +380,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 			ginkgo.By("executing a command in the container")
 			execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodName, "echo", "running", "in", "container")
 			if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
-				e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
+				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
 			}
 			ginkgo.By("executing a very long command in the container")
@@ -398,13 +396,13 @@ var _ = SIGDescribe("Kubectl client", func() {
 				WithStdinData("abcd1234").
 				ExecOrDie()
 			if e, a := "abcd1234", execOutput; e != a {
-				e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
+				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
 			}
 			// pretend that we're a user in an interactive shell
 			r, closer, err := newBlockingReader("echo hi\nexit\n")
 			if err != nil {
-				e2elog.Failf("Error creating blocking reader: %v", err)
+				framework.Failf("Error creating blocking reader: %v", err)
 			}
 			// NOTE this is solely for test cleanup!
 			defer closer.Close()
@@ -414,7 +412,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 				WithStdinReader(r).
 				ExecOrDie()
 			if e, a := "hi", strings.TrimSpace(execOutput); e != a {
-				e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
+				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
 			}
 		})
@@ -422,14 +420,14 @@ var _ = SIGDescribe("Kubectl client", func() {
 			ginkgo.By("executing a command in the container")
 			execOutput := framework.RunKubectlOrDie("exec", fmt.Sprintf("--namespace=%v", ns), simplePodResourceName, "echo", "running", "in", "container")
 			if e, a := "running in container", strings.TrimSpace(execOutput); e != a {
-				e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
+				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", e, a)
 			}
 		})
 		ginkgo.It("should support exec through an HTTP proxy", func() {
 			// Fail if the variable isn't set
 			if framework.TestContext.Host == "" {
-				e2elog.Failf("--host variable must be set to the full URI to the api server on e2e run.")
+				framework.Failf("--host variable must be set to the full URI to the api server on e2e run.")
 			}
 			ginkgo.By("Starting goproxy")
@@ -447,7 +445,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 				// Verify we got the normal output captured by the exec server
 				expectedExecOutput := "running in container\n"
 				if output != expectedExecOutput {
-					e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
+					framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
 				}
 				// Verify the proxy server logs saw the connection
@@ -455,7 +453,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 				proxyLog := proxyLogs.String()
 				if !strings.Contains(proxyLog, expectedProxyLog) {
-					e2elog.Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog)
+					framework.Failf("Missing expected log result on proxy server for %s. Expected: %q, got %q", proxyVar, expectedProxyLog, proxyLog)
 				}
 			}
 		})
@@ -463,7 +461,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 		ginkgo.It("should support exec through kubectl proxy", func() {
 			// Fail if the variable isn't set
 			if framework.TestContext.Host == "" {
-				e2elog.Failf("--host variable must be set to the full URI to the api server on e2e run.")
+				framework.Failf("--host variable must be set to the full URI to the api server on e2e run.")
 			}
 			ginkgo.By("Starting kubectl proxy")
@@ -482,7 +480,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 			// Verify we got the normal output captured by the exec server
 			expectedExecOutput := "running in container\n"
 			if output != expectedExecOutput {
-				e2elog.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
+				framework.Failf("Unexpected kubectl exec output. Wanted %q, got %q", expectedExecOutput, output)
 			}
 		})
@@ -560,14 +558,14 @@ var _ = SIGDescribe("Kubectl client", func() {
 			runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
 			gomega.Expect(err).To(gomega.BeNil())
 			if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, time.Minute) {
-				e2elog.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
+				framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
 			}
 			// NOTE: we cannot guarantee our output showed up in the container logs before stdin was closed, so we have
 			// to loop test.
 			err = wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
 				if !e2epod.CheckPodsRunningReady(c, ns, []string{runTestPod.Name}, 1*time.Second) {
-					e2elog.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
+					framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
 				}
 				logOutput := framework.RunKubectlOrDie(nsFlag, "logs", runTestPod.Name)
 				gomega.Expect(logOutput).ToNot(gomega.ContainSubstring("stdin closed"))
@@ -586,7 +584,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 			framework.RunKubectlOrDie("run", podName, "--generator=run-pod/v1", "--image="+busyboxImage, "--restart=OnFailure", nsFlag, "--", "sh", "-c", "sleep 10; seq 100 | while read i; do echo $i; sleep 0.01; done; echo EOF")
 			if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) {
-				e2elog.Failf("Pod for run-log-test was not ready")
+				framework.Failf("Pod for run-log-test was not ready")
 			}
 			logOutput := framework.RunKubectlOrDie(nsFlag, "logs", "-f", "run-log-test")
@@ -601,12 +599,12 @@ var _ = SIGDescribe("Kubectl client", func() {
 			ginkgo.By("curling local port output")
 			localAddr := fmt.Sprintf("http://localhost:%d", cmd.port)
 			body, err := curl(localAddr)
-			e2elog.Logf("got: %s", body)
+			framework.Logf("got: %s", body)
 			if err != nil {
-				e2elog.Failf("Failed http.Get of forwarded port (%s): %v", localAddr, err)
+				framework.Failf("Failed http.Get of forwarded port (%s): %v", localAddr, err)
 			}
 			if !strings.Contains(body, httpdDefaultOutput) {
-				e2elog.Failf("Container port output missing expected value. Wanted:'%s', got: %s", httpdDefaultOutput, body)
+				framework.Failf("Container port output missing expected value. Wanted:'%s', got: %s", httpdDefaultOutput, body)
 			}
 		})
@@ -636,7 +634,7 @@ var _ = SIGDescribe("Kubectl client", func() {
 			inClusterHost := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_HOST"))
 			inClusterPort := strings.TrimSpace(framework.RunHostCmdOrDie(ns, simplePodName, "printenv KUBERNETES_SERVICE_PORT"))
 			inClusterURL := net.JoinHostPort(inClusterHost, inClusterPort)
-			e2elog.Logf("copying %s to the %s pod", kubectlPath, simplePodName)
+			framework.Logf("copying %s to the %s pod", kubectlPath, simplePodName)
 			framework.RunKubectlOrDie("cp", kubectlPath, ns+"/"+simplePodName+":/tmp/")
 			// Build a kubeconfig file that will make use of the injected ca and token,
@@ -666,7 +664,7 @@ users:
   user:
     tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
 `), os.FileMode(0755)))
-			e2elog.Logf("copying override kubeconfig to the %s pod", simplePodName)
+			framework.Logf("copying override kubeconfig to the %s pod", simplePodName)
 			framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, overrideKubeconfigName), ns+"/"+simplePodName+":/tmp/")
 			framework.ExpectNoError(ioutil.WriteFile(filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), []byte(`
@@ -682,7 +680,7 @@ apiVersion: v1
 metadata:
   name: "configmap without namespace and invalid name"
 `), os.FileMode(0755)))
-			e2elog.Logf("copying configmap manifests to the %s pod", simplePodName)
+			framework.Logf("copying configmap manifests to the %s pod", simplePodName)
 			framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-with-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
 			framework.RunKubectlOrDie("cp", filepath.Join(tmpDir, "invalid-configmap-without-namespace.yaml"), ns+"/"+simplePodName+":/tmp/")
@@ -707,7 +705,7 @@ metadata:
 			ginkgo.By("trying to use kubectl with invalid token")
 			_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --token=invalid --v=7 2>&1")
-			e2elog.Logf("got err %v", err)
+			framework.Logf("got err %v", err)
 			framework.ExpectError(err)
 			gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster namespace"))
 			gomega.Expect(err).To(gomega.ContainSubstring("Using in-cluster configuration"))
@@ -715,7 +713,7 @@ metadata:
 			ginkgo.By("trying to use kubectl with invalid server")
 			_, err = framework.RunHostCmd(ns, simplePodName, "/tmp/kubectl get pods --server=invalid --v=6 2>&1")
-			e2elog.Logf("got err %v", err)
+			framework.Logf("got err %v", err)
 			framework.ExpectError(err)
 			gomega.Expect(err).To(gomega.ContainSubstring("Unable to connect to the server"))
 			gomega.Expect(err).To(gomega.ContainSubstring("GET http://invalid/api"))
@@ -745,7 +743,7 @@ metadata:
 			ginkgo.By("validating api versions")
 			output := framework.RunKubectlOrDie("api-versions")
 			if !strings.Contains(output, "v1") {
-				e2elog.Failf("No v1 in kubectl api-versions")
+				framework.Failf("No v1 in kubectl api-versions")
 			}
 		})
 	})
@@ -796,7 +794,7 @@ metadata:
 			ginkgo.By("checking the result")
 			if originalNodePort != currentNodePort {
-				e2elog.Failf("port should keep the same")
+				framework.Failf("port should keep the same")
 			}
 		})
@@ -813,7 +811,7 @@ metadata:
 			output := framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json")
 			requiredString := "\"replicas\": 2"
 			if !strings.Contains(output, requiredString) {
-				e2elog.Failf("Missing %s in kubectl view-last-applied", requiredString)
+				framework.Failf("Missing %s in kubectl view-last-applied", requiredString)
 			}
 			ginkgo.By("apply file doesn't have replicas")
@@ -823,7 +821,7 @@ metadata:
 			output = framework.RunKubectlOrDieInput(deployment1Yaml, "apply", "view-last-applied", "-f", "-", nsFlag, "-o", "json")
 			requiredString = "\"replicas\": 2"
 			if strings.Contains(output, requiredString) {
-				e2elog.Failf("Presenting %s in kubectl view-last-applied", requiredString)
+				framework.Failf("Presenting %s in kubectl view-last-applied", requiredString)
 			}
 			ginkgo.By("scale set replicas to 3")
@@ -839,7 +837,7 @@ metadata:
 			requiredItems := []string{"\"replicas\": 3", imageutils.GetE2EImage(imageutils.Httpd)}
 			for _, item := range requiredItems {
 				if !strings.Contains(output, item) {
-					e2elog.Failf("Missing %s in kubectl apply", item)
+					framework.Failf("Missing %s in kubectl apply", item)
 				}
 			}
 		})
@@ -855,7 +853,7 @@ metadata:
 			var values []map[string]string
 			err := yaml.Unmarshal([]byte(extension.GetValue().GetYaml()), &values)
 			if err != nil {
-				e2elog.Logf("%v\n%s", err, string(extension.GetValue().GetYaml()))
+				framework.Logf("%v\n%s", err, string(extension.GetValue().GetYaml()))
 				continue
 			}
 			for _, value := range values {
@@ -878,7 +876,7 @@ metadata:
 		schemaForGVK := func(desiredGVK schema.GroupVersionKind) *openapi_v2.Schema {
 			d, err := f.ClientSet.Discovery().OpenAPISchema()
 			if err != nil {
-				e2elog.Failf("%v", err)
+				framework.Failf("%v", err)
 			}
 			if d == nil || d.Definitions == nil {
 				return nil
@@ -900,7 +898,7 @@ metadata:
 			ginkgo.By("create CRD with no validation schema")
 			crd, err := crd.CreateTestCRD(f)
 			if err != nil {
-				e2elog.Failf("failed to create test CRD: %v", err)
+				framework.Failf("failed to create test CRD: %v", err)
 			}
 			defer crd.CleanUp()
@@ -910,7 +908,7 @@ metadata:
 			meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
 			randomCR := fmt.Sprintf(`{%s,"a":{"b":[{"c":"d"}]}}`, meta)
 			if err := createApplyCustomResource(randomCR, f.Namespace.Name, "test-cr", crd); err != nil {
-				e2elog.Failf("%v", err)
+				framework.Failf("%v", err)
 			}
 		})
@@ -919,14 +917,14 @@ metadata:
 			crd, err := crd.CreateTestCRD(f, func(crd *apiextensionsv1.CustomResourceDefinition) {
 				props := &apiextensionsv1.JSONSchemaProps{}
 				if err := yaml.Unmarshal(schemaFoo, props); err != nil {
-					e2elog.Failf("failed to unmarshal schema: %v", err)
+					framework.Failf("failed to unmarshal schema: %v", err)
 				}
 				for i := range crd.Spec.Versions {
 					crd.Spec.Versions[i].Schema = &apiextensionsv1.CustomResourceValidation{OpenAPIV3Schema: props}
 				}
 			})
 			if err != nil {
-				e2elog.Failf("failed to create test CRD: %v", err)
+				framework.Failf("failed to create test CRD: %v", err)
 			}
 			defer crd.CleanUp()
@@ -936,7 +934,7 @@ metadata:
 			meta := fmt.Sprintf(metaPattern, crd.Crd.Spec.Names.Kind, crd.Crd.Spec.Group, crd.Crd.Spec.Versions[0].Name, "test-cr")
 			validCR := fmt.Sprintf(`{%s,"spec":{"bars":[{"name":"test-bar"}]}}`, meta)
 			if err := createApplyCustomResource(validCR, f.Namespace.Name, "test-cr", crd); err != nil {
-				e2elog.Failf("%v", err)
+				framework.Failf("%v", err)
 			}
 		})
@@ -945,7 +943,7 @@ metadata:
 			crd, err := crd.CreateTestCRD(f, func(crd *apiextensionsv1.CustomResourceDefinition) {
 				props := &apiextensionsv1.JSONSchemaProps{}
 				if err := yaml.Unmarshal(schemaFoo, props); err != nil {
-					e2elog.Failf("failed to unmarshal schema: %v", err)
+					framework.Failf("failed to unmarshal schema: %v", err)
 				}
 				// Allow for arbitrary-extra properties.
 				props.XPreserveUnknownFields = pointer.BoolPtr(true)
@@ -954,7 +952,7 @@ metadata:
 				}
 			})
 			if err != nil {
-				e2elog.Failf("failed to create test CRD: %v", err)
+				framework.Failf("failed to create test CRD: %v", err)
 			}
 			defer crd.CleanUp()
@@ -984,7 +982,7 @@ metadata:
 			requiredItems := []string{"Kubernetes master", "is running at"}
 			for _, item := range requiredItems {
 				if !strings.Contains(output, item) {
-					e2elog.Failf("Missing %s in kubectl cluster-info", item)
+					framework.Failf("Missing %s in kubectl cluster-info", item)
 				}
 			}
 		})
@ -1116,14 +1114,14 @@ metadata:
ginkgo.By("creating Redis RC") ginkgo.By("creating Redis RC")
e2elog.Logf("namespace %v", ns) framework.Logf("namespace %v", ns)
framework.RunKubectlOrDieInput(controllerJSON, "create", "-f", "-", nsFlag) framework.RunKubectlOrDieInput(controllerJSON, "create", "-f", "-", nsFlag)
// It may take a while for the pods to get registered in some cases, wait to be sure. // It may take a while for the pods to get registered in some cases, wait to be sure.
ginkgo.By("Waiting for Redis master to start.") ginkgo.By("Waiting for Redis master to start.")
waitForOrFailWithDebug(1) waitForOrFailWithDebug(1)
forEachPod(func(pod v1.Pod) { forEachPod(func(pod v1.Pod) {
e2elog.Logf("wait on redis-master startup in %v ", ns) framework.Logf("wait on redis-master startup in %v ", ns)
framework.LookForStringInLog(ns, pod.Name, "redis-master", "Ready to accept connections", framework.PodStartTimeout) framework.LookForStringInLog(ns, pod.Name, "redis-master", "Ready to accept connections", framework.PodStartTimeout)
}) })
validateService := func(name string, servicePort int, timeout time.Duration) { validateService := func(name string, servicePort int, timeout time.Duration) {
@ -1131,7 +1129,7 @@ metadata:
ep, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{}) ep, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{})
if err != nil { if err != nil {
// log the real error // log the real error
e2elog.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err) framework.Logf("Get endpoints failed (interval %v): %v", framework.Poll, err)
// if the error is API not found or could not find default credentials or TLS handshake timeout, try again // if the error is API not found or could not find default credentials or TLS handshake timeout, try again
if apierrs.IsNotFound(err) || if apierrs.IsNotFound(err) ||
@ -1144,15 +1142,15 @@ metadata:
uidToPort := e2eendpoints.GetContainerPortsByPodUID(ep) uidToPort := e2eendpoints.GetContainerPortsByPodUID(ep)
if len(uidToPort) == 0 { if len(uidToPort) == 0 {
e2elog.Logf("No endpoint found, retrying") framework.Logf("No endpoint found, retrying")
return false, nil return false, nil
} }
if len(uidToPort) > 1 { if len(uidToPort) > 1 {
e2elog.Failf("Too many endpoints found") framework.Failf("Too many endpoints found")
} }
for _, port := range uidToPort { for _, port := range uidToPort {
if port[0] != redisPort { if port[0] != redisPort {
e2elog.Failf("Wrong endpoint port: %d", port[0]) framework.Failf("Wrong endpoint port: %d", port[0])
} }
} }
return true, nil return true, nil
@ -1163,14 +1161,14 @@ metadata:
framework.ExpectNoError(err) framework.ExpectNoError(err)
if len(e2eservice.Spec.Ports) != 1 { if len(e2eservice.Spec.Ports) != 1 {
e2elog.Failf("1 port is expected") framework.Failf("1 port is expected")
} }
port := e2eservice.Spec.Ports[0] port := e2eservice.Spec.Ports[0]
if port.Port != int32(servicePort) { if port.Port != int32(servicePort) {
e2elog.Failf("Wrong service port: %d", port.Port) framework.Failf("Wrong service port: %d", port.Port)
} }
if port.TargetPort.IntValue() != redisPort { if port.TargetPort.IntValue() != redisPort {
e2elog.Failf("Wrong target port: %d", port.TargetPort.IntValue()) framework.Failf("Wrong target port: %d", port.TargetPort.IntValue())
} }
} }
@ -1214,7 +1212,7 @@ metadata:
ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue) ginkgo.By("verifying the pod has the label " + labelName + " with the value " + labelValue)
output := framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag) output := framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag)
if !strings.Contains(output, labelValue) { if !strings.Contains(output, labelValue) {
e2elog.Failf("Failed updating label " + labelName + " to the pod " + pausePodName) framework.Failf("Failed updating label " + labelName + " to the pod " + pausePodName)
} }
ginkgo.By("removing the label " + labelName + " of a pod") ginkgo.By("removing the label " + labelName + " of a pod")
@ -1222,7 +1220,7 @@ metadata:
ginkgo.By("verifying the pod doesn't have the label " + labelName) ginkgo.By("verifying the pod doesn't have the label " + labelName)
output = framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag) output = framework.RunKubectlOrDie("get", "pod", pausePodName, "-L", labelName, nsFlag)
if strings.Contains(output, labelValue) { if strings.Contains(output, labelValue) {
e2elog.Failf("Failed removing label " + labelName + " of the pod " + pausePodName) framework.Failf("Failed removing label " + labelName + " of the pod " + pausePodName)
} }
}) })
}) })
@ -1251,7 +1249,7 @@ metadata:
podSource := fmt.Sprintf("%s:/root/foo/bar/foo.bar", busyboxPodName) podSource := fmt.Sprintf("%s:/root/foo/bar/foo.bar", busyboxPodName)
tempDestination, err := ioutil.TempFile(os.TempDir(), "copy-foobar") tempDestination, err := ioutil.TempFile(os.TempDir(), "copy-foobar")
if err != nil { if err != nil {
e2elog.Failf("Failed creating temporary destination file: %v", err) framework.Failf("Failed creating temporary destination file: %v", err)
} }
ginkgo.By("specifying a remote filepath " + podSource + " on the pod") ginkgo.By("specifying a remote filepath " + podSource + " on the pod")
@ -1259,10 +1257,10 @@ metadata:
ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name()) ginkgo.By("verifying that the contents of the remote file " + podSource + " have been copied to a local file " + tempDestination.Name())
localData, err := ioutil.ReadAll(tempDestination) localData, err := ioutil.ReadAll(tempDestination)
if err != nil { if err != nil {
e2elog.Failf("Failed reading temporary local file: %v", err) framework.Failf("Failed reading temporary local file: %v", err)
} }
if string(localData) != remoteContents { if string(localData) != remoteContents {
e2elog.Failf("Failed copying remote file contents. Expected %s but got %s", remoteContents, string(localData)) framework.Failf("Failed copying remote file contents. Expected %s but got %s", remoteContents, string(localData))
} }
}) })
}) })
@ -1301,7 +1299,7 @@ metadata:
ginkgo.By("Waiting for log generator to start.") ginkgo.By("Waiting for log generator to start.")
if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) { if !e2epod.CheckPodsRunningReadyOrSucceeded(c, ns, []string{podName}, framework.PodStartTimeout) {
e2elog.Failf("Pod %s was not ready", podName) framework.Failf("Pod %s was not ready", podName)
} }
ginkgo.By("checking for a matching strings") ginkgo.By("checking for a matching strings")
@ -1310,26 +1308,26 @@ metadata:
ginkgo.By("limiting log lines") ginkgo.By("limiting log lines")
out := framework.RunKubectlOrDie("logs", podName, containerName, nsFlag, "--tail=1") out := framework.RunKubectlOrDie("logs", podName, containerName, nsFlag, "--tail=1")
e2elog.Logf("got output %q", out) framework.Logf("got output %q", out)
gomega.Expect(len(out)).NotTo(gomega.BeZero()) gomega.Expect(len(out)).NotTo(gomega.BeZero())
framework.ExpectEqual(len(lines(out)), 1) framework.ExpectEqual(len(lines(out)), 1)
ginkgo.By("limiting log bytes") ginkgo.By("limiting log bytes")
out = framework.RunKubectlOrDie("logs", podName, containerName, nsFlag, "--limit-bytes=1") out = framework.RunKubectlOrDie("logs", podName, containerName, nsFlag, "--limit-bytes=1")
e2elog.Logf("got output %q", out) framework.Logf("got output %q", out)
framework.ExpectEqual(len(lines(out)), 1) framework.ExpectEqual(len(lines(out)), 1)
framework.ExpectEqual(len(out), 1) framework.ExpectEqual(len(out), 1)
ginkgo.By("exposing timestamps") ginkgo.By("exposing timestamps")
out = framework.RunKubectlOrDie("logs", podName, containerName, nsFlag, "--tail=1", "--timestamps") out = framework.RunKubectlOrDie("logs", podName, containerName, nsFlag, "--tail=1", "--timestamps")
e2elog.Logf("got output %q", out) framework.Logf("got output %q", out)
l := lines(out) l := lines(out)
framework.ExpectEqual(len(l), 1) framework.ExpectEqual(len(l), 1)
words := strings.Split(l[0], " ") words := strings.Split(l[0], " ")
gomega.Expect(len(words)).To(gomega.BeNumerically(">", 1)) gomega.Expect(len(words)).To(gomega.BeNumerically(">", 1))
if _, err := time.Parse(time.RFC3339Nano, words[0]); err != nil { if _, err := time.Parse(time.RFC3339Nano, words[0]); err != nil {
if _, err := time.Parse(time.RFC3339, words[0]); err != nil { if _, err := time.Parse(time.RFC3339, words[0]); err != nil {
e2elog.Failf("expected %q to be RFC3339 or RFC3339Nano", words[0]) framework.Failf("expected %q to be RFC3339 or RFC3339Nano", words[0])
} }
} }
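One detail from the hunk above is worth spelling out: kubectl's --timestamps flag prefixes each line with an RFC3339Nano time, and the test falls back to plain RFC3339 before failing. A self-contained sketch of that two-step parse, with an invented sample line:

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	// A line as produced by `kubectl logs --timestamps` (sample value).
	line := "2019-08-27T11:42:47.123456789Z some log output"

	// The timestamp is the first space-separated word, as in the test above.
	word := strings.SplitN(line, " ", 2)[0]

	// Try the more precise format first, then fall back to plain RFC3339.
	if _, err := time.Parse(time.RFC3339Nano, word); err != nil {
		if _, err := time.Parse(time.RFC3339, word); err != nil {
			fmt.Printf("expected %q to be RFC3339 or RFC3339Nano\n", word)
			return
		}
	}
	fmt.Println("timestamp OK:", word)
}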
@@ -1374,7 +1372,7 @@ metadata:
} }
} }
if !found { if !found {
e2elog.Failf("Added annotation not found") framework.Failf("Added annotation not found")
} }
}) })
}) })
@@ -1391,7 +1389,7 @@ metadata:
requiredItems := []string{"Client Version:", "Server Version:", "Major:", "Minor:", "GitCommit:"} requiredItems := []string{"Client Version:", "Server Version:", "Major:", "Minor:", "GitCommit:"}
for _, item := range requiredItems { for _, item := range requiredItems {
if !strings.Contains(version, item) { if !strings.Contains(version, item) {
e2elog.Failf("Required item %s not found in %s", item, version) framework.Failf("Required item %s not found in %s", item, version)
} }
} }
}) })
@@ -1425,12 +1423,12 @@ metadata:
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": name})) label := labels.SelectorFromSet(labels.Set(map[string]string{"run": name}))
podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label) podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label)
if err != nil { if err != nil {
e2elog.Failf("Failed getting pod controlled by %s: %v", name, err) framework.Failf("Failed getting pod controlled by %s: %v", name, err)
} }
pods := podlist.Items pods := podlist.Items
if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != httpdImage { if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != httpdImage {
framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag) framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag)
e2elog.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", httpdImage, len(pods)) framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", httpdImage, len(pods))
} }
}) })
}) })
@@ -1459,23 +1457,23 @@ metadata:
ginkgo.By("verifying the rc " + rcName + " was created") ginkgo.By("verifying the rc " + rcName + " was created")
rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Failf("Failed getting rc %s: %v", rcName, err) framework.Failf("Failed getting rc %s: %v", rcName, err)
} }
containers := rc.Spec.Template.Spec.Containers containers := rc.Spec.Template.Spec.Containers
if checkContainersImage(containers, httpdImage) { if checkContainersImage(containers, httpdImage) {
e2elog.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, httpdImage) framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, httpdImage)
} }
ginkgo.By("verifying the pod controlled by rc " + rcName + " was created") ginkgo.By("verifying the pod controlled by rc " + rcName + " was created")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": rcName})) label := labels.SelectorFromSet(labels.Set(map[string]string{"run": rcName}))
podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label) podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label)
if err != nil { if err != nil {
e2elog.Failf("Failed getting pod controlled by rc %s: %v", rcName, err) framework.Failf("Failed getting pod controlled by rc %s: %v", rcName, err)
} }
pods := podlist.Items pods := podlist.Items
if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != httpdImage { if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != httpdImage {
framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag) framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag)
e2elog.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", httpdImage, len(pods)) framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", httpdImage, len(pods))
} }
ginkgo.By("confirm that you can get logs from an rc") ginkgo.By("confirm that you can get logs from an rc")
@@ -1484,12 +1482,12 @@ metadata:
podNames = append(podNames, pod.Name) podNames = append(podNames, pod.Name)
} }
if !e2epod.CheckPodsRunningReady(c, ns, podNames, framework.PodStartTimeout) { if !e2epod.CheckPodsRunningReady(c, ns, podNames, framework.PodStartTimeout) {
e2elog.Failf("Pods for rc %s were not ready", rcName) framework.Failf("Pods for rc %s were not ready", rcName)
} }
_, err = framework.RunKubectl("logs", "rc/"+rcName, nsFlag) _, err = framework.RunKubectl("logs", "rc/"+rcName, nsFlag)
// a non-nil error is fine as long as we actually found a pod. // a non-nil error is fine as long as we actually found a pod.
if err != nil && !strings.Contains(err.Error(), " in pod ") { if err != nil && !strings.Contains(err.Error(), " in pod ") {
e2elog.Failf("Failed getting logs by rc %s: %v", rcName, err) framework.Failf("Failed getting logs by rc %s: %v", rcName, err)
} }
}) })
}) })
@@ -1520,11 +1518,11 @@ metadata:
ginkgo.By("verifying the rc " + rcName + " was created") ginkgo.By("verifying the rc " + rcName + " was created")
rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{}) rc, err := c.CoreV1().ReplicationControllers(ns).Get(rcName, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Failf("Failed getting rc %s: %v", rcName, err) framework.Failf("Failed getting rc %s: %v", rcName, err)
} }
containers := rc.Spec.Template.Spec.Containers containers := rc.Spec.Template.Spec.Containers
if checkContainersImage(containers, httpdImage) { if checkContainersImage(containers, httpdImage) {
e2elog.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, httpdImage) framework.Failf("Failed creating rc %s for 1 pod with expected image %s", rcName, httpdImage)
} }
framework.WaitForRCToStabilize(c, ns, rcName, framework.PodStartTimeout) framework.WaitForRCToStabilize(c, ns, rcName, framework.PodStartTimeout)
@@ -1570,23 +1568,23 @@ metadata:
ginkgo.By("verifying the deployment " + dName + " was created") ginkgo.By("verifying the deployment " + dName + " was created")
d, err := c.AppsV1().Deployments(ns).Get(dName, metav1.GetOptions{}) d, err := c.AppsV1().Deployments(ns).Get(dName, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Failf("Failed getting deployment %s: %v", dName, err) framework.Failf("Failed getting deployment %s: %v", dName, err)
} }
containers := d.Spec.Template.Spec.Containers containers := d.Spec.Template.Spec.Containers
if checkContainersImage(containers, httpdImage) { if checkContainersImage(containers, httpdImage) {
e2elog.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, httpdImage) framework.Failf("Failed creating deployment %s for 1 pod with expected image %s", dName, httpdImage)
} }
ginkgo.By("verifying the pod controlled by deployment " + dName + " was created") ginkgo.By("verifying the pod controlled by deployment " + dName + " was created")
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": dName})) label := labels.SelectorFromSet(labels.Set(map[string]string{"run": dName}))
podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label) podlist, err := e2epod.WaitForPodsWithLabel(c, ns, label)
if err != nil { if err != nil {
e2elog.Failf("Failed getting pod controlled by deployment %s: %v", dName, err) framework.Failf("Failed getting pod controlled by deployment %s: %v", dName, err)
} }
pods := podlist.Items pods := podlist.Items
if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != httpdImage { if pods == nil || len(pods) != 1 || len(pods[0].Spec.Containers) != 1 || pods[0].Spec.Containers[0].Image != httpdImage {
framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag) framework.RunKubectlOrDie("get", "pods", "-L", "run", nsFlag)
e2elog.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", httpdImage, len(pods)) framework.Failf("Failed creating 1 pod with expected image %s. Number of pods = %v", httpdImage, len(pods))
} }
}) })
}) })
@@ -1615,14 +1613,14 @@ metadata:
ginkgo.By("verifying the job " + jobName + " was created") ginkgo.By("verifying the job " + jobName + " was created")
job, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{}) job, err := c.BatchV1().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Failf("Failed getting job %s: %v", jobName, err) framework.Failf("Failed getting job %s: %v", jobName, err)
} }
containers := job.Spec.Template.Spec.Containers containers := job.Spec.Template.Spec.Containers
if checkContainersImage(containers, httpdImage) { if checkContainersImage(containers, httpdImage) {
e2elog.Failf("Failed creating job %s for 1 pod with expected image %s: %#v", jobName, httpdImage, containers) framework.Failf("Failed creating job %s for 1 pod with expected image %s: %#v", jobName, httpdImage, containers)
} }
if job.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure { if job.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
e2elog.Failf("Failed creating a job with correct restart policy for --restart=OnFailure") framework.Failf("Failed creating a job with correct restart policy for --restart=OnFailure")
} }
}) })
}) })
@@ -1649,17 +1647,17 @@ metadata:
ginkgo.By("verifying the CronJob " + cjName + " was created") ginkgo.By("verifying the CronJob " + cjName + " was created")
cj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{}) cj, err := c.BatchV1beta1().CronJobs(ns).Get(cjName, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Failf("Failed getting CronJob %s: %v", cjName, err) framework.Failf("Failed getting CronJob %s: %v", cjName, err)
} }
if cj.Spec.Schedule != schedule { if cj.Spec.Schedule != schedule {
e2elog.Failf("Failed creating a CronJob with correct schedule %s", schedule) framework.Failf("Failed creating a CronJob with correct schedule %s", schedule)
} }
containers := cj.Spec.JobTemplate.Spec.Template.Spec.Containers containers := cj.Spec.JobTemplate.Spec.Template.Spec.Containers
if checkContainersImage(containers, busyboxImage) { if checkContainersImage(containers, busyboxImage) {
e2elog.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers) framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
} }
if cj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure { if cj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
e2elog.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure") framework.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure")
} }
}) })
}) })
@@ -1688,14 +1686,14 @@ metadata:
ginkgo.By("verifying the pod " + podName + " was created") ginkgo.By("verifying the pod " + podName + " was created")
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Failf("Failed getting pod %s: %v", podName, err) framework.Failf("Failed getting pod %s: %v", podName, err)
} }
containers := pod.Spec.Containers containers := pod.Spec.Containers
if checkContainersImage(containers, httpdImage) { if checkContainersImage(containers, httpdImage) {
e2elog.Failf("Failed creating pod %s with expected image %s", podName, httpdImage) framework.Failf("Failed creating pod %s with expected image %s", podName, httpdImage)
} }
if pod.Spec.RestartPolicy != v1.RestartPolicyNever { if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
e2elog.Failf("Failed creating a pod with correct restart policy for --restart=Never") framework.Failf("Failed creating a pod with correct restart policy for --restart=Never")
} }
}) })
}) })
@@ -1726,13 +1724,13 @@ metadata:
label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName})) label := labels.SelectorFromSet(labels.Set(map[string]string{"run": podName}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label) err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
if err != nil { if err != nil {
e2elog.Failf("Failed getting pod %s: %v", podName, err) framework.Failf("Failed getting pod %s: %v", podName, err)
} }
ginkgo.By("verifying the pod " + podName + " was created") ginkgo.By("verifying the pod " + podName + " was created")
podJSON := framework.RunKubectlOrDie("get", "pod", podName, nsFlag, "-o", "json") podJSON := framework.RunKubectlOrDie("get", "pod", podName, nsFlag, "-o", "json")
if !strings.Contains(podJSON, podName) { if !strings.Contains(podJSON, podName) {
e2elog.Failf("Failed to find pod %s in [%s]", podName, podJSON) framework.Failf("Failed to find pod %s in [%s]", podName, podJSON)
} }
ginkgo.By("replace the image in the pod") ginkgo.By("replace the image in the pod")
@@ -1742,11 +1740,11 @@ metadata:
ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage) ginkgo.By("verifying the pod " + podName + " has the right image " + busyboxImage)
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{}) pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Failf("Failed getting deployment %s: %v", podName, err) framework.Failf("Failed getting deployment %s: %v", podName, err)
} }
containers := pod.Spec.Containers containers := pod.Spec.Containers
if checkContainersImage(containers, busyboxImage) { if checkContainersImage(containers, busyboxImage) {
e2elog.Failf("Failed creating pod with expected image %s", busyboxImage) framework.Failf("Failed creating pod with expected image %s", busyboxImage)
} }
}) })
}) })
@@ -1796,16 +1794,16 @@ metadata:
defer framework.TryKill(cmd) defer framework.TryKill(cmd)
} }
if err != nil { if err != nil {
e2elog.Failf("Failed to start proxy server: %v", err) framework.Failf("Failed to start proxy server: %v", err)
} }
ginkgo.By("curling proxy /api/ output") ginkgo.By("curling proxy /api/ output")
localAddr := fmt.Sprintf("http://localhost:%d/api/", port) localAddr := fmt.Sprintf("http://localhost:%d/api/", port)
apiVersions, err := getAPIVersions(localAddr) apiVersions, err := getAPIVersions(localAddr)
if err != nil { if err != nil {
e2elog.Failf("Expected at least one supported apiversion, got error %v", err) framework.Failf("Expected at least one supported apiversion, got error %v", err)
} }
if len(apiVersions.Versions) < 1 { if len(apiVersions.Versions) < 1 {
e2elog.Failf("Expected at least one supported apiversion, got %v", apiVersions) framework.Failf("Expected at least one supported apiversion, got %v", apiVersions)
} }
}) })
@@ -1818,7 +1816,7 @@ metadata:
ginkgo.By("Starting the proxy") ginkgo.By("Starting the proxy")
tmpdir, err := ioutil.TempDir("", "kubectl-proxy-unix") tmpdir, err := ioutil.TempDir("", "kubectl-proxy-unix")
if err != nil { if err != nil {
e2elog.Failf("Failed to create temporary directory: %v", err) framework.Failf("Failed to create temporary directory: %v", err)
} }
path := filepath.Join(tmpdir, "test") path := filepath.Join(tmpdir, "test")
defer os.Remove(path) defer os.Remove(path)
@@ -1826,19 +1824,19 @@ metadata:
cmd := framework.KubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path)) cmd := framework.KubectlCmd("proxy", fmt.Sprintf("--unix-socket=%s", path))
stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd) stdout, stderr, err := framework.StartCmdAndStreamOutput(cmd)
if err != nil { if err != nil {
e2elog.Failf("Failed to start kubectl command: %v", err) framework.Failf("Failed to start kubectl command: %v", err)
} }
defer stdout.Close() defer stdout.Close()
defer stderr.Close() defer stderr.Close()
defer framework.TryKill(cmd) defer framework.TryKill(cmd)
buf := make([]byte, 128) buf := make([]byte, 128)
if _, err = stdout.Read(buf); err != nil { if _, err = stdout.Read(buf); err != nil {
e2elog.Failf("Expected output from kubectl proxy: %v", err) framework.Failf("Expected output from kubectl proxy: %v", err)
} }
ginkgo.By("retrieving proxy /api/ output") ginkgo.By("retrieving proxy /api/ output")
_, err = curlUnix("http://unused/api", path) _, err = curlUnix("http://unused/api", path)
if err != nil { if err != nil {
e2elog.Failf("Failed get of /api at %s: %v", path, err) framework.Failf("Failed get of /api at %s: %v", path, err)
} }
}) })
}) })
@@ -1873,7 +1871,7 @@ metadata:
ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key) ginkgo.By("verifying the node doesn't have the taint " + testTaint.Key)
output = runKubectlRetryOrDie("describe", "node", nodeName) output = runKubectlRetryOrDie("describe", "node", nodeName)
if strings.Contains(output, testTaint.Key) { if strings.Contains(output, testTaint.Key) {
e2elog.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName) framework.Failf("Failed removing taint " + testTaint.Key + " of the node " + nodeName)
} }
}) })
@@ -1940,7 +1938,7 @@ metadata:
ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key) ginkgo.By("verifying the node doesn't have the taints that have the same key " + testTaint.Key)
output = runKubectlRetryOrDie("describe", "node", nodeName) output = runKubectlRetryOrDie("describe", "node", nodeName)
if strings.Contains(output, testTaint.Key) { if strings.Contains(output, testTaint.Key) {
e2elog.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName) framework.Failf("Failed removing taints " + testTaint.Key + " of the node " + nodeName)
} }
}) })
}) })
@@ -1956,22 +1954,22 @@ metadata:
ginkgo.By("verifying that the quota was created") ginkgo.By("verifying that the quota was created")
quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Failf("Failed getting quota %s: %v", quotaName, err) framework.Failf("Failed getting quota %s: %v", quotaName, err)
} }
if len(quota.Spec.Scopes) != 0 { if len(quota.Spec.Scopes) != 0 {
e2elog.Failf("Expected empty scopes, got %v", quota.Spec.Scopes) framework.Failf("Expected empty scopes, got %v", quota.Spec.Scopes)
} }
if len(quota.Spec.Hard) != 2 { if len(quota.Spec.Hard) != 2 {
e2elog.Failf("Expected two resources, got %v", quota.Spec.Hard) framework.Failf("Expected two resources, got %v", quota.Spec.Hard)
} }
r, found := quota.Spec.Hard[v1.ResourcePods] r, found := quota.Spec.Hard[v1.ResourcePods]
if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 { if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
e2elog.Failf("Expected pods=1000000, got %v", r) framework.Failf("Expected pods=1000000, got %v", r)
} }
r, found = quota.Spec.Hard[v1.ResourceServices] r, found = quota.Spec.Hard[v1.ResourceServices]
if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 { if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
e2elog.Failf("Expected services=1000000, got %v", r) framework.Failf("Expected services=1000000, got %v", r)
} }
}) })
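A note on the quota assertions above: the hard limits are compared as resource.Quantity values via Cmp, not as strings, so equivalent notations compare equal. A minimal standalone sketch assuming only k8s.io/apimachinery is on the module path (the map keys here are plain strings rather than v1.ResourceName):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Hard limits as they might come back in a ResourceQuota spec.
	hard := map[string]resource.Quantity{
		"pods":     resource.MustParse("1000000"),
		"services": resource.MustParse("1M"), // same value, SI notation
	}

	expected := resource.MustParse("1000000")
	for name, got := range hard {
		// Cmp compares numeric values, so "1M" equals "1000000".
		if got.Cmp(expected) != 0 {
			fmt.Printf("expected %s=1000000, got %v\n", name, got)
			continue
		}
		fmt.Printf("%s matches\n", name)
	}
}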
@@ -1985,21 +1983,21 @@ metadata:
ginkgo.By("verifying that the quota was created") ginkgo.By("verifying that the quota was created")
quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{}) quota, err := c.CoreV1().ResourceQuotas(ns).Get(quotaName, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Failf("Failed getting quota %s: %v", quotaName, err) framework.Failf("Failed getting quota %s: %v", quotaName, err)
} }
if len(quota.Spec.Scopes) != 2 { if len(quota.Spec.Scopes) != 2 {
e2elog.Failf("Expected two scopes, got %v", quota.Spec.Scopes) framework.Failf("Expected two scopes, got %v", quota.Spec.Scopes)
} }
scopes := make(map[v1.ResourceQuotaScope]struct{}) scopes := make(map[v1.ResourceQuotaScope]struct{})
for _, scope := range quota.Spec.Scopes { for _, scope := range quota.Spec.Scopes {
scopes[scope] = struct{}{} scopes[scope] = struct{}{}
} }
if _, found := scopes[v1.ResourceQuotaScopeBestEffort]; !found { if _, found := scopes[v1.ResourceQuotaScopeBestEffort]; !found {
e2elog.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes) framework.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes)
} }
if _, found := scopes[v1.ResourceQuotaScopeNotTerminating]; !found { if _, found := scopes[v1.ResourceQuotaScopeNotTerminating]; !found {
e2elog.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes) framework.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes)
} }
}) })
@@ -2010,7 +2008,7 @@ metadata:
ginkgo.By("calling kubectl quota") ginkgo.By("calling kubectl quota")
out, err := framework.RunKubectl("create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo", nsFlag) out, err := framework.RunKubectl("create", "quota", quotaName, "--hard=hard=pods=1000000", "--scopes=Foo", nsFlag)
if err == nil { if err == nil {
e2elog.Failf("Expected kubectl to fail, but it succeeded: %s", out) framework.Failf("Expected kubectl to fail, but it succeeded: %s", out)
} }
}) })
}) })
@@ -2039,7 +2037,7 @@ func checkOutputReturnError(output string, required [][]string) error {
func checkOutput(output string, required [][]string) { func checkOutput(output string, required [][]string) {
err := checkOutputReturnError(output, required) err := checkOutputReturnError(output, required)
if err != nil { if err != nil {
e2elog.Failf("%v", err) framework.Failf("%v", err)
} }
} }
@@ -2056,7 +2054,7 @@ func checkKubectlOutputWithRetry(required [][]string, args ...string) {
return true, nil return true, nil
}) })
if pollErr != nil { if pollErr != nil {
e2elog.Failf("%v", pollErr) framework.Failf("%v", pollErr)
} }
return return
} }
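checkKubectlOutputWithRetry above hangs its assertion off wait.Poll, which is where pollErr comes from. The general shape of that helper, with an invented stand-in for the kubectl call:

package main

import (
	"fmt"
	"strings"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// fakeKubectlGet stands in for framework.RunKubectl(...) in this sketch.
func fakeKubectlGet() string { return "pod-a Running" }

func main() {
	// Poll every 2s, give up after 30s. The condition returns (true, nil)
	// to succeed, (false, nil) to retry, or a non-nil error to abort.
	pollErr := wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
		output := fakeKubectlGet()
		if !strings.Contains(output, "Running") {
			return false, nil // not there yet, retry
		}
		return true, nil
	})
	if pollErr != nil {
		fmt.Printf("%v\n", pollErr)
	}
}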
@@ -2131,23 +2129,23 @@ func curl(url string) (string, error) {
} }
func validateGuestbookApp(c clientset.Interface, ns string) { func validateGuestbookApp(c clientset.Interface, ns string) {
e2elog.Logf("Waiting for all frontend pods to be Running.") framework.Logf("Waiting for all frontend pods to be Running.")
label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"})) label := labels.SelectorFromSet(labels.Set(map[string]string{"tier": "frontend", "app": "guestbook"}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label) err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2elog.Logf("Waiting for frontend to serve content.") framework.Logf("Waiting for frontend to serve content.")
if !waitForGuestbookResponse(c, "get", "", `{"data": ""}`, guestbookStartupTimeout, ns) { if !waitForGuestbookResponse(c, "get", "", `{"data": ""}`, guestbookStartupTimeout, ns) {
e2elog.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds()) framework.Failf("Frontend service did not start serving content in %v seconds.", guestbookStartupTimeout.Seconds())
} }
e2elog.Logf("Trying to add a new entry to the guestbook.") framework.Logf("Trying to add a new entry to the guestbook.")
if !waitForGuestbookResponse(c, "set", "TestEntry", `{"message": "Updated"}`, guestbookResponseTimeout, ns) { if !waitForGuestbookResponse(c, "set", "TestEntry", `{"message": "Updated"}`, guestbookResponseTimeout, ns) {
e2elog.Failf("Cannot added new entry in %v seconds.", guestbookResponseTimeout.Seconds()) framework.Failf("Cannot added new entry in %v seconds.", guestbookResponseTimeout.Seconds())
} }
e2elog.Logf("Verifying that added entry can be retrieved.") framework.Logf("Verifying that added entry can be retrieved.")
if !waitForGuestbookResponse(c, "get", "", `{"data": "TestEntry"}`, guestbookResponseTimeout, ns) { if !waitForGuestbookResponse(c, "get", "", `{"data": "TestEntry"}`, guestbookResponseTimeout, ns) {
e2elog.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds()) framework.Failf("Entry to guestbook wasn't correctly added in %v seconds.", guestbookResponseTimeout.Seconds())
} }
} }
@@ -2158,7 +2156,7 @@ func waitForGuestbookResponse(c clientset.Interface, cmd, arg, expectedResponse
if err == nil && res == expectedResponse { if err == nil && res == expectedResponse {
return true return true
} }
e2elog.Logf("Failed to get response from guestbook. err: %v, response: %s", err, res) framework.Logf("Failed to get response from guestbook. err: %v, response: %s", err, res)
} }
return false return false
} }
@@ -2193,7 +2191,7 @@ const applyTestLabel = "kubectl.kubernetes.io/apply-test"
func readReplicationControllerFromString(contents string) *v1.ReplicationController { func readReplicationControllerFromString(contents string) *v1.ReplicationController {
rc := v1.ReplicationController{} rc := v1.ReplicationController{}
if err := yaml.Unmarshal([]byte(contents), &rc); err != nil { if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
e2elog.Failf(err.Error()) framework.Failf(err.Error())
} }
return &rc return &rc
@@ -2206,7 +2204,7 @@ func modifyReplicationControllerConfiguration(contents string) io.Reader {
rc.Spec.Template.Labels[applyTestLabel] = "ADDED" rc.Spec.Template.Labels[applyTestLabel] = "ADDED"
data, err := json.Marshal(rc) data, err := json.Marshal(rc)
if err != nil { if err != nil {
e2elog.Failf("json marshal failed: %s\n", err) framework.Failf("json marshal failed: %s\n", err)
} }
return bytes.NewReader(data) return bytes.NewReader(data)
@@ -2226,7 +2224,7 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select
} }
if rcs == nil || len(rcs.Items) == 0 { if rcs == nil || len(rcs.Items) == 0 {
e2elog.Failf("No replication controllers found") framework.Failf("No replication controllers found")
} }
for _, rc := range rcs.Items { for _, rc := range rcs.Items {
@@ -2237,11 +2235,11 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select
func validateReplicationControllerConfiguration(rc v1.ReplicationController) { func validateReplicationControllerConfiguration(rc v1.ReplicationController) {
if rc.Name == "redis-master" { if rc.Name == "redis-master" {
if _, ok := rc.Annotations[v1.LastAppliedConfigAnnotation]; !ok { if _, ok := rc.Annotations[v1.LastAppliedConfigAnnotation]; !ok {
e2elog.Failf("Annotation not found in modified configuration:\n%v\n", rc) framework.Failf("Annotation not found in modified configuration:\n%v\n", rc)
} }
if value, ok := rc.Labels[applyTestLabel]; !ok || value != "ADDED" { if value, ok := rc.Labels[applyTestLabel]; !ok || value != "ADDED" {
e2elog.Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc) framework.Failf("Added label %s not found in modified configuration:\n%v\n", applyTestLabel, rc)
} }
} }
} }
@@ -2253,7 +2251,7 @@ func getUDData(jpgExpected string, ns string) func(clientset.Interface, string)
// getUDData validates data.json in the update-demo (returns nil if data is ok). // getUDData validates data.json in the update-demo (returns nil if data is ok).
return func(c clientset.Interface, podID string) error { return func(c clientset.Interface, podID string) error {
e2elog.Logf("validating pod %s", podID) framework.Logf("validating pod %s", podID)
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout) ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel() defer cancel()
@@ -2269,16 +2267,16 @@ func getUDData(jpgExpected string, ns string) func(clientset.Interface, string)
if err != nil { if err != nil {
if ctx.Err() != nil { if ctx.Err() != nil {
e2elog.Failf("Failed to retrieve data from container: %v", err) framework.Failf("Failed to retrieve data from container: %v", err)
} }
return err return err
} }
e2elog.Logf("got data: %s", body) framework.Logf("got data: %s", body)
var data updateDemoData var data updateDemoData
if err := json.Unmarshal(body, &data); err != nil { if err := json.Unmarshal(body, &data); err != nil {
return err return err
} }
e2elog.Logf("Unmarshalled json jpg/img => %s , expecting %s .", data, jpgExpected) framework.Logf("Unmarshalled json jpg/img => %s , expecting %s .", data, jpgExpected)
if strings.Contains(data.Image, jpgExpected) { if strings.Contains(data.Image, jpgExpected) {
return nil return nil
} }
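Before the next file, it helps to see the commit's one mechanical rewrite in isolation: drop the e2elog alias import and call the identical printf-style helpers that the core framework package exports. A minimal sketch (the helper name logAndFail is invented for illustration):

// Before:
//     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
//     e2elog.Logf("got data: %s", body)
//
// After:
package kubectl

import "k8s.io/kubernetes/test/e2e/framework"

func logAndFail(body []byte, err error) {
	framework.Logf("got data: %s", body) // progress log, test keeps running
	if err != nil {
		framework.Failf("unexpected error: %v", err) // logs, then fails the spec
	}
}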
View File
@@ -36,7 +36,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@@ -140,7 +139,7 @@ type portForwardCommand struct {
func (c *portForwardCommand) Stop() { func (c *portForwardCommand) Stop() {
// SIGINT signals that kubectl port-forward should gracefully terminate // SIGINT signals that kubectl port-forward should gracefully terminate
if err := c.cmd.Process.Signal(syscall.SIGINT); err != nil { if err := c.cmd.Process.Signal(syscall.SIGINT); err != nil {
e2elog.Logf("error sending SIGINT to kubectl port-forward: %v", err) framework.Logf("error sending SIGINT to kubectl port-forward: %v", err)
} }
// try to wait for a clean exit // try to wait for a clean exit
@@ -158,12 +157,12 @@ func (c *portForwardCommand) Stop() {
// success // success
return return
} }
e2elog.Logf("error waiting for kubectl port-forward to exit: %v", err) framework.Logf("error waiting for kubectl port-forward to exit: %v", err)
case <-expired.C: case <-expired.C:
e2elog.Logf("timed out waiting for kubectl port-forward to exit") framework.Logf("timed out waiting for kubectl port-forward to exit")
} }
e2elog.Logf("trying to forcibly kill kubectl port-forward") framework.Logf("trying to forcibly kill kubectl port-forward")
framework.TryKill(c.cmd) framework.TryKill(c.cmd)
} }
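Stop above is the classic two-stage shutdown: request a graceful exit with SIGINT, wait briefly, then kill. The same pattern with only the standard library, using sleep as a stand-in for kubectl port-forward:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "60") // stand-in for kubectl port-forward
	if err := cmd.Start(); err != nil {
		fmt.Println("start:", err)
		return
	}

	// SIGINT signals that the child should gracefully terminate.
	if err := cmd.Process.Signal(syscall.SIGINT); err != nil {
		fmt.Println("error sending SIGINT:", err)
	}

	// Try to wait for a clean exit, but only so long.
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case err := <-done:
		fmt.Println("exited:", err) // non-nil for signal-caused exits
	case <-time.After(5 * time.Second):
		fmt.Println("timed out; forcibly killing")
		_ = cmd.Process.Kill()
		<-done // reap the process
	}
}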
@@ -173,28 +172,28 @@ func runPortForward(ns, podName string, port int) *portForwardCommand {
// This is somewhat ugly but is the only way to retrieve the port that was picked // This is somewhat ugly but is the only way to retrieve the port that was picked
// by the port-forward command. We don't want to hard code the port as we have no // by the port-forward command. We don't want to hard code the port as we have no
// way of guaranteeing we can pick one that isn't in use, particularly on Jenkins. // way of guaranteeing we can pick one that isn't in use, particularly on Jenkins.
e2elog.Logf("starting port-forward command and streaming output") framework.Logf("starting port-forward command and streaming output")
portOutput, _, err := framework.StartCmdAndStreamOutput(cmd) portOutput, _, err := framework.StartCmdAndStreamOutput(cmd)
if err != nil { if err != nil {
e2elog.Failf("Failed to start port-forward command: %v", err) framework.Failf("Failed to start port-forward command: %v", err)
} }
buf := make([]byte, 128) buf := make([]byte, 128)
var n int var n int
e2elog.Logf("reading from `kubectl port-forward` command's stdout") framework.Logf("reading from `kubectl port-forward` command's stdout")
if n, err = portOutput.Read(buf); err != nil { if n, err = portOutput.Read(buf); err != nil {
e2elog.Failf("Failed to read from kubectl port-forward stdout: %v", err) framework.Failf("Failed to read from kubectl port-forward stdout: %v", err)
} }
portForwardOutput := string(buf[:n]) portForwardOutput := string(buf[:n])
match := portForwardRegexp.FindStringSubmatch(portForwardOutput) match := portForwardRegexp.FindStringSubmatch(portForwardOutput)
if len(match) != 3 { if len(match) != 3 {
e2elog.Failf("Failed to parse kubectl port-forward output: %s", portForwardOutput) framework.Failf("Failed to parse kubectl port-forward output: %s", portForwardOutput)
} }
listenPort, err := strconv.Atoi(match[2]) listenPort, err := strconv.Atoi(match[2])
if err != nil { if err != nil {
e2elog.Failf("Error converting %s to an int: %v", match[2], err) framework.Failf("Error converting %s to an int: %v", match[2], err)
} }
return &portForwardCommand{ return &portForwardCommand{
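That hunk is the fiddly core of runPortForward: the only way to learn the local port is to scrape the command's first line of output. A standalone sketch against a canned line; the regexp here is illustrative, not necessarily the framework's exact pattern:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// Host group then port group, matching the three-element
// FindStringSubmatch result used in the test above.
var portForwardRegexp = regexp.MustCompile(`Forwarding from (127\.0\.0\.1|\[::1\]):(\d+) ->`)

func main() {
	// First line of `kubectl port-forward` output (sample value).
	out := "Forwarding from 127.0.0.1:43219 -> 80"

	match := portForwardRegexp.FindStringSubmatch(out)
	if len(match) != 3 {
		fmt.Printf("failed to parse port-forward output: %s\n", out)
		return
	}
	listenPort, err := strconv.Atoi(match[2])
	if err != nil {
		fmt.Printf("error converting %s to an int: %v\n", match[2], err)
		return
	}
	fmt.Println("local listen port:", listenPort)
}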
@@ -207,10 +206,10 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) {
ginkgo.By("Creating the target pod") ginkgo.By("Creating the target pod")
pod := pfPod("", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) pod := pfPod("", "10", "10", "100", fmt.Sprintf("%s", bindAddress))
if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil {
e2elog.Failf("Couldn't create pod: %v", err) framework.Failf("Couldn't create pod: %v", err)
} }
if err := f.WaitForPodReady(pod.Name); err != nil { if err := f.WaitForPodReady(pod.Name); err != nil {
e2elog.Failf("Pod did not start running: %v", err) framework.Failf("Pod did not start running: %v", err)
} }
ginkgo.By("Running 'kubectl port-forward'") ginkgo.By("Running 'kubectl port-forward'")
@@ -220,7 +219,7 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) {
ginkgo.By("Dialing the local port") ginkgo.By("Dialing the local port")
conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port))
if err != nil { if err != nil {
e2elog.Failf("Couldn't connect to port %d: %v", cmd.port, err) framework.Failf("Couldn't connect to port %d: %v", cmd.port, err)
} }
defer func() { defer func() {
ginkgo.By("Closing the connection to the local port") ginkgo.By("Closing the connection to the local port")
@@ -230,16 +229,16 @@ func doTestConnectSendDisconnect(bindAddress string, f *framework.Framework) {
ginkgo.By("Reading data from the local port") ginkgo.By("Reading data from the local port")
fromServer, err := ioutil.ReadAll(conn) fromServer, err := ioutil.ReadAll(conn)
if err != nil { if err != nil {
e2elog.Failf("Unexpected error reading data from the server: %v", err) framework.Failf("Unexpected error reading data from the server: %v", err)
} }
if e, a := strings.Repeat("x", 100), string(fromServer); e != a { if e, a := strings.Repeat("x", 100), string(fromServer); e != a {
e2elog.Failf("Expected %q from server, got %q", e, a) framework.Failf("Expected %q from server, got %q", e, a)
} }
ginkgo.By("Waiting for the target pod to stop running") ginkgo.By("Waiting for the target pod to stop running")
if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil {
e2elog.Failf("Container did not terminate: %v", err) framework.Failf("Container did not terminate: %v", err)
} }
ginkgo.By("Verifying logs") ginkgo.By("Verifying logs")
@@ -255,10 +254,10 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) {
ginkgo.By("Creating the target pod") ginkgo.By("Creating the target pod")
pod := pfPod("abc", "1", "1", "1", fmt.Sprintf("%s", bindAddress)) pod := pfPod("abc", "1", "1", "1", fmt.Sprintf("%s", bindAddress))
if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil {
e2elog.Failf("Couldn't create pod: %v", err) framework.Failf("Couldn't create pod: %v", err)
} }
if err := f.WaitForPodReady(pod.Name); err != nil { if err := f.WaitForPodReady(pod.Name); err != nil {
e2elog.Failf("Pod did not start running: %v", err) framework.Failf("Pod did not start running: %v", err)
} }
ginkgo.By("Running 'kubectl port-forward'") ginkgo.By("Running 'kubectl port-forward'")
@@ -268,7 +267,7 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) {
ginkgo.By("Dialing the local port") ginkgo.By("Dialing the local port")
conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) conn, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port))
if err != nil { if err != nil {
e2elog.Failf("Couldn't connect to port %d: %v", cmd.port, err) framework.Failf("Couldn't connect to port %d: %v", cmd.port, err)
} }
ginkgo.By("Closing the connection to the local port") ginkgo.By("Closing the connection to the local port")
@@ -276,7 +275,7 @@ func doTestMustConnectSendNothing(bindAddress string, f *framework.Framework) {
ginkgo.By("Waiting for the target pod to stop running") ginkgo.By("Waiting for the target pod to stop running")
if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil {
e2elog.Failf("Container did not terminate: %v", err) framework.Failf("Container did not terminate: %v", err)
} }
ginkgo.By("Verifying logs") ginkgo.By("Verifying logs")
@@ -292,10 +291,10 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework)
ginkgo.By("Creating the target pod") ginkgo.By("Creating the target pod")
pod := pfPod("abc", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) pod := pfPod("abc", "10", "10", "100", fmt.Sprintf("%s", bindAddress))
if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil {
e2elog.Failf("Couldn't create pod: %v", err) framework.Failf("Couldn't create pod: %v", err)
} }
if err := f.WaitForPodReady(pod.Name); err != nil { if err := f.WaitForPodReady(pod.Name); err != nil {
e2elog.Failf("Pod did not start running: %v", err) framework.Failf("Pod did not start running: %v", err)
} }
ginkgo.By("Running 'kubectl port-forward'") ginkgo.By("Running 'kubectl port-forward'")
@@ -305,11 +304,11 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework)
ginkgo.By("Dialing the local port") ginkgo.By("Dialing the local port")
addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port)) addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("127.0.0.1:%d", cmd.port))
if err != nil { if err != nil {
e2elog.Failf("Error resolving tcp addr: %v", err) framework.Failf("Error resolving tcp addr: %v", err)
} }
conn, err := net.DialTCP("tcp", nil, addr) conn, err := net.DialTCP("tcp", nil, addr)
if err != nil { if err != nil {
e2elog.Failf("Couldn't connect to port %d: %v", cmd.port, err) framework.Failf("Couldn't connect to port %d: %v", cmd.port, err)
} }
defer func() { defer func() {
ginkgo.By("Closing the connection to the local port") ginkgo.By("Closing the connection to the local port")
@@ -325,16 +324,16 @@ func doTestMustConnectSendDisconnect(bindAddress string, f *framework.Framework)
ginkgo.By("Reading data from the local port") ginkgo.By("Reading data from the local port")
fromServer, err := ioutil.ReadAll(conn) fromServer, err := ioutil.ReadAll(conn)
if err != nil { if err != nil {
e2elog.Failf("Unexpected error reading data from the server: %v", err) framework.Failf("Unexpected error reading data from the server: %v", err)
} }
if e, a := strings.Repeat("x", 100), string(fromServer); e != a { if e, a := strings.Repeat("x", 100), string(fromServer); e != a {
e2elog.Failf("Expected %q from server, got %q", e, a) framework.Failf("Expected %q from server, got %q", e, a)
} }
ginkgo.By("Waiting for the target pod to stop running") ginkgo.By("Waiting for the target pod to stop running")
if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil { if err := WaitForTerminatedContainer(f, pod, "portforwardtester"); err != nil {
e2elog.Failf("Container did not terminate: %v", err) framework.Failf("Container did not terminate: %v", err)
} }
ginkgo.By("Verifying logs") ginkgo.By("Verifying logs")
@@ -354,10 +353,10 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
ginkgo.By("Creating the pod") ginkgo.By("Creating the pod")
pod := pfPod("def", "10", "10", "100", fmt.Sprintf("%s", bindAddress)) pod := pfPod("def", "10", "10", "100", fmt.Sprintf("%s", bindAddress))
if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil { if _, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod); err != nil {
e2elog.Failf("Couldn't create pod: %v", err) framework.Failf("Couldn't create pod: %v", err)
} }
if err := f.WaitForPodReady(pod.Name); err != nil { if err := f.WaitForPodReady(pod.Name); err != nil {
e2elog.Failf("Pod did not start running: %v", err) framework.Failf("Pod did not start running: %v", err)
} }
req := f.ClientSet.CoreV1().RESTClient().Get(). req := f.ClientSet.CoreV1().RESTClient().Get().
@@ -370,7 +369,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
url := req.URL() url := req.URL()
ws, err := framework.OpenWebSocketForURL(url, config, []string{"v4.channel.k8s.io"}) ws, err := framework.OpenWebSocketForURL(url, config, []string{"v4.channel.k8s.io"})
if err != nil { if err != nil {
e2elog.Failf("Failed to open websocket to %s: %v", url.String(), err) framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
} }
defer ws.Close() defer ws.Close()
@@ -405,7 +404,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
ginkgo.By("Sending the expected data to the local port") ginkgo.By("Sending the expected data to the local port")
err = wsWrite(ws, 0, []byte("def")) err = wsWrite(ws, 0, []byte("def"))
if err != nil { if err != nil {
e2elog.Failf("Failed to write to websocket %s: %v", url.String(), err) framework.Failf("Failed to write to websocket %s: %v", url.String(), err)
} }
ginkgo.By("Reading data from the local port") ginkgo.By("Reading data from the local port")
View File
@@ -37,7 +37,6 @@ go_library(
"//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library", "//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library",
"//test/e2e/framework:go_default_library", "//test/e2e/framework:go_default_library",
"//test/e2e/framework/kubelet:go_default_library", "//test/e2e/framework/kubelet:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library", "//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library", "//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library", "//test/e2e/framework/pod:go_default_library",
View File
@@ -31,7 +31,6 @@ import (
"k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/cache"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet" e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics" e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@@ -121,7 +120,7 @@ func runDensityBatchTest(f *framework.Framework, testArg densityTest) (time.Dura
}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue()) }, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue())
if len(watchTimes) < testArg.podsNr { if len(watchTimes) < testArg.podsNr {
e2elog.Failf("Timeout reached waiting for all Pods to be observed by the watch.") framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
} }
// Analyze results // Analyze results
View File
@@ -23,7 +23,6 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
@@ -54,11 +53,11 @@ var _ = SIGDescribe("DNS", func() {
} }
testUtilsPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod) testUtilsPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(testUtilsPod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2elog.Logf("Created pod %v", testUtilsPod) framework.Logf("Created pod %v", testUtilsPod)
defer func() { defer func() {
e2elog.Logf("Deleting pod %s...", testUtilsPod.Name) framework.Logf("Deleting pod %s...", testUtilsPod.Name)
if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil { if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(testUtilsPod.Name, metav1.NewDeleteOptions(0)); err != nil {
e2elog.Failf("Failed to delete pod %s: %v", testUtilsPod.Name, err) framework.Failf("Failed to delete pod %s: %v", testUtilsPod.Name, err)
} }
}() }()
framework.ExpectNoError(f.WaitForPodRunning(testUtilsPod.Name), "failed to wait for pod %s to be running", testUtilsPod.Name) framework.ExpectNoError(f.WaitForPodRunning(testUtilsPod.Name), "failed to wait for pod %s to be running", testUtilsPod.Name)
@@ -75,17 +74,17 @@ var _ = SIGDescribe("DNS", func() {
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2elog.Logf("ipconfig /all:\n%s", stdout) framework.Logf("ipconfig /all:\n%s", stdout)
dnsRegex, err := regexp.Compile(`DNS Servers[\s*.]*:(\s*[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})+`) dnsRegex, err := regexp.Compile(`DNS Servers[\s*.]*:(\s*[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})+`)
if dnsRegex.MatchString(stdout) { if dnsRegex.MatchString(stdout) {
match := dnsRegex.FindString(stdout) match := dnsRegex.FindString(stdout)
if !strings.Contains(match, testInjectedIP) { if !strings.Contains(match, testInjectedIP) {
e2elog.Failf("customized DNS options not found in ipconfig /all, got: %s", match) framework.Failf("customized DNS options not found in ipconfig /all, got: %s", match)
} }
} else { } else {
e2elog.Failf("cannot find DNS server info in ipconfig /all output: \n%s", stdout) framework.Failf("cannot find DNS server info in ipconfig /all output: \n%s", stdout)
} }
// TODO: Add more test cases for other DNSPolicies. // TODO: Add more test cases for other DNSPolicies.
}) })
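The Windows DNS check above reduces to one regexp over ipconfig /all output. The same match-then-contains logic runs standalone like this (the sample stdout is invented; the pattern is copied from the test):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Pattern copied from the test above.
	dnsRegex := regexp.MustCompile(`DNS Servers[\s*.]*:(\s*[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3})+`)

	testInjectedIP := "1.1.1.1"
	// Invented stand-in for `ipconfig /all` output on the Windows pod.
	stdout := "   DNS Servers . . . . . . . . . . . : 1.1.1.1"

	if dnsRegex.MatchString(stdout) {
		match := dnsRegex.FindString(stdout)
		if !strings.Contains(match, testInjectedIP) {
			fmt.Printf("customized DNS options not found, got: %s\n", match)
			return
		}
		fmt.Println("injected DNS server found:", testInjectedIP)
	} else {
		fmt.Printf("cannot find DNS server info in output:\n%s\n", stdout)
	}
}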
View File
@@ -52,7 +52,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
@@ -99,28 +98,28 @@ var _ = SIGDescribe("[Feature:Windows] [Feature:WindowsGMSA] GMSA Full [Slow]",
deployScriptPath, err := downloadFile(gmsaWebhookDeployScriptURL) deployScriptPath, err := downloadFile(gmsaWebhookDeployScriptURL)
defer func() { os.Remove(deployScriptPath) }() defer func() { os.Remove(deployScriptPath) }()
if err != nil { if err != nil {
e2elog.Failf(err.Error()) framework.Failf(err.Error())
} }
ginkgo.By("deploying the GMSA webhook") ginkgo.By("deploying the GMSA webhook")
webhookCleanUp, err := deployGmsaWebhook(f, deployScriptPath) webhookCleanUp, err := deployGmsaWebhook(f, deployScriptPath)
defer webhookCleanUp() defer webhookCleanUp()
if err != nil { if err != nil {
e2elog.Failf(err.Error()) framework.Failf(err.Error())
} }
ginkgo.By("creating the GMSA custom resource") ginkgo.By("creating the GMSA custom resource")
customResourceCleanup, err := createGmsaCustomResource(crdManifestContents) customResourceCleanup, err := createGmsaCustomResource(crdManifestContents)
defer customResourceCleanup() defer customResourceCleanup()
if err != nil { if err != nil {
e2elog.Failf(err.Error()) framework.Failf(err.Error())
} }
ginkgo.By("creating an RBAC role to grant use access to that GMSA resource") ginkgo.By("creating an RBAC role to grant use access to that GMSA resource")
rbacRoleName, rbacRoleCleanup, err := createRBACRoleForGmsa(f) rbacRoleName, rbacRoleCleanup, err := createRBACRoleForGmsa(f)
defer rbacRoleCleanup() defer rbacRoleCleanup()
if err != nil { if err != nil {
e2elog.Failf(err.Error()) framework.Failf(err.Error())
} }
ginkgo.By("creating a service account") ginkgo.By("creating a service account")
@@ -144,7 +143,7 @@ var _ = SIGDescribe("[Feature:Windows] [Feature:WindowsGMSA] GMSA Full [Slow]",
expectedSubstr := "The command completed successfully" expectedSubstr := "The command completed successfully"
if !strings.Contains(output, expectedSubstr) { if !strings.Contains(output, expectedSubstr) {
e2elog.Failf("Expected %q to contain %q", output, expectedSubstr) framework.Failf("Expected %q to contain %q", output, expectedSubstr)
} }
}) })
}) })
@@ -157,7 +156,7 @@ func findPreconfiguredGmsaNodes(c clientset.Interface) []v1.Node {
} }
nodes, err := c.CoreV1().Nodes().List(nodeOpts) nodes, err := c.CoreV1().Nodes().List(nodeOpts)
if err != nil { if err != nil {
e2elog.Failf("Unable to list nodes: %v", err) framework.Failf("Unable to list nodes: %v", err)
} }
return nodes.Items return nodes.Items
} }
@@ -211,7 +210,7 @@ func retrieveCRDManifestFileContents(f *framework.Framework, node v1.Node) strin
// escape quotes and backward slashes // escape quotes and backward slashes
output, err := runKubectlExecInNamespace(f.Namespace.Name, podName, "powershell", "Get-Content", strings.ReplaceAll(gmsaCrdManifestPath, `\`, "/")) output, err := runKubectlExecInNamespace(f.Namespace.Name, podName, "powershell", "Get-Content", strings.ReplaceAll(gmsaCrdManifestPath, `\`, "/"))
if err != nil { if err != nil {
e2elog.Failf("failed to retrieve the contents of %q on node %q: %v", gmsaCrdManifestPath, node.Name, err) framework.Failf("failed to retrieve the contents of %q on node %q: %v", gmsaCrdManifestPath, node.Name, err)
} }
// Windows to linux new lines // Windows to linux new lines
@@ -250,7 +249,7 @@ func deployGmsaWebhook(f *framework.Framework, deployScriptPath string) (func(),
output, err := cmd.CombinedOutput() output, err := cmd.CombinedOutput()
if err == nil { if err == nil {
e2elog.Logf("GMSA webhook successfully deployed, output:\n%s", string(output)) framework.Logf("GMSA webhook successfully deployed, output:\n%s", string(output))
} else { } else {
err = errors.Wrapf(err, "unable to deploy GMSA webhook, output:\n%s", string(output)) err = errors.Wrapf(err, "unable to deploy GMSA webhook, output:\n%s", string(output))
} }
@@ -332,7 +331,7 @@ func createServiceAccount(f *framework.Framework) string {
}, },
} }
if _, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(account); err != nil { if _, err := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.Name).Create(account); err != nil {
e2elog.Failf("unable to create service account %q: %v", accountName, err) framework.Failf("unable to create service account %q: %v", accountName, err)
} }
return accountName return accountName
} }
View File
@@ -29,7 +29,6 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
@@ -104,12 +103,12 @@ var _ = SIGDescribe("[Feature:Windows] [Feature:WindowsGMSA] GMSA Kubelet [Slow]
}, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue()) }, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue())
if !strings.HasPrefix(output, domain) { if !strings.HasPrefix(output, domain) {
e2elog.Failf("Expected %q to start with %q", output, domain) framework.Failf("Expected %q to start with %q", output, domain)
} }
expectedSubstr := "The command completed successfully" expectedSubstr := "The command completed successfully"
if !strings.Contains(output, expectedSubstr) { if !strings.Contains(output, expectedSubstr) {
e2elog.Failf("Expected %q to contain %q", output, expectedSubstr) framework.Failf("Expected %q to contain %q", output, expectedSubstr)
} }
} }
View File
@@ -35,7 +35,6 @@ import (
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1" kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config" kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
@@ -85,7 +84,7 @@ type nodeMemory struct {
func checkNodeAllocatableTest(f *framework.Framework) { func checkNodeAllocatableTest(f *framework.Framework) {
nodeMem := getNodeMemory(f) nodeMem := getNodeMemory(f)
e2elog.Logf("nodeMem says: %+v", nodeMem) framework.Logf("nodeMem says: %+v", nodeMem)
// calculate the allocatable mem based on capacity - reserved amounts // calculate the allocatable mem based on capacity - reserved amounts
calculatedNodeAlloc := nodeMem.capacity.DeepCopy() calculatedNodeAlloc := nodeMem.capacity.DeepCopy()
@@ -126,7 +125,7 @@ func overrideAllocatableMemoryTest(f *framework.Framework, allocatablePods int)
for _, e := range eventList.Items { for _, e := range eventList.Items {
// Look for an event that shows FailedScheduling // Look for an event that shows FailedScheduling
if e.Type == "Warning" && e.Reason == "FailedScheduling" && e.InvolvedObject.Name == failurePods[0].ObjectMeta.Name { if e.Type == "Warning" && e.Reason == "FailedScheduling" && e.InvolvedObject.Name == failurePods[0].ObjectMeta.Name {
e2elog.Logf("Found %+v event with message %+v", e.Reason, e.Message) framework.Logf("Found %+v event with message %+v", e.Reason, e.Message)
return true return true
} }
} }
@@ -294,11 +293,11 @@ func pollConfigz(timeout time.Duration, pollInterval time.Duration, nodeName str
gomega.Eventually(func() bool { gomega.Eventually(func() bool {
resp, err = client.Do(req) resp, err = client.Do(req)
if err != nil { if err != nil {
e2elog.Logf("Failed to get /configz, retrying. Error: %v", err) framework.Logf("Failed to get /configz, retrying. Error: %v", err)
return false return false
} }
if resp.StatusCode != 200 { if resp.StatusCode != 200 {
e2elog.Logf("/configz response status not 200, retrying. Response was: %+v", resp) framework.Logf("/configz response status not 200, retrying. Response was: %+v", resp)
return false return false
} }
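pollConfigz above wraps this HTTP retry in gomega.Eventually. Stripped of the kubelet specifics, the retry skeleton is just a timed loop; the URL here is a placeholder:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// pollUntilOK retries a GET until it returns 200 or the timeout expires.
// On success the caller owns (and must close) the response body.
func pollUntilOK(url string, interval, timeout time.Duration) (*http.Response, error) {
	deadline := time.Now().Add(timeout)
	for {
		resp, err := http.Get(url)
		if err == nil && resp.StatusCode == http.StatusOK {
			return resp, nil
		}
		if err != nil {
			fmt.Printf("request failed, retrying: %v\n", err)
		} else {
			resp.Body.Close()
			fmt.Printf("status %d, retrying\n", resp.StatusCode)
		}
		if time.Now().After(deadline) {
			return nil, fmt.Errorf("no 200 from %s within %v", url, timeout)
		}
		time.Sleep(interval)
	}
}

func main() {
	// Placeholder endpoint; the real test hits the kubelet's /configz.
	if _, err := pollUntilOK("http://127.0.0.1:10255/configz", time.Second, 10*time.Second); err != nil {
		fmt.Println(err)
	}
}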
View File
@@ -21,7 +21,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@@ -46,13 +45,13 @@ var _ = SIGDescribe("[Feature:Windows] SecurityContext RunAsUserName", func() {
ginkgo.By("Creating a pod with an invalid username") ginkgo.By("Creating a pod with an invalid username")
podInvalid := f.PodClient().Create(runAsUserNamePod(toPtr("FooLish"))) podInvalid := f.PodClient().Create(runAsUserNamePod(toPtr("FooLish")))
e2elog.Logf("Waiting for pod %s to enter the error state.", podInvalid.Name) framework.Logf("Waiting for pod %s to enter the error state.", podInvalid.Name)
framework.ExpectNoError(f.WaitForPodTerminated(podInvalid.Name, "")) framework.ExpectNoError(f.WaitForPodTerminated(podInvalid.Name, ""))
podInvalid, _ = f.PodClient().Get(podInvalid.Name, metav1.GetOptions{}) podInvalid, _ = f.PodClient().Get(podInvalid.Name, metav1.GetOptions{})
podTerminatedReason := testutils.TerminatedContainers(podInvalid)[runAsUserNameContainerName] podTerminatedReason := testutils.TerminatedContainers(podInvalid)[runAsUserNameContainerName]
if "ContainerCannotRun" != podTerminatedReason { if "ContainerCannotRun" != podTerminatedReason {
e2elog.Failf("The container terminated reason was supposed to be: 'ContainerCannotRun', not: '%q'", podTerminatedReason) framework.Failf("The container terminated reason was supposed to be: 'ContainerCannotRun', not: '%q'", podTerminatedReason)
} }
}) })
View File
@@ -22,7 +22,6 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
@@ -46,7 +45,7 @@ var _ = SIGDescribe("Services", func() {
jig := e2eservice.NewTestJig(cs, serviceName) jig := e2eservice.NewTestJig(cs, serviceName)
nodeIP, err := e2enode.PickIP(jig.Client) nodeIP, err := e2enode.PickIP(jig.Client)
if err != nil { if err != nil {
e2elog.Logf("Unexpected error occurred: %v", err) framework.Logf("Unexpected error occurred: %v", err)
} }
// TODO: write a wrapper for ExpectNoErrorWithOffset() // TODO: write a wrapper for ExpectNoErrorWithOffset()
framework.ExpectNoErrorWithOffset(0, err) framework.ExpectNoErrorWithOffset(0, err)