Merge pull request #81811 from oomichi/replace-e2elog-framework-deployment-ingress

Use log functions of core framework on [d-i]
Authored by Kubernetes Prow Robot on 2019-08-23 06:54:17 -07:00, committed by GitHub
commit 518ff2a405
8 changed files with 37 additions and 39 deletions
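
The change itself is mechanical across the eight files below: the dedicated e2elog import ("k8s.io/kubernetes/test/e2e/framework/log") is dropped and the same Logf/Failf helpers are called through the core framework package instead. A minimal sketch of the resulting pattern, assuming the post-merge framework package; the waitForWidget helper is hypothetical and not part of this PR:

package example

import (
	// Previously: e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForWidget is a hypothetical helper showing the post-migration style:
// logging and failing both go through the core framework package.
func waitForWidget(name string) {
	// Previously: e2elog.Logf("waiting for widget %q", name)
	framework.Logf("waiting for widget %q", name)

	if name == "" {
		// Previously: e2elog.Failf("widget name must not be empty")
		framework.Failf("widget name must not be empty")
	}
}

Callers keep the same behavior; only the import path and package qualifier change.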

View File

@@ -20,7 +20,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/watch:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/tools/watch:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
+        "//test/e2e/framework:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",
     ],

View File

@@ -30,14 +30,14 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	watchtools "k8s.io/client-go/tools/watch"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
 
 // UpdateDeploymentWithRetries updates the specified deployment with retries.
 func UpdateDeploymentWithRetries(c clientset.Interface, namespace, name string, applyUpdate testutils.UpdateDeploymentFunc) (*appsv1.Deployment, error) {
-	return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, e2elog.Logf, poll, pollShortTimeout)
+	return testutils.UpdateDeploymentWithRetries(c, namespace, name, applyUpdate, framework.Logf, poll, pollShortTimeout)
 }
 
 // CheckDeploymentRevisionAndImage checks if the input deployment's and its new replica set's revision and image are as expected.
@@ -67,7 +67,7 @@ func WatchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error
 			_, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.AppsV1())
 			newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.AppsV1())
 			if err == nil && nerr == nil {
-				e2elog.Logf("%+v", d)
+				framework.Logf("%+v", d)
 				logReplicaSetsOfDeployment(d, allOldRSs, newRS)
 				logPodsOfDeployment(c, d, append(allOldRSs, newRS))
 			}
@@ -128,7 +128,7 @@ func CreateDeployment(client clientset.Interface, replicas int32, podLabels map[
 	if err != nil {
 		return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
 	}
-	e2elog.Logf("Waiting deployment %q to complete", deploymentSpec.Name)
+	framework.Logf("Waiting deployment %q to complete", deploymentSpec.Name)
 	err = WaitForDeploymentComplete(client, deployment)
 	if err != nil {
 		return nil, fmt.Errorf("deployment %q failed to complete: %v", deploymentSpec.Name, err)

View File

@@ -19,14 +19,14 @@ package deployment
 import (
 	appsv1 "k8s.io/api/apps/v1"
 	clientset "k8s.io/client-go/kubernetes"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
 )
 
 func logReplicaSetsOfDeployment(deployment *appsv1.Deployment, allOldRSs []*appsv1.ReplicaSet, newRS *appsv1.ReplicaSet) {
-	testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, e2elog.Logf)
+	testutils.LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, framework.Logf)
 }
 
 func logPodsOfDeployment(c clientset.Interface, deployment *appsv1.Deployment, rsList []*appsv1.ReplicaSet) {
-	testutils.LogPodsOfDeployment(c, deployment, rsList, e2elog.Logf)
+	testutils.LogPodsOfDeployment(c, deployment, rsList, framework.Logf)
 }

View File

@@ -25,7 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"k8s.io/kubernetes/test/e2e/framework"
 	testutils "k8s.io/kubernetes/test/utils"
 )
 
@@ -43,27 +43,27 @@ func WaitForObservedDeployment(c clientset.Interface, ns, deploymentName string,
 
 // WaitForDeploymentWithCondition waits for the specified deployment condition.
 func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, reason string, condType appsv1.DeploymentConditionType) error {
-	return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, e2elog.Logf, poll, pollLongTimeout)
+	return testutils.WaitForDeploymentWithCondition(c, ns, deploymentName, reason, condType, framework.Logf, poll, pollLongTimeout)
 }
 
 // WaitForDeploymentRevisionAndImage waits for the deployment's and its new RS's revision and container image to match the given revision and image.
 // Note that deployment revision and its new RS revision should be updated shortly most of the time, but an overwhelmed RS controller
 // may result in taking longer to relabel a RS.
 func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName string, revision, image string) error {
-	return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, e2elog.Logf, poll, pollLongTimeout)
+	return testutils.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, revision, image, framework.Logf, poll, pollLongTimeout)
 }
 
 // WaitForDeploymentComplete waits for the deployment to complete, and don't check if rolling update strategy is broken.
 // Rolling update strategy is used only during a rolling update, and can be violated in other situations,
 // such as shortly after a scaling event or the deployment is just created.
 func WaitForDeploymentComplete(c clientset.Interface, d *appsv1.Deployment) error {
-	return testutils.WaitForDeploymentComplete(c, d, e2elog.Logf, poll, pollLongTimeout)
+	return testutils.WaitForDeploymentComplete(c, d, framework.Logf, poll, pollLongTimeout)
 }
 
 // WaitForDeploymentCompleteAndCheckRolling waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
 // Rolling update strategy should not be broken during a rolling update.
 func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *appsv1.Deployment) error {
-	return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, e2elog.Logf, poll, pollLongTimeout)
+	return testutils.WaitForDeploymentCompleteAndCheckRolling(c, d, framework.Logf, poll, pollLongTimeout)
 }
 
 // WaitForDeploymentUpdatedReplicasGTE waits for given deployment to be observed by the controller and has at least a number of updatedReplicas

View File

@@ -10,7 +10,7 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
+        "//test/e2e/framework:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
     ],
 )

View File

@@ -32,7 +32,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	clientset "k8s.io/client-go/kubernetes"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"k8s.io/kubernetes/test/e2e/framework"
 )
 
 // ServiceStartTimeout is how long to wait for a service endpoint to be resolvable.
@@ -103,7 +103,7 @@ func ValidateEndpointsPorts(c clientset.Interface, namespace, serviceName string
 	for start := time.Now(); time.Since(start) < ServiceStartTimeout; time.Sleep(1 * time.Second) {
 		ep, err := c.CoreV1().Endpoints(namespace).Get(serviceName, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
+			framework.Logf("Get endpoints failed (%v elapsed, ignoring for 5s): %v", time.Since(start), err)
 			continue
 		}
 		portsByPodUID := GetContainerPortsByPodUID(ep)
@@ -116,21 +116,21 @@ func ValidateEndpointsPorts(c clientset.Interface, namespace, serviceName string
 			if err != nil {
 				return err
 			}
-			e2elog.Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)",
+			framework.Logf("successfully validated that service %s in namespace %s exposes endpoints %v (%v elapsed)",
 				serviceName, namespace, expectedEndpoints, time.Since(start))
 			return nil
 		}
 		if i%5 == 0 {
-			e2elog.Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start))
+			framework.Logf("Unexpected endpoints: found %v, expected %v (%v elapsed, will retry)", portsByPodUID, expectedEndpoints, time.Since(start))
 		}
 		i++
 	}
 	if pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil {
 		for _, pod := range pods.Items {
-			e2elog.Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
+			framework.Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
 		}
 	} else {
-		e2elog.Logf("Can't list pod debug info: %v", err)
+		framework.Logf("Can't list pod debug info: %v", err)
 	}
 	return fmt.Errorf("Timed out waiting for service %s in namespace %s to expose endpoints %v (%v elapsed)", serviceName, namespace, expectedEndpoints, ServiceStartTimeout)
 }

View File

@@ -18,7 +18,6 @@ go_library(
         "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
-        "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/service:go_default_library",
         "//test/e2e/framework/testfiles:go_default_library",

View File

@@ -49,7 +49,6 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	"k8s.io/kubernetes/test/e2e/framework/testfiles"
@@ -131,12 +130,12 @@ type E2ELogger struct{}
 
 // Infof outputs log.
 func (l *E2ELogger) Infof(format string, args ...interface{}) {
-	e2elog.Logf(format, args...)
+	framework.Logf(format, args...)
 }
 
 // Errorf outputs log.
 func (l *E2ELogger) Errorf(format string, args ...interface{}) {
-	e2elog.Logf(format, args...)
+	framework.Logf(format, args...)
 }
 
 // ConformanceTests contains a closure with an entry and exit log line.
@@ -331,7 +330,7 @@ func BuildInsecureClient(timeout time.Duration) *http.Client {
 // Ingress, it's updated.
 func createTLSSecret(kubeClient clientset.Interface, namespace, secretName string, hosts ...string) (host string, rootCA, privKey []byte, err error) {
 	host = strings.Join(hosts, ",")
-	e2elog.Logf("Generating RSA cert for host %v", host)
+	framework.Logf("Generating RSA cert for host %v", host)
 	cert, key, err := GenerateRSACerts(host, true)
 	if err != nil {
 		return
@@ -348,11 +347,11 @@ func createTLSSecret(kubeClient clientset.Interface, namespace, secretName strin
 	var s *v1.Secret
 	if s, err = kubeClient.CoreV1().Secrets(namespace).Get(secretName, metav1.GetOptions{}); err == nil {
 		// TODO: Retry the update. We don't really expect anything to conflict though.
-		e2elog.Logf("Updating secret %v in ns %v with hosts %v", secret.Name, namespace, host)
+		framework.Logf("Updating secret %v in ns %v with hosts %v", secret.Name, namespace, host)
 		s.Data = secret.Data
 		_, err = kubeClient.CoreV1().Secrets(namespace).Update(s)
 	} else {
-		e2elog.Logf("Creating secret %v in ns %v with hosts %v", secret.Name, namespace, host)
+		framework.Logf("Creating secret %v in ns %v with hosts %v", secret.Name, namespace, host)
 		_, err = kubeClient.CoreV1().Secrets(namespace).Create(secret)
 	}
 	return host, cert, key, err
@@ -468,7 +467,7 @@ func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) {
 	for i := 0; i < 3; i++ {
 		j.Ingress, err = j.Client.NetworkingV1beta1().Ingresses(ns).Get(name, metav1.GetOptions{})
 		if err != nil {
-			e2elog.Failf("failed to get ingress %s/%s: %v", ns, name, err)
+			framework.Failf("failed to get ingress %s/%s: %v", ns, name, err)
 		}
 		update(j.Ingress)
 		j.Ingress, err = j.runUpdate(j.Ingress)
@@ -477,10 +476,10 @@ func (j *TestJig) Update(update func(ing *networkingv1beta1.Ingress)) {
 			return
 		}
 		if !apierrs.IsConflict(err) && !apierrs.IsServerTimeout(err) {
-			e2elog.Failf("failed to update ingress %s/%s: %v", ns, name, err)
+			framework.Failf("failed to update ingress %s/%s: %v", ns, name, err)
 		}
 	}
-	e2elog.Failf("too many retries updating ingress %s/%s", ns, name)
+	framework.Failf("too many retries updating ingress %s/%s", ns, name)
 }
 
 // AddHTTPS updates the ingress to add this secret for these hosts.
@@ -538,7 +537,7 @@ func (j *TestJig) GetRootCA(secretName string) (rootCA []byte) {
 	var ok bool
 	rootCA, ok = j.RootCAs[secretName]
 	if !ok {
-		e2elog.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
+		framework.Failf("Failed to retrieve rootCAs, no recorded secret by name %v", secretName)
 	}
 	return
 }
@@ -670,7 +669,7 @@ func (j *TestJig) pollIngressWithCert(ing *networkingv1beta1.Ingress, address st
 
 // WaitForIngress returns when it gets the first 200 response
 func (j *TestJig) WaitForIngress(waitForNodePort bool) {
 	if err := j.WaitForGivenIngressWithTimeout(j.Ingress, waitForNodePort, e2eservice.LoadBalancerPollTimeout); err != nil {
-		e2elog.Failf("error in waiting for ingress to get an address: %s", err)
+		framework.Failf("error in waiting for ingress to get an address: %s", err)
 	}
 }
@@ -683,7 +682,7 @@ func (j *TestJig) WaitForIngressToStable() {
 		}
 		return true, nil
 	}); err != nil {
-		e2elog.Failf("error in waiting for ingress to stablize: %v", err)
+		framework.Failf("error in waiting for ingress to stablize: %v", err)
 	}
 }
 
@@ -729,7 +728,7 @@ func (j *TestJig) VerifyURL(route, host string, iterations int, interval time.Du
 	for i := 0; i < iterations; i++ {
 		b, err := framework.SimpleGET(httpClient, route, host)
 		if err != nil {
-			e2elog.Logf(b)
+			framework.Logf(b)
 			return err
 		}
 		j.Logger.Infof("Verified %v with host %v %d times, sleeping for %v", route, host, i, interval)
@@ -809,7 +808,7 @@ func (j *TestJig) GetDistinctResponseFromIngress() (sets.String, error) {
 	// Wait for the loadbalancer IP.
 	address, err := j.WaitForIngressAddress(j.Client, j.Ingress.Namespace, j.Ingress.Name, e2eservice.LoadBalancerPollTimeout)
 	if err != nil {
-		e2elog.Failf("Ingress failed to acquire an IP address within %v", e2eservice.LoadBalancerPollTimeout)
+		framework.Failf("Ingress failed to acquire an IP address within %v", e2eservice.LoadBalancerPollTimeout)
 	}
 	responses := sets.NewString()
 	timeoutClient := &http.Client{Timeout: IngressReqTimeout}
@@ -840,25 +839,25 @@ func (cont *NginxIngressController) Init() {
 	read := func(file string) string {
 		return string(testfiles.ReadOrDie(filepath.Join(IngressManifestPath, "nginx", file)))
 	}
-	e2elog.Logf("initializing nginx ingress controller")
+	framework.Logf("initializing nginx ingress controller")
 	framework.RunKubectlOrDieInput(read("rc.yaml"), "create", "-f", "-", fmt.Sprintf("--namespace=%v", cont.Ns))
 
 	rc, err := cont.Client.CoreV1().ReplicationControllers(cont.Ns).Get("nginx-ingress-controller", metav1.GetOptions{})
 	framework.ExpectNoError(err)
 	cont.rc = rc
 
-	e2elog.Logf("waiting for pods with label %v", rc.Spec.Selector)
+	framework.Logf("waiting for pods with label %v", rc.Spec.Selector)
 	sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector))
 	framework.ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.Client, cont.Ns, sel))
 	pods, err := cont.Client.CoreV1().Pods(cont.Ns).List(metav1.ListOptions{LabelSelector: sel.String()})
 	framework.ExpectNoError(err)
 	if len(pods.Items) == 0 {
-		e2elog.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
+		framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
 	}
 	cont.pod = &pods.Items[0]
 	cont.externalIP, err = framework.GetHostExternalAddress(cont.Client, cont.pod)
 	framework.ExpectNoError(err)
-	e2elog.Logf("ingress controller running in pod %v on ip %v", cont.pod.Name, cont.externalIP)
+	framework.Logf("ingress controller running in pod %v on ip %v", cont.pod.Name, cont.externalIP)
 }
 
 func generateBacksideHTTPSIngressSpec(ns string) *networkingv1beta1.Ingress {