Merge pull request #38350 from spxtr/ExpectoPatronum
Automatic merge from submit-queue (batch tested with PRs 36071, 32752, 37998, 38350, 38401)

Eradicate ExpectNoError from test/e2e.

```
$ cd test/e2e
$ sed -i "s/\tExpectNoError/\tframework.ExpectNoError/g" *.go
```
commit c294bf0d06
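The change is almost entirely mechanical: the sed pattern above anchors on a leading tab, so it rewrites indented call sites while leaving the column-zero `func ExpectNoError` definition (removed explicitly in the StatefulSet hunk near the end of this diff) and already-qualified `framework.ExpectNoError` calls untouched. For orientation, here is a minimal sketch of the consolidated helper, assuming the same Gomega one-liner body as the local copy this commit deletes; the real helper in test/e2e/framework may add logging or caller-offset handling:

```go
// Sketch of the consolidated helper, based on the local copy deleted below.
// The canonical version lives in test/e2e/framework; this is not verbatim.
package framework

import . "github.com/onsi/gomega"

// ExpectNoError fails the running Ginkgo spec when err is non-nil, so every
// e2e file can call framework.ExpectNoError(err) instead of carrying its own
// copy of this one-liner.
func ExpectNoError(err error) {
	Expect(err).NotTo(HaveOccurred())
}
```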
@@ -192,7 +192,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 		}

 		nodes, err := GetGroupNodes(minMig)
-		ExpectNoError(err)
+		framework.ExpectNoError(err)
 		nodesSet := sets.NewString(nodes...)
 		defer removeLabels(nodesSet)
 		By(fmt.Sprintf("Annotating nodes of the smallest MIG(%s): %v", minMig, nodes))
@@ -207,7 +207,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
 			func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))

 		newNodes, err := GetGroupNodes(minMig)
-		ExpectNoError(err)
+		framework.ExpectNoError(err)
 		newNodesSet := sets.NewString(newNodes...)
 		newNodesSet.Delete(nodes...)
 		if len(newNodesSet) > 1 {
@@ -287,7 +287,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 			if err != nil {
 				return false, fmt.Errorf("Unable to get list of pods in statefulset %s", label)
 			}
-			ExpectNoError(err)
+			framework.ExpectNoError(err)
 			if len(podList.Items) < numPets {
 				framework.Logf("Found %d pets, waiting for %d", len(podList.Items), numPets)
 				return false, nil
@@ -381,7 +381,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 	It("liveness pods should be automatically restarted", func() {
 		mkpath := func(file string) string {
 			path := filepath.Join("test/fixtures/doc-yaml/user-guide/liveness", file)
-			ExpectNoError(createFileForGoBinData(path, path))
+			framework.ExpectNoError(createFileForGoBinData(path, path))
 			return path
 		}
 		execYaml := mkpath("exec-liveness.yaml")
@@ -433,7 +433,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 	It("should create a pod that reads a secret", func() {
 		mkpath := func(file string) string {
 			path := filepath.Join("test/fixtures/doc-yaml/user-guide/secrets", file)
-			ExpectNoError(createFileForGoBinData(path, path))
+			framework.ExpectNoError(createFileForGoBinData(path, path))
 			return path
 		}
 		secretYaml := mkpath("secret.yaml")
@@ -458,7 +458,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
 	It("should create a pod that prints his name and namespace", func() {
 		mkpath := func(file string) string {
 			path := filepath.Join("test/fixtures/doc-yaml/user-guide/downward-api", file)
-			ExpectNoError(createFileForGoBinData(path, path))
+			framework.ExpectNoError(createFileForGoBinData(path, path))
 			return path
 		}
 		podYaml := mkpath("dapi-pod.yaml")
@@ -421,7 +421,7 @@ func (j *federationTestJig) waitForFederatedIngress() {
 		for _, p := range rules.IngressRuleValue.HTTP.Paths {
 			route := fmt.Sprintf("%v://%v%v", proto, address, p.Path)
 			framework.Logf("Testing route %v host %v with simple GET", route, rules.Host)
-			ExpectNoError(pollURL(route, rules.Host, lbPollTimeout, lbPollInterval, timeoutClient, false))
+			framework.ExpectNoError(pollURL(route, rules.Host, lbPollTimeout, lbPollInterval, timeoutClient, false))
 		}
 	}
 }
@@ -70,7 +70,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 		framework.SkipUnlessFederated(f.ClientSet)

 		fcs, err := invalidAuthFederationClientSet(nil)
-		ExpectNoError(err)
+		framework.ExpectNoError(err)

 		nsName := f.FederationNamespace.Name
 		svc, err := createService(fcs, nsName, FederatedServiceName)
@@ -241,7 +241,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
 	It("should create v2alpha1 cronJobs, delete cronJobs, watch cronJobs", func() {
 		var enabled bool
 		groupList, err := f.ClientSet.Discovery().ServerGroups()
-		ExpectNoError(err)
+		framework.ExpectNoError(err)
 		for _, group := range groupList.Groups {
 			if group.Name == v2alpha1.GroupName {
 				for _, version := range group.Versions {
@@ -130,10 +130,10 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {

 		By("waiting for Ingress to come up with ip: " + ip)
 		httpClient := buildInsecureClient(reqTimeout)
-		ExpectNoError(pollURL(fmt.Sprintf("https://%v/", ip), "", lbPollTimeout, jig.pollInterval, httpClient, false))
+		framework.ExpectNoError(pollURL(fmt.Sprintf("https://%v/", ip), "", lbPollTimeout, jig.pollInterval, httpClient, false))

 		By("should reject HTTP traffic")
-		ExpectNoError(pollURL(fmt.Sprintf("http://%v/", ip), "", lbPollTimeout, jig.pollInterval, httpClient, true))
+		framework.ExpectNoError(pollURL(fmt.Sprintf("http://%v/", ip), "", lbPollTimeout, jig.pollInterval, httpClient, true))

 		// TODO: uncomment the restart test once we have a way to synchronize
 		// and know that the controller has resumed watching. If we delete
@@ -164,7 +164,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {
 		// but we want to allow easy testing where a user might've hand
 		// configured firewalls.
 		if framework.ProviderIs("gce", "gke") {
-			ExpectNoError(gcloudCreate("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID, "--allow", "tcp:80,tcp:443", "--network", framework.TestContext.CloudConfig.Network))
+			framework.ExpectNoError(gcloudCreate("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID, "--allow", "tcp:80,tcp:443", "--network", framework.TestContext.CloudConfig.Network))
 		} else {
 			framework.Logf("WARNING: Not running on GCE/GKE, cannot create firewall rules for :80, :443. Assuming traffic can reach the external ips of all nodes in cluster on those ports.")
 		}
@@ -174,7 +174,7 @@ var _ = framework.KubeDescribe("Loadbalancing: L7", func() {

 	AfterEach(func() {
 		if framework.ProviderIs("gce", "gke") {
-			ExpectNoError(gcloudDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID))
+			framework.ExpectNoError(gcloudDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID))
 		}
 		if CurrentGinkgoTestDescription().Failed {
 			describeIng(ns)
@@ -173,7 +173,7 @@ func createComformanceTests(jig *testJig, ns string) []conformanceTests {
 			})
 			By("Checking that " + pathToFail + " is not exposed by polling for failure")
 			route := fmt.Sprintf("http://%v%v", jig.address, pathToFail)
-			ExpectNoError(pollURL(route, updateURLMapHost, lbCleanupTimeout, jig.pollInterval, &http.Client{Timeout: reqTimeout}, true))
+			framework.ExpectNoError(pollURL(route, updateURLMapHost, lbCleanupTimeout, jig.pollInterval, &http.Client{Timeout: reqTimeout}, true))
 		},
 		fmt.Sprintf("Waiting for path updates to reflect in L7"),
 	},
@@ -760,7 +760,7 @@ func (j *testJig) createIngress(manifestPath, ns string, ingAnnotations map[stri
 	framework.Logf(fmt.Sprintf("creating" + j.ing.Name + " ingress"))
 	var err error
 	j.ing, err = j.client.Extensions().Ingresses(ns).Create(j.ing)
-	ExpectNoError(err)
+	framework.ExpectNoError(err)
 }

 func (j *testJig) update(update func(ing *extensions.Ingress)) {
@@ -789,7 +789,7 @@ func (j *testJig) addHTTPS(secretName string, hosts ...string) {
 	// TODO: Just create the secret in getRootCAs once we're watching secrets in
 	// the ingress controller.
 	_, cert, _, err := createSecret(j.client, j.ing)
-	ExpectNoError(err)
+	framework.ExpectNoError(err)
 	framework.Logf("Updating ingress %v to use secret %v for TLS termination", j.ing.Name, secretName)
 	j.update(func(ing *extensions.Ingress) {
 		ing.Spec.TLS = []extensions.IngressTLS{{Hosts: hosts, SecretName: secretName}}
@@ -807,7 +807,7 @@ func (j *testJig) getRootCA(secretName string) (rootCA []byte) {
 }

 func (j *testJig) deleteIngress() {
-	ExpectNoError(j.client.Extensions().Ingresses(j.ing.Namespace).Delete(j.ing.Name, nil))
+	framework.ExpectNoError(j.client.Extensions().Ingresses(j.ing.Namespace).Delete(j.ing.Name, nil))
 }

 func (j *testJig) waitForIngress() {
@@ -827,7 +827,7 @@ func (j *testJig) waitForIngress() {
 			knownHosts := sets.NewString(j.ing.Spec.TLS[0].Hosts...)
 			if knownHosts.Has(rules.Host) {
 				timeoutClient.Transport, err = buildTransport(rules.Host, j.getRootCA(j.ing.Spec.TLS[0].SecretName))
-				ExpectNoError(err)
+				framework.ExpectNoError(err)
 				proto = "https"
 			}
 		}
@@ -835,7 +835,7 @@ func (j *testJig) waitForIngress() {
 			j.curlServiceNodePort(j.ing.Namespace, p.Backend.ServiceName, int(p.Backend.ServicePort.IntVal))
 			route := fmt.Sprintf("%v://%v%v", proto, address, p.Path)
 			framework.Logf("Testing route %v host %v with simple GET", route, rules.Host)
-			ExpectNoError(pollURL(route, rules.Host, lbPollTimeout, j.pollInterval, timeoutClient, false))
+			framework.ExpectNoError(pollURL(route, rules.Host, lbPollTimeout, j.pollInterval, timeoutClient, false))
 		}
 	}
 }
@@ -858,8 +858,8 @@ func (j *testJig) verifyURL(route, host string, iterations int, interval time.Du
 func (j *testJig) curlServiceNodePort(ns, name string, port int) {
 	// TODO: Curl all nodes?
 	u, err := framework.GetNodePortURL(j.client, ns, name, port)
-	ExpectNoError(err)
-	ExpectNoError(pollURL(u, "", 30*time.Second, j.pollInterval, &http.Client{Timeout: reqTimeout}, false))
+	framework.ExpectNoError(err)
+	framework.ExpectNoError(pollURL(u, "", 30*time.Second, j.pollInterval, &http.Client{Timeout: reqTimeout}, false))
 }

 // ingFromManifest reads a .json/yaml file and returns the rc in it.
@@ -867,12 +867,12 @@ func ingFromManifest(fileName string) *extensions.Ingress {
 	var ing extensions.Ingress
 	framework.Logf("Parsing ingress from %v", fileName)
 	data, err := ioutil.ReadFile(fileName)
-	ExpectNoError(err)
+	framework.ExpectNoError(err)

 	json, err := utilyaml.ToJSON(data)
-	ExpectNoError(err)
+	framework.ExpectNoError(err)

-	ExpectNoError(runtime.DecodeInto(api.Codecs.UniversalDecoder(), json, &ing))
+	framework.ExpectNoError(runtime.DecodeInto(api.Codecs.UniversalDecoder(), json, &ing))
 	return &ing
 }

@@ -933,19 +933,19 @@ func (cont *NginxIngressController) init() {
 	framework.RunKubectlOrDie("create", "-f", mkpath("rc.yaml"), fmt.Sprintf("--namespace=%v", cont.ns))

 	rc, err := cont.c.Core().ReplicationControllers(cont.ns).Get("nginx-ingress-controller")
-	ExpectNoError(err)
+	framework.ExpectNoError(err)
 	cont.rc = rc

 	framework.Logf("waiting for pods with label %v", rc.Spec.Selector)
 	sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector))
-	ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.c, cont.ns, sel))
+	framework.ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.c, cont.ns, sel))
 	pods, err := cont.c.Core().Pods(cont.ns).List(v1.ListOptions{LabelSelector: sel.String()})
-	ExpectNoError(err)
+	framework.ExpectNoError(err)
 	if len(pods.Items) == 0 {
 		framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
 	}
 	cont.pod = &pods.Items[0]
 	cont.externalIP, err = framework.GetHostExternalAddress(cont.c, cont.pod)
-	ExpectNoError(err)
+	framework.ExpectNoError(err)
 	framework.Logf("ingress controller running in pod %v on ip %v", cont.pod.Name, cont.externalIP)
 }
@@ -456,7 +456,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {

 			By("execing into a container with a successful command")
 			_, err := framework.NewKubectlCommand(nsFlag, "exec", "nginx", "--", "/bin/sh", "-c", "exit 0").Exec()
-			ExpectNoError(err)
+			framework.ExpectNoError(err)

 			By("execing into a container with a failing command")
 			_, err = framework.NewKubectlCommand(nsFlag, "exec", "nginx", "--", "/bin/sh", "-c", "exit 42").Exec()
@@ -466,7 +466,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {

 			By("running a successful command")
 			_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "success", "--", "/bin/sh", "-c", "exit 0").Exec()
-			ExpectNoError(err)
+			framework.ExpectNoError(err)

 			By("running a failing command")
 			_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-1", "--", "/bin/sh", "-c", "exit 42").Exec()
@@ -478,20 +478,20 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
 			_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "failure-2", "--", "/bin/sh", "-c", "cat && exit 42").
 				WithStdinData("abcd1234").
 				Exec()
-			ExpectNoError(err)
+			framework.ExpectNoError(err)

 			By("running a failing command without --restart=Never, but with --rm")
 			_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=OnFailure", "--rm", "failure-3", "--", "/bin/sh", "-c", "cat && exit 42").
 				WithStdinData("abcd1234").
 				Exec()
-			ExpectNoError(err)
+			framework.ExpectNoError(err)
 			framework.WaitForPodToDisappear(f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, wait.ForeverTestTimeout)

 			By("running a failing command with --leave-stdin-open")
 			_, err = framework.NewKubectlCommand(nsFlag, "run", "-i", "--image="+busyboxImage, "--restart=Never", "failure-4", "--leave-stdin-open", "--", "/bin/sh", "-c", "exit 42").
 				WithStdinData("abcd1234").
 				Exec()
-			ExpectNoError(err)
+			framework.ExpectNoError(err)
 		})

 		It("should support inline execution and attach", func() {
@@ -120,25 +120,25 @@ var _ = framework.KubeDescribe("StatefulSet", func() {
 			pst.saturate(ps)

 			By("Verifying statefulset mounted data directory is usable")
-			ExpectNoError(pst.checkMount(ps, "/data"))
+			framework.ExpectNoError(pst.checkMount(ps, "/data"))

 			By("Verifying statefulset provides a stable hostname for each pod")
-			ExpectNoError(pst.checkHostname(ps))
+			framework.ExpectNoError(pst.checkHostname(ps))

 			cmd := "echo $(hostname) > /data/hostname; sync;"
 			By("Running " + cmd + " in all pets")
-			ExpectNoError(pst.execInPets(ps, cmd))
+			framework.ExpectNoError(pst.execInPets(ps, cmd))

 			By("Restarting statefulset " + ps.Name)
 			pst.restart(ps)
 			pst.saturate(ps)

 			By("Verifying statefulset mounted data directory is usable")
-			ExpectNoError(pst.checkMount(ps, "/data"))
+			framework.ExpectNoError(pst.checkMount(ps, "/data"))

 			cmd = "if [ \"$(cat /data/hostname)\" = \"$(hostname)\" ]; then exit 0; else exit 1; fi"
 			By("Running " + cmd + " in all pets")
-			ExpectNoError(pst.execInPets(ps, cmd))
+			framework.ExpectNoError(pst.execInPets(ps, cmd))
 		})

 		It("should handle healthy pet restarts during scale", func() {
@@ -855,7 +855,7 @@ func (p *statefulSetTester) updateReplicas(ps *apps.StatefulSet, count int32) {

 func (p *statefulSetTester) restart(ps *apps.StatefulSet) {
 	oldReplicas := *(ps.Spec.Replicas)
-	ExpectNoError(p.scale(ps, 0))
+	framework.ExpectNoError(p.scale(ps, 0))
 	p.update(ps.Namespace, ps.Name, func(ps *apps.StatefulSet) { *(ps.Spec.Replicas) = oldReplicas })
 }

@@ -879,9 +879,9 @@ func (p *statefulSetTester) update(ns, name string, update func(ps *apps.Statefu

 func (p *statefulSetTester) getPodList(ps *apps.StatefulSet) *v1.PodList {
 	selector, err := metav1.LabelSelectorAsSelector(ps.Spec.Selector)
-	ExpectNoError(err)
+	framework.ExpectNoError(err)
 	podList, err := p.c.Core().Pods(ps.Namespace).List(v1.ListOptions{LabelSelector: selector.String()})
-	ExpectNoError(err)
+	framework.ExpectNoError(err)
 	return podList
 }

@@ -967,7 +967,7 @@ func (p *statefulSetTester) setHealthy(ps *apps.StatefulSet) {
 		p, err := framework.UpdatePodWithRetries(p.c, pod.Namespace, pod.Name, func(up *v1.Pod) {
 			up.Annotations[petset.StatefulSetInitAnnotation] = "true"
 		})
-		ExpectNoError(err)
+		framework.ExpectNoError(err)
 		framework.Logf("Set annotation %v to %v on pod %v", petset.StatefulSetInitAnnotation, p.Annotations[petset.StatefulSetInitAnnotation], pod.Name)
 		markedHealthyPod = pod.Name
 	}
@@ -997,7 +997,7 @@ func (p *statefulSetTester) waitForStatus(ps *apps.StatefulSet, expectedReplicas
 func deleteAllStatefulSets(c clientset.Interface, ns string) {
 	pst := &statefulSetTester{c: c}
 	psList, err := c.Apps().StatefulSets(ns).List(v1.ListOptions{LabelSelector: labels.Everything().String()})
-	ExpectNoError(err)
+	framework.ExpectNoError(err)

 	// Scale down each statefulset, then delete it completely.
 	// Deleting a pvc without doing this will leak volumes, #25101.
@@ -1059,14 +1059,10 @@ func deleteAllStatefulSets(c clientset.Interface, ns string) {
 		errList = append(errList, fmt.Sprintf("Timeout waiting for pv provisioner to delete pvs, this might mean the test leaked pvs."))
 	}
 	if len(errList) != 0 {
-		ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
+		framework.ExpectNoError(fmt.Errorf("%v", strings.Join(errList, "\n")))
 	}
 }

-func ExpectNoError(err error) {
-	Expect(err).NotTo(HaveOccurred())
-}
-
 func pollReadWithTimeout(pet petTester, petNumber int, key, expectedVal string) error {
 	err := wait.PollImmediate(time.Second, readTimeout, func() (bool, error) {
 		val := pet.read(petNumber, key)
@@ -514,7 +514,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
 		CreateHostPortPods(f, "host-port", 2, true)
 		defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, "host-port")
 		podList, err := cs.Core().Pods(ns).List(v1.ListOptions{})
-		ExpectNoError(err)
+		framework.ExpectNoError(err)
 		Expect(len(podList.Items)).To(Equal(2))
 		nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName}
 		Expect(nodeNames[0]).ToNot(Equal(nodeNames[1]))
@@ -1076,14 +1076,14 @@ var _ = framework.KubeDescribe("Services", func() {

 		By(fmt.Sprintf("createing RC %v with selectors %v", rcSpec.Name, rcSpec.Spec.Selector))
 		_, err := t.createRC(rcSpec)
-		ExpectNoError(err)
+		framework.ExpectNoError(err)

 		By(fmt.Sprintf("creating Service %v with selectors %v", service.Name, service.Spec.Selector))
 		_, err = t.CreateService(service)
-		ExpectNoError(err)
+		framework.ExpectNoError(err)

 		By("Verifying pods for RC " + t.name)
-		ExpectNoError(framework.VerifyPods(t.Client, t.Namespace, t.name, false, 1))
+		framework.ExpectNoError(framework.VerifyPods(t.Client, t.Namespace, t.name, false, 1))

 		svcName := fmt.Sprintf("%v.%v", serviceName, f.Namespace.Name)
 		By("waiting for endpoints of Service with DNS name " + svcName)
@@ -1351,7 +1351,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow]", func() {
 			Expect(err).NotTo(HaveOccurred())
 		}()
 		execPod, err := f.ClientSet.Core().Pods(namespace).Get(execPodName)
-		ExpectNoError(err)
+		framework.ExpectNoError(err)

 		framework.Logf("Waiting up to %v wget %v", kubeProxyLagTimeout, path)
 		cmd := fmt.Sprintf(`wget -T 30 -qO- %v`, path)
@@ -2779,7 +2779,7 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam
 		Expect(err).NotTo(HaveOccurred())
 	}()
 	execPod, err := f.ClientSet.Core().Pods(ns).Get(execPodName)
-	ExpectNoError(err)
+	framework.ExpectNoError(err)

 	var stdout string
 	timeout := 2 * time.Minute
@@ -2799,7 +2799,7 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam
 		break
 	}

-	ExpectNoError(err)
+	framework.ExpectNoError(err)

 	// The stdout return from RunHostCmd seems to come with "\n", so TrimSpace is needed.
 	// Desired stdout in this format: client_address=x.x.x.x