Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-28 14:07:14 +00:00)
Add a Logf utility function to print INFO: lines to GinkgoWriter, convert new file to it
parent 0ddbb52717
commit 251bb585bd
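What the change does: informational messages that the e2e tests used to emit by wrapping them in By(fmt.Sprintf(...)) now go through a small Logf helper that writes INFO:-prefixed lines to GinkgoWriter, keeping By for genuine test steps. The sketch below is illustrative only (the Describe/It block and its messages are made up, not part of the commit) and assumes the dot-import of github.com/onsi/ginkgo that these tests already use:

package e2e

import (
	"fmt"

	. "github.com/onsi/ginkgo"
)

// Logf is the helper this commit introduces: it writes an INFO: line to
// GinkgoWriter, so the text ends up in the spec's captured output rather
// than being registered as a fake By step.
func Logf(format string, a ...interface{}) {
	fmt.Fprintf(GinkgoWriter, "INFO: "+format+"\n", a...)
}

// Hypothetical spec showing the intended split after this commit:
// By marks real test steps, Logf carries progress and debug chatter.
var _ = Describe("replication controller", func() {
	It("reports progress without extra By steps", func() {
		By("creating the replication controller") // a genuine step
		Logf("found %d pods out of %d", 1, 2)     // informational only
	})
})

Because GinkgoWriter output is buffered and, by default, only replayed for failing specs (or with verbose runs), routine polling chatter no longer clutters the step list. The full diff follows.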
@@ -99,12 +99,12 @@ func ServeImageOrFail(c *client.Client, test string, image string) {
 		// Resize the replication controller to zero to get rid of pods.
 		controller.Spec.Replicas = 0
 		if _, err = c.ReplicationControllers(ns).Update(controller); err != nil {
-			By(fmt.Sprintf("Failed to resize replication controller %s to zero: %v", name, err))
+			Logf("Failed to resize replication controller %s to zero: %v", name, err)
 		}
 
 		// Delete the replication controller.
 		if err = c.ReplicationControllers(ns).Delete(name); err != nil {
-			By(fmt.Sprintf("Failed to delete replication controller %s: %v", name, err))
+			Logf("Failed to delete replication controller %s: %v", name, err)
 		}
 	}()
 
@@ -115,7 +115,7 @@ func ServeImageOrFail(c *client.Client, test string, image string) {
 	Expect(err).NotTo(HaveOccurred())
 	t := time.Now()
 	for {
-		By(fmt.Sprintf("Controller %s: Found %d pods out of %d", name, len(pods.Items), replicas))
+		Logf("Controller %s: Found %d pods out of %d", name, len(pods.Items), replicas)
 		if len(pods.Items) == replicas {
 			break
 		}
@@ -129,6 +129,8 @@ func ServeImageOrFail(c *client.Client, test string, image string) {
 		Expect(err).NotTo(HaveOccurred())
 	}
 
+	By("Ensuring each pod is running and has a hostIP")
+
 	// Wait for the pods to enter the running state. Waiting loops until the pods
 	// are running so non-running pods cause a timeout for this test.
 	for _, pod := range pods.Items {
@@ -144,14 +146,14 @@ func ServeImageOrFail(c *client.Client, test string, image string) {
 			p, err := c.Pods(ns).Get(pod.Name)
 			Expect(err).NotTo(HaveOccurred())
 			if p.Status.HostIP != "" {
-				By(fmt.Sprintf("Controller %s: Replica %d has hostIP: %s", name, i+1, p.Status.HostIP))
+				Logf("Controller %s: Replica %d has hostIP: %s", name, i+1, p.Status.HostIP)
 				break
 			}
 			if time.Since(t) >= hostIPTimeout {
 				Fail(fmt.Sprintf("Controller %s: Gave up waiting for hostIP of replica %d after %v seconds",
 					name, i, time.Since(t).Seconds()))
 			}
-			By(fmt.Sprintf("Controller %s: Retrying to get the hostIP of replica %d", name, i+1))
+			Logf("Controller %s: Retrying to get the hostIP of replica %d", name, i+1)
 			time.Sleep(5 * time.Second)
 		}
 	}
@@ -161,6 +163,8 @@ func ServeImageOrFail(c *client.Client, test string, image string) {
 	Expect(err).NotTo(HaveOccurred())
 
 	// Verify that something is listening.
+	By("Trying to dial each unique pod")
+
 	for i, pod := range pods.Items {
 		resp, err := http.Get(fmt.Sprintf("http://%s:8080", pod.Status.HostIP))
 		if err != nil {
@@ -179,6 +183,6 @@ func ServeImageOrFail(c *client.Client, test string, image string) {
 		if string(body) != pod.Name {
 			Fail(fmt.Sprintf("Controller %s: Replica %d expected response %s but got %s", name, i+1, pod.Name, string(body)))
 		}
-		By(fmt.Sprintf("Controller %s: Got expected result from replica %d: %s", name, i+1, string(body)))
+		Logf("Controller %s: Got expected result from replica %d: %s", name, i+1, string(body))
 	}
 }
@@ -40,6 +40,10 @@ type testContextType struct {
 
 var testContext testContextType
 
+func Logf(format string, a ...interface{}) {
+	fmt.Fprintf(GinkgoWriter, "INFO: "+format+"\n", a...)
+}
+
 func waitForPodRunning(c *client.Client, id string, tryFor time.Duration) error {
 	trySecs := int(tryFor.Seconds())
 	for i := 0; i <= trySecs; i += 5 {
@@ -51,7 +55,7 @@ func waitForPodRunning(c *client.Client, id string, tryFor time.Duration) error
 		if pod.Status.Phase == api.PodRunning {
 			return nil
 		}
-		By(fmt.Sprintf("Waiting for pod %s status to be %q (found %q) (%d secs)", id, api.PodRunning, pod.Status.Phase, i))
+		Logf("Waiting for pod %s status to be %q (found %q) (%d secs)", id, api.PodRunning, pod.Status.Phase, i)
 	}
 	return fmt.Errorf("Gave up waiting for pod %s to be running after %d seconds", id, trySecs)
 }
@@ -65,14 +69,14 @@ func waitForPodNotPending(c *client.Client, ns, podName string, tryFor time.Dura
 		}
 		pod, err := c.Pods(ns).Get(podName)
 		if err != nil {
-			By(fmt.Sprintf("Get pod %s in namespace %s failed, ignoring for 5s: %v", podName, ns, err))
+			Logf("Get pod %s in namespace %s failed, ignoring for 5s: %v", podName, ns, err)
 			continue
 		}
 		if pod.Status.Phase != api.PodPending {
-			By(fmt.Sprintf("Saw pod %s in namespace %s out of pending state (found %q)", podName, ns, pod.Status.Phase))
+			Logf("Saw pod %s in namespace %s out of pending state (found %q)", podName, ns, pod.Status.Phase)
			return nil
 		}
-		By(fmt.Sprintf("Waiting for status of pod %s in namespace %s to be !%q (found %q) (%v secs)", podName, ns, api.PodPending, pod.Status.Phase, i))
+		Logf("Waiting for status of pod %s in namespace %s to be !%q (found %q) (%v secs)", podName, ns, api.PodPending, pod.Status.Phase, i)
 	}
 	return fmt.Errorf("Gave up waiting for status of pod %s in namespace %s to go out of pending after %d seconds", podName, ns, trySecs)
 }
@@ -86,24 +90,24 @@ func waitForPodSuccess(c *client.Client, podName string, contName string, tryFor
 		}
 		pod, err := c.Pods(api.NamespaceDefault).Get(podName)
 		if err != nil {
-			By(fmt.Sprintf("Get pod failed, ignoring for 5s: %v", err))
+			Logf("Get pod failed, ignoring for 5s: %v", err)
 			continue
 		}
 		// Cannot use pod.Status.Phase == api.PodSucceeded/api.PodFailed due to #2632
 		ci, ok := pod.Status.Info[contName]
 		if !ok {
-			By(fmt.Sprintf("No Status.Info for container %s in pod %s yet", contName, podName))
+			Logf("No Status.Info for container %s in pod %s yet", contName, podName)
 		} else {
 			if ci.State.Termination != nil {
 				if ci.State.Termination.ExitCode == 0 {
 					By("Saw pod success")
 					return nil
 				} else {
-					By(fmt.Sprintf("Saw pod failure: %+v", ci.State.Termination))
+					Logf("Saw pod failure: %+v", ci.State.Termination)
 				}
-				By(fmt.Sprintf("Waiting for pod %q status to be success or failure", podName))
+				Logf("Waiting for pod %q status to be success or failure", podName)
 			} else {
-				By(fmt.Sprintf("Nil State.Termination for container %s in pod %s so far", contName, podName))
+				Logf("Nil State.Termination for container %s in pod %s so far", contName, podName)
 			}
 		}
 	}
 }
@@ -120,7 +124,7 @@ func loadClient() (*client.Client, error) {
 	}
 	// If the certificate directory is provided, set the cert paths to be there.
 	if testContext.certDir != "" {
-		By(fmt.Sprintf("Expecting certs in %v.", testContext.certDir))
+		Logf("Expecting certs in %v.", testContext.certDir)
 		info.CAFile = filepath.Join(testContext.certDir, "ca.crt")
 		info.CertFile = filepath.Join(testContext.certDir, "kubecfg.crt")
 		info.KeyFile = filepath.Join(testContext.certDir, "kubecfg.key")
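The converted wait helpers above all follow the same polling shape: check a condition every five seconds, report each attempt through Logf, and only return an error once the deadline passes. A hypothetical distillation of that loop is sketched below; pollUntil, desc, and condition are illustrative names, not part of the commit, and the sketch assumes the Logf helper added above is in the same package:

package e2e

import (
	"fmt"
	"time"
)

// pollUntil captures the pattern used by waitForPodRunning and friends:
// evaluate condition every 5 seconds, log each attempt via Logf, and give
// up once tryFor has elapsed. condition stands in for the real API-server
// calls shown in the diff above.
func pollUntil(desc string, tryFor time.Duration, condition func() (bool, error)) error {
	trySecs := int(tryFor.Seconds())
	for i := 0; i <= trySecs; i += 5 {
		if i > 0 {
			time.Sleep(5 * time.Second)
		}
		done, err := condition()
		if err != nil {
			Logf("checking %s failed, ignoring for 5s: %v", desc, err)
			continue
		}
		if done {
			return nil
		}
		Logf("still waiting for %s (%d secs)", desc, i)
	}
	return fmt.Errorf("gave up waiting for %s after %d seconds", desc, trySecs)
}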