Use gcloud to list nodes so we know their ips

This commit is contained in:
Prashanth Balasubramanian 2015-06-24 21:33:23 -07:00
parent f2b687d53f
commit 7bc32b5a47
3 changed files with 17 additions and 9 deletions

View File

@ -20,6 +20,7 @@ import (
"fmt" "fmt"
"math" "math"
"os" "os"
"os/exec"
"sort" "sort"
"strconv" "strconv"
"sync" "sync"
@ -66,6 +67,20 @@ func printLatencies(latencies []podLatencyData, header string) {
Logf("perc50: %v, perc90: %v, perc99: %v", perc50, perc90, perc99) Logf("perc50: %v, perc90: %v, perc99: %v", perc50, perc90, perc99)
} }
// gcloudListNodes lists the cluster's nodes via gcloud. We don't rely on the
// apiserver because we really want the node IPs, and sometimes the node
// controller is slow to populate them.
// Output (or the failure reason) is written to the test log; callers get no
// return value — this is a best-effort diagnostic helper.
func gcloudListNodes() {
	Logf("Listing nodes via gcloud:")
	output, err := exec.Command("gcloud", "compute", "instances", "list",
		"--project="+testContext.CloudConfig.ProjectID, "--zone="+testContext.CloudConfig.Zone).CombinedOutput()
	if err != nil {
		// BUG FIX: the original format string had two %v verbs but only one
		// argument, producing "%!v(MISSING)" in the log. Include the combined
		// stdout/stderr from gcloud, which carries the actual failure detail.
		Logf("Failed to list nodes: %v. Output:\n%s", err, output)
		return
	}
	Logf(string(output))
}
// This test suite can take a long time to run, so by default it is added to // This test suite can take a long time to run, so by default it is added to
// the ginkgo.skip list (see driver.go). // the ginkgo.skip list (see driver.go).
// To run this suite you must explicitly ask for it by setting the // To run this suite you must explicitly ask for it by setting the
@ -101,6 +116,7 @@ var _ = Describe("Density", func() {
expectNoError(resetMetrics(c)) expectNoError(resetMetrics(c))
expectNoError(os.Mkdir(fmt.Sprintf(testContext.OutputDir+"/%s", uuid), 0777)) expectNoError(os.Mkdir(fmt.Sprintf(testContext.OutputDir+"/%s", uuid), 0777))
expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "before")) expectNoError(writePerfData(c, fmt.Sprintf(testContext.OutputDir+"/%s", uuid), "before"))
gcloudListNodes()
}) })
AfterEach(func() { AfterEach(func() {

View File

@ -183,11 +183,7 @@ func nodeProxyRequest(c *client.Client, node, endpoint string) client.Result {
} }
// Retrieve metrics from the kubelet server of the given node. // Retrieve metrics from the kubelet server of the given node.
func getKubeletMetrics(c *client.Client, node string) (string, error) { func getKubeletMetrics(c *client.Client, node string) (string, error) {
metric, err := nodeProxyRequest(c, node, "metrics").Raw() metric, err := nodeProxyRequest(c, node, "metrics").Raw()
if err != nil { if err != nil {
return "", err return "", err

View File

@ -1133,7 +1133,6 @@ func dumpPodDebugInfo(c *client.Client, pods []*api.Pod) {
} }
func dumpNodeDebugInfo(c *client.Client, nodeNames []string) { func dumpNodeDebugInfo(c *client.Client, nodeNames []string) {
// TODO: Actually log running pods instead of the pods in the pod manager.
for _, n := range nodeNames { for _, n := range nodeNames {
Logf("\nLogging pods the kubelet thinks is on node %v", n) Logf("\nLogging pods the kubelet thinks is on node %v", n)
podList, err := GetKubeletPods(c, n) podList, err := GetKubeletPods(c, n)
@ -1142,9 +1141,7 @@ func dumpNodeDebugInfo(c *client.Client, nodeNames []string) {
continue continue
} }
for _, p := range podList.Items { for _, p := range podList.Items {
// If the pod is in pending it's probably not going to have a starttime or container statuses, since Logf("%v started at %v (%d container statuses recorded)", p.Name, p.Status.StartTime, len(p.Status.ContainerStatuses))
// we're only checking the pod manager. This should change.
Logf("%v started at %v (%d containers)", p.Name, p.Status.StartTime, len(p.Status.ContainerStatuses))
for _, c := range p.Status.ContainerStatuses { for _, c := range p.Status.ContainerStatuses {
Logf("\tContainer %v ready: %v, restart count %v", Logf("\tContainer %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount) c.Name, c.Ready, c.RestartCount)
@ -1153,7 +1150,6 @@ func dumpNodeDebugInfo(c *client.Client, nodeNames []string) {
HighLatencyKubeletOperations(c, 10*time.Second, n) HighLatencyKubeletOperations(c, 10*time.Second, n)
// TODO: Log node resource info // TODO: Log node resource info
} }
} }
func ScaleRC(c *client.Client, ns, name string, size uint) error { func ScaleRC(c *client.Client, ns, name string, size uint) error {