Update elasticsearch and kibana usage

Author: Mik Vyatskov
Date: 2016-10-17 21:04:25 +02:00
parent 714f816a34
commit 9832ae1259
5 changed files with 17 additions and 47 deletions

View File

@@ -20,7 +20,7 @@ spec:
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
-      - image: gcr.io/google_containers/elasticsearch:1.9
+      - image: gcr.io/google_containers/elasticsearch:v2.4.1
         name: elasticsearch-logging
         resources:
           # need more cpu upon initialization, therefore burstable class

View File

@@ -21,7 +21,7 @@ spec:
     spec:
       containers:
       - name: kibana-logging
-        image: gcr.io/google_containers/kibana:1.3
+        image: gcr.io/google_containers/kibana:v4.6.1
         resources:
           # keep request = limit to keep this container in guaranteed class
           limits:

View File

@@ -20,12 +20,12 @@ spec:
         kubernetes.io/cluster-service: "true"
     spec:
      containers:
-      - image: gcr.io/google_containers/elasticsearch:1.9
+      - image: gcr.io/google_containers/elasticsearch:v2.4.1
         name: elasticsearch-logging
         resources:
-          # keep request = limit to keep this container in guaranteed class
+          # need more cpu upon initialization, therefore burstable class
           limits:
-            cpu: 100m
+            cpu: 1000m
           requests:
             cpu: 100m
         ports:
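
Note: raising the cpu limit above the request moves this container out of the Guaranteed QoS class into Burstable, so Elasticsearch may burst up to a full core during initialization while still reserving only 100m.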

View File

@@ -21,7 +21,7 @@ spec:
     spec:
       containers:
       - name: kibana-logging
-        image: gcr.io/google_containers/kibana:1.3
+        image: gcr.io/google_containers/kibana:v4.6.1
         resources:
           # keep request = limit to keep this container in guaranteed class
           limits:

View File

@@ -111,8 +111,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
 	By("Checking to make sure we are talking to an Elasticsearch service.")
 	// Perform a few checks to make sure this looks like an Elasticsearch cluster.
-	var statusCode float64
-	var esResponse map[string]interface{}
+	var statusCode int
 	err = nil
 	var body []byte
 	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(10 * time.Second) {
@@ -122,52 +121,25 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
 			continue
 		}
 		// Query against the root URL for Elasticsearch.
-		body, err = proxyRequest.Namespace(api.NamespaceSystem).
+		response := proxyRequest.Namespace(api.NamespaceSystem).
 			Name("elasticsearch-logging").
-			DoRaw()
+			Do()
+		err = response.Error()
+		response.StatusCode(&statusCode)
 		if err != nil {
 			framework.Logf("After %v proxy call to elasticsearch-logging failed: %v", time.Since(start), err)
 			continue
 		}
-		err = json.Unmarshal(body, &esResponse)
-		if err != nil {
-			framework.Logf("After %v failed to convert Elasticsearch JSON response %v to map[string]interface{}: %v", time.Since(start), string(body), err)
-			continue
-		}
-		statusIntf, ok := esResponse["status"]
-		if !ok {
-			framework.Logf("After %v Elasticsearch response has no status field: %v", time.Since(start), esResponse)
-			continue
-		}
-		statusCode, ok = statusIntf.(float64)
-		if !ok {
-			// Assume this is a string returning Failure. Retry.
-			framework.Logf("After %v expected status to be a float64 but got %v of type %T", time.Since(start), statusIntf, statusIntf)
-			continue
-		}
 		if int(statusCode) != 200 {
 			framework.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode)
 			continue
 		}
 		break
 	}
-	Expect(err).NotTo(HaveOccurred())
+	if err != nil {
+		return err
+	}
 	if int(statusCode) != 200 {
-		return fmt.Errorf("Elasticsearch cluster has a bad status: %v", statusCode)
-	}
-	// Check to see if have a cluster_name field.
-	clusterName, ok := esResponse["cluster_name"]
-	if !ok {
-		return fmt.Errorf("No cluster_name field in Elasticsearch response: %v", esResponse)
-	}
-	if clusterName != "kubernetes-logging" {
-		return fmt.Errorf("Connected to wrong cluster %q (expecting kubernetes_logging)", clusterName)
+		framework.Failf("Elasticsearch cluster has a bad status: %v", statusCode)
 	}
 	// Now assume we really are talking to an Elasticsearch instance.
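The rewritten check above trusts the proxy call's HTTP status code (read back through Do(), Error(), and StatusCode(&statusCode)) instead of unmarshalling and inspecting the response body. A minimal standalone sketch of the same poll-until-200 pattern, assuming a plain HTTP endpoint (esURL is illustrative; the real test goes through the API-server proxy):

package main

import (
	"fmt"
	"net/http"
	"time"
)

// waitForHTTP200 polls url until it answers 200 OK or graceTime elapses,
// mirroring the test's loop: on any error or bad status it logs,
// sleeps 10 seconds, and retries instead of failing immediately.
func waitForHTTP200(url string, graceTime time.Duration) error {
	statusCode := 0
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(10 * time.Second) {
		resp, err := http.Get(url)
		if err != nil {
			fmt.Printf("After %v call to %s failed: %v\n", time.Since(start), url, err)
			continue
		}
		statusCode = resp.StatusCode
		resp.Body.Close()
		if statusCode != http.StatusOK {
			fmt.Printf("After %v got a bad status: %v\n", time.Since(start), statusCode)
			continue
		}
		return nil
	}
	return fmt.Errorf("no 200 response within %v (last status: %d)", graceTime, statusCode)
}

func main() {
	// esURL is an assumed local Elasticsearch address, for illustration only.
	esURL := "http://localhost:9200"
	if err := waitForHTTP200(esURL, 2*time.Minute); err != nil {
		fmt.Println(err)
	}
}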
@@ -188,8 +160,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
 		if err != nil {
 			continue
 		}
-		health := make(map[string]interface{})
+		var health map[string]interface{}
 		err := json.Unmarshal(body, &health)
 		if err != nil {
 			framework.Logf("Bad json response from elasticsearch: %v", err)
@@ -210,7 +181,6 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
 			break
 		}
-	}
 	if !healthy {
 		return fmt.Errorf("After %v elasticsearch cluster is not healthy", graceTime)
 	}
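
The retained health check decodes the _cluster/health body into a generic map, now declared with var rather than allocated with make, since json.Unmarshal allocates the map itself. A sketch of that decoding step under a green/yellow acceptance criterion (the criterion is assumed here; the lines that apply it fall outside the hunks shown):

package main

import (
	"encoding/json"
	"fmt"
)

// isClusterHealthy reports whether an Elasticsearch _cluster/health body
// carries a "status" of "green" or "yellow".
func isClusterHealthy(body []byte) (bool, error) {
	var health map[string]interface{}
	if err := json.Unmarshal(body, &health); err != nil {
		return false, fmt.Errorf("bad json response from elasticsearch: %v", err)
	}
	statusIntf, ok := health["status"]
	if !ok {
		return false, fmt.Errorf("no status field in response: %v", health)
	}
	status, ok := statusIntf.(string)
	if !ok {
		return false, fmt.Errorf("status is not a string: %v", statusIntf)
	}
	return status == "green" || status == "yellow", nil
}

func main() {
	// Illustrative payload; a real body would come from the _cluster/health endpoint.
	body := []byte(`{"cluster_name":"elasticsearch","status":"yellow"}`)
	fmt.Println(isClusterHealthy(body))
}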