From 9832ae125915e1db3afc0f186287445f85cd3000 Mon Sep 17 00:00:00 2001
From: Mik Vyatskov
Date: Mon, 17 Oct 2016 21:04:25 +0200
Subject: [PATCH] Update elasticsearch and kibana usage

---
 .../fluentd-elasticsearch/es-controller.yaml       |  4 +-
 .../kibana-controller.yaml                         |  2 +-
 .../fluentd-elasticsearch/es-controller.yaml       |  8 ++--
 .../kibana-controller.yaml                         |  2 +-
 test/e2e/cluster_logging_es.go                     | 48 ++++---------
 5 files changed, 17 insertions(+), 47 deletions(-)

diff --git a/cluster/addons/fluentd-elasticsearch/es-controller.yaml b/cluster/addons/fluentd-elasticsearch/es-controller.yaml
index b4f209485e6..d24d6ed2fee 100644
--- a/cluster/addons/fluentd-elasticsearch/es-controller.yaml
+++ b/cluster/addons/fluentd-elasticsearch/es-controller.yaml
@@ -20,8 +20,8 @@ spec:
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
-      - image: gcr.io/google_containers/elasticsearch:1.9
-        name: elasticsearch-logging
+      - image: gcr.io/google_containers/elasticsearch:v2.4.1
+        name: elasticsearch-logging
         resources:
           # need more cpu upon initialization, therefore burstable class
           limits:
diff --git a/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml b/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml
index f438bce4ecc..bd8c430d872 100644
--- a/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml
+++ b/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml
@@ -21,7 +21,7 @@ spec:
     spec:
      containers:
       - name: kibana-logging
-        image: gcr.io/google_containers/kibana:1.3
+        image: gcr.io/google_containers/kibana:v4.6.1
         resources:
           # keep request = limit to keep this container in guaranteed class
           limits:
diff --git a/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/es-controller.yaml b/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/es-controller.yaml
index c875984e108..d24d6ed2fee 100644
--- a/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/es-controller.yaml
+++ b/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/es-controller.yaml
@@ -20,12 +20,12 @@ spec:
         kubernetes.io/cluster-service: "true"
     spec:
       containers:
-      - image: gcr.io/google_containers/elasticsearch:1.9
-        name: elasticsearch-logging
+      - image: gcr.io/google_containers/elasticsearch:v2.4.1
+        name: elasticsearch-logging
         resources:
-          # keep request = limit to keep this container in guaranteed class
+          # need more cpu upon initialization, therefore burstable class
           limits:
-            cpu: 100m
+            cpu: 1000m
           requests:
             cpu: 100m
         ports:
diff --git a/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/kibana-controller.yaml b/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/kibana-controller.yaml
index 0b96d053e55..3ec688e1f32 100644
--- a/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/kibana-controller.yaml
+++ b/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/kibana-controller.yaml
@@ -21,7 +21,7 @@ spec:
     spec:
       containers:
       - name: kibana-logging
-        image: gcr.io/google_containers/kibana:1.3
+        image: gcr.io/google_containers/kibana:v4.6.1
         resources:
           # keep request = limit to keep this container in guaranteed class
           limits:
diff --git a/test/e2e/cluster_logging_es.go b/test/e2e/cluster_logging_es.go
index 339a72c579b..8cdd85906d8 100644
--- a/test/e2e/cluster_logging_es.go
+++ b/test/e2e/cluster_logging_es.go
@@ -111,8 +111,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
 	By("Checking to make sure we are talking to an Elasticsearch service.")
 	// Perform a few checks to make sure this looks like an
 	// Elasticsearch cluster.
-	var statusCode float64
-	var esResponse map[string]interface{}
+	var statusCode int
 	err = nil
 	var body []byte
 	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(10 * time.Second) {
@@ -122,52 +121,25 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
 			continue
 		}
 		// Query against the root URL for Elasticsearch.
-		body, err = proxyRequest.Namespace(api.NamespaceSystem).
+		response := proxyRequest.Namespace(api.NamespaceSystem).
 			Name("elasticsearch-logging").
-			DoRaw()
+			Do()
+		err = response.Error()
+		response.StatusCode(&statusCode)
+
 		if err != nil {
 			framework.Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
 			continue
 		}
-		err = json.Unmarshal(body, &esResponse)
-		if err != nil {
-			framework.Logf("After %v failed to convert Elasticsearch JSON response %v to map[string]interface{}: %v", time.Since(start), string(body), err)
-			continue
-		}
-		statusIntf, ok := esResponse["status"]
-		if !ok {
-			framework.Logf("After %v Elasticsearch response has no status field: %v", time.Since(start), esResponse)
-			continue
-		}
-		statusCode, ok = statusIntf.(float64)
-		if !ok {
-			// Assume this is a string returning Failure. Retry.
-			framework.Logf("After %v expected status to be a float64 but got %v of type %T", time.Since(start), statusIntf, statusIntf)
-			continue
-		}
 		if int(statusCode) != 200 {
 			framework.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode)
 			continue
 		}
 		break
 	}
-
-	if err != nil {
-		return err
-	}
-
+	Expect(err).NotTo(HaveOccurred())
 	if int(statusCode) != 200 {
-		return fmt.Errorf("Elasticsearch cluster has a bad status: %v", statusCode)
-	}
-
-	// Check to see if have a cluster_name field.
-	clusterName, ok := esResponse["cluster_name"]
-	if !ok {
-		return fmt.Errorf("No cluster_name field in Elasticsearch response: %v", esResponse)
-	}
-
-	if clusterName != "kubernetes-logging" {
-		return fmt.Errorf("Connected to wrong cluster %q (expecting kubernetes_logging)", clusterName)
+		framework.Failf("Elasticsearch cluster has a bad status: %v", statusCode)
 	}
 
 	// Now assume we really are talking to an Elasticsearch instance.
@@ -188,8 +160,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
 		if err != nil {
 			continue
 		}
-
-		var health map[string]interface{}
+		health := make(map[string]interface{})
 		err := json.Unmarshal(body, &health)
 		if err != nil {
 			framework.Logf("Bad json response from elasticsearch: %v", err)
@@ -210,7 +181,6 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
 			break
 		}
 	}
-
 	if !healthy {
 		return fmt.Errorf("After %v elasticsearch cluster is not healthy", graceTime)
 	}
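-- 

The readiness check this patch trims down is, at heart, a poll-until-healthy
loop: retry every 10 seconds until the cluster reports a good status or
graceTime runs out. Below is a minimal standalone sketch of that pattern in
Go, assuming a plain HTTP endpoint rather than the apiserver proxy the test
goes through; the waitForGreen name, the _cluster/health URL, and treating
only "green" as healthy are illustrative assumptions, not code from this
commit.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"time"
)

// waitForGreen polls an Elasticsearch cluster-health endpoint until it
// reports status "green" or until graceTime elapses, mirroring the retry
// loop in checkElasticsearchReadiness above.
func waitForGreen(url string, graceTime time.Duration) error {
	for start := time.Now(); time.Since(start) < graceTime; time.Sleep(10 * time.Second) {
		resp, err := http.Get(url)
		if err != nil {
			continue // endpoint not reachable yet; retry
		}
		health := make(map[string]interface{})
		err = json.NewDecoder(resp.Body).Decode(&health)
		resp.Body.Close()
		if err != nil {
			continue // malformed JSON response; retry
		}
		if status, ok := health["status"].(string); ok && status == "green" {
			return nil
		}
	}
	return fmt.Errorf("after %v elasticsearch cluster is not healthy", graceTime)
}

func main() {
	// Example usage; the URL is a placeholder.
	if err := waitForGreen("http://localhost:9200/_cluster/health", 5*time.Minute); err != nil {
		fmt.Println(err)
	}
}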