Make the Elasticsearch logging pods discover each other

Satnam Singh 2015-05-08 13:48:51 -07:00
parent 7d620c20b9
commit 7cff506c93
7 changed files with 152 additions and 26 deletions

View File

@@ -6,7 +6,7 @@ metadata:
kubernetes.io/cluster-service: "true"
name: elasticsearch-logging
spec:
replicas: 1
replicas: 2
selector:
name: elasticsearch-logging
template:
@@ -16,7 +16,7 @@ spec:
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/elasticsearch:1.0
- image: gcr.io/google_containers/elasticsearch:1.2
name: elasticsearch-logging
ports:
- containerPort: 9200
@@ -26,8 +26,10 @@ spec:
name: es-transport-port
protocol: TCP
volumeMounts:
- mountPath: /data
name: es-persistent-storage
- name: token-system-logging
mountPath: /etc/token-system-logging
readOnly: true
volumes:
- name: es-persistent-storage
emptyDir: {}
- name: token-system-logging
secret:
secretName: token-system-logging

View File

@@ -14,15 +14,12 @@ RUN apt-get update && \
RUN cd / && \
curl -O https://download.elastic.co/elasticsearch/elasticsearch/elasticsearch-1.5.2.tar.gz && \
tar xf elasticsearch-1.5.2.tar.gz && \
mv elasticsearch-1.5.2 /elasticsearch && \
rm elasticsearch-1.5.2.tar.gz
ADD elasticsearch.yml /elasticsearch/config/elasticsearch.yml
COPY elasticsearch.yml /elasticsearch-1.5.2/config/elasticsearch.yml
COPY run.sh /
COPY elasticsearch_logging_discovery /
VOLUME ["/data"]
WORKDIR /data
EXPOSE 9200 9300
EXPOSE 9200
EXPOSE 9300
CMD ["/elasticsearch/bin/elasticsearch"]
CMD ["/run.sh"]

View File

@@ -1,9 +1,12 @@
.PHONY: build push
.PHONY: elasticsearch_logging_discovery build push
TAG = 1.1
TAG = 1.2
build:
build: elasticsearch_logging_discovery
docker build -t gcr.io/google_containers/elasticsearch:$(TAG) .
push:
gcloud preview docker push gcr.io/google_containers/elasticsearch:$(TAG)
elasticsearch_logging_discovery:
go build elasticsearch_logging_discovery.go

View File

@@ -1,8 +1,6 @@
# These are the Kubernetes-specific settings that differ
# from the default values.
cluster.name: kubernetes_logging
path.data: /data/data
path.work: /data/work
path.logs: /data/logs
path.plugins: /data/plugins
cluster.name: kubernetes-logging
node.master: ${NODE_MASTER}
node.data: ${NODE_DATA}
transport.tcp.port: ${TRANSPORT_PORT}
http.port: ${HTTP_PORT}
discovery.zen.ping.multicast.enabled: false
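
For illustration only (not one of the changed files): the ${NODE_MASTER}, ${NODE_DATA}, ${TRANSPORT_PORT} and ${HTTP_PORT} placeholders above are resolved by Elasticsearch from the environment that run.sh (below) exports. A minimal Go sketch of that substitution, assuming the default values run.sh sets:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Defaults that run.sh exports before starting Elasticsearch.
	os.Setenv("NODE_MASTER", "true")
	os.Setenv("NODE_DATA", "true")
	os.Setenv("TRANSPORT_PORT", "9300")
	os.Setenv("HTTP_PORT", "9200")

	config := "node.master: ${NODE_MASTER}\n" +
		"node.data: ${NODE_DATA}\n" +
		"transport.tcp.port: ${TRANSPORT_PORT}\n" +
		"http.port: ${HTTP_PORT}\n"

	// os.Expand replaces each ${NAME} with the value returned by os.Getenv,
	// roughly mirroring the placeholder resolution Elasticsearch performs
	// when it loads this config at startup.
	fmt.Print(os.Expand(config, os.Getenv))
}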

View File

@@ -0,0 +1,104 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"flag"
"fmt"
"strings"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd"
"github.com/golang/glog"
)
var (
kubeconfig = flag.String("kubeconfig", "/etc/token-system-logging/kubeconfig", "kubeconfig file for access")
)
func flattenSubsets(subsets []api.EndpointSubset) []string {
ips := []string{}
for _, ss := range subsets {
for _, addr := range ss.Addresses {
ips = append(ips, fmt.Sprintf(`"%s"`, addr.IP))
}
}
return ips
}
func main() {
flag.Parse()
glog.Info("Kubernetes Elasticsearch logging discovery")
settings, err := clientcmd.LoadFromFile(*kubeconfig)
if err != nil {
glog.Fatalf("Error loading configuration from %s: %v", *kubeconfig, err.Error())
}
config, err := clientcmd.NewDefaultClientConfig(*settings, &clientcmd.ConfigOverrides{}).ClientConfig()
if err != nil {
glog.Fatalf("Failed to construct config: %v", err)
}
c, err := client.New(config)
if err != nil {
glog.Fatalf("Failed to make client: %v", err)
}
var elasticsearch *api.Service
// Look for endpoints associated with the Elasticsearch logging service.
// First wait for the service to become available.
for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
elasticsearch, err = c.Services(api.NamespaceDefault).Get("elasticsearch-logging")
if err == nil {
break
}
}
// If we did not find an elasticsearch logging service then log a warning
// and return without adding any unicast hosts.
if elasticsearch == nil {
glog.Warningf("Failed to find the elasticsearch-logging service: %v", err)
return
}
var endpoints *api.Endpoints
addrs := []string{}
// Wait for some endpoints.
count := 0
for t := time.Now(); time.Since(t) < 5*time.Minute; time.Sleep(10 * time.Second) {
endpoints, err = c.Endpoints(api.NamespaceDefault).Get("elasticsearch-logging")
if err != nil {
continue
}
addrs = flattenSubsets(endpoints.Subsets)
glog.Infof("Found %s", addrs)
if len(addrs) > 0 && len(addrs) == count {
break
}
count = len(addrs)
}
// If there was an error finding endpoints then log a warning and quit.
if err != nil {
glog.Warningf("Error finding endpoints: %v", err)
return
}
glog.Infof("Endpoints = %s", addrs)
fmt.Printf("discovery.zen.ping.unicast.hosts: [%s]\n", strings.Join(addrs, ", "))
}
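
For illustration only (not one of the changed files): a self-contained Go sketch, using hypothetical pod IPs, of the single line this discovery program prints, which run.sh below appends to the Elasticsearch config:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical pod IPs, as they might appear in the
	// elasticsearch-logging endpoints with two replicas.
	ips := []string{"10.244.1.5", "10.244.2.7"}

	// Quote each address, as flattenSubsets does above.
	quoted := make([]string, 0, len(ips))
	for _, ip := range ips {
		quoted = append(quoted, fmt.Sprintf("%q", ip))
	}

	// Prints: discovery.zen.ping.unicast.hosts: ["10.244.1.5", "10.244.2.7"]
	fmt.Printf("discovery.zen.ping.unicast.hosts: [%s]\n", strings.Join(quoted, ", "))
}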

View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
export NODE_MASTER=${NODE_MASTER:-true}
export NODE_DATA=${NODE_DATA:-true}
/elasticsearch_logging_discovery >> /elasticsearch-1.5.2/config/elasticsearch.yml
export HTTP_PORT=${HTTP_PORT:-9200}
export TRANSPORT_PORT=${TRANSPORT_PORT:-9300}
/elasticsearch-1.5.2/bin/elasticsearch

View File

@@ -134,7 +134,7 @@ func ClusterLevelLoggingWithElasticsearch(c *client.Client) {
if !ok {
Failf("No cluster_name field in Elasticsearch response: %v", esResponse)
}
if clusterName != "kubernetes_logging" {
if clusterName != "kubernetes-logging" {
Failf("Connected to wrong cluster %q (expecting kubernetes_logging)", clusterName)
}