Launch Elasticsearch and Kibana automatically
cluster/addons/fluentd-elasticsearch/es-controller.yaml.in (new file, 32 lines)
@@ -0,0 +1,32 @@
apiVersion: v1beta1
kind: ReplicationController
id: elasticsearch-logging-controller
desiredState:
  replicas: {ELASTICSEARCH_LOGGING_REPLICAS}
  replicaSelector:
    name: elasticsearch-logging
  podTemplate:
    desiredState:
      manifest:
        version: v1beta1
        id: es-log-ingestion
        containers:
          - name: elasticsearch-logging
            image: dockerfile/elasticsearch
            ports:
              - name: es-port
                containerPort: 9200
              - name: es-transport-port
                containerPort: 9300
            volumeMounts:
              - name: es-persistent-storage
                mountPath: /data
        volumes:
          - name: es-persistent-storage
            source:
              emptyDir: {}
    labels:
      name: elasticsearch-logging
labels:
  name: elasticsearch-logging
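Editor's note: the `.yaml.in` suffix marks this file as a template. At cluster bring-up the `{ELASTICSEARCH_LOGGING_REPLICAS}` placeholder is substituted before the controller is created; the GCE `setup-logging` function added later in this commit does exactly that. A minimal sketch of the substitution, using the paths introduced here:

```bash
# Substitute the replica count and create the controller; this mirrors what
# setup-logging in cluster/gce/util.sh does with the template.
ELASTICSEARCH_LOGGING_REPLICAS=1
sed -e "s/{ELASTICSEARCH_LOGGING_REPLICAS}/${ELASTICSEARCH_LOGGING_REPLICAS}/g" \
  cluster/addons/fluentd-elasticsearch/es-controller.yaml.in | \
  cluster/kubectl.sh create -f -
```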
cluster/addons/fluentd-elasticsearch/es-service.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
apiVersion: v1beta1
kind: Service
id: elasticsearch-logging
containerPort: es-port
port: 9200
selector:
  name: elasticsearch-logging
createExternalLoadBalancer: true
@@ -0,0 +1,37 @@
# This Dockerfile will build an image that is configured
# to run Fluentd with an Elasticsearch plug-in and the
# provided configuration file.
# TODO(satnam6502): Use a lighter base image, e.g. some form of busybox.
# The image acts as an executable for the binary /usr/sbin/td-agent.
# Note that fluentd is run with root permission to allow access to
# log files with root only access under /var/lib/docker/containers/*
# Please see http://docs.fluentd.org/articles/install-by-deb for more
# information about installing fluentd using deb package.

FROM ubuntu:14.04
MAINTAINER Satnam Singh "satnam@google.com"

# Ensure there are enough file descriptors for running Fluentd.
RUN ulimit -n 65536

# Install prerequisites.
RUN apt-get update && \
    apt-get install -y curl && \
    apt-get install -y -q libcurl4-openssl-dev make && \
    apt-get clean

# Install Fluentd.
RUN /usr/bin/curl -L http://toolbelt.treasuredata.com/sh/install-ubuntu-trusty-td-agent2.sh | sh

# Change the default user and group to root.
# Needed to allow access to /var/log/docker/... files.
RUN sed -i -e "s/USER=td-agent/USER=root/" -e "s/GROUP=td-agent/GROUP=root/" /etc/init.d/td-agent

# Install the Elasticsearch Fluentd plug-in.
RUN /usr/sbin/td-agent-gem install fluent-plugin-elasticsearch

# Copy the Fluentd configuration file.
COPY td-agent.conf /etc/td-agent/td-agent.conf

# Run the Fluentd service.
CMD /usr/sbin/td-agent -qq > /var/log/td-agent/td-agent.log
@@ -0,0 +1,9 @@
.PHONY: build push

TAG = 1.0

build:
	sudo docker build -t kubernetes/fluentd-elasticsearch:$(TAG) .

push:
	sudo docker push kubernetes/fluentd-elasticsearch:$(TAG)
@@ -0,0 +1,7 @@
# Collecting Docker Log Files with Fluentd and Elasticsearch
This directory contains the source files needed to make a Docker image
that collects Docker container log files using [Fluentd](http://www.fluentd.org/)
and sends them to an instance of [Elasticsearch](http://www.elasticsearch.org/).
This image is designed to be used as part of the [Kubernetes](https://github.com/GoogleCloudPlatform/kubernetes)
cluster bring up process. The image resides at DockerHub under the name
[kubernetes/fluentd-elasticsearch](https://registry.hub.docker.com/u/kubernetes/fluentd-elasticsearch/).
@@ -0,0 +1,80 @@
# This configuration file for Fluentd / td-agent is used
# to watch changes to Docker log files that live in the
# directory /var/lib/docker/containers/ which are then submitted to
# Elasticsearch (running on the machine %ES_HOST%:9200) which
# assumes the installation of the fluentd-elasticsearch plug-in.
# See https://github.com/uken/fluent-plugin-elasticsearch for
# more information about the plug-in. This file needs to be
# patched to replace ES_HOST with the name of the actual
# machine running Elasticsearch.
# Maintainer: Satnam Singh (satnam@google.com)
#
# Example
# =======
# A line in the Docker log file might look like this JSON:
#
# {"log":"2014/09/25 21:15:03 Got request with path wombat\n",
#  "stream":"stderr",
#  "time":"2014-09-25T21:15:03.499185026Z"}
#
# The time_format specification below makes sure we properly
# parse the time format produced by Docker. This will be
# submitted to Elasticsearch and should appear like:
# $ curl 'http://elasticsearch:9200/_search?pretty'
# ...
# {
#   "_index" : "logstash-2014.09.25",
#   "_type" : "fluentd",
#   "_id" : "VBrbor2QTuGpsQyTCdfzqA",
#   "_score" : 1.0,
#   "_source":{"log":"2014/09/25 22:45:50 Got request with path wombat\n",
#              "stream":"stderr","tag":"docker.container.all",
#              "@timestamp":"2014-09-25T22:45:50+00:00"}
# },
# ...

<source>
  type tail
  format json
  time_key time
  path /var/lib/docker/containers/*/*-json.log
  pos_file /var/lib/docker/containers/containers.log.pos
  time_format %Y-%m-%dT%H:%M:%S
  tag docker.container.*
</source>

<match docker.container.**>
  type elasticsearch
  log_level info
  include_tag_key true
  host elasticsearch-logging.default
  port 9200
  logstash_format true
  flush_interval 5s
  # Never wait longer than 5 minutes between retries.
  max_retry_wait 300
  # Disable the limit on the number of retries (retry forever).
  disable_retry_limit
</match>

<source>
  type tail
  format none
  path /varlog/kubelet.log
  pos_file /varlog/kubelet.log.pos
  tag kubelet
</source>

<match kubelet>
  type elasticsearch
  log_level info
  include_tag_key true
  host elasticsearch-logging.default
  port 9200
  logstash_format true
  flush_interval 5s
  # Never wait longer than 5 minutes between retries.
  max_retry_wait 300
  # Disable the limit on the number of retries (retry forever).
  disable_retry_limit
</match>
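Editor's note: once td-agent is running with this configuration, ingestion can be checked with a query against the logstash-format indices that the plug-in creates (a sketch under the assumption that `ES_IP` has been set by the operator to an address where the elasticsearch-logging service is reachable):

```bash
# Ask Elasticsearch for a few recent documents; entries tagged
# docker.container.* or kubelet should appear once fluentd has flushed.
ES_IP="${ES_IP:-127.0.0.1}"   # assumption: address of the elasticsearch-logging service
curl "http://${ES_IP}:9200/logstash-*/_search?q=tag:kubelet&size=3&pretty"
```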
cluster/addons/fluentd-elasticsearch/kibana-controller.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
apiVersion: v1beta1
kind: ReplicationController
id: kibana-logging-controller
desiredState:
  replicas: 1
  replicaSelector:
    name: kibana-logging
  podTemplate:
    desiredState:
      manifest:
        version: v1beta1
        id: kibana-viewer
        containers:
          - name: kibana-logging
            image: kubernetes/kibana:1.0
            ports:
              - name: kibana-port
                containerPort: 80
    labels:
      name: kibana-logging
labels:
  name: kibana-logging
cluster/addons/fluentd-elasticsearch/kibana-image/Dockerfile (new file, 24 lines)
@@ -0,0 +1,24 @@
# A Dockerfile for creating a Kibana container that is designed
# to work with Kubernetes logging.

FROM ubuntu:14.04
MAINTAINER Satnam Singh "satnam@google.com"

ENV DEBIAN_FRONTEND noninteractive

RUN apt-get update && \
    apt-get install -y nginx-full curl && \
    apt-get clean

RUN curl -O http://download.elasticsearch.org/kibana/kibana/kibana-3.1.1.tar.gz && \
    tar xf kibana-3.1.1.tar.gz && \
    mv kibana-3.1.1/* /usr/share/nginx/html && \
    rm kibana-3.1.1.tar.gz

RUN echo "daemon off;" >> /etc/nginx/nginx.conf

# ADD default /etc/nginx/sites-available/default
ADD run_kibana_nginx.sh /usr/local/bin/run_kibana_nginx.sh

EXPOSE 80
CMD ["/usr/local/bin/run_kibana_nginx.sh"]
cluster/addons/fluentd-elasticsearch/kibana-image/Makefile (new executable file, 9 lines)
@@ -0,0 +1,9 @@
.PHONY: build push

TAG = 1.0

build:
	docker build -t kubernetes/kibana:$(TAG) .

push:
	docker push kubernetes/kibana:$(TAG)
cluster/addons/fluentd-elasticsearch/kibana-image/run_kibana_nginx.sh (new executable file, 183 lines)
@@ -0,0 +1,183 @@
#!/bin/bash

# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# The code below is designed to support two specific scenarios for
# using Elasticsearch and Kibana with Kubernetes. In both cases the
# environment variables PROXY_HOST and PROXY_PORT identify the instance
# of Elasticsearch to be used by Kibana. The default value for ES_HOST
# identifies the location that served the Javascript for Kibana and
# the default value of ES_PORT 5601 is the port to be used for connecting
# to Kibana. Both of these may be overridden if required. The two scenarios are:
# 1. Elasticsearch and Kibana containers running in a single pod. In this
#    case PROXY_HOST is set to the local host i.e. 127.0.0.1 and the
#    PROXY_PORT is set to 9200 because Elasticsearch is running on the
#    same node as Kibana. If KIBANA_IP is the external IP address of
#    the Kubernetes Kibana service then all requests to:
#      KIBANA_LOGGING_SERVICE:$ES_PORT/elasticsearch/XXX
#    are proxied to:
#      http://127.0.0.1:9200/XXX
# 2. Elasticsearch and Kibana are run in separate pods and Elasticsearch
#    has an IP and port exposed via a Kubernetes service. In this case
#    the Elasticsearch service *must* be called 'elasticsearch' and then
#    all requests sent to:
#      KIBANA_LOGGING_SERVICE:$ES_PORT/elasticsearch/XXX
#    are proxied to:
#      http://$ELASTICSEARCH_LOGGING_SERVICE_HOST:$ELASTICSEARCH_LOGGING_SERVICE_PORT/XXX
# The proxy configuration occurs in a location block of the nginx configuration
# file /etc/nginx/sites-available/default.

set -o errexit
set -o nounset
set -o pipefail

# Report all environment variables containing 'elasticsearch'
set | grep -i elasticsearch
# Set the default value for the Elasticsearch host as seen by the client
# Javascript code for Kibana.
: ${ES_HOST:='"+window.location.hostname+"'}
echo ES_HOST=$ES_HOST
# Set the default port for Elasticsearch host as seen by the client
# Javascript for Kibana.
: ${ES_PORT:=5601}
echo ES_PORT=$ES_PORT
# Set the default host IP and port for Elasticsearch as seen by the proxy
# code in the configuration for nginx. If a Kubernetes Elasticsearch
# service called 'elasticsearch' is defined, use that. Otherwise, use
# a local instance of Elasticsearch on port 9200.
PROXY_HOST=${ELASTICSEARCH_LOGGING_SERVICE_HOST:-127.0.0.1}
echo PROXY_HOST=${PROXY_HOST}
PROXY_PORT=${ELASTICSEARCH_LOGGING_SERVICE_PORT:-9200}
echo PROXY_PORT=${PROXY_PORT}
# Test the connection to Elasticsearch
echo "Running curl http://${PROXY_HOST}:${PROXY_PORT}"
curl http://${PROXY_HOST}:${PROXY_PORT}

# Create a config.js that defines the Elasticsearch server to be
# at http://${ES_HOST}:${ES_PORT}/elasticsearch from the perspective of
# the client Javascript code.
cat << EOF > /usr/share/nginx/html/config.js
/** @scratch /configuration/config.js/1
 *
 * == Configuration
 * config.js is where you will find the core Kibana configuration. This file contains parameter that
 * must be set before kibana is run for the first time.
 */
define(['settings'],
function (Settings) {


  /** @scratch /configuration/config.js/2
   *
   * === Parameters
   */
  return new Settings({

    /** @scratch /configuration/config.js/5
     *
     * ==== elasticsearch
     *
     * The URL to your elasticsearch server. You almost certainly don't
     * want +http://localhost:9200+ here. Even if Kibana and Elasticsearch are on
     * the same host. By default this will attempt to reach ES at the same host you have
     * kibana installed on. You probably want to set it to the FQDN of your
     * elasticsearch host
     *
     * Note: this can also be an object if you want to pass options to the http client. For example:
     *
     *  +elasticsearch: {server: "http://localhost:9200", withCredentials: true}+
     *
     */
    elasticsearch: "http://${ES_HOST}:${ES_PORT}/elasticsearch",

    /** @scratch /configuration/config.js/5
     *
     * ==== default_route
     *
     * This is the default landing page when you don't specify a dashboard to load. You can specify
     * files, scripts or saved dashboards here. For example, if you had saved a dashboard called
     * WebLogs to elasticsearch you might use:
     *
     * default_route: '/dashboard/elasticsearch/WebLogs',
     */
    default_route     : '/dashboard/file/logstash.json',

    /** @scratch /configuration/config.js/5
     *
     * ==== kibana-int
     *
     * The default ES index to use for storing Kibana specific object
     * such as stored dashboards
     */
    kibana_index: "kibana-int",

    /** @scratch /configuration/config.js/5
     *
     * ==== panel_name
     *
     * An array of panel modules available. Panels will only be loaded when they are defined in the
     * dashboard, but this list is used in the "add panel" interface.
     */
    panel_names: [
      'histogram',
      'map',
      'goal',
      'table',
      'filtering',
      'timepicker',
      'text',
      'hits',
      'column',
      'trends',
      'bettermap',
      'query',
      'terms',
      'stats',
      'sparklines'
    ]
  });
});
EOF

# Proxy all calls to ...:80/elasticsearch to the location
# defined by http://${PROXY_HOST}:${PROXY_PORT}
cat <<EOF > /etc/nginx/sites-available/default
server {
	listen 80 default_server;
	listen [::]:80 default_server ipv6only=on;

	root /usr/share/nginx/html;
	index index.html index.htm;

	# Make site accessible from http://localhost/
	server_name localhost;

	location ~ /elasticsearch/?(.*)$ {
		proxy_http_version 1.1;
		proxy_set_header Upgrade \$http_upgrade;
		proxy_read_timeout 1d;
		proxy_set_header Connection "upgrade";
		proxy_pass http://${PROXY_HOST}:${PROXY_PORT}/\$1;
	}

	location / {
		# First attempt to serve request as file, then
		# as directory, then fall back to displaying a 404.
		try_files \$uri \$uri/ =404;
	}
}
EOF

exec nginx -c /etc/nginx/nginx.conf "$@"
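Editor's note: taken together with kibana-service.yaml below, the effect of the generated nginx configuration is that any request to the Kibana service's `/elasticsearch/...` path is forwarded to Elasticsearch. A hedged check, where `KIBANA_IP` is assumed to be the external IP of the kibana-logging service:

```bash
# The nginx location block rewrites /elasticsearch/<query> to
# http://PROXY_HOST:PROXY_PORT/<query>, so this request lands on Elasticsearch.
KIBANA_IP="${KIBANA_IP:-127.0.0.1}"   # assumption: external IP of the kibana-logging service
curl "http://${KIBANA_IP}:5601/elasticsearch/_search?pretty"
```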
cluster/addons/fluentd-elasticsearch/kibana-service.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
apiVersion: v1beta1
kind: Service
id: kibana-logging
containerPort: kibana-port
port: 5601
selector:
  name: kibana-logging
createExternalLoadBalancer: true
cluster/addons/fluentd-elasticsearch/logging-demo/Makefile (new file, 34 lines)
@@ -0,0 +1,34 @@
# Makefile for launching synthetic logging sources (any platform)
# and for reporting the forwarding rules for the
# Elasticsearch and Kibana pods for the GCE platform.


.PHONY: up down logger-up logger-down logger10-up logger10-down get net

KUBECTL=../../../kubectl.sh

up: logger-up logger10-up

down: logger-down logger10-down


logger-up:
	-${KUBECTL} create -f synthetic_0_25lps.yaml

logger-down:
	-${KUBECTL} delete pods synthetic-logger-0.25lps-pod

logger10-up:
	-${KUBECTL} create -f synthetic_10lps.yaml

logger10-down:
	-${KUBECTL} delete pods synthetic-logger-10lps-pod

get:
	${KUBECTL} get pods
	${KUBECTL} get replicationControllers
	${KUBECTL} get services

net:
	gcloud compute forwarding-rules describe elasticsearch-logging
	gcloud compute forwarding-rules describe kibana-logging
cluster/addons/fluentd-elasticsearch/logging-demo/README.md (new file, 111 lines)
@@ -0,0 +1,111 @@
# Elasticsearch/Kibana Logging Demonstration
This directory contains two pod specifications which can be used as synthetic
logging sources. The pod specification in [synthetic_0_25lps.yaml](synthetic_0_25lps.yaml)
describes a pod that just emits a log message once every 4 seconds:
```
# This pod specification creates an instance of a synthetic logger. The logger
# is simply a program that writes out the hostname of the pod, a count which increments
# by one on each iteration (to help notice missing log entries) and the date using
# a long format (RFC-3339) to nano-second precision. This program logs at a frequency
# of 0.25 lines per second. The shell script program is given directly to bash as a -c argument
# and could have been written out as:
#   i="0"
#   while true
#   do
#     echo -n "`hostname`: $i: "
#     date --rfc-3339 ns
#     sleep 4
#     i=$[$i+1]
#   done

apiVersion: v1beta1
kind: Pod
id: synthetic-logger-0.25lps-pod
desiredState:
  manifest:
    version: v1beta1
    id: synth-logger-0.25lps
    containers:
      - name: synth-lgr
        image: ubuntu:14.04
        command: ["bash", "-c", "i=\"0\"; while true; do echo -n \"`hostname`: $i: \"; date --rfc-3339 ns; sleep 4; i=$[$i+1]; done"]
labels:
  name: synth-logging-source
```

The other YAML file [synthetic_10lps.yaml](synthetic_10lps.yaml) specifies a similar synthetic logger that emits 10 log messages every second. To run both synthetic loggers:
```
$ make up
../../../kubectl.sh create -f synthetic_0_25lps.yaml
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl create -f synthetic_0_25lps.yaml
synthetic-logger-0.25lps-pod
../../../kubectl.sh create -f synthetic_10lps.yaml
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl create -f synthetic_10lps.yaml
synthetic-logger-10lps-pod

```

Visiting the Kibana dashboard should make it clear that logs are being collected from the two synthetic loggers:


You can report the running pods, replication controllers and services with another Makefile rule:
```
$ make get
../../../kubectl.sh get pods
Running: ../../../../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl get pods
POD                                    CONTAINER(S)            IMAGE(S)                           HOST                                                            LABELS                       STATUS
7e1c7ce6-9764-11e4-898c-42010af03582   kibana-logging          kubernetes/kibana                  kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169   name=kibana-logging          Running
synthetic-logger-0.25lps-pod           synth-lgr               ubuntu:14.04                       kubernetes-minion-2.c.kubernetes-elk.internal/146.148.41.87     name=synth-logging-source    Running
synthetic-logger-10lps-pod             synth-lgr               ubuntu:14.04                       kubernetes-minion-1.c.kubernetes-elk.internal/146.148.42.44     name=synth-logging-source    Running
influx-grafana                         influxdb                kubernetes/heapster_influxdb       kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169   name=influxdb                Running
                                       grafana                 kubernetes/heapster_grafana
                                       elasticsearch           dockerfile/elasticsearch
heapster                               heapster                kubernetes/heapster                kubernetes-minion-2.c.kubernetes-elk.internal/146.148.41.87     name=heapster                Running
67cfcb1f-9764-11e4-898c-42010af03582   etcd                    quay.io/coreos/etcd:latest         kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169   k8s-app=skydns               Running
                                       kube2sky                kubernetes/kube2sky:1.0
                                       skydns                  kubernetes/skydns:2014-12-23-001
6ba20338-9764-11e4-898c-42010af03582   elasticsearch-logging   dockerfile/elasticsearch           kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169   name=elasticsearch-logging   Running
../../../cluster/kubectl.sh get replicationControllers
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl get replicationControllers
CONTROLLER                         CONTAINER(S)            IMAGE(S)                           SELECTOR                     REPLICAS
skydns                             etcd                    quay.io/coreos/etcd:latest         k8s-app=skydns               1
                                   kube2sky                kubernetes/kube2sky:1.0
                                   skydns                  kubernetes/skydns:2014-12-23-001
elasticsearch-logging-controller   elasticsearch-logging   dockerfile/elasticsearch           name=elasticsearch-logging   1
kibana-logging-controller          kibana-logging          kubernetes/kibana                  name=kibana-logging          1
../../.../kubectl.sh get services
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl get services
NAME                    LABELS                                   SELECTOR                     IP             PORT
kubernetes-ro           component=apiserver,provider=kubernetes  <none>                       10.0.83.3      80
kubernetes              component=apiserver,provider=kubernetes  <none>                       10.0.79.4      443
influx-master           <none>                                   name=influxdb                10.0.232.223   8085
skydns                  k8s-app=skydns                           k8s-app=skydns               10.0.0.10      53
elasticsearch-logging   <none>                                   name=elasticsearch-logging   10.0.25.103    9200
kibana-logging          <none>                                   name=kibana-logging          10.0.208.114   5601

```
On the GCE provider you can also obtain the external IP addresses of the Elasticsearch and Kibana services:
```
$ make net
IPAddress: 130.211.120.118
IPProtocol: TCP
creationTimestamp: '2015-01-08T10:30:34.210-08:00'
id: '12815488049392139704'
kind: compute#forwardingRule
name: elasticsearch-logging
portRange: 9200-9200
region: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1
selfLink: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1/forwardingRules/elasticsearch-logging
target: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1/targetPools/elasticsearch-logging
gcloud compute forwarding-rules describe kibana-logging
IPAddress: 146.148.40.158
IPProtocol: TCP
creationTimestamp: '2015-01-08T10:31:05.715-08:00'
id: '2755171906970792849'
kind: compute#forwardingRule
name: kibana-logging
portRange: 5601-5601
region: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1
selfLink: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1/forwardingRules/kibana-logging
target: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1/targetPools/kibana-logging
```
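Editor's note: beyond `make get` and `make net`, the synthetic log lines themselves can be pulled back out of Elasticsearch once its external IP is known (a sketch; the elasticsearch-logging forwarding-rule IP reported by `make net` stands in for `ES_IP`):

```bash
# Search for entries written by the synthetic loggers; each line begins with
# the pod's hostname, so a plain full-text query on "synthetic" finds them.
ES_IP="${ES_IP:-130.211.120.118}"   # assumption: IP from `make net` output above
curl "http://${ES_IP}:9200/_search?q=synthetic&size=5&pretty"
```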
Binary image file (87 KiB) not shown.
@@ -0,0 +1,29 @@
# This pod specification creates an instance of a synthetic logger. The logger
# is simply a program that writes out the hostname of the pod, a count which increments
# by one on each iteration (to help notice missing log entries) and the date using
# a long format (RFC-3339) to nano-second precision. This program logs at a frequency
# of 0.25 lines per second. The shell script program is given directly to bash as a -c argument
# and could have been written out as:
#   i="0"
#   while true
#   do
#     echo -n "`hostname`: $i: "
#     date --rfc-3339 ns
#     sleep 4
#     i=$[$i+1]
#   done

apiVersion: v1beta1
kind: Pod
id: synthetic-logger-0.25lps-pod
desiredState:
  manifest:
    version: v1beta1
    id: synth-logger-0.25lps
    containers:
      - name: synth-lgr
        image: ubuntu:14.04
        command: ["bash", "-c", "i=\"0\"; while true; do echo -n \"`hostname`: $i: \"; date --rfc-3339 ns; sleep 4; i=$[$i+1]; done"]
labels:
  name: synth-logging-source
@@ -0,0 +1,29 @@
# This pod specification creates an instance of a synthetic logger. The logger
# is simply a program that writes out the hostname of the pod, a count which increments
# by one on each iteration (to help notice missing log entries) and the date using
# a long format (RFC-3339) to nano-second precision. This program logs at a frequency
# of 0.25 lines per second. The shell script program is given directly to bash as a -c argument
# and could have been written out as:
#   i="0"
#   while true
#   do
#     echo -n "`hostname`: $i: "
#     date --rfc-3339 ns
#     sleep 4
#     i=$[$i+1]
#   done

apiVersion: v1beta1
kind: Pod
id: synthetic-logger-10lps-pod
desiredState:
  manifest:
    version: v1beta1
    id: synth-logger-10lps
    containers:
      - name: synth-lgr
        image: ubuntu:14.04
        command: ["bash", "-c", "i=\"0\"; while true; do echo -n \"`hostname`: $i: \"; date --rfc-3339 ns; sleep 0.1; i=$[$i+1]; done"]
labels:
  name: synth-logging-source
@@ -36,12 +36,14 @@ MINION_SCOPES=""
|
||||
POLL_SLEEP_INTERVAL=3
|
||||
PORTAL_NET="10.0.0.0/16"
|
||||
|
||||
# Optional: Install node monitoring.
|
||||
ENABLE_NODE_MONITORING=true
|
||||
|
||||
# Optional: Install node logging
|
||||
ENABLE_NODE_LOGGING=true
|
||||
ENABLE_NODE_LOGGING=false
|
||||
LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp
|
||||
|
||||
# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
|
||||
ENABLE_CLUSTER_LOGGING=false
|
||||
ELASTICSEARCH_LOGGING_REPLICAS=1
|
||||
|
||||
IAM_PROFILE="kubernetes"
|
||||
LOG="/dev/null"
|
||||
|
||||
|
@@ -497,3 +497,11 @@ function kube-down {
  $AWS_CMD delete-route --route-table-id $route_table_id --destination-cidr-block 0.0.0.0/0 > $LOG
  $AWS_CMD delete-vpc --vpc-id $vpc_id > $LOG
}

function setup-logging {
  echo "TODO: setup logging"
}

function teardown-logging {
  echo "TODO: teardown logging"
}
@@ -36,3 +36,11 @@ MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
MINION_SCOPES=""

PORTAL_NET="10.250.0.0/16"

# Optional: Install node logging
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1
@@ -565,3 +565,11 @@ function setup-monitoring {
function teardown-monitoring {
  echo "not implemented" >/dev/null
}

function setup-logging {
  echo "TODO: setup logging"
}

function teardown-logging {
  echo "TODO: teardown logging"
}
@@ -54,6 +54,10 @@ ENABLE_DOCKER_REGISTRY_CACHE=true
ENABLE_NODE_LOGGING=true
LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=true
ELASTICSEARCH_LOGGING_REPLICAS=1

# Don't require https for registries in our local RFC1918 network
EXTRA_DOCKER_OPTS="--insecure-registry 10.0.0.0/8"
@@ -348,14 +348,14 @@ function kube-up {
  local htpasswd
  htpasswd=$(cat "${KUBE_TEMP}/htpasswd")

  if ! gcloud compute networks describe "${NETWORK}" &>/dev/null; then
  if ! gcloud compute networks --project "${PROJECT}" describe "${NETWORK}" &>/dev/null; then
    echo "Creating new network: ${NETWORK}"
    # The network needs to be created synchronously or we have a race. The
    # firewalls can be added concurrent with instance creation.
    gcloud compute networks create "${NETWORK}" --range "10.240.0.0/16"
    gcloud compute networks create --project "${PROJECT}" "${NETWORK}" --range "10.240.0.0/16"
  fi

  if ! gcloud compute firewall-rules describe "${NETWORK}-default-internal" &>/dev/null; then
  if ! gcloud compute firewall-rules --project "${PROJECT}" describe "${NETWORK}-default-internal" &>/dev/null; then
    gcloud compute firewall-rules create "${NETWORK}-default-internal" \
      --project "${PROJECT}" \
      --network "${NETWORK}" \
@@ -363,7 +363,7 @@
      --allow "tcp:1-65535" "udp:1-65535" "icmp" &
  fi

  if ! gcloud compute firewall-rules describe "${NETWORK}-default-ssh" &>/dev/null; then
  if ! gcloud compute firewall-rules describe --project "${PROJECT}" "${NETWORK}-default-ssh" &>/dev/null; then
    gcloud compute firewall-rules create "${NETWORK}-default-ssh" \
      --project "${PROJECT}" \
      --network "${NETWORK}" \
@@ -718,7 +718,7 @@ function test-teardown {
function ssh-to-node {
  local node="$1"
  local cmd="$2"
  gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --zone="${ZONE}" "${node}" --command "${cmd}"
  gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "${cmd}"
}

# Restart the kube-proxy on a node ($1)
@@ -732,7 +732,7 @@ function setup-monitoring {
  echo "Setting up cluster monitoring using Heapster."

  detect-project
  if ! gcloud compute firewall-rules describe monitoring-heapster &>/dev/null; then
  if ! gcloud compute firewall-rules --project "${PROJECT}" describe monitoring-heapster &>/dev/null; then
    if ! gcloud compute firewall-rules create monitoring-heapster \
      --project "${PROJECT}" \
      --target-tags="${MINION_TAG}" \
@@ -774,7 +774,7 @@ function teardown-monitoring {
  "${kubectl}" delete pods heapster &> /dev/null || true
  "${kubectl}" delete pods influx-grafana &> /dev/null || true
  "${kubectl}" delete services influx-master &> /dev/null || true
  if gcloud compute firewall-rules describe monitoring-heapster &> /dev/null; then
  if gcloud compute firewall-rules describe --project "${PROJECT}" monitoring-heapster &> /dev/null; then
    gcloud compute firewall-rules delete \
      --project "${PROJECT}" \
      --quiet \
@@ -783,6 +783,48 @@ function teardown-monitoring {
  fi
}

function setup-logging {
  # If logging with Fluentd to Elasticsearch is enabled then create pods
  # and services for Elasticsearch (for ingesting logs) and Kibana (for
  # viewing logs).
  if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]] && \
     [[ "${LOGGING_DESTINATION-}" == "elasticsearch" ]] && \
     [[ "${ENABLE_CLUSTER_LOGGING-}" == "true" ]]; then
    local -r kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
    if sed -e "s/{ELASTICSEARCH_LOGGING_REPLICAS}/${ELASTICSEARCH_LOGGING_REPLICAS}/g" \
         "${KUBE_ROOT}"/cluster/addons/fluentd-elasticsearch/es-controller.yaml.in | \
         "${kubectl}" create -f - &> /dev/null && \
       "${kubectl}" create -f "${KUBE_ROOT}"/cluster/addons/fluentd-elasticsearch/es-service.yaml &> /dev/null && \
       "${kubectl}" create -f "${KUBE_ROOT}"/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml &> /dev/null && \
       "${kubectl}" create -f "${KUBE_ROOT}"/cluster/addons/fluentd-elasticsearch/kibana-service.yaml &> /dev/null; then
      gcloud compute firewall-rules create fluentd-elasticsearch-logging --project "${PROJECT}" \
        --allow tcp:5601 tcp:9200 tcp:9300 --target-tags "${INSTANCE_PREFIX}"-minion || true
      local -r region="${ZONE::-2}"
      local -r es_ip=$(gcloud compute forwarding-rules --project "${PROJECT}" describe --region "${region}" elasticsearch-logging | grep IPAddress | awk '{print $2}')
      local -r kibana_ip=$(gcloud compute forwarding-rules --project "${PROJECT}" describe --region "${region}" kibana-logging | grep IPAddress | awk '{print $2}')
      echo
      echo -e "${color_green}Cluster logs are ingested into Elasticsearch running at ${color_yellow}http://${es_ip}:9200"
      echo -e "${color_green}Kibana logging dashboard will be available at ${color_yellow}http://${kibana_ip}:5601${color_norm}"
      echo
    else
      echo -e "${color_red}Failed to launch Elasticsearch and Kibana pods and services for logging.${color_norm}"
    fi
  fi
}

function teardown-logging {
  if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]] && \
     [[ "${LOGGING_DESTINATION-}" == "elasticsearch" ]] && \
     [[ "${ENABLE_CLUSTER_LOGGING-}" == "true" ]]; then
    local -r kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
    "${kubectl}" delete replicationController elasticsearch-logging-controller &> /dev/null || true
    "${kubectl}" delete service elasticsearch-logging &> /dev/null || true
    "${kubectl}" delete replicationController kibana-logging-controller &> /dev/null || true
    "${kubectl}" delete service kibana-logging &> /dev/null || true
    gcloud compute firewall-rules delete -q fluentd-elasticsearch-logging --project "${PROJECT}" || true
  fi
}

# Perform preparations required to run e2e tests
function prepare-e2e() {
  detect-project
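Editor's note: for this new `setup-logging` path to run, the three flags it checks have to be set before bring-up. A minimal sketch of enabling cluster logging, under the assumption that the provider's config file is edited rather than overridden from the environment (the GCE config-default.sh in this commit already ships these values):

```bash
# In cluster/<provider>/config-default.sh:
ENABLE_NODE_LOGGING=true
LOGGING_DESTINATION=elasticsearch   # options: elasticsearch, gcp
ENABLE_CLUSTER_LOGGING=true
ELASTICSEARCH_LOGGING_REPLICAS=1

# Then bring the cluster up; kube-up.sh calls setup-logging after DNS setup.
cluster/kube-up.sh
```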
@@ -21,3 +21,11 @@ NETWORK=${KUBE_GKE_NETWORK:-default}
# For ease of maintenance, extract any pieces that do not vary between default
# and test in a common config.
source $(dirname "${BASH_SOURCE}")/config-common.sh

# Optional: Install node logging
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=gcp # options: elasticsearch, gcp

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1
@@ -261,3 +261,11 @@ function kube-down() {
  "${GCLOUD}" preview container clusters delete --project="${PROJECT}" \
    --zone="${ZONE}" "${CLUSTER_NAME}"
}

function setup-logging {
  echo "TODO: setup logging"
}

function teardown-logging {
  echo "TODO: teardown logging"
}
@@ -28,6 +28,8 @@ echo "Bringing down cluster using provider: $KUBERNETES_PROVIDER"

verify-prereqs
teardown-monitoring
teardown-logging

kube-down

echo "Done"
@@ -43,7 +43,7 @@ echo "... calling setup-monitoring" >&2
setup-monitoring

if [[ "${ENABLE_CLUSTER_DNS}" == "true" ]]; then
  echo ".. setting up cluster DNS"
  echo "... setting up cluster DNS"
  sed -e "s/{DNS_DOMAIN}/$DNS_DOMAIN/g" \
      -e "s/{DNS_REPLICAS}/$DNS_REPLICAS/g" \
      "${KUBE_ROOT}/cluster/addons/dns/skydns-rc.yaml.in" \
@@ -54,4 +54,7 @@ if [[ "${ENABLE_CLUSTER_DNS}" == "true" ]]; then
    | "${KUBE_ROOT}/cluster/kubectl.sh" create -f -
fi

echo "... calling setup-logging" >&2
setup-logging

echo "Done" >&2
@@ -42,9 +42,13 @@ PORTAL_NET="10.0.0.0/16"
ENABLE_NODE_MONITORING=true

# Optional: Enable node logging.
ENABLE_NODE_LOGGING=true
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
DNS_SERVER_IP="10.0.0.10"
@@ -350,6 +350,14 @@ function teardown-monitoring {
  echo "TODO"
}

function setup-logging {
  echo "TODO: setup logging"
}

function teardown-logging {
  echo "TODO: teardown logging"
}

# Perform preparations required to run e2e tests
function prepare-e2e() {
  echo "Rackspace doesn't need special preparations for e2e tests"
@@ -2,7 +2,7 @@ version: v1beta2
id: fluentd-to-elasticsearch
containers:
  - name: fluentd-es
    image: kubernetes/fluentd-elasticsearch
    image: kubernetes/fluentd-elasticsearch:1.0
    volumeMounts:
      - name: containers
        mountPath: /var/lib/docker/containers
@@ -50,9 +50,13 @@ MASTER_PASSWD=vagrant
ENABLE_NODE_MONITORING=true

# Optional: Enable node logging.
ENABLE_NODE_LOGGING=true
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1

# Extra options to set on the Docker command line. This is useful for setting
# --insecure-registry for local registries.
DOCKER_OPTS=""
@@ -273,3 +273,11 @@ function teardown-monitoring {
function prepare-e2e() {
  echo "Vagrant doesn't need special preparations for e2e tests"
}

function setup-logging {
  echo "TODO: setup logging"
}

function teardown-logging {
  echo "TODO: teardown logging"
}
@@ -37,9 +37,13 @@ PORTAL_NET="10.244.240.0/20"
ENABLE_NODE_MONITORING=true

# Optional: Enable node logging.
ENABLE_NODE_LOGGING=true
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
DNS_SERVER_IP="10.244.240.240"
@@ -485,3 +485,11 @@ function setup-monitoring {
function teardown-monitoring {
  echo "TODO"
}

function setup-logging {
  echo "TODO: setup logging"
}

function teardown-logging {
  echo "TODO: teardown logging"
}