Launch Elasticsearch and Kibana automatically
parent 21b661ecf3
commit 295bd3768d
cluster/addons/fluentd-elasticsearch/es-controller.yaml.in (new file, 32 lines)
@@ -0,0 +1,32 @@
apiVersion: v1beta1
kind: ReplicationController
id: elasticsearch-logging-controller
desiredState:
  replicas: {ELASTICSEARCH_LOGGING_REPLICAS}
  replicaSelector:
    name: elasticsearch-logging
  podTemplate:
    desiredState:
      manifest:
        version: v1beta1
        id: es-log-ingestion
        containers:
          - name: elasticsearch-logging
            image: dockerfile/elasticsearch
            ports:
              - name: es-port
                containerPort: 9200
              - name: es-transport-port
                containerPort: 9300
            volumeMounts:
              - name: es-persistent-storage
                mountPath: /data
        volumes:
          - name: es-persistent-storage
            source:
              emptyDir: {}
    labels:
      name: elasticsearch-logging
labels:
  name: elasticsearch-logging
@@ -1,8 +1,8 @@
apiVersion: v1beta1
kind: Service
-id: elasticsearch
+id: elasticsearch-logging
containerPort: es-port
port: 9200
selector:
-  app: elasticsearch
+  name: elasticsearch-logging
createExternalLoadBalancer: true
@@ -1,6 +1,6 @@
.PHONY: build push

-TAG = latest
+TAG = 1.0

build:
	sudo docker build -t kubernetes/fluentd-elasticsearch:$(TAG) .
@@ -47,7 +47,7 @@
  type elasticsearch
  log_level info
  include_tag_key true
-  host elasticsearch.default
+  host elasticsearch-logging.default
  port 9200
  logstash_format true
  flush_interval 5s
@@ -69,7 +69,7 @@
  type elasticsearch
  log_level info
  include_tag_key true
-  host elasticsearch.default
+  host elasticsearch-logging.default
  port 9200
  logstash_format true
  flush_interval 5s
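The new `elasticsearch-logging.default` host only works if cluster DNS resolves `<service>.<namespace>` names (the skydns pods appear in the `make get` output later in this commit). A quick sanity check, sketched under the assumption that it is run from a shell inside a container on the cluster:

```sh
# Check that the service name Fluentd is configured to use actually resolves,
# then confirm Elasticsearch answers on the expected port.
nslookup elasticsearch-logging.default
curl -s http://elasticsearch-logging.default:9200
```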
cluster/addons/fluentd-elasticsearch/kibana-controller.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
apiVersion: v1beta1
kind: ReplicationController
id: kibana-logging-controller
desiredState:
  replicas: 1
  replicaSelector:
    name: kibana-logging
  podTemplate:
    desiredState:
      manifest:
        version: v1beta1
        id: kibana-viewer
        containers:
          - name: kibana-logging
            image: kubernetes/kibana:1.0
            ports:
              - name: kibana-port
                containerPort: 80
    labels:
      name: kibana-logging
labels:
  name: kibana-logging
cluster/addons/fluentd-elasticsearch/kibana-image/Makefile (new executable file, 9 lines)
@@ -0,0 +1,9 @@
.PHONY: build push

TAG = 1.0

build:
	docker build -t kubernetes/kibana:$(TAG) .

push:
	docker push kubernetes/kibana:$(TAG)
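Both image Makefiles take the image tag from the `TAG` variable, so a one-off build under a different tag is just a command-line override. A small sketch (the `1.1` tag is purely an example, not something this commit publishes):

```sh
cd cluster/addons/fluentd-elasticsearch/kibana-image
make build TAG=1.1   # builds kubernetes/kibana:1.1 instead of the default 1.0
make push TAG=1.1    # pushes the same tag
```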
@@ -26,16 +26,16 @@
# PROXY_PORT is set to 9200 because Elasticsearch is running on the
# same name as Kibana. If KIBANA_IP is the external IP address of
# the Kubernetes Kibana service then all requests to:
-#    KIBANA_SERVICE:$ES_PORT/elasticsearch/XXX
+#    KIBANA_LOGGING_SERVICE:$ES_PORT/elasticsearch/XXX
# are proxied to:
#    http://127.0.0.1:9200/XXX
# 2. Elasticsearch and Kibana are run in separate pods and Elasticsearch
#    has an IP and port exposed via a Kubernetes service. In this case
#    the Elasticsearch service *must* be called 'elasticsearch' and then
#    all requests sent to:
-#    KIBANA_SERVICE:$ES_PORT/elasticsearch/XXX
+#    KIBANA_LOGGING_SERVICE:$ES_PORT/elasticsearch/XXX
#    are proxied to:
-#    http://$ELASTICSEARCH_SERVICE_HOST:$ELASTICSEARCH_SERVICE_PORT:9200/XXX
+#    http://$ELASTICSEARCH_LOGGING_SERVICE_HOST:$ELASTICSEARCH_LOGGING_SERVICE_PORT:9200/XXX
# The proxy configuration occurs in a location block of the nginx configuration
# file /etc/nginx/sites-available/default.
@@ -57,9 +57,9 @@ echo ES_PORT=$ES_PORT
# code in the configuration for nginx. If a Kubernetes Elasticsearch
# service called 'elasticsearch' is defined, use that. Otherwise, use
# a local instance of Elasticsearch on port 9200.
-PROXY_HOST=${ELASTICSEARCH_SERVICE_HOST:-127.0.0.1}
+PROXY_HOST=${ELASTICSEARCH_LOGGING_SERVICE_HOST:-127.0.0.1}
echo PROXY_HOST=${PROXY_HOST}
-PROXY_PORT=${ELASTICSEARCH_SERVICE_PORT:-9200}
+PROXY_PORT=${ELASTICSEARCH_SERVICE_LOGGING_PORT:-9200}
echo PROXY_PORT=${PROXY_PORT}
# Test the connection to Elasticsearch
echo "Running curl http://${PROXY_HOST}:${PROXY_PORT}"
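The fallback logic above can be exercised on its own. A minimal sketch, assuming it runs where Kubernetes has injected the service variables for a service named `elasticsearch-logging` (the port variable name used below follows that naming convention and is an assumption; outside a cluster both variables are unset and the probe falls back to a local instance):

```sh
#!/bin/bash
# Resolve the Elasticsearch endpoint the same way run.sh does: prefer the
# Kubernetes-injected service variables, otherwise fall back to localhost.
PROXY_HOST=${ELASTICSEARCH_LOGGING_SERVICE_HOST:-127.0.0.1}
PROXY_PORT=${ELASTICSEARCH_LOGGING_SERVICE_PORT:-9200}

# A healthy Elasticsearch answers this probe with its banner JSON.
curl -s "http://${PROXY_HOST}:${PROXY_PORT}" \
  || echo "Elasticsearch not reachable at ${PROXY_HOST}:${PROXY_PORT}"
```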
@@ -1,9 +1,9 @@
apiVersion: v1beta1
kind: Service
-id: kibana
+id: kibana-logging
containerPort: kibana-port
port: 5601
selector:
-  app: kibana-viewer
+  name: kibana-logging
createExternalLoadBalancer: true
cluster/addons/fluentd-elasticsearch/logging-demo/Makefile (new file, 34 lines)
@@ -0,0 +1,34 @@
# Makefile for launching synthetic logging sources (any platform)
# and for reporting the forwarding rules for the
# Elasticsearch and Kibana pods for the GCE platform.


.PHONY: up down logger-up logger-down logger10-up logger10-down get net

KUBECTL=../../../kubectl.sh

up: logger-up logger10-up

down: logger-down logger10-down


logger-up:
	-${KUBECTL} create -f synthetic_0_25lps.yaml

logger-down:
	-${KUBECTL} delete pods synthetic-logger-0.25lps-pod

logger10-up:
	-${KUBECTL} create -f synthetic_10lps.yaml

logger10-down:
	-${KUBECTL} delete pods synthetic-logger-10lps-pod

get:
	${KUBECTL} get pods
	${KUBECTL} get replicationControllers
	${KUBECTL} get services

net:
	gcloud compute forwarding-rules describe elasticsearch-logging
	gcloud compute forwarding-rules describe kibana-logging
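A typical session with this Makefile looks like the following sketch, run from `cluster/addons/fluentd-elasticsearch/logging-demo/`; `make net` is only meaningful on the GCE provider:

```sh
make up     # create both synthetic logger pods via kubectl.sh
make get    # list pods, replication controllers and services
make net    # GCE only: show the elasticsearch-logging and kibana-logging forwarding rules
make down   # delete both synthetic logger pods again
```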
cluster/addons/fluentd-elasticsearch/logging-demo/README.md (new file, 111 lines)
@@ -0,0 +1,111 @@
# Elasticsearch/Kibana Logging Demonstration
This directory contains two pod specifications which can be used as synthetic
logging sources. The pod specification in [synthetic_0_25lps.yaml](synthetic_0_25lps.yaml)
describes a pod that just emits a log message once every 4 seconds:
```
# This pod specification creates an instance of a synthetic logger. The logger
# is simply a program that writes out the hostname of the pod, a count which increments
# by one on each iteration (to help notice missing log entries) and the date using
# a long format (RFC-3339) to nano-second precision. This program logs at a frequency
# of 0.25 lines per second. The shell script program is given directly to bash as a -c argument
# and could have been written out as:
#   i="0"
#   while true
#   do
#     echo -n "`hostname`: $i: "
#     date --rfc-3339 ns
#     sleep 4
#     i=$[$i+1]
#   done

apiVersion: v1beta1
kind: Pod
id: synthetic-logger-0.25lps-pod
desiredState:
  manifest:
    version: v1beta1
    id: synth-logger-0.25lps
    containers:
      - name: synth-lgr
        image: ubuntu:14.04
        command: ["bash", "-c", "i=\"0\"; while true; do echo -n \"`hostname`: $i: \"; date --rfc-3339 ns; sleep 4; i=$[$i+1]; done"]
labels:
  name: synth-logging-source
```

The other YAML file [synthetic_10lps.yaml](synthetic_10lps.yaml) specifies a similar synthetic logger that emits 10 log messages every second. To run both synthetic loggers:
```
$ make up
../../../kubectl.sh create -f synthetic_0_25lps.yaml
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl create -f synthetic_0_25lps.yaml
synthetic-logger-0.25lps-pod
../../../kubectl.sh create -f synthetic_10lps.yaml
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl create -f synthetic_10lps.yaml
synthetic-logger-10lps-pod
```

Visiting the Kibana dashboard should make it clear that logs are being collected from the two synthetic loggers:

You can report the running pods, replication controllers and services with another Makefile rule:
```
$ make get
../../../kubectl.sh get pods
Running: ../../../../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl get pods
POD CONTAINER(S) IMAGE(S) HOST LABELS STATUS
7e1c7ce6-9764-11e4-898c-42010af03582 kibana-logging kubernetes/kibana kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169 name=kibana-logging Running
synthetic-logger-0.25lps-pod synth-lgr ubuntu:14.04 kubernetes-minion-2.c.kubernetes-elk.internal/146.148.41.87 name=synth-logging-source Running
synthetic-logger-10lps-pod synth-lgr ubuntu:14.04 kubernetes-minion-1.c.kubernetes-elk.internal/146.148.42.44 name=synth-logging-source Running
influx-grafana influxdb kubernetes/heapster_influxdb kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169 name=influxdb Running
                grafana kubernetes/heapster_grafana
                elasticsearch dockerfile/elasticsearch
heapster heapster kubernetes/heapster kubernetes-minion-2.c.kubernetes-elk.internal/146.148.41.87 name=heapster Running
67cfcb1f-9764-11e4-898c-42010af03582 etcd quay.io/coreos/etcd:latest kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169 k8s-app=skydns Running
                kube2sky kubernetes/kube2sky:1.0
                skydns kubernetes/skydns:2014-12-23-001
6ba20338-9764-11e4-898c-42010af03582 elasticsearch-logging dockerfile/elasticsearch kubernetes-minion-3.c.kubernetes-elk.internal/130.211.129.169 name=elasticsearch-logging Running
../../../cluster/kubectl.sh get replicationControllers
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl get replicationControllers
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
skydns etcd quay.io/coreos/etcd:latest k8s-app=skydns 1
       kube2sky kubernetes/kube2sky:1.0
       skydns kubernetes/skydns:2014-12-23-001
elasticsearch-logging-controller elasticsearch-logging dockerfile/elasticsearch name=elasticsearch-logging 1
kibana-logging-controller kibana-logging kubernetes/kibana name=kibana-logging 1
../../../kubectl.sh get services
Running: ../../../cluster/../cluster/gce/../../_output/dockerized/bin/linux/amd64/kubectl get services
NAME LABELS SELECTOR IP PORT
kubernetes-ro component=apiserver,provider=kubernetes <none> 10.0.83.3 80
kubernetes component=apiserver,provider=kubernetes <none> 10.0.79.4 443
influx-master <none> name=influxdb 10.0.232.223 8085
skydns k8s-app=skydns k8s-app=skydns 10.0.0.10 53
elasticsearch-logging <none> name=elasticsearch-logging 10.0.25.103 9200
kibana-logging <none> name=kibana-logging 10.0.208.114 5601
```
On the GCE provider you can also obtain the external IP addresses of the Elasticsearch and Kibana services:
```
$ make net
IPAddress: 130.211.120.118
IPProtocol: TCP
creationTimestamp: '2015-01-08T10:30:34.210-08:00'
id: '12815488049392139704'
kind: compute#forwardingRule
name: elasticsearch-logging
portRange: 9200-9200
region: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1
selfLink: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1/forwardingRules/elasticsearch-logging
target: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1/targetPools/elasticsearch-logging
gcloud compute forwarding-rules describe kibana-logging
IPAddress: 146.148.40.158
IPProtocol: TCP
creationTimestamp: '2015-01-08T10:31:05.715-08:00'
id: '2755171906970792849'
kind: compute#forwardingRule
name: kibana-logging
portRange: 5601-5601
region: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1
selfLink: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1/forwardingRules/kibana-logging
target: https://www.googleapis.com/compute/v1/projects/kubernetes-elk/regions/us-central1/targetPools/kibana-logging
```
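Once both loggers are running, a quick way to confirm that their output is reaching Elasticsearch is to query the service directly. A minimal sketch, assuming `ES_IP` is the external address reported by `make net` above; the query is just the stock Elasticsearch search API, nothing specific to this demo:

```sh
#!/bin/bash
# External IP of the elasticsearch-logging forwarding rule, as printed by "make net".
ES_IP=130.211.120.118

# Ask Elasticsearch for a handful of recently ingested log entries.
curl -s "http://${ES_IP}:9200/_search?size=5&pretty"
```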
(new binary image file, 87 KiB, not shown)
@@ -36,12 +36,14 @@ MINION_SCOPES=""
POLL_SLEEP_INTERVAL=3
PORTAL_NET="10.0.0.0/16"

# Optional: Install node monitoring.
ENABLE_NODE_MONITORING=true

# Optional: Install node logging
-ENABLE_NODE_LOGGING=true
+ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1

IAM_PROFILE="kubernetes"
LOG="/dev/null"
@@ -497,3 +497,11 @@ function kube-down {
  $AWS_CMD delete-route --route-table-id $route_table_id --destination-cidr-block 0.0.0.0/0 > $LOG
  $AWS_CMD delete-vpc --vpc-id $vpc_id > $LOG
}

function setup-logging {
  echo "TODO: setup logging"
}

function teardown-logging {
  echo "TODO: teardown logging"
}
@@ -36,3 +36,11 @@ MINION_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
MINION_SCOPES=""

PORTAL_NET="10.250.0.0/16"

# Optional: Install node logging
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1
@@ -565,3 +565,11 @@ function setup-monitoring {

function teardown-monitoring {
  echo "not implemented" >/dev/null
}

function setup-logging {
  echo "TODO: setup logging"
}

function teardown-logging {
  echo "TODO: teardown logging"
}
@@ -54,6 +54,10 @@ ENABLE_DOCKER_REGISTRY_CACHE=true
ENABLE_NODE_LOGGING=true
LOGGING_DESTINATION=elasticsearch # options: elasticsearch, gcp

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=true
ELASTICSEARCH_LOGGING_REPLICAS=1

# Don't require https for registries in our local RFC1918 network
EXTRA_DOCKER_OPTS="--insecure-registry 10.0.0.0/8"
@@ -348,14 +348,14 @@ function kube-up {
  local htpasswd
  htpasswd=$(cat "${KUBE_TEMP}/htpasswd")

-  if ! gcloud compute networks describe "${NETWORK}" &>/dev/null; then
+  if ! gcloud compute networks --project "${PROJECT}" describe "${NETWORK}" &>/dev/null; then
    echo "Creating new network: ${NETWORK}"
    # The network needs to be created synchronously or we have a race. The
    # firewalls can be added concurrent with instance creation.
-    gcloud compute networks create "${NETWORK}" --range "10.240.0.0/16"
+    gcloud compute networks create --project "${PROJECT}" "${NETWORK}" --range "10.240.0.0/16"
  fi

-  if ! gcloud compute firewall-rules describe "${NETWORK}-default-internal" &>/dev/null; then
+  if ! gcloud compute firewall-rules --project "${PROJECT}" describe "${NETWORK}-default-internal" &>/dev/null; then
    gcloud compute firewall-rules create "${NETWORK}-default-internal" \
      --project "${PROJECT}" \
      --network "${NETWORK}" \
@@ -363,7 +363,7 @@ function kube-up {
      --allow "tcp:1-65535" "udp:1-65535" "icmp" &
  fi

-  if ! gcloud compute firewall-rules describe "${NETWORK}-default-ssh" &>/dev/null; then
+  if ! gcloud compute firewall-rules describe --project "${PROJECT}" "${NETWORK}-default-ssh" &>/dev/null; then
    gcloud compute firewall-rules create "${NETWORK}-default-ssh" \
      --project "${PROJECT}" \
      --network "${NETWORK}" \
@@ -718,7 +718,7 @@ function test-teardown {
function ssh-to-node {
  local node="$1"
  local cmd="$2"
-  gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --zone="${ZONE}" "${node}" --command "${cmd}"
+  gcloud compute ssh --ssh-flag="-o LogLevel=quiet" --project "${PROJECT}" --zone="${ZONE}" "${node}" --command "${cmd}"
}

# Restart the kube-proxy on a node ($1)
@@ -732,7 +732,7 @@ function setup-monitoring {
    echo "Setting up cluster monitoring using Heapster."

    detect-project
-    if ! gcloud compute firewall-rules describe monitoring-heapster &>/dev/null; then
+    if ! gcloud compute firewall-rules --project "{$PROJECT}" describe monitoring-heapster &>/dev/null; then
      if ! gcloud compute firewall-rules create monitoring-heapster \
        --project "${PROJECT}" \
        --target-tags="${MINION_TAG}" \
@@ -774,7 +774,7 @@ function teardown-monitoring {
    "${kubectl}" delete pods heapster &> /dev/null || true
    "${kubectl}" delete pods influx-grafana &> /dev/null || true
    "${kubectl}" delete services influx-master &> /dev/null || true
-    if gcloud compute firewall-rules describe monitoring-heapster &> /dev/null; then
+    if gcloud compute firewall-rules describe --project "${PROJECT}" monitoring-heapster &> /dev/null; then
      gcloud compute firewall-rules delete \
        --project "${PROJECT}" \
        --quiet \
@@ -783,6 +783,48 @@ function teardown-monitoring {
    fi
}

function setup-logging {
  # If logging with Fluentd to Elasticsearch is enabled then create pods
  # and services for Elasticsearch (for ingesting logs) and Kibana (for
  # viewing logs).
  if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]] && \
     [[ "${LOGGING_DESTINATION-}" == "elasticsearch" ]] && \
     [[ "${ENABLE_CLUSTER_LOGGING-}" == "true" ]]; then
    local -r kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
    if sed -e "s/{ELASTICSEARCH_LOGGING_REPLICAS}/${ELASTICSEARCH_LOGGING_REPLICAS}/g" \
         "${KUBE_ROOT}"/cluster/addons/fluentd-elasticsearch/es-controller.yaml.in | \
         "${kubectl}" create -f - &> /dev/null && \
       "${kubectl}" create -f "${KUBE_ROOT}"/cluster/addons/fluentd-elasticsearch/es-service.yaml &> /dev/null && \
       "${kubectl}" create -f "${KUBE_ROOT}"/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml &> /dev/null && \
       "${kubectl}" create -f "${KUBE_ROOT}"/cluster/addons/fluentd-elasticsearch/kibana-service.yaml &> /dev/null; then
      gcloud compute firewall-rules create fluentd-elasticsearch-logging --project "${PROJECT}" \
        --allow tcp:5601 tcp:9200 tcp:9300 --target-tags "${INSTANCE_PREFIX}"-minion || true
      local -r region="${ZONE::-2}"
      local -r es_ip=$(gcloud compute forwarding-rules --project "${PROJECT}" describe --region "${region}" elasticsearch-logging | grep IPAddress | awk '{print $2}')
      local -r kibana_ip=$(gcloud compute forwarding-rules --project "${PROJECT}" describe --region "${region}" kibana-logging | grep IPAddress | awk '{print $2}')
      echo
      echo -e "${color_green}Cluster logs are ingested into Elasticsearch running at ${color_yellow}http://${es_ip}:9200"
      echo -e "${color_green}Kibana logging dashboard will be available at ${color_yellow}http://${kibana_ip}:5601${color_norm}"
      echo
    else
      echo -e "${color_red}Failed to launch Elasticsearch and Kibana pods and services for logging.${color_norm}"
    fi
  fi
}

function teardown-logging {
  if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]] && \
     [[ "${LOGGING_DESTINATION-}" == "elasticsearch" ]] && \
     [[ "${ENABLE_CLUSTER_LOGGING-}" == "true" ]]; then
    local -r kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
    "${kubectl}" delete replicationController elasticsearch-logging-controller &> /dev/null || true
    "${kubectl}" delete service elasticsearch-logging &> /dev/null || true
    "${kubectl}" delete replicationController kibana-logging-controller &> /dev/null || true
    "${kubectl}" delete service kibana-logging &> /dev/null || true
    gcloud compute firewall-rules delete -q fluentd-elasticsearch-logging --project "${PROJECT}" || true
  fi
}

# Perform preparations required to run e2e tests
function prepare-e2e() {
  detect-project
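For reference, the happy path of `setup-logging` can also be reproduced by hand against a running cluster. A minimal sketch, assuming the working directory is the repository root and `cluster/kubectl.sh` already points at the cluster (the GCE firewall and forwarding-rule steps are omitted):

```sh
#!/bin/bash
ADDON_DIR="cluster/addons/fluentd-elasticsearch"
ELASTICSEARCH_LOGGING_REPLICAS=1

# Substitute the replica count into the controller template and create it.
sed -e "s/{ELASTICSEARCH_LOGGING_REPLICAS}/${ELASTICSEARCH_LOGGING_REPLICAS}/g" \
  "${ADDON_DIR}/es-controller.yaml.in" | cluster/kubectl.sh create -f -

# Create the remaining service and controller specifications as-is.
for f in es-service.yaml kibana-controller.yaml kibana-service.yaml; do
  cluster/kubectl.sh create -f "${ADDON_DIR}/${f}"
done
```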
@@ -21,3 +21,11 @@ NETWORK=${KUBE_GKE_NETWORK:-default}
# For ease of maintenance, extract any pieces that do not vary between default
# and test in a common config.
source $(dirname "${BASH_SOURCE}")/config-common.sh

# Optional: Install node logging
ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=gcp # options: elasticsearch, gcp

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1
@@ -261,3 +261,11 @@ function kube-down() {
  "${GCLOUD}" preview container clusters delete --project="${PROJECT}" \
    --zone="${ZONE}" "${CLUSTER_NAME}"
}

function setup-logging {
  echo "TODO: setup logging"
}

function teardown-logging {
  echo "TODO: teardown logging"
}
@@ -28,6 +28,8 @@ echo "Bringing down cluster using provider: $KUBERNETES_PROVIDER"

verify-prereqs
teardown-monitoring
teardown-logging

kube-down

echo "Done"
@@ -43,7 +43,7 @@ echo "... calling setup-monitoring" >&2
setup-monitoring

if [[ "${ENABLE_CLUSTER_DNS}" == "true" ]]; then
-  echo ".. setting up cluster DNS"
+  echo "... setting up cluster DNS"
  sed -e "s/{DNS_DOMAIN}/$DNS_DOMAIN/g" \
    -e "s/{DNS_REPLICAS}/$DNS_REPLICAS/g" \
    "${KUBE_ROOT}/cluster/addons/dns/skydns-rc.yaml.in" \
@@ -54,4 +54,7 @@ if [[ "${ENABLE_CLUSTER_DNS}" == "true" ]]; then
    | "${KUBE_ROOT}/cluster/kubectl.sh" create -f -
fi

echo "... calling setup-logging" >&2
setup-logging

echo "Done" >&2
@@ -42,9 +42,13 @@ PORTAL_NET="10.0.0.0/16"
ENABLE_NODE_MONITORING=true

# Optional: Enable node logging.
-ENABLE_NODE_LOGGING=true
+ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
DNS_SERVER_IP="10.0.0.10"
@@ -350,6 +350,14 @@ function teardown-monitoring {
  echo "TODO"
}

function setup-logging {
  echo "TODO: setup logging"
}

function teardown-logging {
  echo "TODO: teardown logging"
}

# Perform preparations required to run e2e tests
function prepare-e2e() {
  echo "Rackspace doesn't need special preparations for e2e tests"
@@ -2,7 +2,7 @@ version: v1beta2
id: fluentd-to-elasticsearch
containers:
  - name: fluentd-es
-    image: kubernetes/fluentd-elasticsearch
+    image: kubernetes/fluentd-elasticsearch:1.0
    volumeMounts:
      - name: containers
        mountPath: /var/lib/docker/containers
@@ -50,9 +50,13 @@ MASTER_PASSWD=vagrant
ENABLE_NODE_MONITORING=true

# Optional: Enable node logging.
-ENABLE_NODE_LOGGING=true
+ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1

# Extra options to set on the Docker command line. This is useful for setting
# --insecure-registry for local registries.
DOCKER_OPTS=""
@@ -273,3 +273,11 @@ function teardown-monitoring {
function prepare-e2e() {
  echo "Vagrant doesn't need special preparations for e2e tests"
}

function setup-logging {
  echo "TODO: setup logging"
}

function teardown-logging {
  echo "TODO: teardown logging"
}
@@ -37,9 +37,13 @@ PORTAL_NET="10.244.240.0/20"
ENABLE_NODE_MONITORING=true

# Optional: Enable node logging.
-ENABLE_NODE_LOGGING=true
+ENABLE_NODE_LOGGING=false
LOGGING_DESTINATION=elasticsearch

# Optional: When set to true, Elasticsearch and Kibana will be setup as part of the cluster bring up.
ENABLE_CLUSTER_LOGGING=false
ELASTICSEARCH_LOGGING_REPLICAS=1

# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS=true
DNS_SERVER_IP="10.244.240.240"
@@ -485,3 +485,11 @@ function setup-monitoring {
function teardown-monitoring {
  echo "TODO"
}

function setup-logging {
  echo "TODO: setup logging"
}

function teardown-logging {
  echo "TODO: teardown logging"
}
@@ -1,62 +0,0 @@
# Makefile for Fluentd to Elasticsearch and Kibana configured
# in separate pods.


.PHONY: up down es-up kibana-up es-down kibana-down update \
	logger-up logger-down get net firewall rmfirewall

KUBECTL=kubectl.sh

up: logger-up es-up kibana-up

down: logger-down es-down kibana-down


es-up:
	-${KUBECTL} create -f es-pod.yml
	-${KUBECTL} create -f es-service.yml

kibana-up:
	-${KUBECTL} create -f kibana-pod.yml
	-${KUBECTL} create -f kibana-service.yml

es-down:
	-${KUBECTL} delete pods elasticsearch-pod
	-${KUBECTL} delete service elasticsearch

kibana-down:
	-${KUBECTL} delete pods kibana-pod
	-${KUBECTL} delete service kibana


update:
	-${KUBECTL} delete pods kibana-pod
	-${KUBECTL} create -f kibana-pod.yml

logger-up:
	-${KUBECTL} create -f synthetic_0_25lps.yml

logger-down:
	-${KUBECTL} delete pods synthetic-logger-0.25lps-pod

logger10-up:
	-${KUBECTL} create -f synthetic_10lps.yml

logger10-down:
	-${KUBECTL} delete pods synthetic-logger-10lps-pod

get:
	${KUBECTL} get pods
	${KUBECTL} get services

net:
	gcutil getforwardingrule elasticsearch
	gcutil getforwardingrule kibana

firewall:
	gcutil addfirewall --allowed=tcp:5601,tcp:9200,tcp:9300 --target_tags=kubernetes-minion kubernetes-elk-example


rmfirewall:
	gcutil deletefirewall -f kubernetes-elk-example
@@ -1,333 +0,0 @@
# Logging Pods in a Kubernetes Cluster using Fluentd, Elasticsearch and Kibana

When a GCE Kubernetes cluster is created a [pod](../../../docs/pods.md) will be placed on each node which uses the [Fluentd](http://www.fluentd.org/) log collector to collect all the Docker container logs and send them to an instance of [Elasticsearch](http://www.elasticsearch.org/) (or anything else that will listen to [Logstash](http://logstash.net/docs/1.4.2/tutorials/getting-started-with-logstash) format JSON on port 9200).

We can verify that a Fluentd collector is running by ssh-ing into one of the nodes and looking at the running containers.

Let's take a look in node 1.

```console
$ gcloud compute --project "kubernetes-elk" ssh --zone "us-central1-b" "kubernetes-minion-1"
...
$ sudo -s
$ docker ps
satnam@kubernetes-minion-1:~$ sudo -s
root@kubernetes-minion-1:/home/satnam# docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
a309846b005c ubuntu:14.04 "\"bash -c 'i=\"0\"; 3 days ago Up 3 days k8s_synth-lgr.ede24f12_synthetic-logger-10lps-pod.default.etcd_35c7b808-6c45-11e4-a194-42010af05d02_2abc4cd9
d8d60784806b kubernetes/pause:latest "/pause" 3 days ago Up 3 days k8s_net.dbcb7509_synthetic-logger-10lps-pod.default.etcd_35c7b808-6c45-11e4-a194-42010af05d02_be1026dd
2f47a6219e82 kubernetes/heapster:0.2 "/run.sh /bin/bash" 3 days ago Up 3 days k8s_heapster.24e32151_heapster.default.etcd_511e5a9d-6c39-11e4-a194-42010af05d02_b5ed97c1
7dfd030bab93 kubernetes/fluentd-elasticsearch:latest "/run.sh" 3 days ago Up 3 days k8s_fluentd-es.f0eebcdc_fluentdesmanife2u464h05heqcpotoddodpnehjaqsde.default.file_fluentdesmanife2u464h05heqcpotoddodpnehjaqsde_90bbba27
9a869d00c17b ubuntu:14.04 "\"bash -c 'i=\"0\"; 3 days ago Up 3 days k8s_synth-lgr.f0d3e2b_synthetic-logger-0.25lps-pod.default.etcd_7c7d3b8d-6c39-11e4-a194-42010af05d02_d3c519d5
6abc80cadf3f kubernetes/pause:latest "/pause" 3 days ago Up 3 days k8s_net.dbcb7509_synthetic-logger-0.25lps-pod.default.etcd_7c7d3b8d-6c39-11e4-a194-42010af05d02_a8e3b763
9b2787803043 kubernetes/pause:latest "/pause" 3 days ago Up 3 days k8s_net.dbcb7509_heapster.default.etcd_511e5a9d-6c39-11e4-a194-42010af05d02_f3fac3cc
fda05d821371 kubernetes/pause:latest "/pause" 3 days ago Up 3 days k8s_net.dbcb7509_fluentdesmanife2u464h05heqcpotoddodpnehjaqsde.default.file_fluentdesmanife2u464h05heqcpotoddodpnehjaqsde_936da1a7
04b1225d0ed3 google/cadvisor:0.5.0 "/usr/bin/cadvisor" 3 days ago Up 3 days k8s_cadvisor.b0dae998_cadvisormanifes12uqn2ohido76855gdecd9roadm7l0.default.file_cadvisormanifes12uqn2ohido76855gdecd9roadm7l0_70af8640
ecf63dd4aa43 kubernetes/pause:latest "/pause" 3 days ago Up 3 days 0.0.0.0:4194->8080/tcp k8s_net.a0f18f6e_cadvisormanifes12uqn2ohido76855gdecd9roadm7l0.default.file_cadvisormanifes12uqn2ohido76855gdecd9roadm7l0_9e43beba
```

There are several containers running on this node. The containers `google/cadvisor` and `kubernetes/heapster` provide monitoring and profiling support. The container `kubernetes/fluentd-elasticsearch` is constantly looking at the log files of Docker containers in the directories `/var/lib/docker/containers/*` and sending (tailing)
this information in Logstash format to port 9200 on the local node.

We can look at the pod specification used to launch the Fluentd Elasticsearch container which is stored as a manifest file on the node.

```console
$ cd /etc/kubernetes/manifests/
$ ls
cadvisor.manifest fluentd-es.manifest
$ cat fluentd-es.manifest
version: v1beta2
id: fluentd-to-elasticsearch
containers:
  - name: fluentd-es
    image: kubernetes/fluentd-elasticsearch
    volumeMounts:
      - name: containers
        mountPath: /var/lib/docker/containers
        readOnly: true
volumes:
  - name: containers
    source:
      hostDir:
        path: /var/lib/docker/containers
```

This is just a regular pod specification which you could have run using `kubectl.sh`. However, what you could not have done yourself is run an instance of this pod specification on each node which is what is accomplished with the manifest file at cluster creation time. Notice that CAdvisor also has a manifest pod specification.

We can connect to a running Fluentd Elasticsearch container to inspect the Fluentd configuration.

```console
$ docker exec -ti 3c3816c0cfc6 bash
$ cat /etc/td-agent/td-agent.conf
...
<source>
  type tail
  format json
  time_key time
  path /var/lib/docker/containers/*/*-json.log
  time_format %Y-%m-%dT%H:%M:%S
  tag docker.container.*
</source>

<match docker.container.**>
  type elasticsearch
  log_level info
  include_tag_key true
  host kubernetes-minion-2
  port 9200
  logstash_format true
  flush_interval 5s
</match>
```

This configures Fluentd to gather all the Docker container log files and send them in Logstash format to port 9200.

Once you have turned up a Kubernetes cluster you can use the `Makefile` in this GitHub directory to try out some logging experiments.

We need to create an instance of Elasticsearch which will run on the cluster (this is not done automatically as part of the manifest pod creation step). We only want to run one instance of Elasticsearch on the cluster but we want it to appear as if it is running on every node. We can accomplish this by writing a suitable pod specification and service specification since this "appear to run everywhere on the cluster" abstraction is one of the things that Kubernetes provides.

First, here is the pod specification for Elasticsearch [es-pod.yml](es-pod.yml):

```
apiVersion: v1beta1
kind: Pod
id: elasticsearch-pod
desiredState:
  manifest:
    version: v1beta1
    id: es
    containers:
      - name: elasticsearch
        image: dockerfile/elasticsearch
        ports:
          - name: es-port
            containerPort: 9200
          - name: es-transport-port
            containerPort: 9300
        volumeMounts:
          - name: es-persistent-storage
            mountPath: /data
    volumes:
      - name: es-persistent-storage
        source:
          emptyDir: {}
labels:
  app: elasticsearch
```

This runs the official Docker image for Elasticsearch and wires up ports 9200 (for submitting and querying information) and 9300 (a port used to compose multiple instances of Elasticsearch -- more about this elsewhere). Kubernetes may have to restart an Elasticsearch container if something goes wrong and it would be a shame to lose all the information (logs) that has been gathered when the original container dies and takes down all its information with it. To avoid this problem we wire up some persistent storage for Elasticsearch so the gathered data persists between one invocation of the Elasticsearch container and another.

To allow us to query Elasticsearch from outside the cluster (e.g. from our laptop) and to allow other Kubernetes pods access to the Elasticsearch web interface we define a Kubernetes Elasticsearch service [es-service.yml](es-service.yml):

```
apiVersion: v1beta1
kind: Service
id: elasticsearch
containerPort: es-port
port: 9200
selector:
  app: elasticsearch
createExternalLoadBalancer: true
```

The service specification will group together all containers that have the label `app=elasticsearch` (we will only use one) and for these containers it will map their internal port (9200) to port 9200 for a service which will act as a proxy for all the identified containers. Furthermore, an external load balancer is created to allow external access to the pods that are encapsulated by this service. The container ports identified by the service description are proxied by a single IP address scoped within the cluster.

```console
$ kubectl.sh get services
NAME LABELS SELECTOR IP PORT
elasticsearch app=elasticsearch 10.0.0.1 9200
```

Inside the cluster, the Elasticsearch service is reached at http://10.0.0.1:9200 which is its service address.

We can see which node the Elasticsearch instance is actually running on, e.g. in the example below it is running on node 3.

```console
$ kubectl.sh get pods
NAME IMAGE(S) HOST LABELS STATUS
elasticsearch-pod dockerfile/elasticsearch kubernetes-minion-3.c.kubernetes-elk.internal/146.148.59.62 app=elasticsearch Running
```

You can see that Elasticsearch can be reached on port 9200 on node 1:

```console
$ curl localhost:9200
{
  "status" : 200,
  "name" : "Elsie-Dee",
  "version" : {
    "number" : "1.3.2",
    "build_hash" : "dee175dbe2f254f3f26992f5d7591939aaefd12f",
    "build_timestamp" : "2014-08-13T14:29:30Z",
    "build_snapshot" : false,
    "lucene_version" : "4.9"
  },
  "tagline" : "You Know, for Search"
}
```

If we ran the same curl command on node 2, node 3, or node 4 we would have got a response from the same instance of Elasticsearch. The actual instance is running on node 3, but it appears to run on every node.

We can also contact the Elasticsearch instance from outside the cluster by finding its external IP address and port number.

```console
$ gcutil getforwardingrule elasticsearch
+---------------+---------------------------------------+
| name          | elasticsearch                         |
| description   |                                       |
| creation-time | 2014-10-27T22:07:39.585-07:00         |
| region        | us-central1                           |
| ip            | 130.211.122.249                       |
| protocol      | TCP                                   |
| port-range    | 9200-9200                             |
| target        | us-central1/targetPools/elasticsearch |
+---------------+---------------------------------------+
$ curl http://130.211.122.249:9200
{
  "status" : 200,
  "name" : "Elsie-Dee",
  "version" : {
    "number" : "1.3.2",
    "build_hash" : "dee175dbe2f254f3f26992f5d7591939aaefd12f",
    "build_timestamp" : "2014-08-13T14:29:30Z",
    "build_snapshot" : false,
    "lucene_version" : "4.9"
  },
  "tagline" : "You Know, for Search"
}
```

A nice aspect of this architecture is that all the Docker container log files from all the nodes get automatically interleaved into the same Elasticsearch datastore. Each node thinks it is talking directly to Elasticsearch but in reality only one node has the instance and requests to Elasticsearch on other nodes are proxied to the actual instance. All of this is transparent to the Fluentd configuration.

To view the log information gathered inside Elasticsearch we can use the [Kibana](http://www.elasticsearch.org/overview/kibana/) viewer. Again, we will create one instance of this and run it on the cluster somewhere (Kubernetes will decide where) and this will be done with a Docker container. Here is the pod specification [kibana-pod.yml](kibana-pod.yml):

```
apiVersion: v1beta1
kind: Pod
id: kibana-pod
desiredState:
  manifest:
    version: v1beta1
    id: kibana-server
    containers:
      - name: kibana-image
        image: kubernetes/kibana:latest
        ports:
          - name: kibana-port
            containerPort: 80
labels:
  app: kibana-viewer
```

This runs a specially made Kibana Docker image which is tailored for use with Kubernetes. One reason for this is that this image needs to know how to contact the Elasticsearch server which it should do by contacting the internal cluster IP and port number for the service. This information is made available with environment variables. For a service called `elasticsearch` the environment variables `ELASTICSEARCH_SERVICE_HOST` and `ELASTICSEARCH_SERVICE_PORT` define the internal cluster IP address and port of the Elasticsearch service. This capability allows us to compose Kubernetes services. This pod wires up port 80 of the container which serves the Kibana dashboard web page.

The Kibana service is defined as follows [kibana-service.yml](kibana-service.yml):

```
apiVersion: v1beta1
kind: Service
id: kibana
containerPort: kibana-port
port: 5601
selector:
  app: kibana-viewer
createExternalLoadBalancer: true
```

This maps the internal container port 80 to an external port 5601 for the Kibana viewer service.

Finally, we need some pod that will produce some output which can be logged. We use a synthetic logger which periodically writes out the name of the pod that it is running in, a count and the date at a rate of 0.25 lines per second [synthetic_0_25lps.yml](synthetic_0_25lps.yml):

```
apiVersion: v1beta1
kind: Pod
id: synthetic-logger-0.25lps-pod
desiredState:
  manifest:
    version: v1beta1
    id: synth-logger-0.25lps
    containers:
      - name: synth-lgr
        image: ubuntu:14.04
        command: ["bash", "-c", "i=\"0\"; while true; do echo -n \"`hostname`: $i: \"; date --rfc-3339 ns; sleep 4; i=$[$i+1]; done"]
labels:
  name: synth-logging-source
```

Once Elasticsearch, Kibana and the synthetic logger are running we should see something like:

```console
$ kubectl.sh get pods
NAME IMAGE(S) HOST LABELS STATUS
synthetic-logger-0.25lps-pod ubuntu:14.04 kubernetes-minion-2.c.kubernetes-elk.internal/146.148.37.102 name=synth-logging-source Running
elasticsearch-pod dockerfile/elasticsearch kubernetes-minion-3.c.kubernetes-elk.internal/146.148.59.62 app=elasticsearch Running
kibana-pod kubernetes/kibana:latest kubernetes-minion-2.c.kubernetes-elk.internal/146.148.37.102 app=kibana-viewer Running
$ kubectl.sh get services
NAME LABELS SELECTOR IP PORT
elasticsearch app=elasticsearch 10.0.0.1 9200
kibana app=kibana-viewer 10.0.0.2 5601
$ gcutil getforwardingrule elasticsearch
+---------------+---------------------------------------+
| name          | elasticsearch                         |
| description   |                                       |
| creation-time | 2014-10-27T22:07:39.585-07:00         |
| region        | us-central1                           |
| ip            | 130.211.122.249                       |
| protocol      | TCP                                   |
| port-range    | 9200-9200                             |
| target        | us-central1/targetPools/elasticsearch |
+---------------+---------------------------------------+
$ gcutil getforwardingrule kibana
+---------------+--------------------------------+
| name          | kibana                         |
| description   |                                |
| creation-time | 2014-10-27T16:26:57.432-07:00  |
| region        | us-central1                    |
| ip            | 23.236.59.213                  |
| protocol      | TCP                            |
| port-range    | 5601-5601                      |
| target        | us-central1/targetPools/kibana |
+---------------+--------------------------------+
```

This tells us that inside the cluster the Elasticsearch service is known as 10.0.0.1:9200 and outside the cluster it is known as 130.211.122.249:9200. Inside the cluster the Kibana service is known as 10.0.0.2:5601 and outside the cluster it is known as 23.236.59.213:5601. Let's visit this web site and check that we can see some logs.

Note that in this example Kibana is running on node 2. We can ssh into this machine and look at its log files to make sure it got the correct information about where to find Elasticsearch.

```console
$ gcloud compute --project "kubernetes-elk" ssh --zone "us-central1-a"
$ sudo -s
$ docker ps
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
bae7cf093eba kubernetes/kibana:latest "/usr/local/bin/run. 4 hours ago Up 4 hours k8s_kibana-image.7ece93f5_kibana-pod.default.etcd_1414472864_0d7d25bd
47cb11bf0f8f kubernetes/pause:go "/pause" 4 hours ago Up 4 hours k8s_net.d5468756_kibana-pod.default.etcd_1414472864_8f3effbe
e98d629ca5f0 google/cadvisor:0.4.1 "/usr/bin/cadvisor / 8 hours ago Up 8 hours k8s_cadvisor.417cd83c_cadvisormanifes12uqn2ohido76855gdecd9roadm7l0.default.file_cadvisormanifes12uqn2ohido76855gdecd9roadm7l0_daa00a70
3c3816c0cfc6 kubernetes/fluentd-elasticsearch:latest "/run.sh" 8 hours ago Up 8 hours k8s_fluentd-es.1b9eab35_fluentdesmanife2u464h05heqcpotoddodpnehjaqsde.default.file_fluentdesmanife2u464h05heqcpotoddodpnehjaqsde_5a344730
bad9cbdb464c ubuntu:14.04 "\"bash -c 'i=\"0\"; 8 hours ago Up 8 hours k8s_synth-lgr.c1f588c9_synthetic-logger-0.25lps-pod.default.etcd_1414458076_08a6b51a
4eff7c5e2c15 kubernetes/pause:go "/pause" 8 hours ago Up 8 hours k8s_net.fadb6b63_synthetic-logger-0.25lps-pod.default.etcd_1414458076_92f74236
25e6677155b0 kubernetes/pause:go "/pause" 8 hours ago Up 8 hours k8s_net.fadb6b63_fluentdesmanife2u464h05heqcpotoddodpnehjaqsde.default.file_fluentdesmanife2u464h05heqcpotoddodpnehjaqsde_bf6ed0e9
44a7db3c8e82 kubernetes/pause:go "/pause" 8 hours ago Up 8 hours 0.0.0.0:4194->8080/tcp k8s_net.f72d85c8_cadvisormanifes12uqn2ohido76855gdecd9roadm7l0.default.file_cadvisormanifes12uqn2ohido76855gdecd9roadm7l0_fc3e7475
$ docker logs bae7cf093eba
ELASTICSEARCH_PORT=tcp://10.0.0.1:9200
ELASTICSEARCH_PORT_9200_TCP=tcp://10.0.0.1:9200
ELASTICSEARCH_PORT_9200_TCP_ADDR=10.0.0.1
ELASTICSEARCH_PORT_9200_TCP_PORT=9200
ELASTICSEARCH_PORT_9200_TCP_PROTO=tcp
ELASTICSEARCH_SERVICE_HOST=10.0.0.1
ELASTICSEARCH_SERVICE_PORT=9200
```

As expected we see that `ELASTICSEARCH_SERVICE_HOST` has the value 10.0.0.1 and that `ELASTICSEARCH_SERVICE_PORT` has the value 9200.

## Summary and Other Things
* Kubernetes provides intrinsic support for various logging options including the collection of Docker log files using Fluentd.
* The storage of log files (using Elasticsearch) and the viewing of log files (using Kibana) can be performed by writing regular pod and service specifications.
* This example could be adapted to use multiple Elasticsearch instances. In this case the service address 10.0.0.1:9200 within the cluster (or the corresponding external address) will load balance requests automatically amongst several instances.
* Likewise, the number of Kibana instances may also be scaled up if required.
* The number of Elasticsearch instances and Kibana instances can be scaled up independently.
(binary image file deleted, 102 KiB, not shown)
@@ -1,25 +0,0 @@
apiVersion: v1beta1
kind: Pod
id: elasticsearch-pod
desiredState:
  manifest:
    version: v1beta1
    id: es
    containers:
      - name: elasticsearch
        image: dockerfile/elasticsearch
        ports:
          - name: es-port
            containerPort: 9200
          - name: es-transport-port
            containerPort: 9300
        volumeMounts:
          - name: es-persistent-storage
            mountPath: /data
    volumes:
      - name: es-persistent-storage
        source:
          emptyDir: {}
labels:
  app: elasticsearch
@@ -1,15 +0,0 @@
apiVersion: v1beta1
kind: Pod
id: kibana-pod
desiredState:
  manifest:
    version: v1beta1
    id: kibana-server
    containers:
      - name: kibana-image
        image: kubernetes/kibana:latest
        ports:
          - name: kibana-port
            containerPort: 80
labels:
  app: kibana-viewer
(three binary image files deleted: 461 KiB, 209 KiB, 43 KiB; two file diffs suppressed because one or more lines are too long)
@@ -1,5 +0,0 @@
build:
	docker build -t kubernetes/kibana .

push:
	docker push kubernetes/kibana
docs/kibana.png (new binary image file, 81 KiB, not shown)
@@ -4,9 +4,45 @@
Kubernetes components, such as kubelet and apiserver, use the [glog](https://godoc.org/github.com/golang/glog) logging library. Developer conventions for logging severity are described in [devel/logging.md](devel/logging.md).

## Logging in Containers
-There are no Kubernetes-specific requirements for logging from within containers. A
-[search](https://www.google.com/?q=docker+container+logging) will turn up any number of articles about logging and
+There are no Kubernetes-specific requirements for logging from within containers. A [search](https://www.google.com/?q=docker+container+logging) will turn up any number of articles about logging and
Docker containers. However, we do provide an example of how to collect, index, and view pod logs [using Fluentd, Elasticsearch, and Kibana](./getting-started-guides/logging.md)


## Logging to Elasticsearch on the GCE platform
Currently the collection of container logs using the [Fluentd](http://www.fluentd.org/) log collector is
enabled by default for clusters created for the GCE platform. Each node uses Fluentd to collect
the container logs which are submitted in [Logstash](http://logstash.net/docs/1.4.2/tutorials/getting-started-with-logstash)
format (in JSON) to an [Elasticsearch](http://www.elasticsearch.org/) cluster which runs as a Kubernetes service.
When you create a cluster the console output reports the URL of both the Elasticsearch cluster as well as
a URL for a [Kibana](http://www.elasticsearch.org/overview/kibana/) dashboard viewer for the logs that have been ingested
into Elasticsearch.
```
Cluster logs are ingested into Elasticsearch running at http://130.211.121.21:9200
Kibana logging dashboard will be available at http://130.211.137.206:5601
```
Visiting the Kibana dashboard URL in a browser should give a display like this:

To learn how to query, filter etc. using Kibana you might like to look at this [tutorial](http://www.elasticsearch.org/guide/en/kibana/current/working-with-queries-and-filters.html).

You can check to see if any logs are being ingested into Elasticsearch by curling against its URL:
```
$ curl http://130.211.120.118:9200/_search?size=10
{"took":7,"timed_out":false,"_shards":{"total":5,"successful":5,"failed":0},"hits":{"total":3705,"max_score":1.0,"hits":[{"_index":"logstash-2015.01.08","_type":"fluentd","_id":"AUrK0hRRanI4L8Durpdh","_score":1.0,"_source":{"message":"I0108 18:30:47.694725 4927 server.go:313] GET /healthz: (9.249us) 200","tag":"kubelet","@timestamp":"2015-01-08T18:30:47+00:00"}},{"_index":"logstash-2015.01.08","_type":"fluentd","_id":"AUrK0hRRanI4L8Durpdm","_score":1.0,"_source":{"message":"E0108 18:30:52.299372 4927 metadata.go:109] while reading 'google-dockercfg' metadata: http status code: 404 while fetching url http://metadata.google.internal./computeMetadata/v1/instance/attributes/google-dockercfg","tag":"kubelet","@timestamp":"2015-01-08T18:30:52+00:00"}},{"_index":"logstash-2015.01.08","_type":"fluentd","_id":"AUrK0hRRanI4L8Durpdr","_score":1.0,"_source":{"message":"I0108 18:30:52.317636 4927 docker.go:214] Pulling image kubernetes/kube2sky without credentials","tag":"kubelet","@timestamp":"2015-01-08T18:30:52+00:00"}},{"_index":"logstash-2015.01.08","_type":"fluentd","_id":"AUrK0hRRanI4L8Durpdw","_score":1.0,"_source":{"message":"I0108 18:30:54.500174 4927 event.go:92] Event(api.ObjectReference{Kind:\"BoundPod\", Namespace:\"default\", Name:\"67cfcb1f-9764-11e4-898c-42010af03582\", UID:\"67cfcb1f-9764-11e4-898c-42010af03582\", APIVersion:\"v1beta1\", ResourceVersion:\"\", FieldPath:\"spec.containers{kube2sky}\"}): status: 'waiting', reason: 'created' Created with docker id ff24ec6eb3b10d1163a2bcb7c63ccef78e6e3e7a1185eba3fe430f6b3d871eb5","tag":"kubelet","@timestamp":"2015-01-08T18:30:54+00:00"}},{"_index":"logstash-2015.01.08","_type":"fluentd","_id":"AUrK0hRRanI4L8Durpd1","_score":1.0,"_source":{"message":"goroutine 114 [running]:","tag":"kubelet","@timestamp":"2015-01-08T18:30:56+00:00"}},{"_index":"logstash-2015.01.08","_type":"fluentd","_id":"AUrK0hRRanI4L8Durpd6","_score":1.0,"_source":{"message":"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet.(*Server).error(0xc2080e0060, 0x7fe0ba496840, 0xc208278840, 0x7fe0ba4881b0, 0xc20822daa0)","tag":"kubelet","@timestamp":"2015-01-08T18:30:56+00:00"}},{"_index":"logstash-2015.01.08","_type":"fluentd","_id":"AUrK0hRRanI4L8DurpeB","_score":1.0,"_source":{"message":"\t/go/src/github.com/GoogleCloudPlatform/kubernetes/_output/dockerized/go/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/server.go:94 +0x44","tag":"kubelet","@timestamp":"2015-01-08T18:30:56+00:00"}},{"_index":"logstash-2015.01.08","_type":"fluentd","_id":"AUrK0hRSanI4L8DurpeJ","_score":1.0,"_source":{"message":"goroutine 114 [running]:","tag":"kubelet","@timestamp":"2015-01-08T18:30:56+00:00"}},{"_index":"logstash-2015.01.08","_type":"fluentd","_id":"AUrK0hRSanI4L8DurpeO","_score":1.0,"_source":{"message":"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet.(*Server).error(0xc2080e0060, 0x7fe0ba496840, 0xc208278a80, 0x7fe0ba4881b0, 0xc20822df00)","tag":"kubelet","@timestamp":"2015-01-08T18:30:56+00:00"}},{"_index":"logstash-2015.01.08","_type":"fluentd","_id":"AUrK0hRSanI4L8DurpeT","_score":1.0,"_source":{"message":"\t/go/src/github.com/GoogleCloudPlatform/kubernetes/_output/dockerized/go/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/server.go:240 +0x45","tag":"kubelet","@timestamp":"2015-01-08T18:30:56+00:00"}}]}}
```

A [demonstration](../cluster/addons/fluentd-elasticsearch/logging-demo/README.md) of two synthetic logging sources can be used
to check that logging is working correctly.

Cluster logging can be turned on or off using the environment variable `ENABLE_NODE_LOGGING` which is defined in the
`config-default.sh` file for each provider. For the GCE provider this is set by default to `true`. Set this
to `false` to disable cluster logging.

The type of logging used is specified by the environment variable `LOGGING_DESTINATION` which for the
GCE provider has the default value `elasticsearch`. If this is set to `gcp` for the GCE provider then
logs will be sent to the Google Cloud Logging system instead.

When using Elasticsearch the number of Elasticsearch instances can be controlled by setting the
variable `ELASTICSEARCH_LOGGING_REPLICAS` which has the default value of `1`. For large clusters
or clusters that are generating log information at a high rate you may wish to use more
Elasticsearch instances.
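Because Fluentd is configured with `include_tag_key true`, every ingested document carries a `tag` field (visible in the JSON returned by the query above), which makes it easy to slice the logs by source. A small sketch, assuming the same external Elasticsearch address used in the examples above:

```sh
#!/bin/bash
ES_URL="http://130.211.120.118:9200"

# Count how many kubelet log lines have been ingested so far.
curl -s "${ES_URL}/_count?q=tag:kubelet&pretty"

# Pull the five most recent entries from today's Logstash-format daily index.
curl -s "${ES_URL}/logstash-$(date +%Y.%m.%d)/_search?size=5&sort=@timestamp:desc&pretty"
```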