diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh
index ab317650cac..2b98bc6fd8d 100755
--- a/cluster/gce/config-default.sh
+++ b/cluster/gce/config-default.sh
@@ -35,3 +35,5 @@ MINION_SCOPES="compute-rw"
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
 POLL_SLEEP_INTERVAL=3
 PORTAL_NET="10.0.0.0/16"
+# When set to true, Heapster will be set up as part of cluster bring-up.
+MONITORING=true
diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh
index 496f9f52cdd..8f3a7f4d340 100755
--- a/cluster/gce/config-test.sh
+++ b/cluster/gce/config-test.sh
@@ -35,3 +35,4 @@ MINION_SCOPES=""
 # Increase the sleep interval value if concerned about API rate limits. 3, in seconds, is the default.
 POLL_SLEEP_INTERVAL=3
 PORTAL_NET="10.0.0.0/16"
+MONITORING=false
diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh
index 295eb2c7a85..805fd108fc2 100755
--- a/cluster/gce/util.sh
+++ b/cluster/gce/util.sh
@@ -569,3 +569,47 @@ function ssh-to-node {
 function restart-kube-proxy {
   ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
 }
+
+# Set up monitoring using Heapster and InfluxDB.
+function setup-monitoring {
+  if [[ "${MONITORING-}" == "true" ]]; then
+    teardown-monitoring
+    if ! gcutil getfirewall monitoring-heapster &> /dev/null; then
+      gcutil addfirewall monitoring-heapster \
+        --project "${PROJECT}" \
+        --norespect_terminal_width \
+        --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
+        --target_tags="${MINION_TAG}" \
+        --allowed "tcp:80,tcp:8083,tcp:8086,tcp:9200"
+      if [ $? -ne 0 ]; then
+        echo "Failed to set up firewall for monitoring" && false
+      fi
+    fi
+
+    kubectl.sh create -f "${KUBE_ROOT}/examples/monitoring/influx-grafana-pod.json" > /dev/null &&
+      kubectl.sh create -f "${KUBE_ROOT}/examples/monitoring/influx-grafana-service.json" > /dev/null &&
+      kubectl.sh create -f "${KUBE_ROOT}/examples/monitoring/heapster-pod.json" > /dev/null
+    if [ $? -ne 0 ]; then
+      echo "Failed to set up monitoring"
+      teardown-monitoring
+    else
+      dashboardIP="http://admin:admin@$(kubectl.sh get -o json pod influx-grafana | grep hostIP | awk '{print $2}' | sed 's/[,"]//g')"
+      echo "The Grafana dashboard will be available at ${dashboardIP} once the monitoring pods are running."
+    fi
+  fi
+}
+
+# Tear down the monitoring pods, service, and firewall rule created by setup-monitoring.
+function teardown-monitoring {
+  if [[ "${MONITORING-}" == "true" ]]; then
+    kubectl.sh delete pods heapster &> /dev/null || true
+    kubectl.sh delete pods influx-grafana &> /dev/null || true
+    kubectl.sh delete services influx-master &> /dev/null || true
+    gcutil deletefirewall \
+      --project "${PROJECT}" \
+      --norespect_terminal_width \
+      --sleep_between_polls "${POLL_SLEEP_INTERVAL}" \
+      --force \
+      monitoring-heapster &> /dev/null || true
+  fi
+}
diff --git a/cluster/kube-up.sh b/cluster/kube-up.sh
index 09466f478d3..c2434d4bf14 100755
--- a/cluster/kube-up.sh
+++ b/cluster/kube-up.sh
@@ -34,5 +34,6 @@ verify-prereqs
 kube-up
 
 "${KUBE_ROOT}/cluster/validate-cluster.sh"
 
+setup-monitoring
 echo "Done"
diff --git a/cluster/rackspace/util.sh b/cluster/rackspace/util.sh
index 86f898f2738..4e93aa1b63e 100644
--- a/cluster/rackspace/util.sh
+++ b/cluster/rackspace/util.sh
@@ -322,3 +322,7 @@ kube-up() {
   echo " subject to \"Man in the middle\" type attacks."
   echo
 }
+
+function setup-monitoring {
+  echo "TODO"
+}
diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh
index 0000d0e90e7..c3c7774f0b9 100644
--- a/cluster/vagrant/util.sh
+++ b/cluster/vagrant/util.sh
@@ -172,3 +172,7 @@ function ssh-to-node {
 function restart-kube-proxy {
   ssh-to-node "$1" "sudo systemctl restart kube-proxy"
 }
+
+function setup-monitoring {
+  echo "TODO"
+}
diff --git a/cluster/vsphere/util.sh b/cluster/vsphere/util.sh
index 7a85af1d4aa..c149bff3e2e 100755
--- a/cluster/vsphere/util.sh
+++ b/cluster/vsphere/util.sh
@@ -471,3 +471,7 @@ function test-setup {
 function test-teardown {
   echo "TODO"
 }
+
+function setup-monitoring {
+  echo "TODO"
+}
diff --git a/examples/monitoring/README.md b/examples/monitoring/README.md
new file mode 100644
index 00000000000..e40cb638c26
--- /dev/null
+++ b/examples/monitoring/README.md
@@ -0,0 +1,3 @@
+# Heapster
+
+Heapster enables monitoring of Kubernetes clusters using [cAdvisor](https://github.com/google/cadvisor). Detailed information about Heapster can be found [here](https://github.com/GoogleCloudPlatform/heapster).
diff --git a/examples/monitoring/heapster-pod.json b/examples/monitoring/heapster-pod.json
new file mode 100644
index 00000000000..7ecbc505b26
--- /dev/null
+++ b/examples/monitoring/heapster-pod.json
@@ -0,0 +1,18 @@
+{
+  "id": "heapster",
+  "kind": "Pod",
+  "apiVersion": "v1beta1",
+  "desiredState": {
+    "manifest": {
+      "version": "v1beta1",
+      "id": "heapster",
+      "containers": [{
+        "name": "heapster",
+        "image": "kubernetes/heapster"
+      }]
+    }
+  },
+  "labels": {
+    "name": "heapster"
+  }
+}
diff --git a/examples/monitoring/influx-grafana-pod.json b/examples/monitoring/influx-grafana-pod.json
new file mode 100644
index 00000000000..001ecdda1bc
--- /dev/null
+++ b/examples/monitoring/influx-grafana-pod.json
@@ -0,0 +1,34 @@
+{
+  "id": "influx-grafana",
+  "kind": "Pod",
+  "apiVersion": "v1beta1",
+  "desiredState": {
+    "manifest": {
+      "version": "v1beta1",
+      "id": "influx-grafana",
+      "containers": [{
+        "name": "influxdb",
+        "image": "kubernetes/heapster_influxdb",
+        "ports": [
+          {"containerPort": 8083, "hostPort": 8083},
+          {"containerPort": 8086, "hostPort": 8086},
+          {"containerPort": 8090, "hostPort": 8090},
+          {"containerPort": 8099, "hostPort": 8099}]
+      }, {
+        "name": "grafana",
+        "image": "kubernetes/heapster_grafana",
+        "ports": [{"containerPort": 80, "hostPort": 80}],
+        "env": [{"name": "HTTP_USER", "value": "admin"},
+                {"name": "HTTP_PASS", "value": "admin"}]
+      }, {
+        "name": "elasticsearch",
+        "image": "dockerfile/elasticsearch",
+        "ports": [{"containerPort": 9200, "hostPort": 9200},
+                  {"containerPort": 9300}]
+      }]
+    }
+  },
+  "labels": {
+    "name": "influxdb"
+  }
+}
diff --git a/examples/monitoring/influx-grafana-service.json b/examples/monitoring/influx-grafana-service.json
new file mode 100644
index 00000000000..d83724a2c9f
--- /dev/null
+++ b/examples/monitoring/influx-grafana-service.json
@@ -0,0 +1,10 @@
+{
+  "id": "influx-master",
+  "kind": "Service",
+  "apiVersion": "v1beta1",
+  "port": 8085,
+  "containerPort": 8086,
+  "provider": "kubernetes-default",
+  "component": "influxdb",
+  "selector": { "name": "influxdb" }
+}
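
For anyone trying the patch out, here is a rough verification sketch for the GCE provider. It assumes you are at the repo root, that the default `MONITORING=true` from `cluster/gce/config-default.sh` is in effect, and that `cluster/kubectl.sh` is the same wrapper `setup-monitoring` itself uses; `<node-ip>` is a placeholder for whatever hostIP the setup step prints for the `influx-grafana` pod.

```sh
# Bring up the cluster; kube-up.sh now calls setup-monitoring after
# validate-cluster.sh, so no extra step is needed when MONITORING=true.
cluster/kube-up.sh

# Confirm the monitoring pods exist and reach a running state.
cluster/kubectl.sh get pod heapster
cluster/kubectl.sh get pod influx-grafana

# Grafana is published on hostPort 80 of the node running influx-grafana and is
# protected by the HTTP_USER/HTTP_PASS values from influx-grafana-pod.json
# (admin/admin). Substitute the hostIP reported by setup-monitoring for <node-ip>.
curl "http://admin:admin@<node-ip>/"
```

The teardown path can be exercised similarly: calling `teardown-monitoring` from a shell that has sourced `cluster/gce/util.sh` and the GCE config should delete the two pods, the `influx-master` service, and the `monitoring-heapster` firewall rule.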