Merge pull request #3601 from zmerlynn/deferred_addons_phase_1

Deferred creation of SkyDNS, monitoring and logging objects

commit 621e7037ae
@@ -551,6 +551,14 @@ function kube::release::package_salt_tarball() {
   cp -R "${KUBE_ROOT}/cluster/saltbase" "${release_stage}/"

+  # TODO(#3579): This is a temporary hack. It gathers up the yaml,
+  # yaml.in files in cluster/addons (minus any demos) and overlays
+  # them into kube-addons, where we expect them. (This pipeline is a
+  # fancy copy, stripping everything but the files we want.)
+  local objects
+  objects=$(cd "${KUBE_ROOT}/cluster/addons" && find . -name \*.yaml -or -name \*.yaml.in | grep -v demo)
+  tar c -C "${KUBE_ROOT}/cluster/addons" ${objects} | tar x -C "${release_stage}/saltbase/salt/kube-addons"
+
   local package_name="${RELEASE_DIR}/kubernetes-salt.tar.gz"
   kube::release::create_tarball "${package_name}" "${release_stage}/.."
 }
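The tar-pipe above is the interesting bit: `cp` cannot easily filter by pattern while preserving relative paths, so the hunk builds a file list with `find` and lets a pair of `tar` processes do the copy. A standalone sketch of the same idiom, assuming hypothetical `src/` and pre-existing `dst/` directories:

```shell
# Gather only the yaml/yaml.in files (skipping demos), then copy them
# into dst/ with their relative directory layout intact.
objects=$(cd src && find . -name \*.yaml -or -name \*.yaml.in | grep -v demo)
tar c -C src ${objects} | tar x -C dst
```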
@@ -1,6 +1,6 @@
 # DNS in Kubernetes
-This directory holds an example of how to run
-[SkyDNS](https://github.com/skynetservices/skydns) in a Kubernetes cluster.
+[SkyDNS](https://github.com/skynetservices/skydns) can be configured
+to automatically run in a Kubernetes cluster.

 ## What things get DNS names?
 The only objects to which we are assigning DNS names are Services. Every
@@ -18,23 +18,14 @@ Of course, giving services a name is just half of the problem - DNS names need a
 domain also. This implementation uses the variable `DNS_DOMAIN` (see below).
 You can configure your docker daemon with the flag `--dns-search`.

-## How do I run it?
-The first thing you have to do is substitute the variables into the
-configuration. You can then feed the result into `kubectl`.
+## How do I configure it?
+The following environment variables are used at cluster startup to create the SkyDNS pods and configure the kubelets. If you need to, you can reconfigure your provider as necessary (e.g. `cluster/gce/config-default.sh`):

 ```shell
-DNS_SERVER_IP=10.0.0.10
-DNS_DOMAIN=kubernetes.local
-DNS_REPLICAS=2
-
-sed -e "s/{DNS_DOMAIN}/$DNS_DOMAIN/g" \
-    -e "s/{DNS_REPLICAS}/$DNS_REPLICAS/g" \
-    ./cluster/addons/dns/skydns-rc.yaml.in \
-    | ./cluster/kubectl.sh create -f -
-
-sed -e "s/{DNS_SERVER_IP}/$DNS_SERVER_IP/g" \
-    ./cluster/addons/dns/skydns-svc.yaml.in \
-    | ./cluster/kubectl.sh create -f -
+ENABLE_CLUSTER_DNS=true
+DNS_SERVER_IP="10.0.0.10"
+DNS_DOMAIN="kubernetes.local"
+DNS_REPLICAS=1
 ```

 ## How does it work?
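To illustrate the new flow (a hedged sketch, not part of the diff): rather than piping `sed` output into `kubectl`, you now set the values in your provider config and let cluster startup render the templates through the Salt pillar. Assuming GCE defaults, the relevant excerpt of `cluster/gce/config-default.sh` could look like:

```shell
# Hypothetical excerpt of cluster/gce/config-default.sh after editing.
ENABLE_CLUSTER_DNS=true
DNS_SERVER_IP="10.0.0.10"     # typically an address inside PORTAL_NET
DNS_DOMAIN="kubernetes.local"
DNS_REPLICAS=1
```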
@@ -5,7 +5,7 @@ namespace: default
 labels:
   k8s-app: skydns
 desiredState:
-  replicas: {DNS_REPLICAS}
+  replicas: {{ pillar['dns_replicas'] }}
   replicaSelector:
     k8s-app: skydns
   podTemplate:
@@ -28,7 +28,7 @@ desiredState:
         image: kubernetes/kube2sky:1.0
         command: [
           # entrypoint = "/kube2sky",
-          "-domain={DNS_DOMAIN}",
+          "-domain={{ pillar['dns_domain'] }}",
         ]
       - name: skydns
         image: kubernetes/skydns:2014-12-23-001
@@ -36,7 +36,7 @@ desiredState:
           # entrypoint = "/skydns",
           "-machines=http://localhost:4001",
           "-addr=0.0.0.0:53",
-          "-domain={DNS_DOMAIN}.",
+          "-domain={{ pillar['dns_domain'] }}.",
         ]
         ports:
           - name: dns
@@ -4,7 +4,7 @@ id: skydns
 namespace: default
 protocol: UDP
 port: 53
-portalIP: {DNS_SERVER_IP}
+portalIP: {{ pillar['dns_server'] }}
 containerPort: 53
 labels:
   k8s-app: skydns
@@ -2,7 +2,7 @@ apiVersion: v1beta1
 kind: ReplicationController
 id: elasticsearch-logging-controller
 desiredState:
-  replicas: {ELASTICSEARCH_LOGGING_REPLICAS}
+  replicas: {{ pillar['elasticsearch_replicas'] }}
   replicaSelector:
     name: elasticsearch-logging
   podTemplate:
@@ -106,14 +106,14 @@ function ensure-temp-dir {
   fi
 }

-function setup-monitoring {
+function setup-monitoring-firewall {
   if [[ "${ENABLE_CLUSTER_MONITORING:-false}" == "true" ]]; then
     # TODO: Implement this.
     echo "Monitoring not currently supported on AWS"
   fi
 }

-function teardown-monitoring {
+function teardown-monitoring-firewall {
   if [[ "${ENABLE_CLUSTER_MONITORING:-false}" == "true" ]]; then
     # TODO: Implement this.
     echo "Monitoring not currently supported on AWS"
@@ -296,10 +296,14 @@ function kube-up {
     echo "readonly AWS_ZONE='${ZONE}'"
     echo "readonly MASTER_HTPASSWD='${htpasswd}'"
     echo "readonly PORTAL_NET='${PORTAL_NET}'"
     echo "readonly ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING:-false}'"
     echo "readonly ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'"
     echo "readonly ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'"
     echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
     echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
     echo "readonly ELASTICSEARCH_LOGGING_REPLICAS='${ELASTICSEARCH_LOGGING_REPLICAS:-}'"
+    echo "readonly ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
+    echo "readonly DNS_REPLICAS='${DNS_REPLICAS:-}'"
+    echo "readonly DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
+    echo "readonly DNS_DOMAIN='${DNS_DOMAIN:-}'"
     grep -v "^#" "${KUBE_ROOT}/cluster/aws/templates/create-dynamic-salt-files.sh"
@@ -498,10 +502,10 @@ function kube-down {
   $AWS_CMD delete-vpc --vpc-id $vpc_id > $LOG
 }

-function setup-logging {
+function setup-logging-firewall {
   echo "TODO: setup logging"
 }

-function teardown-logging {
+function teardown-logging-firewall {
   echo "TODO: teardown logging"
 }
@@ -558,18 +558,18 @@ function restart-kube-proxy {
 }

 # Setup monitoring using heapster and InfluxDB
-function setup-monitoring {
+function setup-monitoring-firewall {
   echo "not implemented" >/dev/null
 }

-function teardown-monitoring {
+function teardown-monitoring-firewall {
   echo "not implemented" >/dev/null
 }

-function setup-logging {
+function setup-logging-firewall {
   echo "TODO: setup logging"
 }

-function teardown-logging {
+function teardown-logging-firewall {
   echo "TODO: teardown logging"
 }
@@ -22,10 +22,14 @@ mkdir -p /srv/salt-overlay/pillar
 cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
 node_instance_prefix: '$(echo "$NODE_INSTANCE_PREFIX" | sed -e "s/'/''/g")'
 portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")'
 enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
 enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")'
 enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
 enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
 logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
 elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
+enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
+dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
+dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
+dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
 EOF
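The repeated `sed -e "s/'/''/g"` deserves a note: each value lands inside a single-quoted SLS scalar, where a single quote is escaped by doubling it. A quick illustration of the idiom (any POSIX shell):

```shell
# Double any embedded single quotes so the value is safe inside '...'.
$ echo "it's a test" | sed -e "s/'/''/g"
it''s a test
```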
@@ -393,10 +393,14 @@ function kube-up {
     echo "readonly SALT_TAR_URL='${SALT_TAR_URL}'"
     echo "readonly MASTER_HTPASSWD='${htpasswd}'"
     echo "readonly PORTAL_NET='${PORTAL_NET}'"
     echo "readonly ENABLE_CLUSTER_MONITORING='${ENABLE_CLUSTER_MONITORING:-false}'"
     echo "readonly ENABLE_NODE_MONITORING='${ENABLE_NODE_MONITORING:-false}'"
     echo "readonly ENABLE_CLUSTER_LOGGING='${ENABLE_CLUSTER_LOGGING:-false}'"
     echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
     echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
     echo "readonly ELASTICSEARCH_LOGGING_REPLICAS='${ELASTICSEARCH_LOGGING_REPLICAS:-}'"
+    echo "readonly ENABLE_CLUSTER_DNS='${ENABLE_CLUSTER_DNS:-false}'"
+    echo "readonly DNS_REPLICAS='${DNS_REPLICAS:-}'"
+    echo "readonly DNS_SERVER_IP='${DNS_SERVER_IP:-}'"
+    echo "readonly DNS_DOMAIN='${DNS_DOMAIN:-}'"
     grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/common.sh"
@@ -731,106 +735,70 @@ function restart-kube-proxy {
   ssh-to-node "$1" "sudo /etc/init.d/kube-proxy restart"
 }

-# Setup monitoring using heapster and InfluxDB
-function setup-monitoring {
-  if [[ "${ENABLE_CLUSTER_MONITORING}" == "true" ]]; then
-    echo "Setting up cluster monitoring using Heapster."
+# Setup monitoring firewalls using heapster and InfluxDB
+function setup-monitoring-firewall {
+  if [[ "${ENABLE_CLUSTER_MONITORING}" != "true" ]]; then
+    return
+  fi

-    detect-project
-    if ! gcloud compute firewall-rules --project "${PROJECT}" describe monitoring-heapster &> /dev/null; then
-      if ! gcloud compute firewall-rules create monitoring-heapster \
-        --project "${PROJECT}" \
-        --target-tags="${MINION_TAG}" \
-        --network="${NETWORK}" \
-        --allow tcp:80 tcp:8083 tcp:8086; then
-        echo -e "${color_red}Failed to set up firewall for monitoring ${color_norm}" && false
-      fi
-    fi
+  echo "Setting up firewalls to Heapster based cluster monitoring."

-    local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
-    local grafana_host=""
-    if "${kubectl}" create -f "${KUBE_ROOT}/cluster/addons/cluster-monitoring/" &> /dev/null; then
-      # wait for pods to be scheduled on a node.
-      echo "waiting for monitoring pods to be scheduled."
-      for i in `seq 1 10`; do
-        grafana_host=$("${kubectl}" get pods -l name=influxGrafana -o template -t {{range.items}}{{.currentState.hostIP}}:{{end}} | sed s/://g)
-        if [[ $grafana_host != *"<"* ]]; then
+  detect-project
+  gcloud compute firewall-rules create "${INSTANCE_PREFIX}-monitoring-heapster" --project "${PROJECT}" \
+    --allow tcp:80 tcp:8083 tcp:8086 --target-tags="${MINION_TAG}" --network="${NETWORK}"
+
+  local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
+  local grafana_host=""
+  echo "waiting for monitoring pods to be scheduled."
+  for i in `seq 1 10`; do
+    grafana_host=$("${kubectl}" get pods -l name=influxGrafana -o template -t {{range.items}}{{.currentState.hostIP}}:{{end}} | sed s/://g)
+    if [[ ${grafana_host} != *"<"* ]]; then
       break
     fi
-        sleep 10
-      done
-      if [[ $grafana_host != *"<"* ]]; then
-        echo
-        echo -e "${color_green}Grafana dashboard will be available at ${color_yellow}http://$grafana_host${color_green}. Wait for the monitoring dashboard to be online.${color_norm}"
-        echo
-      else
-        echo -e "${color_red}monitoring pods failed to be scheduled.${color_norm}"
-      fi
-    else
-      echo -e "${color_red}Failed to Setup Monitoring ${color_norm}"
-      teardown-monitoring
-    fi
+    sleep 10
+  done
+  if [[ ${grafana_host} != *"<"* ]]; then
+    echo
+    echo -e "${color_green}Grafana dashboard will be available at ${color_yellow}http://${grafana_host}${color_green}. Wait for the monitoring dashboard to be online.${color_norm}"
+    echo
+  else
+    echo -e "${color_red}Monitoring pods failed to be scheduled!${color_norm}"
   fi
 }

-function teardown-monitoring {
-  if [[ "${ENABLE_CLUSTER_MONITORING}" == "true" ]]; then
-    detect-project
-
-    local kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
-    local kubecfg="${KUBE_ROOT}/cluster/kubecfg.sh"
-    "${kubecfg}" resize monitoring-influxGrafanaController 0 &> /dev/null || true
-    "${kubecfg}" resize monitoring-heapsterController 0 &> /dev/null || true
-    "${kubectl}" delete -f "${KUBE_ROOT}/cluster/addons/cluster-monitoring/" &> /dev/null || true
-    if gcloud compute firewall-rules describe --project "${PROJECT}" monitoring-heapster &> /dev/null; then
-      gcloud compute firewall-rules delete \
-        --project "${PROJECT}" \
-        --quiet \
-        monitoring-heapster &> /dev/null || true
-    fi
+function teardown-monitoring-firewall {
+  if [[ "${ENABLE_CLUSTER_MONITORING}" != "true" ]]; then
+    return
   fi
+
+  detect-project
+  gcloud compute firewall-rules delete -q "${INSTANCE_PREFIX}-monitoring-heapster" --project "${PROJECT}" || true
 }

-function setup-logging {
+function setup-logging-firewall {
   # If logging with Fluentd to Elasticsearch is enabled then create pods
   # and services for Elasticsearch (for ingesting logs) and Kibana (for
   # viewing logs).
-  if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]] && \
-     [[ "${LOGGING_DESTINATION-}" == "elasticsearch" ]] && \
-     [[ "${ENABLE_CLUSTER_LOGGING-}" == "true" ]]; then
-    local -r kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
-    if sed -e "s/{ELASTICSEARCH_LOGGING_REPLICAS}/${ELASTICSEARCH_LOGGING_REPLICAS}/g" \
-      "${KUBE_ROOT}"/cluster/addons/fluentd-elasticsearch/es-controller.yaml.in | \
-      "${kubectl}" create -f - &> /dev/null && \
-      "${kubectl}" create -f "${KUBE_ROOT}"/cluster/addons/fluentd-elasticsearch/es-service.yaml &> /dev/null && \
-      "${kubectl}" create -f "${KUBE_ROOT}"/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml &> /dev/null && \
-      "${kubectl}" create -f "${KUBE_ROOT}"/cluster/addons/fluentd-elasticsearch/kibana-service.yaml &> /dev/null; then
-      gcloud compute firewall-rules create fluentd-elasticsearch-logging --project "${PROJECT}" \
-        --allow tcp:5601 tcp:9200 tcp:9300 --target-tags "${INSTANCE_PREFIX}"-minion || true
-      local -r region="${ZONE::-2}"
-      local -r es_ip=$(gcloud compute forwarding-rules --project "${PROJECT}" describe --region "${region}" elasticsearch-logging | grep IPAddress | awk '{print $2}')
-      local -r kibana_ip=$(gcloud compute forwarding-rules --project "${PROJECT}" describe --region "${region}" kibana-logging | grep IPAddress | awk '{print $2}')
-      echo
-      echo -e "${color_green}Cluster logs are ingested into Elasticsearch running at ${color_yellow}http://${es_ip}:9200"
-      echo -e "${color_green}Kibana logging dashboard will be available at ${color_yellow}http://${kibana_ip}:5601${color_norm}"
-      echo
-    else
-      echo -e "${color_red}Failed to launch Elasticsearch and Kibana pods and services for logging.${color_norm}"
-    fi
+  if [[ "${ENABLE_NODE_LOGGING-}" != "true" ]] || \
+     [[ "${LOGGING_DESTINATION-}" != "elasticsearch" ]] || \
+     [[ "${ENABLE_CLUSTER_LOGGING-}" != "true" ]]; then
+    return
   fi
+
+  detect-project
+  gcloud compute firewall-rules create "${INSTANCE_PREFIX}-fluentd-elasticsearch-logging" --project "${PROJECT}" \
+    --allow tcp:5601 tcp:9200 tcp:9300 --target-tags "${MINION_TAG}" --network="${NETWORK}"
 }

-function teardown-logging {
-  if [[ "${ENABLE_NODE_LOGGING-}" == "true" ]] && \
-     [[ "${LOGGING_DESTINATION-}" == "elasticsearch" ]] && \
-     [[ "${ENABLE_CLUSTER_LOGGING-}" == "true" ]]; then
-    local -r kubectl="${KUBE_ROOT}/cluster/kubectl.sh"
-    "${kubectl}" delete replicationController elasticsearch-logging-controller &> /dev/null || true
-    "${kubectl}" delete service elasticsearch-logging &> /dev/null || true
-    "${kubectl}" delete replicationController kibana-logging-controller &> /dev/null || true
-    "${kubectl}" delete service kibana-logging &> /dev/null || true
-    gcloud compute firewall-rules delete -q fluentd-elasticsearch-logging --project "${PROJECT}" || true
+function teardown-logging-firewall {
+  if [[ "${ENABLE_NODE_LOGGING-}" != "true" ]] || \
+     [[ "${LOGGING_DESTINATION-}" != "elasticsearch" ]] || \
+     [[ "${ENABLE_CLUSTER_LOGGING-}" != "true" ]]; then
+    return
   fi
+
+  detect-project
+  gcloud compute firewall-rules delete -q "${INSTANCE_PREFIX}-fluentd-elasticsearch-logging" --project "${PROJECT}" || true
 }

 # Perform preparations required to run e2e tests
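Since the reshaped functions now only manage firewall rules (object creation moves to the Salt-managed addon service below), a quick way to verify their effect is to describe the rule directly — a sketch, assuming `gcloud` is authenticated and `INSTANCE_PREFIX`/`PROJECT` are set as in the surrounding script:

```shell
# Confirm the rule created by setup-monitoring-firewall exists.
gcloud compute firewall-rules describe "${INSTANCE_PREFIX}-monitoring-heapster" \
  --project "${PROJECT}"
```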
@@ -115,8 +115,8 @@ function kube-up() {
 }

 # Called during cluster/kube-up.sh
-function setup-monitoring() {
-  echo "... in setup-monitoring()" >&2
+function setup-monitoring-firewall() {
+  echo "... in setup-monitoring-firewall()" >&2
   # TODO(mbforbes): This isn't currently supported in GKE.
 }
@@ -239,8 +239,8 @@ function test-teardown() {
 }

 # Tears down monitoring.
-function teardown-monitoring() {
-  echo "... in teardown-monitoring()" >&2
+function teardown-monitoring-firewall() {
+  echo "... in teardown-monitoring-firewall()" >&2
   # TODO(mbforbes): This isn't currently supported in GKE.
 }
@@ -257,10 +257,10 @@ function kube-down() {
     --zone="${ZONE}" "${CLUSTER_NAME}"
 }

-function setup-logging {
+function setup-logging-firewall {
   echo "TODO: setup logging"
 }

-function teardown-logging {
+function teardown-logging-firewall {
   echo "TODO: teardown logging"
 }
@@ -27,8 +27,8 @@ source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
 echo "Bringing down cluster using provider: $KUBERNETES_PROVIDER"

 verify-prereqs
-teardown-monitoring
-teardown-logging
+teardown-monitoring-firewall
+teardown-logging-firewall

 kube-down
@@ -39,22 +39,10 @@ kube-up
 echo "... calling validate-cluster" >&2
 "${KUBE_ROOT}/cluster/validate-cluster.sh"

-echo "... calling setup-monitoring" >&2
-setup-monitoring
+echo "... calling setup-monitoring-firewall" >&2
+setup-monitoring-firewall

-if [[ "${ENABLE_CLUSTER_DNS}" == "true" ]]; then
-  echo "... setting up cluster DNS"
-  sed -e "s/{DNS_DOMAIN}/$DNS_DOMAIN/g" \
-      -e "s/{DNS_REPLICAS}/$DNS_REPLICAS/g" \
-      "${KUBE_ROOT}/cluster/addons/dns/skydns-rc.yaml.in" \
-      | "${KUBE_ROOT}/cluster/kubectl.sh" create -f -
-
-  sed -e "s/{DNS_SERVER_IP}/$DNS_SERVER_IP/g" \
-      "${KUBE_ROOT}/cluster/addons/dns/skydns-svc.yaml.in" \
-      | "${KUBE_ROOT}/cluster/kubectl.sh" create -f -
-fi
-
-echo "... calling setup-logging" >&2
-setup-logging
+echo "... calling setup-logging-firewall" >&2
+setup-logging-firewall

 echo "Done" >&2
@@ -342,19 +342,19 @@ kube-up() {
   echo
 }

-function setup-monitoring {
+function setup-monitoring-firewall {
   echo "TODO"
 }

-function teardown-monitoring {
+function teardown-monitoring-firewall {
   echo "TODO"
 }

-function setup-logging {
+function setup-logging-firewall {
   echo "TODO: setup logging"
 }

-function teardown-logging {
+function teardown-logging-firewall {
   echo "TODO: teardown logging"
 }
cluster/saltbase/salt/kube-addons/init.sls (new file, 79 lines)
@@ -0,0 +1,79 @@
+{% if pillar.get('enable_cluster_monitoring', '').lower() == 'true' %}
+/etc/kubernetes/addons/cluster-monitoring:
+  file.recurse:
+    - source: salt://kube-addons/cluster-monitoring
+    - include_pat: E@^.+\.yaml$
+    - user: root
+    - group: root
+    - dir_mode: 755
+    - file_mode: 644
+{% endif %}
+
+{% if pillar.get('enable_cluster_dns', '').lower() == 'true' %}
+/etc/kubernetes/addons/dns/skydns-svc.yaml:
+  file.managed:
+    - source: salt://kube-addons/dns/skydns-svc.yaml.in
+    - template: jinja
+    - group: root
+    - dir_mode: 755
+    - makedirs: True
+
+/etc/kubernetes/addons/dns/skydns-rc.yaml:
+  file.managed:
+    - source: salt://kube-addons/dns/skydns-rc.yaml.in
+    - template: jinja
+    - group: root
+    - dir_mode: 755
+    - makedirs: True
+{% endif %}
+
+{% if pillar.get('enable_node_logging', '').lower() == 'true'
+   and pillar.get('logging_destination').lower() == 'elasticsearch'
+   and pillar.get('enable_cluster_logging', '').lower() == 'true' %}
+/etc/kubernetes/addons/fluentd-elasticsearch:
+  file.recurse:
+    - source: salt://kube-addons/fluentd-elasticsearch
+    - include_pat: E@^.+\.yaml$
+    - user: root
+    - group: root
+    - dir_mode: 755
+    - file_mode: 644
+
+/etc/kubernetes/addons/fluentd-elasticsearch/es-controller.yaml:
+  file.managed:
+    - source: salt://kube-addons/fluentd-elasticsearch/es-controller.yaml.in
+    - template: jinja
+    - group: root
+    - dir_mode: 755
+    - makedirs: True
+{% endif %}
+
+{% if grains['os_family'] == 'RedHat' %}
+
+/usr/lib/systemd/system/kube-addons.service:
+  file.managed:
+    - source: salt://kube-addons/kube-addons.service
+    - user: root
+    - group: root
+
+/usr/lib/systemd/scripts/kube-addons:
+  file.managed:
+    - source: salt://kube-addons/initd
+    - user: root
+    - group: root
+    - mode: 755
+
+{% else %}
+
+/etc/init.d/kube-addons:
+  file.managed:
+    - source: salt://kube-addons/initd
+    - user: root
+    - group: root
+    - mode: 755
+
+{% endif %}
+
+kube-addons:
+  service.running:
+    - enable: True
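Each `pillar.get(...)` guard above keys off the values written into cluster-params.sls earlier in this commit, so only the enabled addons ever land in /etc/kubernetes/addons. One way to check what a node actually sees (a sketch, assuming the salt minion tooling is installed on the node):

```shell
# Print the pillar values that gate the addon states.
salt-call pillar.get enable_cluster_dns
salt-call pillar.get dns_domain
```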
cluster/saltbase/salt/kube-addons/initd (new file, 123 lines)
@@ -0,0 +1,123 @@
+#!/bin/bash
+#
+### BEGIN INIT INFO
+# Provides:          kube-addons
+# Required-Start:    $local_fs $network $syslog kube-apiserver
+# Required-Stop:
+# Default-Start:     2 3 4 5
+# Default-Stop:      0 1 6
+# Short-Description: Kubernetes Addon Object Manager
+# Description:
+#   Enforces installation of Kubernetes Addon Objects
+### END INIT INFO
+
+
+# PATH should only include /usr/* if it runs after the mountnfs.sh script
+PATH=/sbin:/usr/sbin:/bin:/usr/bin
+DESC="Kubernetes Addon Object Manager"
+NAME=kube-addons
+DAEMON_LOG_FILE=/var/log/$NAME.log
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+KUBECTL=/usr/local/bin/kubectl
+
+# Define LSB log_* functions.
+# Depend on lsb-base (>= 3.2-14) to ensure that this file is present
+# and status_of_proc is working.
+. /lib/lsb/init-functions
+
+function addon_manager_async() {
+  # The business logic for whether a given object should be created
+  # was already enforced by salt, and /etc/kubernetes/addons is the
+  # managed result of that. Start everything below that directory.
+  echo "== Kubernetes addon manager started at $(date -Is) =="
+  for obj in $(find /etc/kubernetes/addons -name \*.yaml); do
+    ${KUBECTL} create -f ${obj} &
+    echo "++ addon ${obj} started in pid $! ++"
+  done
+  noerrors="true"
+  for pid in $(jobs -p); do
+    wait ${pid} || noerrors="false"
+    echo "++ pid ${pid} complete ++"
+  done
+  if [ ${noerrors} == "true" ]; then
+    echo "== Kubernetes addon manager completed successfully at $(date -Is) =="
+  else
+    echo "== Kubernetes addon manager completed with errors at $(date -Is) =="
+  fi
+
+  # We stay around so that status checks by salt make it look like
+  # the service is good. (We could do this in other ways, but this
+  # is simple.)
+  sleep infinity
+}
+
+#
+# Function that starts the daemon/service
+#
+do_start()
+{
+  addon_manager_async </dev/null >>${DAEMON_LOG_FILE} 2>&1 &
+  echo $! > ${PIDFILE}
+  disown
+}
+
+#
+# Function that stops the daemon/service
+#
+do_stop()
+{
+  kill $(cat ${PIDFILE})
+  rm ${PIDFILE}
+  return
+}
+
+case "$1" in
+  start)
+    log_daemon_msg "Starting $DESC" "$NAME"
+    do_start
+    case "$?" in
+      0|1) log_end_msg 0 || exit 0 ;;
+      2) log_end_msg 1 || exit 1 ;;
+    esac
+    ;;
+  stop)
+    log_daemon_msg "Stopping $DESC" "$NAME"
+    do_stop
+    case "$?" in
+      0|1) log_end_msg 0 ;;
+      2) exit 1 ;;
+    esac
+    ;;
+  status)
+    if [ ! -e ${PIDFILE} ]; then
+      exit 1
+    fi
+    pid=$(cat ${PIDFILE})
+    # Checks that ${pid} is running AND is us.
+    ps --no-headers ${pid} | grep ${SCRIPTNAME} > /dev/null || exit $?
+    ;;
+
+  restart|force-reload)
+    log_daemon_msg "Restarting $DESC" "$NAME"
+    do_stop
+    case "$?" in
+      0|1)
+        do_start
+        case "$?" in
+          0) log_end_msg 0 ;;
+          1) log_end_msg 1 ;; # Old process is still running
+          *) log_end_msg 1 ;; # Failed to start
+        esac
+        ;;
+      *)
+        # Failed to stop
+        log_end_msg 1
+        ;;
+    esac
+    ;;
+  *)
+    echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2
+    exit 3
+    ;;
+esac
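The script is a conventional LSB init wrapper around `addon_manager_async`: every yaml under /etc/kubernetes/addons is created in parallel, and the trailing `sleep infinity` keeps the process alive so salt's `service.running` check stays green. A hedged smoke test, assuming salt has installed the script and at least one addon yaml is in place:

```shell
sudo /etc/init.d/kube-addons start
sudo /etc/init.d/kube-addons status && echo "addon manager is running"
sudo tail /var/log/kube-addons.log    # DAEMON_LOG_FILE above
sudo /etc/init.d/kube-addons stop
```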
cluster/saltbase/salt/kube-addons/kube-addons.service (new file, 9 lines)
@@ -0,0 +1,9 @@
+[Unit]
+Description=Kubernetes Addon Object Manager
+Documentation=https://github.com/GoogleCloudPlatform/kubernetes
+
+[Service]
+ExecStart=/usr/lib/systemd/scripts/kube-addons start
+
+[Install]
+WantedBy=multi-user.target
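On RedHat-family nodes the init.sls above installs this unit and places the initd script under /usr/lib/systemd/scripts. A minimal sketch of driving it by hand, assuming systemd is the active init:

```shell
sudo systemctl enable kube-addons.service
sudo systemctl start kube-addons.service
systemctl status kube-addons.service
```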
@@ -36,6 +36,7 @@ base:
     - nginx
     - kube-client-tools
     - logrotate
+    - kube-addons
 {% if grains['cloud'] is defined and grains['cloud'] == 'azure' %}
     - openvpn
 {% endif %}
@@ -82,10 +82,14 @@ mkdir -p /srv/salt-overlay/pillar
 cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
 portal_net: '$(echo "$PORTAL_NET" | sed -e "s/'/''/g")'
 cert_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
 enable_cluster_monitoring: '$(echo "$ENABLE_CLUSTER_MONITORING" | sed -e "s/'/''/g")'
 enable_node_monitoring: '$(echo "$ENABLE_NODE_MONITORING" | sed -e "s/'/''/g")'
 enable_cluster_logging: '$(echo "$ENABLE_CLUSTER_LOGGING" | sed -e "s/'/''/g")'
 enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
 logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
 elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
+enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
+dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
+dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
+dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
 EOF
@@ -287,11 +287,11 @@ function restart-kube-proxy {
   ssh-to-node "$1" "sudo systemctl restart kube-proxy"
 }

-function setup-monitoring {
+function setup-monitoring-firewall {
   echo "TODO" 1>&2
 }

-function teardown-monitoring {
+function teardown-monitoring-firewall {
   echo "TODO" 1>&2
 }

@@ -300,10 +300,10 @@ function prepare-e2e() {
   echo "Vagrant doesn't need special preparations for e2e tests" 1>&2
 }

-function setup-logging {
+function setup-logging-firewall {
   echo "TODO: setup logging"
 }

-function teardown-logging {
+function teardown-logging-firewall {
   echo "TODO: teardown logging"
 }
@@ -22,10 +22,14 @@ mkdir -p /srv/salt-overlay/pillar
 cat <<EOF >/srv/salt-overlay/pillar/cluster-params.sls
 node_instance_prefix: $NODE_INSTANCE_PREFIX
 portal_net: $PORTAL_NET
 enable_cluster_monitoring: $ENABLE_CLUSTER_MONITORING
 enable_node_monitoring: $ENABLE_NODE_MONITORING
 enable_cluster_logging: $ENABLE_CLUSTER_LOGGING
 enable_node_logging: $ENABLE_NODE_LOGGING
 logging_destination: $LOGGING_DESTINATION
 elasticsearch_replicas: $ELASTICSEARCH_LOGGING_REPLICAS
+enable_cluster_dns: $ENABLE_CLUSTER_DNS
+dns_replicas: $DNS_REPLICAS
+dns_server: $DNS_SERVER_IP
+dns_domain: $DNS_DOMAIN
 EOF
@@ -478,18 +478,18 @@ function test-teardown {
   echo "TODO"
 }

-function setup-monitoring {
+function setup-monitoring-firewall {
   echo "TODO"
 }

-function teardown-monitoring {
+function teardown-monitoring-firewall {
   echo "TODO"
 }

-function setup-logging {
+function setup-logging-firewall {
   echo "TODO: setup logging"
 }

-function teardown-logging {
+function teardown-logging-firewall {
   echo "TODO: teardown logging"
 }