Modified influxdb petset to provision pv.

Jerzy Szczepkowski 2016-07-12 16:31:21 +02:00
parent 46c8dfd7a2
commit f7167d11a8
5 changed files with 17 additions and 68 deletions

File 1/5: influxdb PersistentVolumeClaim manifest (deleted)

@@ -1,15 +0,0 @@
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
-  name: influxdb-claim
-  namespace: kube-system
-  labels:
-    kubernetes.io/cluster-service: "true"
-spec:
-  accessModes:
-  - ReadWriteOnce
-  resources:
-    requests:
-      storage: 10Gi
-  volumeName: influxdb-pv
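
Before this change, influxdb's storage was wired up statically: the claim above used volumeName to pin itself to a pre-created PersistentVolume (influxdb-pv), which in turn pointed at a GCE persistent disk managed by the shell scripts changed later in this commit. The commit replaces that disk-to-PV-to-PVC chain with a volumeClaimTemplate on the PetSet plus alpha dynamic provisioning. A minimal sketch of how the old static binding could be inspected on a pre-change cluster (names come from the deleted manifests; cluster access is assumed):

  # Statically bound claim and the pre-created volume it pins:
  kubectl --namespace=kube-system get pvc influxdb-claim
  kubectl get pv influxdb-pv -o wide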

File 2/5: influxdb/grafana PetSet manifest (modified)

@@ -3,11 +3,12 @@ kind: PetSet
 metadata:
   name: monitoring-influxdb-grafana-v3
   namespace: kube-system
   labels:
     k8s-app: influxGrafana
     version: v3
     kubernetes.io/cluster-service: "true"
 spec:
+  serviceName: monitoring-influxdb
   replicas: 1
   template:
     metadata:
@@ -31,7 +32,7 @@ spec:
         - containerPort: 8083
         - containerPort: 8086
         volumeMounts:
-        - name: influxdb-persistent-storage
+        - name: influxdb-ps
           mountPath: /data
       - image: gcr.io/google_containers/heapster_grafana:v2.6.0-2
         name: grafana
@@ -58,15 +59,25 @@ spec:
           value: "true"
         - name: GF_AUTH_ANONYMOUS_ORG_ROLE
           value: Admin
+        # This is the kubernetes-specific endpoint where the service may be reached.
+        # It is embedded in the "kube-system" namespace.
         - name: GF_SERVER_ROOT_URL
           value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/
         volumeMounts:
         - name: grafana-persistent-storage
           mountPath: /var
       volumes:
-      - name: influxdb-persistent-storage
-        persistentVolumeClaim:
-          claimName: influxdb-claim
       - name: grafana-persistent-storage
         emptyDir: {}
-  serviceName: monitoring-influxdb
+  volumeClaimTemplates:
+  - metadata:
+      name: influxdb-ps
+      annotations:
+        volume.alpha.kubernetes.io/storage-class: anything
+    spec:
+      accessModes:
+      - ReadWriteOnce
+      resources:
+        requests:
+          storage: 10Gi
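
The volumeClaimTemplates stanza makes the PetSet controller create one PersistentVolumeClaim per pet, and the alpha volume.alpha.kubernetes.io/storage-class annotation asks the built-in alpha provisioner to create a matching PV, and on GCE the backing persistent disk, on demand; the alpha provisioner appears to key off the annotation's presence rather than its value, hence "anything". Template-derived claims are conventionally named <template-name>-<pet-name>, where the pet name is <petset-name>-<ordinal>, which would explain shortening the volume name to influxdb-ps. A sketch for checking the result; the exact claim name is an assumption based on that convention:

  # Claim created from the template (name pattern assumed):
  kubectl --namespace=kube-system get pvc influxdb-ps-monitoring-influxdb-grafana-v3-0
  # Dynamically provisioned PV bound to it:
  kubectl --namespace=kube-system get pvc influxdb-ps-monitoring-influxdb-grafana-v3-0 \
    -o jsonpath='{.spec.volumeName}'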

File 3/5: influxdb PersistentVolume manifest, salt-templated (deleted)

@@ -1,19 +0,0 @@
-{% set pd_prefix = pillar.get('master_name', '') -%}
-{% set pd_name = pd_prefix + '-influxdb-pd' -%}
-kind: PersistentVolume
-apiVersion: v1
-metadata:
-  name: influxdb-pv
-  labels:
-    kubernetes.io/cluster-service: "true"
-spec:
-  capacity:
-    storage: 10Gi
-  accessModes:
-  - ReadWriteOnce
-  - ReadOnlyMany
-  gcePersistentDisk:
-    pdName: {{ pd_name }}
-    fsType: ext4
-  persistentVolumeReclaimPolicy: Delete

File 4/5: cluster startup script, function start-kube-addons (modified)

@@ -844,12 +844,6 @@ function start-kube-addons {
     sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" "${controller_yaml}"
     sed -i -e "s@{{ *metrics_cpu_per_node *}}@${metrics_cpu_per_node}@g" "${controller_yaml}"
   fi
-  if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]]; then
-    pv_yaml="${dst_dir}/${file_dir}/influxdb-pv.yaml"
-    pd_name="${INSTANCE_PREFIX}-influxdb-pd"
-    remove-salt-config-comments "${pv_yaml}"
-    sed -i -e "s@{{ *pd_name *}}@${pd_name}@g" "${pv_yaml}"
-  fi
   if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
     setup-addon-manifests "addons" "dns"
     local -r dns_rc_file="${dst_dir}/dns/skydns-rc.yaml"
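
The deleted block was the glue that turned the salt-templated influxdb-pv.yaml into a concrete manifest at addon-start time: remove-salt-config-comments stripped the {% ... %} lines and sed filled in {{ pd_name }}. A minimal sketch of that substitution on a local copy of the template, assuming INSTANCE_PREFIX is "kubernetes" (the prefix value is illustrative):

  INSTANCE_PREFIX="kubernetes"              # illustrative value
  pv_yaml="influxdb-pv.yaml"                # local copy of the deleted template
  pd_name="${INSTANCE_PREFIX}-influxdb-pd"
  sed -i -e "s@{{ *pd_name *}}@${pd_name}@g" "${pv_yaml}"
  # spec.gcePersistentDisk.pdName now reads: kubernetes-influxdb-pd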

File 5/5: GCE cluster management script, functions create-master, kube-down, check-resources (modified)

@@ -711,14 +711,6 @@ function create-master() {
       --size "${CLUSTER_REGISTRY_DISK_SIZE}" &
   fi

-  # Create disk for influxdb if enabled
-  if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]]; then
-    gcloud compute disks create "${INSTANCE_PREFIX}-influxdb-pd" \
-      --project "${PROJECT}" \
-      --zone "${ZONE}" \
-      --size "10GiB" &
-  fi
-
   # Generate a bearer token for this cluster. We push this separately
   # from the other cluster variables so that the client (this
   # computer) can forget it later. This should disappear with
@@ -1197,15 +1189,6 @@ function kube-down {
     routes=( "${routes[@]:${batch}}" )
   done

-  # Delete persistent disk for influx-db.
-  if gcloud compute disks describe "${INSTANCE_PREFIX}"-influxdb-pd --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
-    gcloud compute disks delete \
-      --project "${PROJECT}" \
-      --quiet \
-      --zone "${ZONE}" \
-      "${INSTANCE_PREFIX}"-influxdb-pd
-  fi
-
   # If there are no more remaining master replicas, we should update kubeconfig.
   if [[ "${REMAINING_MASTER_COUNT}" == "0" ]]; then
     export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
@@ -1266,11 +1249,6 @@ function check-resources {
     return 1
   fi

-  if gcloud compute disks describe --project "${PROJECT}" "${INSTANCE_PREFIX}-influxdb-pd" --zone "${ZONE}" &>/dev/null; then
-    KUBE_RESOURCE_FOUND="Persistent disk ${INSTANCE_PREFIX}-influxdb-pd"
-    return 1
-  fi
-
   # Find out what minions are running.
   local -a minions
   minions=( $(gcloud compute instances list \
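
With the manual gcloud lifecycle removed, the disk now follows the claim: the alpha provisioner creates a PD when the PetSet's claim appears, and the PV's reclaim policy governs cleanup. A sketch for verifying that the old hand-managed disk is gone and for spotting provisioner-created disks; the kubernetes-dynamic-pvc name prefix is an assumption about the GCE provisioner's naming:

  # The hand-managed disk should no longer exist:
  if ! gcloud compute disks describe "${INSTANCE_PREFIX}-influxdb-pd" \
      --project "${PROJECT}" --zone "${ZONE}" &>/dev/null; then
    echo "old influxdb PD is gone, as expected"
  fi
  # Disks created by the dynamic provisioner (name prefix assumed):
  gcloud compute disks list --project "${PROJECT}" | grep kubernetes-dynamic-pvc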