Merge pull request #29943 from kubernetes/revert-28840-influx-ps
Revert "Modified influxdb petset to provision persistent volume."
Commit d2c17c7cfa
@@ -0,0 +1,15 @@
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
  name: influxdb-claim
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 10Gi
  volumeName: influxdb-pv
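The restored claim binds statically: spec.volumeName pins it to the influxdb-pv PersistentVolume defined below instead of relying on dynamic provisioning. Assuming the addon has been applied to a cluster, one way to confirm the pairing is a quick kubectl check (a sketch, not part of the change):

    # Both objects should report status Bound once the claim is matched to the volume.
    kubectl --namespace=kube-system get pvc influxdb-claim
    kubectl get pv influxdb-pv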
@@ -3,12 +3,11 @@ kind: PetSet
metadata:
  name: monitoring-influxdb-grafana-v3
  namespace: kube-system
  labels:
  labels:
    k8s-app: influxGrafana
    version: v3
    kubernetes.io/cluster-service: "true"
spec:
  serviceName: monitoring-influxdb
  replicas: 1
  template:
    metadata:
@@ -32,7 +31,7 @@ spec:
        - containerPort: 8083
        - containerPort: 8086
        volumeMounts:
        - name: influxdb-ps
        - name: influxdb-persistent-storage
          mountPath: /data
      - image: gcr.io/google_containers/heapster_grafana:v2.6.0-2
        name: grafana
@@ -59,25 +58,15 @@ spec:
          value: "true"
        - name: GF_AUTH_ANONYMOUS_ORG_ROLE
          value: Admin
        # This is kubernetes specific endpoint where the service may be reached.
        # It is embeded in "kube-system" namespace.
        - name: GF_SERVER_ROOT_URL
          value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/
        volumeMounts:
        - name: grafana-persistent-storage
          mountPath: /var
      volumes:
      - name: influxdb-persistent-storage
        persistentVolumeClaim:
          claimName: influxdb-claim
      - name: grafana-persistent-storage
        emptyDir: {}
  volumeClaimTemplates:
  - metadata:
      name: influxdb-ps
      annotations:
        volume.alpha.kubernetes.io/storage-class: anything
    spec:
      accessModes:
        - ReadWriteOnce
      resources:
        requests:
          storage: 10Gi

  serviceName: monitoring-influxdb
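Net effect of the controller change: the pet set goes back to mounting a plain pod volume (influxdb-persistent-storage) backed by the pre-created influxdb-claim, instead of the alpha volumeClaimTemplates/influxdb-ps template that relied on the volume.alpha.kubernetes.io/storage-class annotation for dynamic provisioning. A rough way to check the mount on a running cluster (only a sketch, names as above):

    # The pod's Volumes section should list influxdb-persistent-storage
    # pointing at ClaimName influxdb-claim.
    kubectl --namespace=kube-system get pods -l k8s-app=influxGrafana
    kubectl --namespace=kube-system describe pod -l k8s-app=influxGrafana | grep -A 3 "Volumes:"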
cluster/addons/cluster-monitoring/influxdb/influxdb-pv.yaml (new file, 19 lines added)
@@ -0,0 +1,19 @@
{% set pd_prefix = pillar.get('master_name', '') -%}
{% set pd_name = pd_prefix + '-influxdb-pd' -%}

kind: PersistentVolume
apiVersion: v1
metadata:
  name: influxdb-pv
  labels:
    kubernetes.io/cluster-service: "true"
spec:
  capacity:
    storage: 10Gi
  accessModes:
    - ReadWriteOnce
    - ReadOnlyMany
  gcePersistentDisk:
    pdName: {{ pd_name }}
    fsType: ext4
  persistentVolumeReclaimPolicy: Delete
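The volume is a salt template: pd_name is derived from the master name pillar (and, on GCE, rewritten by the start-kube-addons snippet below), so the PV ends up pointing at the <instance-prefix>-influxdb-pd persistent disk. With persistentVolumeReclaimPolicy: Delete, the volume is removed once its claim goes away rather than retained for reuse. A hedged way to see which PD actually backs the volume, assuming the usual GCE environment variables are set:

    # Map the PV back to its GCE persistent disk and confirm the disk exists
    # (PROJECT/ZONE/INSTANCE_PREFIX are assumed to match the cluster's config).
    kubectl get pv influxdb-pv -o jsonpath='{.spec.gcePersistentDisk.pdName}'; echo
    gcloud compute disks describe "${INSTANCE_PREFIX}-influxdb-pd" \
      --project "${PROJECT}" --zone "${ZONE}"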
@@ -865,6 +865,12 @@ function start-kube-addons {
    sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" "${controller_yaml}"
    sed -i -e "s@{{ *metrics_cpu_per_node *}}@${metrics_cpu_per_node}@g" "${controller_yaml}"
  fi
  if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]]; then
    pv_yaml="${dst_dir}/${file_dir}/influxdb-pv.yaml"
    pd_name="${INSTANCE_PREFIX}-influxdb-pd"
    remove-salt-config-comments "${pv_yaml}"
    sed -i -e "s@{{ *pd_name *}}@${pd_name}@g" "${pv_yaml}"
  fi
  if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
    setup-addon-manifests "addons" "dns"
    local -r dns_rc_file="${dst_dir}/dns/skydns-rc.yaml"
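The sed expression swaps the salt-style {{ pd_name }} placeholder in the copied manifest for the concrete disk name before the addon manager picks the file up. A toy illustration of the rewrite (the disk name here is made up):

    pd_name="my-cluster-master-influxdb-pd"   # hypothetical value of ${INSTANCE_PREFIX}-influxdb-pd
    echo '    pdName: {{ pd_name }}' | sed -e "s@{{ *pd_name *}}@${pd_name}@g"
    # prints:     pdName: my-cluster-master-influxdb-pd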
@@ -712,6 +712,14 @@ function create-master() {
      --size "${CLUSTER_REGISTRY_DISK_SIZE}" &
  fi

  # Create disk for influxdb if enabled
  if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]]; then
    gcloud compute disks create "${INSTANCE_PREFIX}-influxdb-pd" \
      --project "${PROJECT}" \
      --zone "${ZONE}" \
      --size "10GiB" &
  fi

  # Generate a bearer token for this cluster. We push this separately
  # from the other cluster variables so that the client (this
  # computer) can forget it later. This should disappear with
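As with the registry disk above, the influxdb disk is created in the background (note the trailing &) so it can proceed in parallel with the rest of master bring-up. A defensive variant, not part of this revert, could probe for an existing disk first so repeated runs stay idempotent:

    # Only create the disk if it is not already there (sketch; same variables as above).
    if ! gcloud compute disks describe "${INSTANCE_PREFIX}-influxdb-pd" \
        --project "${PROJECT}" --zone "${ZONE}" &>/dev/null; then
      gcloud compute disks create "${INSTANCE_PREFIX}-influxdb-pd" \
        --project "${PROJECT}" --zone "${ZONE}" --size "10GiB" &
    fi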
@@ -1190,6 +1198,15 @@ function kube-down {
    routes=( "${routes[@]:${batch}}" )
  done

  # Delete persistent disk for influx-db.
  if gcloud compute disks describe "${INSTANCE_PREFIX}"-influxdb-pd --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
    gcloud compute disks delete \
      --project "${PROJECT}" \
      --quiet \
      --zone "${ZONE}" \
      "${INSTANCE_PREFIX}"-influxdb-pd
  fi

  # If there are no more remaining master replicas, we should update kubeconfig.
  if [[ "${REMAINING_MASTER_COUNT}" == "0" ]]; then
    export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
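The describe guard keeps kube-down quiet when monitoring was never enabled (gcloud would otherwise fail when asked to delete a non-existent disk), and --quiet skips the interactive confirmation. After teardown, a rough manual check for stray disks carrying the cluster prefix might look like:

    # Sketch only; lists any disk whose name starts with the cluster's instance prefix.
    gcloud compute disks list --project "${PROJECT}" | grep "${INSTANCE_PREFIX}-" || echo "no leftover disks"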
@@ -1250,6 +1267,11 @@ function check-resources {
    return 1
  fi

  if gcloud compute disks describe --project "${PROJECT}" "${INSTANCE_PREFIX}-influxdb-pd" --zone "${ZONE}" &>/dev/null; then
    KUBE_RESOURCE_FOUND="Persistent disk ${INSTANCE_PREFIX}-influxdb-pd"
    return 1
  fi

  # Find out what minions are running.
  local -a minions
  minions=( $(gcloud compute instances list \