Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 03:11:40 +00:00.
Merge pull request #28109 from jszczepkowski/influx-ps

Automatic merge from submit-queue.

Influxdb migrated to PetSet and PersistentVolumes.

```release-note
Influxdb migrated to PetSet and PersistentVolumes.
```

Commit: d4e6064b70
@@ -0,0 +1,15 @@
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: influxdb-claim
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Gi
+  volumeName: influxdb-pv
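The claim pre-binds to the `influxdb-pv` volume via `volumeName` rather than relying on dynamic matching. A minimal sketch for verifying the binding on a running cluster, assuming kubectl is already pointed at it (resource names are taken from the manifest above):

```bash
# Both objects live in kube-system; STATUS should read "Bound" on each
# once the addon manager has applied the manifests.
kubectl get pvc influxdb-claim --namespace=kube-system
kubectl get pv influxdb-pv
```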
@@ -1,5 +1,5 @@
-apiVersion: v1
-kind: ReplicationController
+apiVersion: apps/v1alpha1
+kind: PetSet
 metadata:
   name: monitoring-influxdb-grafana-v3
   namespace: kube-system
@@ -9,9 +9,6 @@ metadata:
     kubernetes.io/cluster-service: "true"
 spec:
   replicas: 1
-  selector:
-    k8s-app: influxGrafana
-    version: v3
   template:
     metadata:
       labels:
@@ -68,7 +65,8 @@ spec:
           mountPath: /var
       volumes:
       - name: influxdb-persistent-storage
-        emptyDir: {}
+        persistentVolumeClaim:
+          claimName: influxdb-claim
       - name: grafana-persistent-storage
         emptyDir: {}
+  serviceName: monitoring-influxdb
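A PetSet (the apps/v1alpha1 ancestor of StatefulSet) needs a governing service, which is why the diff also adds `serviceName: monitoring-influxdb`. A hedged sketch for inspecting the migrated workload; the `petset` resource name in kubectl is an assumption based on the apps/v1alpha1 API used above:

```bash
# List the PetSet and the pods it manages (labels from the pod template).
kubectl get petset monitoring-influxdb-grafana-v3 --namespace=kube-system
kubectl get pods --namespace=kube-system -l k8s-app=influxGrafana,version=v3
```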
cluster/addons/cluster-monitoring/influxdb/influxdb-pv.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
+{% set pd_prefix = pillar.get('master_name', '') -%}
+{% set pd_name = pd_prefix + '-influxdb-pd' -%}
+
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+  name: influxdb-pv
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+spec:
+  capacity:
+    storage: 10Gi
+  accessModes:
+    - ReadWriteOnce
+    - ReadOnlyMany
+  gcePersistentDisk:
+    pdName: {{ pd_name }}
+    fsType: ext4
+  persistentVolumeReclaimPolicy: Delete
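The two `{% set %}` lines are Salt Jinja: `pd_prefix` falls back to an empty string when the `master_name` pillar is unset, and `{{ pd_name }}` is then substituted either by Salt or by the `sed` pass in `start-kube-addons` further down. A standalone Bash illustration of the same fallback logic (not Salt; the sample value is made up):

```bash
# Emulate pillar.get('master_name', '') + '-influxdb-pd' for both cases.
master_name="e2e-test"                 # hypothetical pillar value
echo "${master_name:-}-influxdb-pd"    # -> e2e-test-influxdb-pd
unset master_name
echo "${master_name:-}-influxdb-pd"    # -> -influxdb-pd (empty prefix)
```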
@@ -451,6 +451,7 @@ network_policy_provider: '$(echo "$NETWORK_POLICY_PROVIDER" | sed -e "s/'/''/g")'
 enable_manifest_url: '$(echo "${ENABLE_MANIFEST_URL:-}" | sed -e "s/'/''/g")'
 manifest_url: '$(echo "${MANIFEST_URL:-}" | sed -e "s/'/''/g")'
 manifest_url_header: '$(echo "${MANIFEST_URL_HEADER:-}" | sed -e "s/'/''/g")'
+master_name: '$(echo "${MASTER_NAME:-}" | sed -e "s/'/''/g")'
 num_nodes: $(echo "${NUM_NODES:-}" | sed -e "s/'/''/g")
 e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
 kube_uid: '$(echo "${KUBE_UID}" | sed -e "s/'/''/g")'
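The `sed -e "s/'/''/g"` in each entry doubles single quotes because the pillar values are emitted as single-quoted YAML scalars, where `''` is the only legal escape. A quick demonstration of the escaping (the sample string is made up):

```bash
# A single quote in the input becomes '' in the single-quoted pillar value.
printf "%s\n" "name-with-'quote" | sed -e "s/'/''/g"
# -> name-with-''quote
```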
@@ -1,74 +0,0 @@
-apiVersion: v1
-kind: ReplicationController
-metadata:
-  name: monitoring-influxdb-grafana-v3
-  namespace: kube-system
-  labels:
-    k8s-app: influxGrafana
-    version: v3
-    kubernetes.io/cluster-service: "true"
-spec:
-  replicas: 1
-  selector:
-    k8s-app: influxGrafana
-    version: v3
-  template:
-    metadata:
-      labels:
-        k8s-app: influxGrafana
-        version: v3
-        kubernetes.io/cluster-service: "true"
-    spec:
-      containers:
-        - image: gcr.io/google_containers/heapster_influxdb:v0.5
-          name: influxdb
-          resources:
-            # keep request = limit to keep this container in guaranteed class
-            limits:
-              cpu: 100m
-              memory: 500Mi
-            requests:
-              cpu: 100m
-              memory: 500Mi
-          ports:
-            - containerPort: 8083
-            - containerPort: 8086
-          volumeMounts:
-          - name: influxdb-persistent-storage
-            mountPath: /data
-        - image: gcr.io/google_containers/heapster_grafana:v2.6.0-2
-          name: grafana
-          env:
-          resources:
-            # keep request = limit to keep this container in guaranteed class
-            limits:
-              cpu: 100m
-              memory: 100Mi
-            requests:
-              cpu: 100m
-              memory: 100Mi
-          env:
-            # This variable is required to setup templates in Grafana.
-            - name: INFLUXDB_SERVICE_URL
-              value: http://monitoring-influxdb:8086
-            # The following env variables are required to make Grafana accessible via
-            # the kubernetes api-server proxy. On production clusters, we recommend
-            # removing these env variables, setup auth for grafana, and expose the grafana
-            # service using a LoadBalancer or a public IP.
-            - name: GF_AUTH_BASIC_ENABLED
-              value: "false"
-            - name: GF_AUTH_ANONYMOUS_ENABLED
-              value: "true"
-            - name: GF_AUTH_ANONYMOUS_ORG_ROLE
-              value: Admin
-            - name: GF_SERVER_ROOT_URL
-              value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/
-          volumeMounts:
-          - name: grafana-persistent-storage
-            mountPath: /var
-      volumes:
-      - name: influxdb-persistent-storage
-        emptyDir: {}
-      - name: grafana-persistent-storage
-        emptyDir: {}
-
@@ -821,6 +821,12 @@ function start-kube-addons {
     sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" "${controller_yaml}"
     sed -i -e "s@{{ *metrics_cpu_per_node *}}@${metrics_cpu_per_node}@g" "${controller_yaml}"
   fi
+  if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]]; then
+    pv_yaml="${dst_dir}/${file_dir}/influxdb-pv.yaml"
+    pd_name="${INSTANCE_PREFIX}-influxdb-pd"
+    remove-salt-config-comments "${pv_yaml}"
+    sed -i -e "s@{{ *pd_name *}}@${pd_name}@g" "${pv_yaml}"
+  fi
   if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
     setup-addon-manifests "addons" "dns"
     local -r dns_rc_file="${dst_dir}/dns/skydns-rc.yaml"
@@ -693,6 +693,14 @@ function create-master() {
       --size "${CLUSTER_REGISTRY_DISK_SIZE}" &
   fi
 
+  # Create disk for influxdb if enabled
+  if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]]; then
+    gcloud compute disks create "${INSTANCE_PREFIX}-influxdb-pd" \
+      --project "${PROJECT}" \
+      --zone "${ZONE}" \
+      --size "10GiB" &
+  fi
+
   # Generate a bearer token for this cluster. We push this separately
   # from the other cluster variables so that the client (this
   # computer) can forget it later. This should disappear with
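The disk creation is backgrounded with `&`, matching the cluster-registry disk above, so the surrounding script is expected to `wait` before the disk is needed. A hedged one-off existence check (project, zone, and prefix are placeholder values):

```bash
PROJECT="my-project" ZONE="us-central1-b" INSTANCE_PREFIX="kubernetes"
if gcloud compute disks describe "${INSTANCE_PREFIX}-influxdb-pd" \
    --project "${PROJECT}" --zone "${ZONE}" &>/dev/null; then
  echo "influxdb disk exists"
else
  echo "influxdb disk not (yet) created" >&2
fi
```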
@@ -1076,6 +1084,15 @@ function kube-down {
       "${MASTER_NAME}-ip"
   fi
 
+  # Delete persistent disk for influx-db.
+  if gcloud compute disks describe "${INSTANCE_PREFIX}"-influxdb-pd --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
+    gcloud compute disks delete \
+      --project "${PROJECT}" \
+      --quiet \
+      --zone "${ZONE}" \
+      "${INSTANCE_PREFIX}"-influxdb-pd
+  fi
+
   export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
   clear-kubeconfig
   set -e
@@ -1132,6 +1149,11 @@ function check-resources {
     return 1
   fi
 
+  if gcloud compute disks describe --project "${PROJECT}" "${INSTANCE_PREFIX}-influxdb-pd" --zone "${ZONE}" &>/dev/null; then
+    KUBE_RESOURCE_FOUND="Persistent disk ${INSTANCE_PREFIX}-influxdb-pd"
+    return 1
+  fi
+
   # Find out what minions are running.
   local -a minions
   minions=( $(gcloud compute instances list \
@@ -10,7 +10,7 @@ spec:
   hostNetwork: true
   containers:
   - name: kube-addon-manager
-    image: gcr.io/google-containers/kube-addon-manager:v4
+    image: gcr.io/google-containers/kube-addon-manager:v5
    resources:
      requests:
        cpu: 5m
@@ -110,7 +110,11 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
 	if err != nil {
 		return nil, err
 	}
-	if (len(rcList.Items) + len(deploymentList.Items)) != 1 {
+	psList, err := c.Apps().PetSets(api.NamespaceSystem).List(options)
+	if err != nil {
+		return nil, err
+	}
+	if (len(rcList.Items) + len(deploymentList.Items) + len(psList.Items)) != 1 {
 		return nil, fmt.Errorf("expected to find one replica for RC or deployment with label %s but got %d",
 			rcLabel, len(rcList.Items))
 	}
@@ -144,6 +148,21 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
 				expectedPods = append(expectedPods, string(pod.UID))
 			}
 		}
+		// And for pet sets.
+		for _, ps := range psList.Items {
+			selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector()
+			options := api.ListOptions{LabelSelector: selector}
+			podList, err := c.Pods(api.NamespaceSystem).List(options)
+			if err != nil {
+				return nil, err
+			}
+			for _, pod := range podList.Items {
+				if pod.DeletionTimestamp != nil {
+					continue
+				}
+				expectedPods = append(expectedPods, string(pod.UID))
+			}
+		}
 	}
 	return expectedPods, nil
 }
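To exercise this path, the monitoring e2e that contains `verifyExpectedRcsExistAndGetExpectedPods` can be focused by name; the invocation below follows the `hack/e2e.go` runner conventions of this era and is a sketch, not a verified command line:

```bash
# Run only the cluster-monitoring e2e against an already-running cluster.
go run hack/e2e.go -v --test --test_args="--ginkgo.focus=Monitoring"
```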