From d00cdf75e879432a93a58e6f92efe7f5bf096da2 Mon Sep 17 00:00:00 2001
From: Jerzy Szczepkowski
Date: Tue, 21 Jun 2016 09:43:36 +0200
Subject: [PATCH] Influxdb migrated to PetSet and PersistentVolumes.

Influxdb migrated to PetSet and PersistentVolumes.
---
 .../influxdb/influxdb-claim.yaml              | 15 ++++
 ...ller.yaml => influxdb-grafana-petset.yaml} | 12 ++-
 .../influxdb/influxdb-pv.yaml                 | 20 +++++
 cluster/gce/configure-vm.sh                   |  1 +
 .../influxdb/influxdb-grafana-controller.yaml | 74 -------------------
 cluster/gce/gci/configure-helper.sh           |  6 ++
 cluster/gce/util.sh                           | 22 ++++++
 .../salt/kube-addons/kube-addon-manager.yaml  |  2 +-
 test/e2e/monitoring.go                        | 21 +++++-
 9 files changed, 90 insertions(+), 83 deletions(-)
 create mode 100644 cluster/addons/cluster-monitoring/influxdb/influxdb-claim.yaml
 rename cluster/addons/cluster-monitoring/influxdb/{influxdb-grafana-controller.yaml => influxdb-grafana-petset.yaml} (94%)
 create mode 100644 cluster/addons/cluster-monitoring/influxdb/influxdb-pv.yaml
 delete mode 100644 cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml

diff --git a/cluster/addons/cluster-monitoring/influxdb/influxdb-claim.yaml b/cluster/addons/cluster-monitoring/influxdb/influxdb-claim.yaml
new file mode 100644
index 00000000000..d58bca264ca
--- /dev/null
+++ b/cluster/addons/cluster-monitoring/influxdb/influxdb-claim.yaml
@@ -0,0 +1,15 @@
+kind: PersistentVolumeClaim
+apiVersion: v1
+metadata:
+  name: influxdb-claim
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+spec:
+  accessModes:
+    - ReadWriteOnce
+  resources:
+    requests:
+      storage: 10Gi
+  volumeName: influxdb-pv
+
diff --git a/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-petset.yaml
similarity index 94%
rename from cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml
rename to cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-petset.yaml
index e6249666b76..e9048c2611d 100644
--- a/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml
+++ b/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-petset.yaml
@@ -1,5 +1,5 @@
-apiVersion: v1
-kind: ReplicationController
+apiVersion: apps/v1alpha1
+kind: PetSet
 metadata:
   name: monitoring-influxdb-grafana-v3
   namespace: kube-system
@@ -9,9 +9,6 @@ metadata:
     kubernetes.io/cluster-service: "true"
 spec:
   replicas: 1
-  selector:
-    k8s-app: influxGrafana
-    version: v3
   template:
     metadata:
       labels:
@@ -68,7 +65,8 @@ spec:
           mountPath: /var
       volumes:
       - name: influxdb-persistent-storage
-        emptyDir: {}
+        persistentVolumeClaim:
+          claimName: influxdb-claim
       - name: grafana-persistent-storage
         emptyDir: {}
-
+  serviceName: monitoring-influxdb
diff --git a/cluster/addons/cluster-monitoring/influxdb/influxdb-pv.yaml b/cluster/addons/cluster-monitoring/influxdb/influxdb-pv.yaml
new file mode 100644
index 00000000000..76d091cdd63
--- /dev/null
+++ b/cluster/addons/cluster-monitoring/influxdb/influxdb-pv.yaml
@@ -0,0 +1,20 @@
+{% set pd_prefix = pillar.get('master_name', '') -%}
+{% set pd_name = pd_prefix + '-influxdb-pd' -%}
+
+kind: PersistentVolume
+apiVersion: v1
+metadata:
+  name: influxdb-pv
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+spec:
+  capacity:
+    storage: 10Gi
+  accessModes:
+    - ReadWriteOnce
+    - ReadOnlyMany
+  gcePersistentDisk:
+    pdName: {{ pd_name }}
+    fsType: ext4
+  persistentVolumeReclaimPolicy: Delete
diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh
index 61a0d261fbf..255d7eab401 100755
--- a/cluster/gce/configure-vm.sh
+++ b/cluster/gce/configure-vm.sh
@@ -451,6 +451,7 @@ network_policy_provider: '$(echo "$NETWORK_POLICY_PROVIDER" | sed -e "s/'/''/g")
 enable_manifest_url: '$(echo "${ENABLE_MANIFEST_URL:-}" | sed -e "s/'/''/g")'
 manifest_url: '$(echo "${MANIFEST_URL:-}" | sed -e "s/'/''/g")'
 manifest_url_header: '$(echo "${MANIFEST_URL_HEADER:-}" | sed -e "s/'/''/g")'
+master_name: '$(echo "${MASTER_NAME:-}" | sed -e "s/'/''/g")'
 num_nodes: $(echo "${NUM_NODES:-}" | sed -e "s/'/''/g")
 e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
 kube_uid: '$(echo "${KUBE_UID}" | sed -e "s/'/''/g")'
diff --git a/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml
deleted file mode 100644
index e6249666b76..00000000000
--- a/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
-apiVersion: v1
-kind: ReplicationController
-metadata:
-  name: monitoring-influxdb-grafana-v3
-  namespace: kube-system
-  labels:
-    k8s-app: influxGrafana
-    version: v3
-    kubernetes.io/cluster-service: "true"
-spec:
-  replicas: 1
-  selector:
-    k8s-app: influxGrafana
-    version: v3
-  template:
-    metadata:
-      labels:
-        k8s-app: influxGrafana
-        version: v3
-        kubernetes.io/cluster-service: "true"
-    spec:
-      containers:
-        - image: gcr.io/google_containers/heapster_influxdb:v0.5
-          name: influxdb
-          resources:
-            # keep request = limit to keep this container in guaranteed class
-            limits:
-              cpu: 100m
-              memory: 500Mi
-            requests:
-              cpu: 100m
-              memory: 500Mi
-          ports:
-            - containerPort: 8083
-            - containerPort: 8086
-          volumeMounts:
-          - name: influxdb-persistent-storage
-            mountPath: /data
-        - image: gcr.io/google_containers/heapster_grafana:v2.6.0-2
-          name: grafana
-          env:
-          resources:
-            # keep request = limit to keep this container in guaranteed class
-            limits:
-              cpu: 100m
-              memory: 100Mi
-            requests:
-              cpu: 100m
-              memory: 100Mi
-          env:
-            # This variable is required to setup templates in Grafana.
-            - name: INFLUXDB_SERVICE_URL
-              value: http://monitoring-influxdb:8086
-              # The following env variables are required to make Grafana accessible via
-              # the kubernetes api-server proxy. On production clusters, we recommend
-              # removing these env variables, setup auth for grafana, and expose the grafana
-              # service using a LoadBalancer or a public IP.
-            - name: GF_AUTH_BASIC_ENABLED
-              value: "false"
-            - name: GF_AUTH_ANONYMOUS_ENABLED
-              value: "true"
-            - name: GF_AUTH_ANONYMOUS_ORG_ROLE
-              value: Admin
-            - name: GF_SERVER_ROOT_URL
-              value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/
-          volumeMounts:
-          - name: grafana-persistent-storage
-            mountPath: /var
-      volumes:
-      - name: influxdb-persistent-storage
-        emptyDir: {}
-      - name: grafana-persistent-storage
-        emptyDir: {}
-
diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh
index 83b6957574e..a4a26311bfa 100644
--- a/cluster/gce/gci/configure-helper.sh
+++ b/cluster/gce/gci/configure-helper.sh
@@ -821,6 +821,12 @@ function start-kube-addons {
     sed -i -e "s@{{ *nanny_memory *}}@${nanny_memory}@g" "${controller_yaml}"
     sed -i -e "s@{{ *metrics_cpu_per_node *}}@${metrics_cpu_per_node}@g" "${controller_yaml}"
   fi
+  if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]]; then
+    pv_yaml="${dst_dir}/${file_dir}/influxdb-pv.yaml"
+    pd_name="${INSTANCE_PREFIX}-influxdb-pd"
+    remove-salt-config-comments "${pv_yaml}"
+    sed -i -e "s@{{ *pd_name *}}@${pd_name}@g" "${pv_yaml}"
+  fi
   if [[ "${ENABLE_CLUSTER_DNS:-}" == "true" ]]; then
     setup-addon-manifests "addons" "dns"
     local -r dns_rc_file="${dst_dir}/dns/skydns-rc.yaml"
diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh
index 121920bbe27..d8ff03a769c 100755
--- a/cluster/gce/util.sh
+++ b/cluster/gce/util.sh
@@ -693,6 +693,14 @@ function create-master() {
       --size "${CLUSTER_REGISTRY_DISK_SIZE}" &
   fi
 
+  # Create disk for influxdb if enabled
+  if [[ "${ENABLE_CLUSTER_MONITORING:-}" == "influxdb" ]]; then
+    gcloud compute disks create "${INSTANCE_PREFIX}-influxdb-pd" \
+      --project "${PROJECT}" \
+      --zone "${ZONE}" \
+      --size "10GiB" &
+  fi
+
   # Generate a bearer token for this cluster. We push this separately
   # from the other cluster variables so that the client (this
   # computer) can forget it later. This should disappear with
@@ -1076,6 +1084,15 @@ function kube-down {
       "${MASTER_NAME}-ip"
   fi
 
+  # Delete persistent disk for influx-db.
+  if gcloud compute disks describe "${INSTANCE_PREFIX}"-influxdb-pd --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
+    gcloud compute disks delete \
+      --project "${PROJECT}" \
+      --quiet \
+      --zone "${ZONE}" \
+      "${INSTANCE_PREFIX}"-influxdb-pd
+  fi
+
   export CONTEXT="${PROJECT}_${INSTANCE_PREFIX}"
   clear-kubeconfig
   set -e
@@ -1132,6 +1149,11 @@ function check-resources {
     return 1
   fi
 
+  if gcloud compute disks describe --project "${PROJECT}" "${INSTANCE_PREFIX}-influxdb-pd" --zone "${ZONE}" &>/dev/null; then
+    KUBE_RESOURCE_FOUND="Persistent disk ${INSTANCE_PREFIX}-influxdb-pd"
+    return 1
+  fi
+
   # Find out what minions are running.
   local -a minions
   minions=( $(gcloud compute instances list \
diff --git a/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml b/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml
index 8057314f80a..426034f0a17 100644
--- a/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml
+++ b/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml
@@ -10,7 +10,7 @@ spec:
   hostNetwork: true
   containers:
   - name: kube-addon-manager
-    image: gcr.io/google-containers/kube-addon-manager:v4
+    image: gcr.io/google-containers/kube-addon-manager:v5
     resources:
       requests:
         cpu: 5m
diff --git a/test/e2e/monitoring.go b/test/e2e/monitoring.go
index a4d591064ea..137c5b0f278 100644
--- a/test/e2e/monitoring.go
+++ b/test/e2e/monitoring.go
@@ -110,7 +110,11 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
 	if err != nil {
 		return nil, err
 	}
-	if (len(rcList.Items) + len(deploymentList.Items)) != 1 {
+	psList, err := c.Apps().PetSets(api.NamespaceSystem).List(options)
+	if err != nil {
+		return nil, err
+	}
+	if (len(rcList.Items) + len(deploymentList.Items) + len(psList.Items)) != 1 {
 		return nil, fmt.Errorf("expected to find one replica for RC or deployment with label %s but got %d", rcLabel, len(rcList.Items))
 	}
 
@@ -144,6 +148,21 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
 			expectedPods = append(expectedPods, string(pod.UID))
 		}
 	}
+	// And for pet sets.
+	for _, ps := range psList.Items {
+		selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector()
+		options := api.ListOptions{LabelSelector: selector}
+		podList, err := c.Pods(api.NamespaceSystem).List(options)
+		if err != nil {
+			return nil, err
+		}
+		for _, pod := range podList.Items {
+			if pod.DeletionTimestamp != nil {
+				continue
+			}
+			expectedPods = append(expectedPods, string(pod.UID))
+		}
+	}
 	}
 	return expectedPods, nil
 }