Merge pull request #8469 from satnam6502/heapster

Add explicit version to name of monitoring RCs
This commit is contained in:
Quinton Hoole 2015-06-04 10:42:54 -07:00
commit a39a6013cb
6 changed files with 54 additions and 40 deletions

View File

@ -1,18 +1,22 @@
apiVersion: v1beta3
kind: ReplicationController
metadata:
name: monitoring-heapster-v1
namespace: default
labels:
name: heapster
k8s-app: heapster
version: v1
kubernetes.io/cluster-service: "true"
name: monitoring-heapster-controller
spec:
replicas: 1
selector:
name: heapster
k8s-app: heapster
version: v1
template:
metadata:
labels:
name: heapster
k8s-app: heapster
version: v1
kubernetes.io/cluster-service: "true"
spec:
containers:

View File

@ -1,14 +1,15 @@
apiVersion: v1beta3
kind: Service
metadata:
metadata:
name: monitoring-grafana
namespace: default
labels:
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Grafana"
name: monitoring-grafana
spec:
ports:
- port: 80
targetPort: 8080
selector:
name: influxGrafana
k8s-app: influxGrafana

View File

@ -1,18 +1,22 @@
apiVersion: v1beta3
kind: ReplicationController
metadata:
metadata:
name: monitoring-heapster-v1
namespace: default
labels:
name: heapster
k8s-app: heapster
version: v1
kubernetes.io/cluster-service: "true"
name: monitoring-heapster-controller
spec:
replicas: 1
selector:
name: heapster
k8s-app: heapster
version: v1
template:
metadata:
labels:
name: heapster
k8s-app: heapster
version: v1
kubernetes.io/cluster-service: "true"
spec:
containers:

View File

@ -1,18 +1,22 @@
apiVersion: v1beta3
kind: ReplicationController
metadata:
metadata:
name: monitoring-influx-grafana-v1
namespace: default
labels:
name: influxGrafana
k8s-app: influxGrafana
version: v1
kubernetes.io/cluster-service: "true"
name: monitoring-influx-grafana-controller
spec:
replicas: 1
selector:
name: influxGrafana
k8s-app: influxGrafana
version: v1
template:
metadata:
labels:
name: influxGrafana
k8s-app: influxGrafana
version: v1
kubernetes.io/cluster-service: "true"
spec:
containers:

View File

@ -1,9 +1,10 @@
apiVersion: v1beta3
kind: Service
metadata:
labels:
name: influxGrafana
metadata:
name: monitoring-influxdb
namespace: default
labels:
k8s-app: influxGrafana
spec:
ports:
- name: http
@ -13,5 +14,5 @@ spec:
port: 8086
targetPort: 8086
selector:
name: influxGrafana
k8s-app: influxGrafana

View File

@ -63,10 +63,7 @@ const (
)
var (
expectedRcs = map[string]bool{
"monitoring-heapster-controller": false,
"monitoring-influx-grafana-controller": false,
}
rcLabels = []string{"heapster", "influxGrafana"}
expectedServices = map[string]bool{
influxdbService: false,
"monitoring-grafana": false,
@ -74,17 +71,25 @@ var (
)
func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error) {
rcList, err := c.ReplicationControllers(api.NamespaceDefault).List(labels.Everything())
if err != nil {
return nil, err
}
expectedPods := []string{}
for _, rc := range rcList.Items {
if _, ok := expectedRcs[rc.Name]; ok {
if rc.Status.Replicas != 1 {
return nil, fmt.Errorf("expected to find only one replica for rc %q, found %d", rc.Name, rc.Status.Replicas)
}
expectedRcs[rc.Name] = true
// Iterate over the labels that identify the replication controllers that we
// want to check. rcLabels contains the values for the k8s-app key that
// identify those controllers. Using a label rather than an explicit name is
// preferred because the names will typically have a version suffix, e.g.
// monitoring-heapster-v1, and this will change after a rolling update, e.g.
// to monitoring-heapster-v2. By using a label query we can also check for the
// situation when both a monitoring-heapster-v1 and a monitoring-heapster-v2
// replication controller are running (which would be an error except during a
// rolling update).
for _, rcLabel := range rcLabels {
rcList, err := c.ReplicationControllers(api.NamespaceDefault).List(labels.Set{"k8s-app": rcLabel}.AsSelector())
if err != nil {
return nil, err
}
if len(rcList.Items) != 1 {
return nil, fmt.Errorf("expected to find one replicat for RC with label %s but got %d",
rcLabel, len(rcList.Items))
}
for _, rc := range rcList.Items {
podList, err := c.Pods(api.NamespaceDefault).List(labels.Set(rc.Spec.Selector).AsSelector(), fields.Everything())
if err != nil {
return nil, err
@ -94,11 +99,6 @@ func verifyExpectedRcsExistAndGetExpectedPods(c *client.Client) ([]string, error
}
}
}
for rc, found := range expectedRcs {
if !found {
return nil, fmt.Errorf("Replication Controller %q not found.", rc)
}
}
return expectedPods, nil
}