cluster/gce/coreos: Update addon manifests.

Yifan Gu authored 2016-05-01 23:18:09 -07:00, committed by Yifan Gu
parent f3ab7b182f
commit 6b358d5c4d
18 changed files with 409 additions and 107 deletions

View File

@@ -136,6 +136,10 @@ function configure-master-addons() {
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/dashboard ${addon_dir}/dashboard
fi
if [[ "${ENABLE_CLUSTER_LOGGING}" == "true" ]]; then
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/fluentd-elasticsearch ${addon_dir}/fluentd-elasticsearch
fi
if [[ "${ENABLE_CLUSTER_MONITORING}" == "influxdb" ]]; then
evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-monitoring/influxdb ${addon_dir}/cluster-monitoring/influxdb
elif [[ "${ENABLE_CLUSTER_MONITORING}" == "google" ]]; then

View File

@@ -1,11 +1,11 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: l7-lb-controller
name: l7-lb-controller-v0.6.0
namespace: kube-system
labels:
k8s-app: glbc
version: v0.5.1
version: v0.6.0
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "GLBC"
spec:
@@ -13,12 +13,12 @@ spec:
replicas: 1
selector:
k8s-app: glbc
version: v0.5.1
version: v0.6.0
template:
metadata:
labels:
k8s-app: glbc
version: v0.5.1
version: v0.6.0
name: glbc
kubernetes.io/cluster-service: "true"
spec:
@@ -45,7 +45,7 @@ spec:
requests:
cpu: 10m
memory: 20Mi
- image: gcr.io/google_containers/glbc:0.5.1
- image: gcr.io/google_containers/glbc:0.6.0
livenessProbe:
httpGet:
path: /healthz
@@ -63,8 +63,7 @@ spec:
requests:
cpu: 100m
memory: 50Mi
command:
- /glbc
args:
- --default-backend-service=kube-system/default-http-backend
- --sync-period=300s
- --sync-period=60s
- --cluster-uid=${KUBE_UID}

View File

@@ -1,39 +1,40 @@
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v10
name: heapster-v1.1.0.beta1
namespace: kube-system
labels:
k8s-app: heapster
version: v10
kubernetes.io/cluster-service: "true"
version: v1.1.0.beta1
spec:
replicas: 1
selector:
k8s-app: heapster
version: v10
matchLabels:
k8s-app: heapster
version: v1.1.0.beta1
template:
metadata:
labels:
k8s-app: heapster
version: v10
kubernetes.io/cluster-service: "true"
version: v1.1.0.beta1
spec:
containers:
- image: gcr.io/google_containers/heapster:v0.18.2
- image: gcr.io/google_containers/heapster:v1.1.0-beta1
name: heapster
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
command:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=gcm
- --sink=gcmautoscaling
- --sink=gcl
- --stats_resolution=30s
- --sink_frequency=1m
- --metric_resolution=60s
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
@@ -41,10 +42,87 @@ spec:
- name: usrsharecacerts
mountPath: /usr/share/ca-certificates
readOnly: true
- image: gcr.io/google_containers/heapster:v1.1.0-beta1
name: eventer
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
command:
- /eventer
- --source=kubernetes:''
- --sink=gcl
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
readOnly: true
- name: usrsharecacerts
mountPath: /usr/share/ca-certificates
readOnly: true
- image: gcr.io/google_containers/addon-resizer:1.0
name: heapster-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=300Mi
- --extra-memory=4Mi
- --threshold=5
- --deployment=heapster-v1.1.0.beta1
- --container=heapster
- --poll-period=300000
- image: gcr.io/google_containers/addon-resizer:1.0
name: eventer-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=300Mi
- --extra-memory=307200Ki
- --threshold=5
- --deployment=heapster-v1.1.0.beta1
- --container=eventer
- --poll-period=300000
volumes:
- name: ssl-certs
hostPath:
path: /etc/ssl/certs
path: "/etc/ssl/certs"
- name: usrsharecacerts
hostPath:
path: /usr/share/ca-certificates
path: "/usr/share/ca-certificates"

View File

@@ -1,39 +1,41 @@
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v10
name: heapster-v1.1.0.beta1
namespace: kube-system
labels:
k8s-app: heapster
version: v10
kubernetes.io/cluster-service: "true"
version: v1.1.0.beta1
spec:
replicas: 1
selector:
k8s-app: heapster
version: v10
matchLabels:
k8s-app: heapster
version: v1.1.0.beta1
template:
metadata:
labels:
k8s-app: heapster
version: v10
kubernetes.io/cluster-service: "true"
version: v1.1.0.beta1
spec:
containers:
- image: gcr.io/google_containers/heapster:v0.18.2
- image: gcr.io/google_containers/heapster:v1.1.0-beta1
name: heapster
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
command:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=gcl
- --sink=gcmautoscaling
- --sink=influxdb:http://monitoring-influxdb:8086
- --stats_resolution=30s
- --sink_frequency=1m
- --sink=gcm:?metrics=autoscaling
- --metric_resolution=60s
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
@@ -41,10 +43,87 @@ spec:
- name: usrsharecacerts
mountPath: /usr/share/ca-certificates
readOnly: true
- image: gcr.io/google_containers/heapster:v1.1.0-beta1
name: eventer
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
command:
- /eventer
- --source=kubernetes:''
- --sink=gcl
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
readOnly: true
- name: usrsharecacerts
mountPath: /usr/share/ca-certificates
readOnly: true
- image: gcr.io/google_containers/addon-resizer:1.0
name: heapster-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=300Mi
- --extra-memory=4Mi
- --threshold=5
- --deployment=heapster-v1.1.0.beta1
- --container=heapster
- --poll-period=300000
- image: gcr.io/google_containers/addon-resizer:1.0
name: eventer-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=300Mi
- --extra-memory=307200Ki
- --threshold=5
- --deployment=heapster-v1.1.0.beta1
- --container=eventer
- --poll-period=300000
volumes:
- name: ssl-certs
hostPath:
path: /etc/ssl/certs
path: "/etc/ssl/certs"
- name: usrsharecacerts
hostPath:
path: /usr/share/ca-certificates
path: "/usr/share/ca-certificates"

View File

@@ -1,34 +1,107 @@
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v10
name: heapster-v1.1.0.beta1
namespace: kube-system
labels:
k8s-app: heapster
version: v10
kubernetes.io/cluster-service: "true"
version: v1.1.0.beta1
spec:
replicas: 1
selector:
k8s-app: heapster
version: v10
matchLabels:
k8s-app: heapster
version: v1.1.0.beta1
template:
metadata:
labels:
k8s-app: heapster
version: v10
kubernetes.io/cluster-service: "true"
version: v1.1.0.beta1
spec:
containers:
- image: gcr.io/google_containers/heapster:v0.18.2
- image: gcr.io/google_containers/heapster:v1.1.0-beta1
name: heapster
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
command:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=influxdb:http://monitoring-influxdb:8086
- --stats_resolution=30s
- --sink_frequency=1m
- --metric_resolution=60s
- image: gcr.io/google_containers/heapster:v1.1.0-beta1
name: eventer
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
command:
- /eventer
- --source=kubernetes:''
- --sink=influxdb:http://monitoring-influxdb:8086
- image: gcr.io/google_containers/addon-resizer:1.0
name: heapster-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=300Mi
- --extra-memory=4Mi
- --threshold=5
- --deployment=heapster-v1.1.0.beta1
- --container=heapster
- --poll-period=300000
- image: gcr.io/google_containers/addon-resizer:1.0
name: eventer-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=300Mi
- --extra-memory=307200Ki
- --threshold=5
- --deployment=heapster-v1.1.0.beta1
- --container=eventer
- --poll-period=300000

View File

@@ -1,44 +1,52 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: monitoring-influxdb-grafana-v2
name: monitoring-influxdb-grafana-v3
namespace: kube-system
labels:
k8s-app: influxGrafana
version: v2
version: v3
kubernetes.io/cluster-service: "true"
spec:
replicas: 1
selector:
k8s-app: influxGrafana
version: v2
version: v3
template:
metadata:
labels:
k8s-app: influxGrafana
version: v2
version: v3
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/heapster_influxdb:v0.4
- image: gcr.io/google_containers/heapster_influxdb:v0.5
name: influxdb
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 500Mi
requests:
cpu: 100m
memory: 500Mi
ports:
- containerPort: 8083
- containerPort: 8086
volumeMounts:
- name: influxdb-persistent-storage
mountPath: /data
- image: beta.gcr.io/google_containers/heapster_grafana:v2.1.1
- image: gcr.io/google_containers/heapster_grafana:v2.6.0-2
name: grafana
env:
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 100Mi
requests:
cpu: 100m
memory: 100Mi
env:
# This variable is required to setup templates in Grafana.
- name: INFLUXDB_SERVICE_URL
@@ -58,11 +66,8 @@ spec:
volumeMounts:
- name: grafana-persistent-storage
mountPath: /var
volumes:
- name: influxdb-persistent-storage
emptyDir: {}
- name: grafana-persistent-storage
emptyDir: {}

View File

@@ -1,31 +1,64 @@
apiVersion: v1
kind: ReplicationController
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v10
name: heapster-v1.1.0.beta1
namespace: kube-system
labels:
k8s-app: heapster
version: v10
kubernetes.io/cluster-service: "true"
version: v1.1.0.beta1
spec:
replicas: 1
selector:
k8s-app: heapster
version: v10
matchLabels:
k8s-app: heapster
version: v1.1.0.beta1
template:
metadata:
labels:
k8s-app: heapster
version: v10
kubernetes.io/cluster-service: "true"
version: v1.1.0.beta1
spec:
containers:
- image: gcr.io/google_containers/heapster:v0.18.2
- image: gcr.io/google_containers/heapster:v1.1.0-beta1
name: heapster
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 300Mi
command:
- /heapster
- --source=kubernetes.summary_api:''
- --metric_resolution=60s
- image: gcr.io/google_containers/addon-resizer:1.0
name: heapster-nanny
resources:
limits:
cpu: 50m
memory: 100Mi
requests:
cpu: 50m
memory: 100Mi
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
- --cpu=100m
- --extra-cpu=0m
- --memory=300Mi
- --extra-memory=4Mi
- --threshold=5
- --deployment=heapster-v1.1.0.beta1
- --container=heapster
- --poll-period=300000

View File

@@ -1,7 +1,8 @@
apiVersion: v1
kind: ReplicationController
metadata:
# Keep this file in sync with addons/dashboard/dashboard-controller.yaml
# Keep the name in sync with image version and
# gce/coreos/kube-manifests/addons/dashboard counterparts
name: kubernetes-dashboard-v1.0.1
namespace: kube-system
labels:
@@ -37,4 +38,4 @@ spec:
path: /
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
timeoutSeconds: 30

View File

@@ -1,32 +1,35 @@
apiVersion: v1
kind: ReplicationController
metadata:
name: kube-dns-v10
name: kube-dns-v11
namespace: kube-system
labels:
k8s-app: kube-dns
version: v10
version: v11
kubernetes.io/cluster-service: "true"
spec:
replicas: ${DNS_REPLICAS}
selector:
k8s-app: kube-dns
version: v10
version: v11
template:
metadata:
labels:
k8s-app: kube-dns
version: v10
version: v11
kubernetes.io/cluster-service: "true"
spec:
containers:
- name: etcd
image: gcr.io/google_containers/etcd:2.0.9
image: gcr.io/google_containers/etcd-amd64:2.2.1
resources:
# keep request = limit to keep this container in guaranteed class
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
cpu: 100m
memory: 50Mi
memory: 500Mi
requests:
cpu: 100m
memory: 50Mi
@@ -44,32 +47,59 @@ spec:
- name: etcd-storage
mountPath: /var/etcd/data
- name: kube2sky
image: gcr.io/google_containers/kube2sky:1.12
image: gcr.io/google_containers/kube2sky:1.15
resources:
# keep request = limit to keep this container in guaranteed class
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
cpu: 100m
memory: 50Mi
# Kube2sky watches all pods.
memory: 200Mi
requests:
cpu: 100m
memory: 50Mi
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 30
timeoutSeconds: 5
command:
- /kube2sky
args:
- -domain=${DNS_DOMAIN}
# command = "/kube2sky"
- --domain=${DNS_DOMAIN}
- name: skydns
image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c
resources:
# keep request = limit to keep this container in guaranteed class
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
cpu: 100m
memory: 50Mi
memory: 200Mi
requests:
cpu: 100m
memory: 50Mi
command:
- /skydns
args:
# command = "/skydns"
- -machines=http://127.0.0.1:4001
- -addr=0.0.0.0:53
- -ns-rotate=false
@@ -81,20 +111,6 @@ spec:
- containerPort: 53
name: dns-tcp
protocol: TCP
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 1
timeoutSeconds: 5
- name: healthz
image: gcr.io/google_containers/exechealthz:1.0
resources:

View File

@@ -20,11 +20,14 @@ spec:
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/elasticsearch:1.7
- image: gcr.io/google_containers/elasticsearch:1.8
name: elasticsearch-logging
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
requests:
cpu: 100m
ports:
- containerPort: 9200
name: db
@@ -37,4 +40,4 @@ spec:
mountPath: /data
volumes:
- name: es-persistent-storage
emptyDir: {}
emptyDir: {}

View File

@@ -23,12 +23,15 @@ spec:
- name: kibana-logging
image: gcr.io/google_containers/kibana:1.3
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
requests:
cpu: 100m
env:
- name: "ELASTICSEARCH_URL"
value: "http://elasticsearch-logging:9200"
ports:
- containerPort: 5601
name: ui
protocol: TCP
protocol: TCP

View File

@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
name: etcd-server-events-kubernetes-master
name: etcd-server-events
namespace: kube-system
spec:
containers:
@@ -46,7 +46,7 @@ spec:
name: varlogetcd
dnsPolicy: ClusterFirst
hostNetwork: true
nodeName: kubernetes-master
nodeName: ${INSTANCE_PREFIX}-master
restartPolicy: Always
terminationGracePeriodSeconds: 30
volumes:

View File

@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
name: etcd-server-kubernetes-master
name: etcd-server
namespace: kube-system
spec:
containers:
@@ -14,7 +14,7 @@ spec:
--bind-addr=127.0.0.1:4001
--data-dir=/var/etcd/data
1>>/var/log/etcd.log 2>&1
image: gcr.io/google_containers/etcd:2.0.12
image: gcr.io/google_containers/etcd:2.2.1
imagePullPolicy: IfNotPresent
livenessProbe:
httpGet:

View File

@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-apiserver-kubernetes-master
name: kube-apiserver
namespace: kube-system
spec:
containers:

View File

@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-controller-manager-kubernetes-master
name: kube-controller-manager
namespace: kube-system
spec:
containers:

View File

@@ -1,7 +1,7 @@
apiVersion: v1
kind: Pod
metadata:
name: kube-scheduler-kubernetes-master
name: kube-scheduler
namespace: kube-system
spec:
containers:
@@ -31,7 +31,7 @@ spec:
name: logfile
dnsPolicy: ClusterFirst
hostNetwork: true
nodeName: kubernetes-master
nodeName: ${INSTANCE_PREFIX}-master
restartPolicy: Always
terminationGracePeriodSeconds: 30
volumes:

View File

@@ -1,7 +1,11 @@
#cloud-config
coreos:
update:
reboot-strategy: off
units:
- name: locksmithd.service
mask: true
- name: kube-env.service
command: start
content: |

View File

@@ -1,7 +1,11 @@
#cloud-config
coreos:
update:
reboot-strategy: off
units:
- name: locksmithd.service
mask: true
- name: kube-env.service
command: start
content: |