From d7f4f6b3e0ab9eabbf632edd60b74fa257890c9d Mon Sep 17 00:00:00 2001 From: Yifan Gu Date: Fri, 13 Nov 2015 13:12:28 -0800 Subject: [PATCH 1/5] cluster/gce/coreos: Add manifests for addons and master components. --- .../glbc/default-svc.yaml | 21 ++++ .../glbc/glbc-controller.yaml | 68 +++++++++++ .../google/heapster-controller.yaml | 50 ++++++++ .../google/heapster-service.yaml | 14 +++ .../heapster-controller-combined.yaml | 50 ++++++++ .../influxdb/grafana-service.yaml | 18 +++ .../influxdb/heapster-controller.yaml | 34 ++++++ .../influxdb/heapster-service.yaml | 14 +++ .../influxdb/influxdb-grafana-controller.yaml | 70 +++++++++++ .../influxdb/influxdb-service.yaml | 19 +++ .../standalone/heapster-controller.yaml | 31 +++++ .../standalone/heapster-service.yaml | 14 +++ .../kube-manifests/addons/dns/skydns-rc.yaml | 115 ++++++++++++++++++ .../kube-manifests/addons/dns/skydns-svc.yaml | 20 +++ .../fluentd-elasticsearch/es-controller.yaml | 40 ++++++ .../fluentd-elasticsearch/es-service.yaml | 16 +++ .../kibana-controller.yaml | 34 ++++++ .../fluentd-elasticsearch/kibana-service.yaml | 16 +++ .../addons/kube-ui/kube-ui-rc.yaml | 36 ++++++ .../addons/kube-ui/kube-ui-svc.yaml | 15 +++ .../kube-manifests/addons/namespace.yaml | 4 + .../addons/registry/registry-pv.yaml | 14 +++ .../addons/registry/registry-pvc.yaml | 13 ++ .../addons/registry/registry-rc.yaml | 44 +++++++ .../addons/registry/registry-svc.yaml | 16 +++ .../coreos/kube-manifests/etcd-events.yaml | 58 +++++++++ cluster/gce/coreos/kube-manifests/etcd.yaml | 57 +++++++++ .../coreos/kube-manifests/kube-apiserver.yaml | 85 +++++++++++++ .../kube-controller-manager.yaml | 65 ++++++++++ .../coreos/kube-manifests/kube-scheduler.yaml | 42 +++++++ .../coreos/kube-manifests/kube-system.json | 7 ++ .../coreos/kube-manifests/kubelet-config.yaml | 17 +++ .../kube-manifests/kubeproxy-config.yaml | 16 +++ 33 files changed, 1133 insertions(+) create mode 100644 
cluster/gce/coreos/kube-manifests/addons/cluster-loadbalancing/glbc/default-svc.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/cluster-loadbalancing/glbc/glbc-controller.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/google/heapster-controller.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/google/heapster-service.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/grafana-service.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/heapster-controller.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/heapster-service.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/influxdb-service.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/standalone/heapster-controller.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/standalone/heapster-service.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/dns/skydns-rc.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/dns/skydns-svc.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/es-controller.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/es-service.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/kibana-controller.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/kibana-service.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/kube-ui/kube-ui-rc.yaml create mode 100644 
cluster/gce/coreos/kube-manifests/addons/kube-ui/kube-ui-svc.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/namespace.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/registry/registry-pv.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/registry/registry-pvc.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/registry/registry-rc.yaml create mode 100644 cluster/gce/coreos/kube-manifests/addons/registry/registry-svc.yaml create mode 100644 cluster/gce/coreos/kube-manifests/etcd-events.yaml create mode 100644 cluster/gce/coreos/kube-manifests/etcd.yaml create mode 100644 cluster/gce/coreos/kube-manifests/kube-apiserver.yaml create mode 100644 cluster/gce/coreos/kube-manifests/kube-controller-manager.yaml create mode 100644 cluster/gce/coreos/kube-manifests/kube-scheduler.yaml create mode 100644 cluster/gce/coreos/kube-manifests/kube-system.json create mode 100644 cluster/gce/coreos/kube-manifests/kubelet-config.yaml create mode 100644 cluster/gce/coreos/kube-manifests/kubeproxy-config.yaml diff --git a/cluster/gce/coreos/kube-manifests/addons/cluster-loadbalancing/glbc/default-svc.yaml b/cluster/gce/coreos/kube-manifests/addons/cluster-loadbalancing/glbc/default-svc.yaml new file mode 100644 index 00000000000..cd07ec50eb4 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/cluster-loadbalancing/glbc/default-svc.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + # This must match the --default-backend-service argument of the l7 lb + # controller and is required because GCE mandates a default backend. + name: default-http-backend + namespace: kube-system + labels: + k8s-app: glbc + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "GLBCDefaultBackend" +spec: + # The default backend must be of type NodePort. 
+ type: NodePort + ports: + - port: 80 + targetPort: 8080 + protocol: TCP + name: http + selector: + k8s-app: glbc \ No newline at end of file diff --git a/cluster/gce/coreos/kube-manifests/addons/cluster-loadbalancing/glbc/glbc-controller.yaml b/cluster/gce/coreos/kube-manifests/addons/cluster-loadbalancing/glbc/glbc-controller.yaml new file mode 100644 index 00000000000..4b9b7bbf4b9 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/cluster-loadbalancing/glbc/glbc-controller.yaml @@ -0,0 +1,68 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: l7-lb-controller + namespace: kube-system + labels: + k8s-app: glbc + version: v0.5.1 + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "GLBC" +spec: + # There should never be more than 1 controller alive simultaneously. + replicas: 1 + selector: + k8s-app: glbc + version: v0.5.1 + template: + metadata: + labels: + k8s-app: glbc + version: v0.5.1 + name: glbc + kubernetes.io/cluster-service: "true" + spec: + terminationGracePeriodSeconds: 600 + containers: + - name: default-http-backend + # Any image is permissable as long as: + # 1. It serves a 404 page at / + # 2. 
It serves 200 on a /healthz endpoint + image: gcr.io/google_containers/defaultbackend:1.0 + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + ports: + - containerPort: 8080 + resources: + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + - image: gcr.io/google_containers/glbc:0.5.1 + livenessProbe: + httpGet: + path: /healthz + port: 8081 + scheme: HTTP + initialDelaySeconds: 30 + # healthz reaches out to GCE + periodSeconds: 30 + timeoutSeconds: 5 + name: l7-lb-controller + resources: + limits: + cpu: 100m + memory: 100Mi + requests: + cpu: 100m + memory: 50Mi + args: + - --default-backend-service=kube-system/default-http-backend + - --sync-period=300s \ No newline at end of file diff --git a/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/google/heapster-controller.yaml b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/google/heapster-controller.yaml new file mode 100644 index 00000000000..c8f903b0aae --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/google/heapster-controller.yaml @@ -0,0 +1,50 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: heapster-v10 + namespace: kube-system + labels: + k8s-app: heapster + version: v10 + kubernetes.io/cluster-service: "true" +spec: + replicas: 1 + selector: + k8s-app: heapster + version: v10 + template: + metadata: + labels: + k8s-app: heapster + version: v10 + kubernetes.io/cluster-service: "true" + spec: + containers: + - image: gcr.io/google_containers/heapster:v0.18.2 + name: heapster + resources: + limits: + cpu: 100m + memory: 300Mi + command: + - /heapster + - --source=kubernetes:'' + - --sink=gcm + - --sink=gcmautoscaling + - --sink=gcl + - --stats_resolution=30s + - --sink_frequency=1m + volumeMounts: + - name: ssl-certs + mountPath: /etc/ssl/certs + readOnly: true + - name: usrsharecacerts + mountPath: /usr/share/ca-certificates + readOnly: true + 
volumes: + - name: ssl-certs + hostPath: + path: /etc/ssl/certs + - name: usrsharecacerts + hostPath: + path: /usr/share/ca-certificates diff --git a/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/google/heapster-service.yaml b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/google/heapster-service.yaml new file mode 100644 index 00000000000..31e8b96006d --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/google/heapster-service.yaml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 +metadata: + name: heapster + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "Heapster" +spec: + ports: + - port: 80 + targetPort: 8082 + selector: + k8s-app: heapster diff --git a/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml new file mode 100644 index 00000000000..c036fc92e61 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml @@ -0,0 +1,50 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: heapster-v10 + namespace: kube-system + labels: + k8s-app: heapster + version: v10 + kubernetes.io/cluster-service: "true" +spec: + replicas: 1 + selector: + k8s-app: heapster + version: v10 + template: + metadata: + labels: + k8s-app: heapster + version: v10 + kubernetes.io/cluster-service: "true" + spec: + containers: + - image: gcr.io/google_containers/heapster:v0.18.2 + name: heapster + resources: + limits: + cpu: 100m + memory: 300Mi + command: + - /heapster + - --source=kubernetes:'' + - --sink=gcl + - --sink=gcmautoscaling + - --sink=influxdb:http://monitoring-influxdb:8086 + - --stats_resolution=30s + - --sink_frequency=1m + volumeMounts: + - name: ssl-certs + mountPath: /etc/ssl/certs + readOnly: true + - name: usrsharecacerts + 
mountPath: /usr/share/ca-certificates + readOnly: true + volumes: + - name: ssl-certs + hostPath: + path: /etc/ssl/certs + - name: usrsharecacerts + hostPath: + path: /usr/share/ca-certificates diff --git a/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/grafana-service.yaml b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/grafana-service.yaml new file mode 100644 index 00000000000..9140e8b0c2c --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/grafana-service.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: monitoring-grafana + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "Grafana" +spec: + # On production clusters, consider setting up auth for grafana, and + # exposing Grafana either using a LoadBalancer or a public IP. + # type: LoadBalancer + ports: + - port: 80 + targetPort: 3000 + selector: + k8s-app: influxGrafana + diff --git a/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/heapster-controller.yaml b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/heapster-controller.yaml new file mode 100644 index 00000000000..e6b71a10177 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/heapster-controller.yaml @@ -0,0 +1,34 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: heapster-v10 + namespace: kube-system + labels: + k8s-app: heapster + version: v10 + kubernetes.io/cluster-service: "true" +spec: + replicas: 1 + selector: + k8s-app: heapster + version: v10 + template: + metadata: + labels: + k8s-app: heapster + version: v10 + kubernetes.io/cluster-service: "true" + spec: + containers: + - image: gcr.io/google_containers/heapster:v0.18.2 + name: heapster + resources: + limits: + cpu: 100m + memory: 300Mi + command: + - /heapster + - --source=kubernetes:'' + - --sink=influxdb:http://monitoring-influxdb:8086 + - 
--stats_resolution=30s + - --sink_frequency=1m diff --git a/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/heapster-service.yaml b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/heapster-service.yaml new file mode 100644 index 00000000000..e406d69c44c --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/heapster-service.yaml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 +metadata: + name: heapster + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "Heapster" +spec: + ports: + - port: 80 + targetPort: 8082 + selector: + k8s-app: heapster diff --git a/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml new file mode 100644 index 00000000000..bd28795ad08 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml @@ -0,0 +1,70 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: monitoring-influxdb-grafana-v2 + namespace: kube-system + labels: + k8s-app: influxGrafana + version: v2 + kubernetes.io/cluster-service: "true" +spec: + replicas: 1 + selector: + k8s-app: influxGrafana + version: v2 + template: + metadata: + labels: + k8s-app: influxGrafana + version: v2 + kubernetes.io/cluster-service: "true" + spec: + containers: + - image: gcr.io/google_containers/heapster_influxdb:v0.4 + name: influxdb + resources: + limits: + cpu: 100m + memory: 200Mi + ports: + - containerPort: 8083 + hostPort: 8083 + - containerPort: 8086 + hostPort: 8086 + volumeMounts: + - name: influxdb-persistent-storage + mountPath: /data + - image: beta.gcr.io/google_containers/heapster_grafana:v2.1.1 + name: grafana + env: + resources: + limits: + cpu: 100m + memory: 100Mi + env: + # This variable is required to setup templates in Grafana. 
+ - name: INFLUXDB_SERVICE_URL + value: http://monitoring-influxdb:8086 + # The following env variables are required to make Grafana accessible via + # the kubernetes api-server proxy. On production clusters, we recommend + # removing these env variables, setup auth for grafana, and expose the grafana + # service using a LoadBalancer or a public IP. + - name: GF_AUTH_BASIC_ENABLED + value: "false" + - name: GF_AUTH_ANONYMOUS_ENABLED + value: "true" + - name: GF_AUTH_ANONYMOUS_ORG_ROLE + value: Admin + - name: GF_SERVER_ROOT_URL + value: /api/v1/proxy/namespaces/kube-system/services/monitoring-grafana/ + volumeMounts: + - name: grafana-persistent-storage + mountPath: /var + + volumes: + - name: influxdb-persistent-storage + emptyDir: {} + - name: grafana-persistent-storage + emptyDir: {} + + diff --git a/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/influxdb-service.yaml b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/influxdb-service.yaml new file mode 100644 index 00000000000..066e052476e --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/influxdb/influxdb-service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: monitoring-influxdb + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "InfluxDB" +spec: + ports: + - name: http + port: 8083 + targetPort: 8083 + - name: api + port: 8086 + targetPort: 8086 + selector: + k8s-app: influxGrafana + diff --git a/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/standalone/heapster-controller.yaml b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/standalone/heapster-controller.yaml new file mode 100644 index 00000000000..ed53a9d9df5 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/standalone/heapster-controller.yaml @@ -0,0 +1,31 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: heapster-v10 + namespace: kube-system 
+ labels: + k8s-app: heapster + version: v10 + kubernetes.io/cluster-service: "true" +spec: + replicas: 1 + selector: + k8s-app: heapster + version: v10 + template: + metadata: + labels: + k8s-app: heapster + version: v10 + kubernetes.io/cluster-service: "true" + spec: + containers: + - image: gcr.io/google_containers/heapster:v0.18.2 + name: heapster + resources: + limits: + cpu: 100m + memory: 300Mi + command: + - /heapster + - --source=kubernetes:'' diff --git a/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/standalone/heapster-service.yaml b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/standalone/heapster-service.yaml new file mode 100644 index 00000000000..31e8b96006d --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/cluster-monitoring/standalone/heapster-service.yaml @@ -0,0 +1,14 @@ +kind: Service +apiVersion: v1 +metadata: + name: heapster + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "Heapster" +spec: + ports: + - port: 80 + targetPort: 8082 + selector: + k8s-app: heapster diff --git a/cluster/gce/coreos/kube-manifests/addons/dns/skydns-rc.yaml b/cluster/gce/coreos/kube-manifests/addons/dns/skydns-rc.yaml new file mode 100644 index 00000000000..5cf57c1162f --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/dns/skydns-rc.yaml @@ -0,0 +1,115 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: kube-dns-v10 + namespace: kube-system + labels: + k8s-app: kube-dns + version: v10 + kubernetes.io/cluster-service: "true" +spec: + replicas: ${DNS_REPLICAS} + selector: + k8s-app: kube-dns + version: v10 + template: + metadata: + labels: + k8s-app: kube-dns + version: v10 + kubernetes.io/cluster-service: "true" + spec: + containers: + - name: etcd + image: gcr.io/google_containers/etcd:2.0.9 + resources: + # keep request = limit to keep this container in guaranteed class + limits: + cpu: 100m + memory: 50Mi + requests: + cpu: 100m + memory: 50Mi + command: 
+ - /usr/local/bin/etcd + - -data-dir + - /var/etcd/data + - -listen-client-urls + - http://127.0.0.1:2379,http://127.0.0.1:4001 + - -advertise-client-urls + - http://127.0.0.1:2379,http://127.0.0.1:4001 + - -initial-cluster-token + - skydns-etcd + volumeMounts: + - name: etcd-storage + mountPath: /var/etcd/data + - name: kube2sky + image: gcr.io/google_containers/kube2sky:1.12 + resources: + # keep request = limit to keep this container in guaranteed class + limits: + cpu: 100m + memory: 50Mi + requests: + cpu: 100m + memory: 50Mi + args: + # command = "/kube2sky" + - -domain=${DNS_DOMAIN} + - name: skydns + image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c + resources: + # keep request = limit to keep this container in guaranteed class + limits: + cpu: 100m + memory: 50Mi + requests: + cpu: 100m + memory: 50Mi + args: + # command = "/skydns" + - -machines=http://127.0.0.1:4001 + - -addr=0.0.0.0:53 + - -ns-rotate=false + - -domain=${DNS_DOMAIN}. + ports: + - containerPort: 53 + name: dns + protocol: UDP + - containerPort: 53 + name: dns-tcp + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 30 + timeoutSeconds: 5 + readinessProbe: + httpGet: + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 1 + timeoutSeconds: 5 + - name: healthz + image: gcr.io/google_containers/exechealthz:1.0 + resources: + # keep request = limit to keep this container in guaranteed class + limits: + cpu: 10m + memory: 20Mi + requests: + cpu: 10m + memory: 20Mi + args: + - -cmd=nslookup kubernetes.default.svc.${DNS_DOMAIN} 127.0.0.1 >/dev/null + - -port=8080 + ports: + - containerPort: 8080 + protocol: TCP + volumes: + - name: etcd-storage + emptyDir: {} + dnsPolicy: Default # Don't use cluster DNS. 
\ No newline at end of file diff --git a/cluster/gce/coreos/kube-manifests/addons/dns/skydns-svc.yaml b/cluster/gce/coreos/kube-manifests/addons/dns/skydns-svc.yaml new file mode 100644 index 00000000000..deeb0d9ba3d --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/dns/skydns-svc.yaml @@ -0,0 +1,20 @@ +apiVersion: v1 +kind: Service +metadata: + name: kube-dns + namespace: kube-system + labels: + k8s-app: kube-dns + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "KubeDNS" +spec: + selector: + k8s-app: kube-dns + clusterIP: ${DNS_SERVER_IP} + ports: + - name: dns + port: 53 + protocol: UDP + - name: dns-tcp + port: 53 + protocol: TCP diff --git a/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/es-controller.yaml b/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/es-controller.yaml new file mode 100644 index 00000000000..6631153a56e --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/es-controller.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: elasticsearch-logging-v1 + namespace: kube-system + labels: + k8s-app: elasticsearch-logging + version: v1 + kubernetes.io/cluster-service: "true" +spec: + replicas: 2 + selector: + k8s-app: elasticsearch-logging + version: v1 + template: + metadata: + labels: + k8s-app: elasticsearch-logging + version: v1 + kubernetes.io/cluster-service: "true" + spec: + containers: + - image: gcr.io/google_containers/elasticsearch:1.7 + name: elasticsearch-logging + resources: + limits: + cpu: 100m + ports: + - containerPort: 9200 + name: db + protocol: TCP + - containerPort: 9300 + name: transport + protocol: TCP + volumeMounts: + - name: es-persistent-storage + mountPath: /data + volumes: + - name: es-persistent-storage + emptyDir: {} diff --git a/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/es-service.yaml b/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/es-service.yaml new file mode 
100644 index 00000000000..abf1fd3f684 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/es-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: elasticsearch-logging + namespace: kube-system + labels: + k8s-app: elasticsearch-logging + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "Elasticsearch" +spec: + ports: + - port: 9200 + protocol: TCP + targetPort: db + selector: + k8s-app: elasticsearch-logging diff --git a/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/kibana-controller.yaml b/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/kibana-controller.yaml new file mode 100644 index 00000000000..893608aef6b --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/kibana-controller.yaml @@ -0,0 +1,34 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: kibana-logging-v1 + namespace: kube-system + labels: + k8s-app: kibana-logging + version: v1 + kubernetes.io/cluster-service: "true" +spec: + replicas: 1 + selector: + k8s-app: kibana-logging + version: v1 + template: + metadata: + labels: + k8s-app: kibana-logging + version: v1 + kubernetes.io/cluster-service: "true" + spec: + containers: + - name: kibana-logging + image: gcr.io/google_containers/kibana:1.3 + resources: + limits: + cpu: 100m + env: + - name: "ELASTICSEARCH_URL" + value: "http://elasticsearch-logging:9200" + ports: + - containerPort: 5601 + name: ui + protocol: TCP diff --git a/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/kibana-service.yaml b/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/kibana-service.yaml new file mode 100644 index 00000000000..43efada2c50 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/fluentd-elasticsearch/kibana-service.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: kibana-logging + namespace: kube-system + labels: + k8s-app: kibana-logging + 
kubernetes.io/cluster-service: "true" + kubernetes.io/name: "Kibana" +spec: + ports: + - port: 5601 + protocol: TCP + targetPort: ui + selector: + k8s-app: kibana-logging diff --git a/cluster/gce/coreos/kube-manifests/addons/kube-ui/kube-ui-rc.yaml b/cluster/gce/coreos/kube-manifests/addons/kube-ui/kube-ui-rc.yaml new file mode 100644 index 00000000000..4bfdf381e87 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/kube-ui/kube-ui-rc.yaml @@ -0,0 +1,36 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: kube-ui-v3 + namespace: kube-system + labels: + k8s-app: kube-ui + version: v3 + kubernetes.io/cluster-service: "true" +spec: + replicas: 1 + selector: + k8s-app: kube-ui + version: v3 + template: + metadata: + labels: + k8s-app: kube-ui + version: v3 + kubernetes.io/cluster-service: "true" + spec: + containers: + - name: kube-ui + image: gcr.io/google_containers/kube-ui:v3 + resources: + limits: + cpu: 100m + memory: 50Mi + ports: + - containerPort: 8080 + livenessProbe: + httpGet: + path: / + port: 8080 + initialDelaySeconds: 30 + timeoutSeconds: 5 diff --git a/cluster/gce/coreos/kube-manifests/addons/kube-ui/kube-ui-svc.yaml b/cluster/gce/coreos/kube-manifests/addons/kube-ui/kube-ui-svc.yaml new file mode 100644 index 00000000000..cf960c8bda3 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/kube-ui/kube-ui-svc.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: kube-ui + namespace: kube-system + labels: + k8s-app: kube-ui + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "KubeUI" +spec: + selector: + k8s-app: kube-ui + ports: + - port: 80 + targetPort: 8080 diff --git a/cluster/gce/coreos/kube-manifests/addons/namespace.yaml b/cluster/gce/coreos/kube-manifests/addons/namespace.yaml new file mode 100644 index 00000000000..986f4b48221 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/namespace.yaml @@ -0,0 +1,4 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: kube-system diff 
--git a/cluster/gce/coreos/kube-manifests/addons/registry/registry-pv.yaml b/cluster/gce/coreos/kube-manifests/addons/registry/registry-pv.yaml new file mode 100644 index 00000000000..bb4ceb532bb --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/registry/registry-pv.yaml @@ -0,0 +1,14 @@ +kind: PersistentVolume +apiVersion: v1 +metadata: + name: kube-system-kube-registry-pv + labels: + kubernetes.io/cluster-service: "true" +spec: + capacity: + storage: ${CLUSTER_REGISTRY_DISK_SIZE} + accessModes: + - ReadWriteOnce + gcePersistentDisk: + pdName: ${CLUSTER_REGISTRY_DISK} + fsType: "ext4" diff --git a/cluster/gce/coreos/kube-manifests/addons/registry/registry-pvc.yaml b/cluster/gce/coreos/kube-manifests/addons/registry/registry-pvc.yaml new file mode 100644 index 00000000000..4f6c8da6d7b --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/registry/registry-pvc.yaml @@ -0,0 +1,13 @@ +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: kube-registry-pvc + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: ${CLUSTER_REGISTRY_DISK_SIZE} diff --git a/cluster/gce/coreos/kube-manifests/addons/registry/registry-rc.yaml b/cluster/gce/coreos/kube-manifests/addons/registry/registry-rc.yaml new file mode 100644 index 00000000000..2a8bd477a68 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/registry/registry-rc.yaml @@ -0,0 +1,44 @@ +apiVersion: v1 +kind: ReplicationController +metadata: + name: kube-registry-v0 + namespace: kube-system + labels: + k8s-app: kube-registry + version: v0 + kubernetes.io/cluster-service: "true" +spec: + replicas: 1 + selector: + k8s-app: kube-registry + version: v0 + template: + metadata: + labels: + k8s-app: kube-registry + version: v0 + kubernetes.io/cluster-service: "true" + spec: + containers: + - name: registry + image: registry:2 + resources: + limits: + cpu: 100m + memory: 100Mi + env: + - name: 
REGISTRY_HTTP_ADDR + value: :5000 + - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY + value: /var/lib/registry + volumeMounts: + - name: image-store + mountPath: /var/lib/registry + ports: + - containerPort: 5000 + name: registry + protocol: TCP + volumes: + - name: image-store + persistentVolumeClaim: + claimName: kube-registry-pvc diff --git a/cluster/gce/coreos/kube-manifests/addons/registry/registry-svc.yaml b/cluster/gce/coreos/kube-manifests/addons/registry/registry-svc.yaml new file mode 100644 index 00000000000..b9f1cc40b99 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/addons/registry/registry-svc.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Service +metadata: + name: kube-registry + namespace: kube-system + labels: + k8s-app: kube-registry + kubernetes.io/cluster-service: "true" + kubernetes.io/name: "KubeRegistry" +spec: + selector: + k8s-app: kube-registry + ports: + - name: registry + port: 5000 + protocol: TCP diff --git a/cluster/gce/coreos/kube-manifests/etcd-events.yaml b/cluster/gce/coreos/kube-manifests/etcd-events.yaml new file mode 100644 index 00000000000..3915cba2e7a --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/etcd-events.yaml @@ -0,0 +1,58 @@ +apiVersion: v1 +kind: Pod +metadata: + name: etcd-server-events-kubernetes-master + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - /usr/local/bin/etcd + --listen-peer-urls=http://127.0.0.1:2381 + --addr=127.0.0.1:4002 + --bind-addr=127.0.0.1:4002 + --data-dir=/var/etcd/data-events + 1>>/var/log/etcd-events.log 2>&1 + image: gcr.io/google_containers/etcd:2.0.12 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /health + port: 4002 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + name: etcd-container + ports: + - containerPort: 2381 + hostPort: 2381 + name: serverport + protocol: TCP + - containerPort: 4002 + hostPort: 4002 + name: clientport + protocol: TCP + resources: + limits: + cpu: 100m + 
requests: + cpu: 100m + volumeMounts: + - mountPath: /var/etcd + name: varetcd + - mountPath: /var/log/etcd-events.log + name: varlogetcd + dnsPolicy: ClusterFirst + hostNetwork: true + nodeName: kubernetes-master + restartPolicy: Always + terminationGracePeriodSeconds: 30 + volumes: + - hostPath: + path: /mnt/master-pd/var/etcd + name: varetcd + - hostPath: + path: /var/log/etcd-events.log + name: varlogetcd diff --git a/cluster/gce/coreos/kube-manifests/etcd.yaml b/cluster/gce/coreos/kube-manifests/etcd.yaml new file mode 100644 index 00000000000..571c7db24cb --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/etcd.yaml @@ -0,0 +1,57 @@ +apiVersion: v1 +kind: Pod +metadata: + name: etcd-server-kubernetes-master + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - /usr/local/bin/etcd + --listen-peer-urls=http://127.0.0.1:2380 + --addr=127.0.0.1:4001 + --bind-addr=127.0.0.1:4001 + --data-dir=/var/etcd/data + 1>>/var/log/etcd.log 2>&1 + image: gcr.io/google_containers/etcd:2.0.12 + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /health + port: 4001 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + name: etcd-container + ports: + - containerPort: 2380 + hostPort: 2380 + name: serverport + protocol: TCP + - containerPort: 4001 + hostPort: 4001 + name: clientport + protocol: TCP + resources: + limits: + cpu: 200m + requests: + cpu: 200m + volumeMounts: + - mountPath: /var/etcd + name: varetcd + - mountPath: /var/log/etcd.log + name: varlogetcd + dnsPolicy: ClusterFirst + hostNetwork: true + restartPolicy: Always + terminationGracePeriodSeconds: 30 + volumes: + - hostPath: + path: /mnt/master-pd/var/etcd + name: varetcd + - hostPath: + path: /var/log/etcd.log + name: varlogetcd diff --git a/cluster/gce/coreos/kube-manifests/kube-apiserver.yaml b/cluster/gce/coreos/kube-manifests/kube-apiserver.yaml new file mode 100644 index 00000000000..490e391844a --- /dev/null +++ 
b/cluster/gce/coreos/kube-manifests/kube-apiserver.yaml @@ -0,0 +1,85 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-apiserver-kubernetes-master + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - /usr/local/bin/kube-apiserver + --address=127.0.0.1 + --etcd-servers=http://127.0.0.1:4001 + --etcd-servers-overrides=/events#http://127.0.0.1:4002 + --cloud-provider=gce + --admission-control=${ADMISSION_CONTROL} + --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE} + --client-ca-file=/srv/kubernetes/ca.crt + --basic-auth-file=/srv/kubernetes/basic_auth.csv + --tls-cert-file=/srv/kubernetes/server.cert + --tls-private-key-file=/srv/kubernetes/server.key + --secure-port=443 + --token-auth-file=/srv/kubernetes/known_tokens.csv + --v=2 + --allow-privileged=True + 1>>/var/log/kube-apiserver.log 2>&1 + image: gcr.io/google_containers/kube-apiserver:${KUBE_APISERVER_DOCKER_TAG} + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 8080 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + name: kube-apiserver + ports: + - containerPort: 443 + hostPort: 443 + name: https + protocol: TCP + - containerPort: 8080 + hostPort: 8080 + name: local + protocol: TCP + resources: + limits: + cpu: 250m + requests: + cpu: 250m + volumeMounts: + - mountPath: /srv/kubernetes + name: srvkube + readOnly: true + - mountPath: /var/log/kube-apiserver.log + name: logfile + - mountPath: /etc/ssl + name: etcssl + readOnly: true + - mountPath: /usr/share/ca-certificates + name: usrsharecacerts + readOnly: true + - mountPath: /srv/sshproxy + name: srvsshproxy + dnsPolicy: ClusterFirst + hostNetwork: true + restartPolicy: Always + terminationGracePeriodSeconds: 30 + volumes: + - hostPath: + path: /srv/kubernetes + name: srvkube + - hostPath: + path: /var/log/kube-apiserver.log + name: logfile + - hostPath: + path: /etc/ssl + name: etcssl + - hostPath: + path: /usr/share/ca-certificates + name: 
usrsharecacerts + - hostPath: + path: /srv/sshproxy + name: srvsshproxy diff --git a/cluster/gce/coreos/kube-manifests/kube-controller-manager.yaml b/cluster/gce/coreos/kube-manifests/kube-controller-manager.yaml new file mode 100644 index 00000000000..35ac18d6328 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/kube-controller-manager.yaml @@ -0,0 +1,65 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-controller-manager-kubernetes-master + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - /usr/local/bin/kube-controller-manager + --master=127.0.0.1:8080 + --cluster-name=${INSTANCE_PREFIX} + --cluster-cidr=${CLUSTER_IP_RANGE} + --allocate-node-cidrs=true + --cloud-provider=gce + --service-account-private-key-file=/srv/kubernetes/server.key + --v=2 + --root-ca-file=/srv/kubernetes/ca.crt + 1>>/var/log/kube-controller-manager.log 2>&1 + image: gcr.io/google_containers/kube-controller-manager:${KUBE_CONTROLLER_MANAGER_DOCKER_TAG} + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10252 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + name: kube-controller-manager + resources: + limits: + cpu: 200m + requests: + cpu: 200m + volumeMounts: + - mountPath: /srv/kubernetes + name: srvkube + readOnly: true + - mountPath: /var/log/kube-controller-manager.log + name: logfile + - mountPath: /etc/ssl + name: etcssl + readOnly: true + - mountPath: /usr/share/ca-certificates + name: usrsharecacerts + readOnly: true + dnsPolicy: ClusterFirst + hostNetwork: true + restartPolicy: Always + terminationGracePeriodSeconds: 30 + volumes: + - hostPath: + path: /srv/kubernetes + name: srvkube + - hostPath: + path: /var/log/kube-controller-manager.log + name: logfile + - hostPath: + path: /etc/ssl + name: etcssl + - hostPath: + path: /usr/share/ca-certificates + name: usrsharecacerts diff --git a/cluster/gce/coreos/kube-manifests/kube-scheduler.yaml 
b/cluster/gce/coreos/kube-manifests/kube-scheduler.yaml new file mode 100644 index 00000000000..8ef9f5a73d0 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/kube-scheduler.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Pod +metadata: + name: kube-scheduler-kubernetes-master + namespace: kube-system +spec: + containers: + - command: + - /bin/sh + - -c + - /usr/local/bin/kube-scheduler + --master=127.0.0.1:8080 + --v=2 + 1>>/var/log/kube-scheduler.log 2>&1 + image: gcr.io/google_containers/kube-scheduler:${KUBE_SCHEDULER_DOCKER_TAG} + imagePullPolicy: IfNotPresent + livenessProbe: + httpGet: + host: 127.0.0.1 + path: /healthz + port: 10251 + scheme: HTTP + initialDelaySeconds: 15 + timeoutSeconds: 15 + name: kube-scheduler + resources: + limits: + cpu: 100m + requests: + cpu: 100m + volumeMounts: + - mountPath: /var/log/kube-scheduler.log + name: logfile + dnsPolicy: ClusterFirst + hostNetwork: true + nodeName: kubernetes-master + restartPolicy: Always + terminationGracePeriodSeconds: 30 + volumes: + - hostPath: + path: /var/log/kube-scheduler.log + name: logfile diff --git a/cluster/gce/coreos/kube-manifests/kube-system.json b/cluster/gce/coreos/kube-manifests/kube-system.json new file mode 100644 index 00000000000..395b9722bf6 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/kube-system.json @@ -0,0 +1,7 @@ +{ + "apiVersion": "v1", + "kind": "Namespace", + "metadata": { + "name": "kube-system" + } +} diff --git a/cluster/gce/coreos/kube-manifests/kubelet-config.yaml b/cluster/gce/coreos/kube-manifests/kubelet-config.yaml new file mode 100644 index 00000000000..8524abe8aa9 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/kubelet-config.yaml @@ -0,0 +1,17 @@ +apiVersion: v1 +kind: Config +users: +- name: kubelet + user: + client-certificate-data: ${KUBELET_CERT} + client-key-data: ${KUBELET_KEY} +clusters: +- name: local + cluster: + certificate-authority-data: ${CA_CERT} +contexts: +- context: + cluster: local + user: kubelet + name: 
service-account-context +current-context: service-account-context diff --git a/cluster/gce/coreos/kube-manifests/kubeproxy-config.yaml b/cluster/gce/coreos/kube-manifests/kubeproxy-config.yaml new file mode 100644 index 00000000000..c111eb0fa16 --- /dev/null +++ b/cluster/gce/coreos/kube-manifests/kubeproxy-config.yaml @@ -0,0 +1,16 @@ +apiVersion: v1 +kind: Config +users: +- name: kube-proxy + user: + token: ${KUBE_PROXY_TOKEN} +clusters: +- name: local + cluster: + certificate-authority-data: ${CA_CERT} +contexts: +- context: + cluster: local + user: kube-proxy + name: service-account-context +current-context: service-account-context From fe70bf8485f23b02b14e58ad007f92925f94fe88 Mon Sep 17 00:00:00 2001 From: Yifan Gu Date: Fri, 13 Nov 2015 14:21:48 -0800 Subject: [PATCH 2/5] cluster/gce/coreos: Upload templates during kube-up. Also added create-master-instance() and create-node-instance-template() --- cluster/common.sh | 2 +- cluster/gce/coreos/helper.sh | 46 ++++++++++++++++++++++++++++++------ cluster/gce/util.sh | 4 +++- 3 files changed, 43 insertions(+), 9 deletions(-) diff --git a/cluster/common.sh b/cluster/common.sh index f3553fafa29..9c82259039e 100755 --- a/cluster/common.sh +++ b/cluster/common.sh @@ -337,7 +337,7 @@ function find-release-tars() { # This tarball is only used by Ubuntu Trusty. KUBE_MANIFESTS_TAR= - if [[ "${KUBE_OS_DISTRIBUTION:-}" == "trusty" ]]; then + if [[ "${KUBE_OS_DISTRIBUTION:-}" == "trusty" || "${KUBE_OS_DISTRIBUTION:-}" == "coreos" ]]; then KUBE_MANIFESTS_TAR="${KUBE_ROOT}/server/kubernetes-manifests.tar.gz" if [[ ! 
-f "${KUBE_MANIFESTS_TAR}" ]]; then KUBE_MANIFESTS_TAR="${KUBE_ROOT}/_output/release-tars/kubernetes-manifests.tar.gz" diff --git a/cluster/gce/coreos/helper.sh b/cluster/gce/coreos/helper.sh index 4e86f16e399..15bd768194c 100755 --- a/cluster/gce/coreos/helper.sh +++ b/cluster/gce/coreos/helper.sh @@ -16,17 +16,49 @@ # A library of helper functions and constant for coreos os distro -# By sourcing debian's helper.sh, we use the same create-master-instance -# functions as debian. But we overwrite the create-node-instance-template -# function to use coreos. -source "${KUBE_ROOT}/cluster/gce/debian/helper.sh" - # TODO(dawnchen): Check $CONTAINER_RUNTIME to decide which # cloud_config yaml file should be passed # $1: template name (required) -function create-node-instance-template { +function create-node-instance-template() { local template_name="$1" create-node-template "$template_name" "${scope_flags}" \ "kube-env=${KUBE_TEMP}/node-kube-env.yaml" \ - "user-data=${KUBE_ROOT}/cluster/gce/coreos/node.yaml" + "user-data=${KUBE_ROOT}/cluster/gce/coreos/node.yaml" \ + "configure-node=${KUBE_ROOT}/cluster/gce/coreos/configure-node.sh" \ + "configure-kubelet=${KUBE_ROOT}/cluster/gce/coreos/configure-kubelet.sh" +} + + +# create-master-instance creates the master instance. If called with +# an argument, the argument is used as the name to a reserved IP +# address for the master. (In the case of upgrade/repair, we re-use +# the same IP.) +# +# It requires a whole slew of assumed variables, partially due to to +# the call to write-master-env. Listing them would be rather +# futile. 
Instead, we list the required calls to ensure any additional +# variables are set: +# ensure-temp-dir +# detect-project +# get-bearer-token +# +function create-master-instance() { + local address_opt="" + [[ -n ${1:-} ]] && address_opt="--address ${1}" + + write-master-env + gcloud compute instances create "${MASTER_NAME}" \ + ${address_opt} \ + --project "${PROJECT}" \ + --zone "${ZONE}" \ + --machine-type "${MASTER_SIZE}" \ + --image-project="${MASTER_IMAGE_PROJECT}" \ + --image "${MASTER_IMAGE}" \ + --tags "${MASTER_TAG}" \ + --network "${NETWORK}" \ + --scopes "storage-ro,compute-rw,monitoring,logging-write" \ + --can-ip-forward \ + --metadata-from-file \ + "kube-env=${KUBE_TEMP}/master-kube-env.yaml,user-data=${KUBE_ROOT}/cluster/gce/coreos/master.yaml,configure-node=${KUBE_ROOT}/cluster/gce/coreos/configure-node.sh,configure-kubelet=${KUBE_ROOT}/cluster/gce/coreos/configure-kubelet.sh" \ + --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no" } diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh index f57f953e598..9f2966d37cb 100755 --- a/cluster/gce/util.sh +++ b/cluster/gce/util.sh @@ -216,7 +216,7 @@ function upload-server-tars() { SERVER_BINARY_TAR_URL="${server_binary_gs_url/gs:\/\//https://storage.googleapis.com/}" SALT_TAR_URL="${salt_gs_url/gs:\/\//https://storage.googleapis.com/}" - if [[ "${OS_DISTRIBUTION}" == "trusty" ]]; then + if [[ "${OS_DISTRIBUTION}" == "trusty" || "${OS_DISTRIBUTION}" == "coreos" ]]; then local kube_manifests_gs_url="${staging_path}/${KUBE_MANIFESTS_TAR##*/}" KUBE_MANIFESTS_TAR_HASH=$(sha1sum-file "${KUBE_MANIFESTS_TAR}") copy-if-not-staged "${staging_path}" "${kube_manifests_gs_url}" "${KUBE_MANIFESTS_TAR}" "${KUBE_MANIFESTS_TAR_HASH}" @@ -1484,6 +1484,8 @@ EOF if [[ "${OS_DISTRIBUTION}" == "coreos" ]]; then # CoreOS-only env vars. TODO(yifan): Make them available on other distros. 
cat >>$file < Date: Fri, 13 Nov 2015 13:14:08 -0800 Subject: [PATCH 3/5] cluster/gce/coreos: Update master and node cloud config. --- cluster/gce/coreos/master.yaml | 188 +++++++++++++++++++++++++++++++++ cluster/gce/coreos/node.yaml | 174 +++++++++++++----------------- 2 files changed, 260 insertions(+), 102 deletions(-) create mode 100644 cluster/gce/coreos/master.yaml diff --git a/cluster/gce/coreos/master.yaml b/cluster/gce/coreos/master.yaml new file mode 100644 index 00000000000..d1a4bd895de --- /dev/null +++ b/cluster/gce/coreos/master.yaml @@ -0,0 +1,188 @@ +#cloud-config + +coreos: + units: + - name: kube-env.service + command: start + content: | + [Unit] + Description=Fetch kubernetes-node-environment + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + RemainAfterExit=yes + ExecStartPre=/usr/bin/curl --fail --silent --show-error \ + -H "X-Google-Metadata-Request: True" \ + -o /etc/kube-env.yaml \ + http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env + # Transform the yaml to env file. 
+ ExecStartPre=/usr/bin/mv /etc/kube-env.yaml /etc/kube-env + ExecStart=/usr/bin/sed -i "s/: '/=/;s/'$//" /etc/kube-env + + - name: kubernetes-install-rkt.service + command: start + content: | + [Unit] + Description=Fetch rkt + Documentation=http://github.com/coreos/rkt + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + RemainAfterExit=yes + EnvironmentFile=/etc/kube-env + ExecStartPre=/usr/bin/mkdir -p /etc/rkt + ExecStartPre=/usr/bin/mkdir -p /opt/downloads + ExecStartPre=/usr/bin/curl --location --create-dirs --output /opt/downloads/rkt.tar.gz https://github.com/coreos/rkt/releases/download/v${RKT_VERSION}/rkt-v${RKT_VERSION}.tar.gz + ExecStart=/usr/bin/tar xf /opt/downloads/rkt.tar.gz -C /opt --overwrite + + - name: kubernetes-download-salt.service + command: start + content: | + [Unit] + Description=Download salt + Requires=network-online.target + After=network-online.target + Requires=kube-env.service + After=kube-env.service + [Service] + Type=oneshot + RemainAfterExit=yes + EnvironmentFile=/etc/kube-env + ExecStartPre=/usr/bin/mkdir -p /opt/downloads + ExecStartPre=/usr/bin/curl --location --create-dirs --output /opt/downloads/kubernetes-salt.tar.gz ${SALT_TAR_URL} + # TODO(yifan): Check hash. + ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-salt.tar.gz -C /opt --overwrite + + - name: kubernetes-download-manifests.service + command: start + content: | + [Unit] + Description=Download manifests + Requires=network-online.target + After=network-online.target + Requires=kube-env.service + After=kube-env.service + [Service] + Type=oneshot + RemainAfterExit=yes + EnvironmentFile=/etc/kube-env + ExecStartPre=/usr/bin/mkdir -p /opt/downloads + ExecStartPre=/usr/bin/curl --location --create-dirs --output /opt/downloads/kubernetes-manifests.tar.gz ${KUBE_MANIFESTS_TAR_URL} + # TODO(yifan): Check hash. 
+ ExecStartPre=/usr/bin/mkdir -p /opt/kube-manifests + ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-manifests.tar.gz -C /opt/kube-manifests --overwrite + + - name: kubernetes-install-node.service + command: start + content: | + [Unit] + Description=Install Kubernetes Server + Requires=network-online.target + After=network-online.target + Requires=kube-env.service + After=kube-env.service + [Service] + Type=oneshot + RemainAfterExit=yes + EnvironmentFile=/etc/kube-env + ExecStartPre=/usr/bin/mkdir -p /opt/downloads + ExecStartPre=/usr/bin/curl --location --create-dirs --output /opt/downloads/kubernetes-server-linux-amd64.tar.gz ${SERVER_BINARY_TAR_URL} + # TODO(yifan): Check hash. + ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-server-linux-amd64.tar.gz -C /opt --overwrite + + - name: kubelet.service + command: start + content: | + [Unit] + Description=Run Kubelet service + Requires=network-online.target + After=network-online.target + Requires=kube-env.service + After=kube-env.service + Requires=kubernetes-download-manifests.service + After=kubernetes-download-manifests.service + [Service] + EnvironmentFile=/etc/kube-env + ExecStartPre=/usr/bin/curl --fail --silent --show-error \ + -H "X-Google-Metadata-Request: True" \ + -o /run/configure-kubelet.sh \ + http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-kubelet + ExecStartPre=/usr/bin/chmod 0755 /run/configure-kubelet.sh + ExecStartPre=/run/configure-kubelet.sh + ExecStart=/opt/kubernetes/server/bin/kubelet \ + --api-servers=https://${INSTANCE_PREFIX}-master \ + --enable-debugging-handlers=false \ + --cloud-provider=gce \ + --config=/etc/kubernetes/manifests \ + --allow-privileged=true \ + --v=2 \ + --cluster-dns=${DNS_SERVER_IP} \ + --cluster-domain=${DNS_DOMAIN} \ + --logtostderr=true \ + --container-runtime=${KUBERNETES_CONTAINER_RUNTIME} \ + --rkt-path=/opt/rkt-v${RKT_VERSION}/rkt \ + --configure-cbr0=${KUBERNETES_CONFIGURE_CBR0} \ + --pod-cidr=${MASTER_IP_RANGE} \ + 
--register-schedulable=false \ + --reconcile-cidr=false + Restart=always + RestartSec=10 + + - name: docker.service + command: start + drop-ins: + - name: 50-docker-opts.conf + content: | + [Service] + Environment='DOCKER_OPTS=--bridge=cbr0 --iptables=false --ip-masq=false' + MountFlags=slave + LimitNOFILE=1048576 + LimitNPROC=1048576 + LimitCORE=infinity + Restart=always + RestartSec=2s + StartLimitInterval=0 + + - name: kubernetes-configure-node.service + command: start + content: | + [Unit] + Description=Configure Node For Kubernetes service + Requires=kubernetes-install-node.service + After=kubernetes-install-node.service + Requires=kubernetes-install-rkt.service + After=kubernetes-install-rkt.service + Requires=kubernetes-download-salt.service + After=kubernetes-download-salt.service + Requires=kubernetes-download-manifests.service + After=kubernetes-download-manifests.service + # Need the kubelet/docker running because we will use docker load for docker images. + Requires=kubelet.service + After=kubelet.service + [Service] + Type=oneshot + RemainAfterExit=yes + EnvironmentFile=/etc/kube-env + ExecStartPre=/usr/bin/curl --fail --silent --show-error \ + -H "X-Google-Metadata-Request: True" \ + -o /run/configure-node.sh \ + http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-node + ExecStartPre=/usr/bin/chmod 0755 /run/configure-node.sh + ExecStart=/run/configure-node.sh + + - name: kubernetes-addons.service + command: start + content: | + [Unit] + Description=Start Kubernetes addons and watch for updates. 
+ Requires=kubernetes-configure-node.service + After=kubernetes-configure-node.service + [Service] + Environment=KUBECTL_BIN=/opt/kubernetes/server/bin/kubectl + Environment=kubelet_kubeconfig_file=/var/lib/kubelet/kubeconfig + ExecStartPre=/usr/bin/chmod 0755 /opt/kubernetes/saltbase/salt/kube-addons/kube-addons.sh + ExecStart=/opt/kubernetes/saltbase/salt/kube-addons/kube-addons.sh + Restart=always + RestartSec=10 diff --git a/cluster/gce/coreos/node.yaml b/cluster/gce/coreos/node.yaml index 3ca473be617..d2faa576ecd 100644 --- a/cluster/gce/coreos/node.yaml +++ b/cluster/gce/coreos/node.yaml @@ -1,68 +1,5 @@ #cloud-config -write_files: - - path: /run/configure-hostname.sh - permissions: "0755" - content: | - #!/bin/bash -e - set -x - source /etc/kube-env - - hostnamectl set-hostname $(hostname | cut -f1 -d.) - - path: /run/setup-auth.sh - permissions: "0755" - content: | - #!/bin/bash -e - set -x - source /etc/kube-env - - /usr/bin/mkdir -p /var/lib/kubelet - cat > /var/lib/kubelet/kubeconfig << EOF - apiVersion: v1 - kind: Config - users: - - name: kubelet - user: - token: ${KUBELET_TOKEN} - clusters: - - name: local - cluster: - insecure-skip-tls-verify: true - contexts: - - context: - cluster: local - user: kubelet - name: service-account-context - current-context: service-account-context - EOF - - - path: /run/config-kube-proxy.sh - permissions: "0755" - content: | - #!/bin/bash -e - set -x - source /etc/kube-env - - /usr/bin/mkdir -p /var/lib/kube-proxy - cat > /var/lib/kube-proxy/kubeconfig << EOF - apiVersion: v1 - kind: Config - users: - - name: kube-proxy - user: - token: ${KUBE_PROXY_TOKEN} - clusters: - - name: local - cluster: - insecure-skip-tls-verify: true - contexts: - - context: - cluster: local - user: kube-proxy - name: service-account-context - current-context: service-account-context - EOF - coreos: units: - name: kube-env.service @@ -80,30 +17,46 @@ coreos: -o /etc/kube-env.yaml \ 
http://metadata.google.internal/computeMetadata/v1/instance/attributes/kube-env # Transform the yaml to env file. - ExecStartPre=/usr/bin/cp /etc/kube-env.yaml /etc/kube-env + ExecStartPre=/usr/bin/mv /etc/kube-env.yaml /etc/kube-env ExecStart=/usr/bin/sed -i "s/: '/=/;s/'$//" /etc/kube-env - name: kubernetes-install-rkt.service command: start content: | [Unit] - Description=Fetch Rocket + Description=Fetch rkt Documentation=http://github.com/coreos/rkt Requires=network-online.target After=network-online.target [Service] Type=oneshot + RemainAfterExit=yes EnvironmentFile=/etc/kube-env - ExecStartPre=/usr/bin/rm -rf /opt/rkt - ExecStartPre=/usr/bin/mkdir -p /opt/rkt ExecStartPre=/usr/bin/mkdir -p /etc/rkt - ExecStartPre=/usr/bin/wget \ - -O /opt/rkt/rkt-v${RKT_VERSION}.tar.gz \ - https://github.com/coreos/rkt/releases/download/v${RKT_VERSION}/rkt-v${RKT_VERSION}.tar.gz - ExecStartPre=/usr/bin/tar xzvf /opt/rkt/rkt-v${RKT_VERSION}.tar.gz -C /opt --overwrite - ExecStart=/usr/bin/mv /opt/rkt-v${RKT_VERSION} /opt/rkt/rkt + ExecStartPre=/usr/bin/mkdir -p /opt/downloads + ExecStartPre=/usr/bin/curl --location --create-dirs --output /opt/downloads/rkt.tar.gz https://github.com/coreos/rkt/releases/download/v${RKT_VERSION}/rkt-v${RKT_VERSION}.tar.gz + ExecStart=/usr/bin/tar xf /opt/downloads/rkt.tar.gz -C /opt --overwrite - - name: kubernetes-install-minion.service + - name: kubernetes-download-manifests.service + command: start + content: | + [Unit] + Description=Download manifests + Requires=network-online.target + After=network-online.target + Requires=kube-env.service + After=kube-env.service + [Service] + Type=oneshot + RemainAfterExit=yes + EnvironmentFile=/etc/kube-env + ExecStartPre=/usr/bin/mkdir -p /opt/downloads + ExecStartPre=/usr/bin/curl --location --create-dirs --output /opt/downloads/kubernetes-manifests.tar.gz ${KUBE_MANIFESTS_TAR_URL} + # TODO(yifan): Check hash. 
+ ExecStartPre=/usr/bin/mkdir -p /opt/kube-manifests + ExecStart=/usr/bin/tar xf /opt/downloads/kubernetes-manifests.tar.gz -C /opt/kube-manifests --overwrite + + - name: kubernetes-install-node.service command: start content: | [Unit] @@ -120,33 +73,25 @@ coreos: ExecStartPre=/usr/bin/curl --location --create-dirs --output /opt/kubernetes/pkg/kubernetes-server-linux-amd64.tar.gz ${SERVER_BINARY_TAR_URL} ExecStart=/usr/bin/tar xf /opt/kubernetes/pkg/kubernetes-server-linux-amd64.tar.gz -C /opt --overwrite - - name: kubernetes-preparation.service - command: start - content: | - [Unit] - Description=Configure Node For Kubernetes service - Requires=kubernetes-install-minion.service - After=kubernetes-install-minion.service - Requires=kubernetes-install-rkt.service - After=kubernetes-install-rkt.service - [Service] - Type=oneshot - RemainAfterExit=yes - EnvironmentFile=/etc/kube-env - # TODO(dawnchen): Push this to separate write-files - ExecStart=/run/configure-hostname.sh - - name: kubelet.service command: start content: | [Unit] Description=Run Kubelet service - Requires=kubernetes-preparation.service - After=kubernetes-preparation.service + Requires=network-online.target + After=network-online.target + Requires=kube-env.service + After=kube-env.service + Requires=kubernetes-download-manifests.service + After=kubernetes-download-manifests.service [Service] EnvironmentFile=/etc/kube-env - ExecStartPre=/usr/bin/mkdir -p /etc/kubernetes/manifests - ExecStartPre=/run/setup-auth.sh + ExecStartPre=/usr/bin/curl --fail --silent --show-error \ + -H "X-Google-Metadata-Request: True" \ + -o /run/configure-kubelet.sh \ + http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-kubelet + ExecStartPre=/usr/bin/chmod 0755 /run/configure-kubelet.sh + ExecStartPre=/run/configure-kubelet.sh ExecStart=/opt/kubernetes/server/bin/kubelet \ --api-servers=https://${INSTANCE_PREFIX}-master \ --enable-debugging-handlers=true \ @@ -158,9 +103,8 @@ coreos: 
--cluster-domain=${DNS_DOMAIN} \ --logtostderr=true \ --container-runtime=${KUBERNETES_CONTAINER_RUNTIME} \ - --rkt-path=/opt/rkt/rkt/rkt \ - --configure-cbr0=${KUBERNETES_CONFIGURE_CBR0} \ - --pod-cidr=${MASTER_IP_RANGE} \ + --rkt-path=/opt/rkt-v${RKT_VERSION}/rkt \ + --configure-cbr0=${KUBERNETES_CONFIGURE_CBR0} Restart=always RestartSec=10 @@ -169,15 +113,12 @@ coreos: content: | [Unit] Description=Start Kube-proxy service as Daemon - Requires=kubernetes-install-minion.service - After=kubernetes-install-minion.service - Requires=kubernetes-install-rkt.service - After=kubernetes-install-rkt.service + Requires=kubernetes-configure-node.service + After=kubernetes-configure-node.service [Service] EnvironmentFile=/etc/kube-env - ExecStartPre=/run/config-kube-proxy.sh ExecStart=/opt/kubernetes/server/bin/kube-proxy \ - --master=https://${KUBERNETES_MASTER_NAME}.c.${PROJECT_ID}.internal \ + --master=https://${KUBERNETES_MASTER_NAME} \ --kubeconfig=/var/lib/kube-proxy/kubeconfig \ --v=2 \ --logtostderr=true @@ -191,3 +132,32 @@ coreos: content: | [Service] Environment='DOCKER_OPTS=--bridge=cbr0 --iptables=false --ip-masq=false' + MountFlags=slave + LimitNOFILE=1048576 + LimitNPROC=1048576 + LimitCORE=infinity + Restart=always + RestartSec=2s + StartLimitInterval=0 + + - name: kubernetes-configure-node.service + command: start + content: | + [Unit] + Description=Configure Node For Kubernetes service + Requires=kubernetes-install-node.service + After=kubernetes-install-node.service + Requires=kubernetes-install-rkt.service + After=kubernetes-install-rkt.service + Requires=kubernetes-download-manifests.service + After=kubernetes-download-manifests.service + [Service] + Type=oneshot + RemainAfterExit=yes + EnvironmentFile=/etc/kube-env + ExecStartPre=/usr/bin/curl --fail --silent --show-error \ + -H "X-Google-Metadata-Request: True" \ + -o /run/configure-node.sh \ + http://metadata.google.internal/computeMetadata/v1/instance/attributes/configure-node + 
ExecStartPre=/usr/bin/chmod 0755 /run/configure-node.sh + ExecStart=/run/configure-node.sh From 5817ca1c7114f83f137e0641d0ced8d41f13e10b Mon Sep 17 00:00:00 2001 From: Yifan Gu Date: Fri, 13 Nov 2015 13:15:32 -0800 Subject: [PATCH 4/5] cluster/gce/coreos: Add scripts for configuring the master/node. --- cluster/gce/coreos/configure-kubelet.sh | 33 +++ cluster/gce/coreos/configure-node.sh | 331 ++++++++++++++++++++++++ 2 files changed, 364 insertions(+) create mode 100755 cluster/gce/coreos/configure-kubelet.sh create mode 100644 cluster/gce/coreos/configure-node.sh diff --git a/cluster/gce/coreos/configure-kubelet.sh b/cluster/gce/coreos/configure-kubelet.sh new file mode 100755 index 00000000000..bc5ad667ed8 --- /dev/null +++ b/cluster/gce/coreos/configure-kubelet.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +MANIFESTS_DIR=/opt/kube-manifests/kubernetes + +echo "Configuring hostname" +hostnamectl set-hostname $(hostname | cut -f1 -d.) 
+ +echo "Configuring kubelet" +mkdir -p /var/lib/kubelet +mkdir -p /etc/kubernetes/manifests +src=${MANIFESTS_DIR}/kubelet-config.yaml +dst=/var/lib/kubelet/kubeconfig +cp ${src} ${dst} +sed -i 's/\"/\\\"/g' ${dst} # eval will remove the double quotes if they are not escaped +eval "echo \"$(< ${dst})\"" > ${dst} diff --git a/cluster/gce/coreos/configure-node.sh b/cluster/gce/coreos/configure-node.sh new file mode 100644 index 00000000000..7a4fa1ac177 --- /dev/null +++ b/cluster/gce/coreos/configure-node.sh @@ -0,0 +1,331 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +readonly KNOWN_TOKENS_FILE="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv" +readonly BASIC_AUTH_FILE="/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv" + +# evaluate-manifest evalutes the source manifest with the environment variables. +function evaluate-manifest() { + local src=$1 + local dst=$2 + cp ${src} ${dst} + sed -i 's/\"/\\\"/g' ${dst} # eval will remove the double quotes if they are not escaped + eval "echo \"$(< ${dst})\"" > ${dst} +} + +# evaluate-manifests-dir evalutes the source manifests within $1 and put the result +# in $2. 
+function evaluate-manifests-dir() { + local src=$1 + local dst=$2 + mkdir -p ${dst} + + for f in ${src}/* + do + evaluate-manifest $f ${dst}/${f##*/} + done +} + +function configure-kube-proxy() { + echo "Configuring kube-proxy" + mkdir -p /var/lib/kube-proxy + evaluate-manifest ${MANIFESTS_DIR}/kubeproxy-config.yaml /var/lib/kube-proxy/kubeconfig +} + +function configure-logging() { + if [[ "${LOGGING_DESTINATION}" == "gcp" ]];then + echo "Configuring fluentd-gcp" + # fluentd-gcp + evaluate-manifest ${MANIFESTS_DIR}/fluentd-gcp.yaml /etc/kubernetes/manifests/fluentd-gcp.yaml + elif [[ "${LOGGING_DESTINATION}" == "elasticsearch" ]];then + echo "Configuring fluentd-es" + # fluentd-es + evaluate-manifest ${MANIFESTS_DIR}/fluentd-es.yaml /etc/kubernetes/manifests/fluentd-es.yaml + fi +} + +function configure-admission-controls() { + echo "Configuring admission controls" + mkdir -p /etc/kubernetes/admission-controls + cp -r ${SALT_DIR}/salt/kube-admission-controls/limit-range /etc/kubernetes/admission-controls/ +} + +function configure-etcd() { + echo "Configuring etcd" + touch /var/log/etcd.log + evaluate-manifest ${MANIFESTS_DIR}/etcd.yaml /etc/kubernetes/manifests/etcd.yaml +} + +function configure-etcd-events() { + echo "Configuring etcd-events" + touch /var/log/etcd-events.log + evaluate-manifest ${MANIFESTS_DIR}/etcd-events.yaml /etc/kubernetes/manifests/etcd-events.yaml +} + +function configure-kube-apiserver() { + echo "Configuring kube-apiserver" + + # Wait for etcd to be up. + wait-url-up http://127.0.0.1:4001/version + + touch /var/log/kube-apiserver.log + + # Copying known_tokens and basic_auth file. 
+  cp ${SALT_OVERLAY}/salt/kube-apiserver/*.csv /srv/kubernetes/
+  evaluate-manifest ${MANIFESTS_DIR}/kube-apiserver.yaml /etc/kubernetes/manifests/kube-apiserver.yaml
+}
+
+function configure-kube-scheduler() {
+  echo "Configuring kube-scheduler"
+  touch /var/log/kube-scheduler.log
+  evaluate-manifest ${MANIFESTS_DIR}/kube-scheduler.yaml /etc/kubernetes/manifests/kube-scheduler.yaml
+}
+
+function configure-kube-controller-manager() {
+  # Wait for api server.
+  wait-url-up http://127.0.0.1:8080/version
+  echo "Configuring kube-controller-manager"
+  touch /var/log/kube-controller-manager.log
+  evaluate-manifest ${MANIFESTS_DIR}/kube-controller-manager.yaml /etc/kubernetes/manifests/kube-controller-manager.yaml
+}
+
+# Wait until $1 becomes reachable.
+function wait-url-up() {
+  until curl --silent $1
+  do
+    sleep 5
+  done
+}
+
+# Configure addon yamls, and run salt/kube-addons/kube-addon.sh
+function configure-master-addons() {
+  echo "Configuring master addons"
+
+  local addon_dir=/etc/kubernetes/addons
+  mkdir -p ${addon_dir}
+
+  # Copy namespace.yaml
+  evaluate-manifest ${MANIFESTS_DIR}/addons/namespace.yaml ${addon_dir}/namespace.yaml
+
+  if [[ "${ENABLE_L7_LOADBALANCING}" == "glbc" ]]; then
+    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-loadbalancing/glbc ${addon_dir}/cluster-loadbalancing/glbc
+  fi
+
+  if [[ "${ENABLE_CLUSTER_DNS}" == "true" ]]; then
+    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/dns ${addon_dir}/dns
+  fi
+
+  if [[ "${ENABLE_CLUSTER_UI}" == "true" ]]; then
+    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/kube-ui ${addon_dir}/kube-ui
+  fi
+
+  if [[ "${ENABLE_CLUSTER_MONITORING}" == "influxdb" ]]; then
+    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-monitoring/influxdb ${addon_dir}/cluster-monitoring/influxdb
+  elif [[ "${ENABLE_CLUSTER_MONITORING}" == "google" ]]; then
+    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-monitoring/google ${addon_dir}/cluster-monitoring/google
+  elif [[ "${ENABLE_CLUSTER_MONITORING}" == "standalone" ]]; then
+    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-monitoring/standalone ${addon_dir}/cluster-monitoring/standalone
+  elif [[ "${ENABLE_CLUSTER_MONITORING}" == "googleinfluxdb" ]]; then
+    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/cluster-monitoring/googleinfluxdb ${addon_dir}/cluster-monitoring/googleinfluxdb
+  fi
+
+  # Note that KUBE_ENABLE_INSECURE_REGISTRY is not supported yet.
+  if [[ "${ENABLE_CLUSTER_REGISTRY}" == "true" ]]; then
+    CLUSTER_REGISTRY_DISK_SIZE=$(convert-bytes-gce-kube "${CLUSTER_REGISTRY_DISK_SIZE}")
+    evaluate-manifests-dir ${MANIFESTS_DIR}/addons/registry ${addon_dir}/registry
+  fi
+}
+
+function configure-master-components() {
+  configure-admission-controls
+  configure-etcd
+  configure-etcd-events
+  configure-kube-apiserver
+  configure-kube-scheduler
+  configure-kube-controller-manager
+  configure-master-addons
+}
+
+# TODO(yifan): Merge this with mount-master-pd() in configure-vm.sh
+# Pass ${save_format_and_mount} as an argument.
+function mount-master-pd() {
+  if [[ ! -e /dev/disk/by-id/google-master-pd ]]; then
+    return
+  fi
+  device_info=$(ls -l /dev/disk/by-id/google-master-pd)
+  relative_path=${device_info##* }
+  device_path="/dev/disk/by-id/${relative_path}"
+
+  # Format and mount the disk, create directories on it for all of the master's
+  # persistent data, and link them to where they're used.
+  echo "Mounting master-pd"
+  mkdir -p /mnt/master-pd
+  safe_format_and_mount=${SALT_DIR}/salt/helpers/safe_format_and_mount
+  chmod +x ${safe_format_and_mount}
+  ${safe_format_and_mount} -m "mkfs.ext4 -F" "${device_path}" /mnt/master-pd &>/var/log/master-pd-mount.log || \
+    { echo "!!! master-pd mount failed, review /var/log/master-pd-mount.log !!!"; return 1; }
+  # Contains all the data stored in etcd
+  mkdir -m 700 -p /mnt/master-pd/var/etcd
+  # Contains the dynamically generated apiserver auth certs and keys
+  mkdir -p /mnt/master-pd/srv/kubernetes
+  # Contains the cluster's initial config parameters and auth tokens
+  mkdir -p /mnt/master-pd/srv/salt-overlay
+  # Directory for kube-apiserver to store SSH key (if necessary)
+  mkdir -p /mnt/master-pd/srv/sshproxy
+
+  ln -s -f /mnt/master-pd/var/etcd /var/etcd
+  ln -s -f /mnt/master-pd/srv/kubernetes /srv/kubernetes
+  ln -s -f /mnt/master-pd/srv/sshproxy /srv/sshproxy
+  ln -s -f /mnt/master-pd/srv/salt-overlay /srv/salt-overlay
+
+  # This is a bit of a hack to get around the fact that salt has to run after the
+  # PD and mounted directory are already set up. We can't give ownership of the
+  # directory to etcd until the etcd user and group exist, but they don't exist
+  # until salt runs if we don't create them here. We could alternatively make the
+  # permissions on the directory more permissive, but this seems less bad.
+  if ! id etcd &>/dev/null; then
+    useradd -s /sbin/nologin -d /var/etcd etcd
+  fi
+  chown -R etcd /mnt/master-pd/var/etcd
+  chgrp -R etcd /mnt/master-pd/var/etcd
+}
+
+# The job of this function is simple, but the basic regular expression syntax makes
+# this difficult to read. What we want to do is convert from [0-9]+B, KB, KiB, MB, etc
+# into [0-9]+, Ki, Mi, Gi, etc.
+# This is done in two steps:
+# 1. Convert from [0-9]+X?i?B into [0-9]X? (X denotes the prefix, ? means the field
+#    is optional).
+# 2. Attach an 'i' to the end of the string if we find a letter.
+# The two step process is needed to handle the edge case in which we want to convert
+# a raw byte count, as the result should be a simple number (e.g. 5B -> 5).
+#
+# TODO(yifan): Reuse the one defined in configure-vm.sh to remove duplication.
+function convert-bytes-gce-kube() {
+  local -r storage_space=$1
+  echo "${storage_space}" | sed -e 's/^\([0-9]\+\)\([A-Z]\)\?i\?B$/\1\2/g' -e 's/\([A-Z]\)$/\1i/'
+}
+
+# TODO(yifan): Use create-salt-master-auth() in configure-vm.sh
+function create-salt-master-auth() {
+  if [[ ! -e /srv/kubernetes/ca.crt ]]; then
+    if [[ ! -z "${CA_CERT:-}" ]] && [[ ! -z "${MASTER_CERT:-}" ]] && [[ ! -z "${MASTER_KEY:-}" ]]; then
+      mkdir -p /srv/kubernetes
+      (umask 077;
+        echo "${CA_CERT}" | base64 -d > /srv/kubernetes/ca.crt;
+        echo "${MASTER_CERT}" | base64 -d > /srv/kubernetes/server.cert;
+        echo "${MASTER_KEY}" | base64 -d > /srv/kubernetes/server.key;
+        # Kubecfg cert/key are optional and included for backwards compatibility.
+        # TODO(roberthbailey): Remove these two lines once GKE no longer requires
+        # fetching clients certs from the master VM.
+        echo "${KUBECFG_CERT:-}" | base64 -d > /srv/kubernetes/kubecfg.crt;
+        echo "${KUBECFG_KEY:-}" | base64 -d > /srv/kubernetes/kubecfg.key)
+    fi
+  fi
+  if [ ! -e "${BASIC_AUTH_FILE}" ]; then
+    mkdir -p /srv/salt-overlay/salt/kube-apiserver
+    (umask 077;
+      echo "${KUBE_PASSWORD},${KUBE_USER},admin" > "${BASIC_AUTH_FILE}")
+  fi
+  if [ ! -e "${KNOWN_TOKENS_FILE}" ]; then
+    mkdir -p /srv/salt-overlay/salt/kube-apiserver
+    (umask 077;
+      echo "${KUBE_BEARER_TOKEN},admin,admin" > "${KNOWN_TOKENS_FILE}";
+      echo "${KUBELET_TOKEN},kubelet,kubelet" >> "${KNOWN_TOKENS_FILE}";
+      echo "${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy" >> "${KNOWN_TOKENS_FILE}")
+
+    # Generate tokens for other "service accounts". Append to known_tokens.
+    #
+    # NB: If this list ever changes, this script actually has to
+    # change to detect the existence of this file, kill any deleted
+    # old tokens and add any new tokens (to handle the upgrade case).
+    local -r service_accounts=("system:scheduler" "system:controller_manager" "system:logging" "system:monitoring" "system:dns")
+    for account in "${service_accounts[@]}"; do
+      token=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
+      echo "${token},${account},${account}" >> "${KNOWN_TOKENS_FILE}"
+    done
+  fi
+}
+
+# $1 is the directory containing all of the docker images
+function load-docker-images() {
+  local success
+  local restart_docker
+  while true; do
+    success=true
+    restart_docker=false
+    for image in "$1/"*; do
+      timeout 30 docker load -i "${image}" &>/dev/null
+      rc=$?
+      if [[ "$rc" == 124 ]]; then
+        restart_docker=true
+      elif [[ "$rc" != 0 ]]; then
+        success=false
+      fi
+    done
+    if [[ "$success" == "true" ]]; then break; fi
+    if [[ "$restart_docker" == "true" ]]; then systemctl restart docker; fi
+    sleep 15
+  done
+}
+
+
+# TODO(yifan): Make this function more generic for other runtimes.
+function load-master-components-images() {
+  echo "Loading docker images for master components"
+  ${SALT_DIR}/install.sh ${KUBE_BIN_TAR}
+  ${SALT_DIR}/salt/kube-master-addons/kube-master-addons.sh
+
+  # Get the image tags.
+  KUBE_APISERVER_DOCKER_TAG=$(cat ${KUBE_BIN_DIR}/kube-apiserver.docker_tag)
+  KUBE_CONTROLLER_MANAGER_DOCKER_TAG=$(cat ${KUBE_BIN_DIR}/kube-controller-manager.docker_tag)
+  KUBE_SCHEDULER_DOCKER_TAG=$(cat ${KUBE_BIN_DIR}/kube-scheduler.docker_tag)
+}
+
+
+##########
+#  main  #
+##########
+
+KUBE_BIN_TAR=/opt/downloads/kubernetes-server-linux-amd64.tar.gz
+KUBE_BIN_DIR=/opt/kubernetes/server/bin
+SALT_DIR=/opt/kubernetes/saltbase
+SALT_OVERLAY=/srv/salt-overlay
+MANIFESTS_DIR=/opt/kube-manifests/kubernetes
+
+# On CoreOS, the hosts file is in /usr/share/baselayout/hosts
+# So we need to manually populate the hosts file here on gce.
+echo "127.0.0.1 localhost" >> /etc/hosts
+echo "::1 localhost" >> /etc/hosts
+
+if [[ "${KUBERNETES_MASTER}" == "true" ]]; then
+  mount-master-pd
+  create-salt-master-auth
+  load-master-components-images
+  configure-master-components
+else
+  configure-kube-proxy
+fi
+
+if [[ "${ENABLE_NODE_LOGGING}" == "true" ]]; then
+  configure-logging
+fi
+
+echo "Finish configuration successfully!"

From 723402c4e92ef4b64d635130d07a2d408d7a06e2 Mon Sep 17 00:00:00 2001
From: Yifan Gu
Date: Tue, 22 Dec 2015 15:41:38 -0800
Subject: [PATCH 5/5] build/common.sh: Copy manifests.

---
 build/common.sh | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/build/common.sh b/build/common.sh
index e839d3017ec..7ed622d9108 100755
--- a/build/common.sh
+++ b/build/common.sh
@@ -896,7 +896,8 @@ function kube::release::package_kube_manifests_tarball() {
   # Source 2: manifests from cluster/gce/kube-manifests.
   # TODO(andyzheng0831): Enable the following line after finishing issue #16702.
-  # cp "${KUBE_ROOT}/cluster/gce/kube-manifests/"* "${release_stage}/"
+  # cp "${KUBE_ROOT}/cluster/gce/kube-manifests/*" "${release_stage}/"
+  cp -r "${KUBE_ROOT}/cluster/gce/coreos/kube-manifests"/* "${release_stage}/"
 
   kube::release::clean_cruft