Merge pull request #11450 from errordeveloper/master
coreos/azure: Updates for 1.0
Commit: 0b7c2d5496
@@ -102,6 +102,10 @@ func gotDashF(line int, fields []string, fieldNum int) error {
		// Same-dir files are usually created in the same example
		return nil
	}
	if strings.HasPrefix(target, "~/") {
		// Home directory may also be created by the same example
		return nil
	}
	if strings.HasPrefix(target, "/") {
		// Absolute paths tend to be /tmp/* and created in the same example.
		return nil
@@ -82,6 +82,10 @@ func TestKubectlDashF(t *testing.T) {
			"Foo\n```\nkubectl -blah create -f/foobar\n```\nBar",
			true,
		},
		{
			"Foo\n```\nkubectl -blah create -f~/foobar\n```\nBar",
			true,
		},
		// Real checks
		{
			"Foo\n```\nkubectl -blah create -f mungedocs.go\n```\nBar",
@@ -102,9 +102,9 @@ Check there are 2 nodes in the cluster:

```console
core@kube-00 ~ $ kubectl get nodes
NAME      LABELS                   STATUS
kube-01   environment=production   Ready
kube-02   environment=production   Ready
NAME      LABELS                           STATUS
kube-01   kubernetes.io/hostname=kube-01   Ready
kube-02   kubernetes.io/hostname=kube-02   Ready
```

## Deploying the workload
@@ -112,16 +112,10 @@ kube-02 environment=production Ready

Let's follow the Guestbook example now:

```sh
cd guestbook-example
kubectl create -f examples/guestbook/redis-master-controller.yaml
kubectl create -f examples/guestbook/redis-master-service.yaml
kubectl create -f examples/guestbook/redis-slave-controller.yaml
kubectl create -f examples/guestbook/redis-slave-service.yaml
kubectl create -f examples/guestbook/frontend-controller.yaml
kubectl create -f examples/guestbook/frontend-service.yaml
kubectl create -f ~/guestbook-example
```

You need to wait for the pods to get deployed; run the following and wait for `STATUS` to change from `Unknown`, through `Pending`, to `Running`.
You need to wait for the pods to get deployed; run the following and wait for `STATUS` to change from `Pending` to `Running`.

```sh
kubectl get pods --watch
@@ -132,20 +126,20 @@ kubectl get pods --watch

Eventually you should see:

```console
NAME                 READY   STATUS    RESTARTS   AGE
frontend-8anh8       1/1     Running   0          1m
frontend-8pq5r       1/1     Running   0          1m
frontend-v7tbq       1/1     Running   0          1m
redis-master-u0my3   1/1     Running   0          1m
redis-slave-4eznf    1/1     Running   0          1m
redis-slave-hf40f    1/1     Running   0          1m
NAME                 READY   STATUS    RESTARTS   AGE
frontend-0a9xi       1/1     Running   0          4m
frontend-4wahe       1/1     Running   0          4m
frontend-6l36j       1/1     Running   0          4m
redis-master-talmr   1/1     Running   0          4m
redis-slave-12zfd    1/1     Running   0          4m
redis-slave-3nbce    1/1     Running   0          4m
```

## Scaling

Two single-core nodes are certainly not enough for a production system of today, and, as you can see, there is one _unassigned_ pod. Let's scale the cluster by adding a couple of bigger nodes.
Two single-core nodes are certainly not enough for a production system of today. Let's scale the cluster by adding a couple of bigger nodes.

You will need to open another terminal window on your machine and go to the same working directory (e.g. `~/Workspace/weave-demos/coreos-azure`).
You will need to open another terminal window on your machine and go to the same working directory (e.g. `~/Workspace/kubernetes/docs/getting-started-guides/coreos/azure/`).

First, let's set the size of new VMs:
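For example, something along these lines should work from that second terminal, assuming the node.js helper scripts shipped in this directory (the `AZ_VM_SIZE` variable and the `scale-kubernetes-cluster.js` name are assumptions here, so check the directory contents first):

```sh
# Assumed names: pick a larger VM size, then add two such nodes to the cluster.
export AZ_VM_SIZE=Large
./scale-kubernetes-cluster.js ./output/kube_1c1496016083b4_ssh_conf 2
```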
@@ -177,11 +171,11 @@ Back on `kube-00`:

```console
core@kube-00 ~ $ kubectl get nodes
NAME      LABELS                   STATUS
kube-01   environment=production   Ready
kube-02   environment=production   Ready
kube-03   environment=production   Ready
kube-04   environment=production   Ready
NAME      LABELS                           STATUS
kube-01   kubernetes.io/hostname=kube-01   Ready
kube-02   kubernetes.io/hostname=kube-02   Ready
kube-03   kubernetes.io/hostname=kube-03   Ready
kube-04   kubernetes.io/hostname=kube-04   Ready
```

You can see that two more nodes joined happily. Let's scale the number of Guestbook instances now.
@@ -190,18 +184,19 @@ First, double-check how many replication controllers there are:

```console
core@kube-00 ~ $ kubectl get rc
CONTROLLER     CONTAINER(S)   IMAGE(S)                                    SELECTOR            REPLICAS
CONTROLLER     CONTAINER(S)   IMAGE(S)                                    SELECTOR            REPLICAS
frontend       php-redis      kubernetes/example-guestbook-php-redis:v2   name=frontend       3
redis-master   master         redis                                       name=redis-master   1
redis-slave    slave          kubernetes/redis-slave:v2                   name=redis-slave    2
redis-slave    worker         kubernetes/redis-slave:v2                   name=redis-slave    2
```

As there are 4 nodes, let's scale proportionally:

```console
core@kube-00 ~ $ kubectl scale --replicas=4 rc redis-slave
core@kube-00 ~ $ kubectl scale --replicas=4 rc redis-slave
scaled
core@kube-00 ~ $ kubectl scale --replicas=4 rc frontend
core@kube-00 ~ $ kubectl scale --replicas=4 rc frontend
scaled
```
@@ -212,7 +207,7 @@ core@kube-00 ~ $ kubectl get rc
CONTROLLER     CONTAINER(S)   IMAGE(S)                                    SELECTOR            REPLICAS
frontend       php-redis      kubernetes/example-guestbook-php-redis:v2   name=frontend       4
redis-master   master         redis                                       name=redis-master   1
redis-slave    slave          kubernetes/redis-slave:v2                   name=redis-slave    4
redis-slave    worker         kubernetes/redis-slave:v2                   name=redis-slave    4
```

You will now have more instances of front-end Guestbook apps and Redis slaves; and, if you look up all pods labeled `name=frontend`, you should see one running on each node.
@@ -220,19 +215,35 @@ You now will have more instances of front-end Guestbook apps and Redis slaves; a

```console
core@kube-00 ~/guestbook-example $ kubectl get pods -l name=frontend
NAME             READY   STATUS    RESTARTS   AGE
frontend-8anh8   1/1     Running   0          3m
frontend-8pq5r   1/1     Running   0          3m
frontend-oz8uo   1/1     Running   0          51s
frontend-v7tbq   1/1     Running   0          3m
frontend-0a9xi   1/1     Running   0          22m
frontend-4wahe   1/1     Running   0          22m
frontend-6l36j   1/1     Running   0          22m
frontend-z9oxo   1/1     Running   0          41s
```
## Exposing the app to the outside world

To make sure the app is working, you probably want to load it in the browser. For accessing the Guestbook service from the outside world, an Azure endpoint needs to be created as shown in the picture below.
There is no native Azure load-balancer support in Kubernetes 1.0; however, here is how you can expose the Guestbook app to the Internet.



```
./expose_guestbook_app_port.sh ./output/kube_1c1496016083b4_ssh_conf
Guestbook app is on port 31605, will map it to port 80 on kube-00
info:    Executing command vm endpoint create
+ Getting virtual machines
+ Reading network configuration
+ Updating network configuration
info:    vm endpoint create command OK
info:    Executing command vm endpoint show
+ Getting virtual machines
data:    Name                   : tcp-80-31605
data:    Local port             : 31605
data:    Protocol               : tcp
data:    Virtual IP Address     : 137.117.156.164
data:    Direct server return   : Disabled
info:    vm endpoint show command OK
```

You then should be able to access it from anywhere via the Azure virtual IP for `kube-01`, i.e. `http://104.40.211.194:8000/` as per screenshot.
You then should be able to access it from anywhere via the Azure virtual IP for `kube-00` displayed above, i.e. `http://137.117.156.164/` in my case.
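A quick way to confirm the endpoint is serving the Guestbook front end is to fetch it from your own machine; for example (substitute the virtual IP reported by the endpoint output above):

```sh
curl --silent http://137.117.156.164/ | head
```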
## Next steps
@@ -1,14 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "Grafana"
  name: monitoring-grafana
spec:
  ports:
    - port: 80
      targetPort: 8080
  selector:
    name: influxGrafana
@@ -1,24 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    name: heapster
    kubernetes.io/cluster-service: "true"
  name: monitoring-heapster-controller
spec:
  replicas: 1
  selector:
    name: heapster
  template:
    metadata:
      labels:
        name: heapster
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
        - image: gcr.io/google_containers/heapster:v0.12.1
          name: heapster
          command:
            - /heapster
            - --source=kubernetes:http://kubernetes?auth=
            - --sink=influxdb:http://monitoring-influxdb:8086
@@ -1,35 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
  labels:
    name: influxGrafana
    kubernetes.io/cluster-service: "true"
  name: monitoring-influx-grafana-controller
spec:
  replicas: 1
  selector:
    name: influxGrafana
  template:
    metadata:
      labels:
        name: influxGrafana
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
        - image: gcr.io/google_containers/heapster_influxdb:v0.3
          name: influxdb
          ports:
            - containerPort: 8083
              hostPort: 8083
            - containerPort: 8086
              hostPort: 8086
        - image: gcr.io/google_containers/heapster_grafana:v0.7
          name: grafana
          env:
            - name: INFLUXDB_EXTERNAL_URL
              value: /api/v1/proxy/namespaces/default/services/monitoring-grafana/db/
            - name: INFLUXDB_HOST
              value: monitoring-influxdb
            - name: INFLUXDB_PORT
              value: "8086"
@@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    name: influxGrafana
  name: monitoring-influxdb
spec:
  ports:
    - name: http
      port: 8083
      targetPort: 8083
    - name: api
      port: 8086
      targetPort: 8086
  selector:
    name: influxGrafana
@@ -1,37 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: elasticsearch-logging-v1
  namespace: default
  labels:
    k8s-app: elasticsearch-logging
    version: v1
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 2
  selector:
    k8s-app: elasticsearch-logging
    version: v1
  template:
    metadata:
      labels:
        k8s-app: elasticsearch-logging
        version: v1
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
        - image: gcr.io/google_containers/elasticsearch:1.3
          name: elasticsearch-logging
          ports:
            - containerPort: 9200
              name: db
              protocol: TCP
            - containerPort: 9300
              name: transport
              protocol: TCP
          volumeMounts:
            - name: es-persistent-storage
              mountPath: /data
      volumes:
        - name: es-persistent-storage
          emptyDir: {}
@@ -1,16 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: elasticsearch-logging
  namespace: default
  labels:
    k8s-app: elasticsearch-logging
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "Elasticsearch"
spec:
  ports:
    - port: 9200
      protocol: TCP
      targetPort: es-port
  selector:
    k8s-app: elasticsearch-logging
@@ -1,31 +0,0 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: kibana-logging-v1
  namespace: default
  labels:
    k8s-app: kibana-logging
    version: v1
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kibana-logging
    version: v1
  template:
    metadata:
      labels:
        k8s-app: kibana-logging
        version: v1
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
        - name: kibana-logging
          image: gcr.io/google_containers/kibana:1.3
          env:
            - name: "ELASTICSEARCH_URL"
              value: "http://elasticsearch-logging:9200"
          ports:
            - containerPort: 5601
              name: kibana-port
              protocol: TCP
@@ -1,17 +0,0 @@

apiVersion: v1
kind: Service
metadata:
  name: kibana-logging
  namespace: default
  labels:
    k8s-app: kibana-logging
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "Kibana"
spec:
  ports:
    - port: 5601
      protocol: TCP
      targetPort: kibana-port
  selector:
    k8s-app: kibana-logging
@@ -0,0 +1,92 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-dns-v8
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    version: v8
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 3
  selector:
    k8s-app: kube-dns
    version: v8
  template:
    metadata:
      labels:
        k8s-app: kube-dns
        version: v8
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: etcd
        image: gcr.io/google_containers/etcd:2.0.9
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
        command:
        - /usr/local/bin/etcd
        - -data-dir
        - /var/etcd/data
        - -listen-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -advertise-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -initial-cluster-token
        - skydns-etcd
        volumeMounts:
        - name: etcd-storage
          mountPath: /var/etcd/data
      - name: kube2sky
        image: gcr.io/google_containers/kube2sky:1.11
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
        args:
        # command = "/kube2sky"
        - -domain=kube.local
        - -kube_master_url=http://kube-00:8080
      - name: skydns
        image: gcr.io/google_containers/skydns:2015-03-11-001
        resources:
          limits:
            cpu: 100m
            memory: 50Mi
        args:
        # command = "/skydns"
        - -machines=http://localhost:4001
        - -addr=0.0.0.0:53
        - -domain=kube.local
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 30
          timeoutSeconds: 5
      - name: healthz
        image: gcr.io/google_containers/exechealthz:1.0
        resources:
          limits:
            cpu: 10m
            memory: 20Mi
        args:
        - -cmd=nslookup kubernetes.default.svc.kube.local localhost >/dev/null
        - -port=8080
        ports:
        - containerPort: 8080
          protocol: TCP
      volumes:
      - name: etcd-storage
        emptyDir: {}
      dnsPolicy: Default  # Don't use cluster DNS.
@@ -0,0 +1,20 @@
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: kube-dns
  clusterIP: 10.1.0.3
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
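Once the addons are created on the master (see the `kube-create-addons.service` unit further down), a minimal sanity check of the DNS addon is to list its pods and service by the labels and namespace defined above, e.g. from `kube-00`:

```console
core@kube-00 ~ $ kubectl get pods --namespace=kube-system -l k8s-app=kube-dns
core@kube-00 ~ $ kubectl get services --namespace=kube-system -l k8s-app=kube-dns
```

The service should report the `10.1.0.3` cluster IP configured above, while the `healthz` container keeps checking that `kubernetes.default.svc.kube.local` resolves inside each pod.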
@@ -1,60 +1,19 @@
## This file is used as input to deployment script, which amends it as needed.
## More specifically, we need to add peer hosts for each but the elected peer.

write_files:
  - path: /opt/bin/curl-retry.sh
    permissions: '0755'
    owner: root
    content: |
      #!/bin/sh -x
      until curl $@
      do sleep 1
      done

coreos:
  units:
    - name: download-etcd2.service
      enable: true
      command: start
      content: |
        [Unit]
        After=network-online.target
        Before=etcd2.service
        Description=Download etcd2 Binaries
        Documentation=https://github.com/coreos/etcd/
        Requires=network-online.target
        [Service]
        Environment=ETCD2_RELEASE_TARBALL=https://github.com/coreos/etcd/releases/download/v2.0.11/etcd-v2.0.11-linux-amd64.tar.gz
        ExecStartPre=/bin/mkdir -p /opt/bin
        ExecStart=/opt/bin/curl-retry.sh --silent --location $ETCD2_RELEASE_TARBALL --output /tmp/etcd2.tgz
        ExecStart=/bin/tar xzvf /tmp/etcd2.tgz -C /opt
        ExecStartPost=/bin/ln -s /opt/etcd-v2.0.11-linux-amd64/etcd /opt/bin/etcd2
        ExecStartPost=/bin/ln -s /opt/etcd-v2.0.11-linux-amd64/etcdctl /opt/bin/etcdctl2
        RemainAfterExit=yes
        Type=oneshot
        [Install]
        WantedBy=multi-user.target
    - name: etcd2.service
      enable: true
      command: start
      content: |
        [Unit]
        After=download-etcd2.service
        Description=etcd 2
        Documentation=https://github.com/coreos/etcd/
        [Service]
        Environment=ETCD_NAME=%H
        Environment=ETCD_INITIAL_CLUSTER_TOKEN=etcd-cluster
        Environment=ETCD_INITIAL_ADVERTISE_PEER_URLS=http://%H:2380
        Environment=ETCD_LISTEN_PEER_URLS=http://%H:2380
        Environment=ETCD_LISTEN_CLIENT_URLS=http://0.0.0.0:2379,http://0.0.0.0:4001
        Environment=ETCD_ADVERTISE_CLIENT_URLS=http://%H:2379,http://%H:4001
        Environment=ETCD_INITIAL_CLUSTER_STATE=new
        ExecStart=/opt/bin/etcd2
        Restart=always
        RestartSec=10
        [Install]
        WantedBy=multi-user.target
  etcd2:
    name: '%H'
    initial-cluster-token: 'etcd-cluster'
    initial-advertise-peer-urls: 'http://%H:2380'
    listen-peer-urls: 'http://%H:2380'
    listen-client-urls: 'http://0.0.0.0:2379,http://0.0.0.0:4001'
    advertise-client-urls: 'http://%H:2379,http://%H:4001'
    initial-cluster-state: 'new'
  update:
    group: stable
    reboot-strategy: off
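With CoreOS's built-in etcd2 configured like this on every VM, cluster membership can be spot-checked from any of the machines once they are up; a minimal sketch, assuming the stock `etcdctl` that ships with CoreOS:

```sh
etcdctl cluster-health
etcdctl member list
```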
@@ -12,56 +12,6 @@ write_files:
      do sleep 1
      done

  - path: /opt/bin/register_minion.sh
    permissions: '0755'
    owner: root
    content: |
      #!/bin/sh -xe
      node_id="${1}"
      master_url="${2}"
      env_label="${3}"
      until healthcheck=$(curl --fail --silent "${master_url}/healthz")
      do sleep 2
      done
      test -n "${healthcheck}"
      test "${healthcheck}" = "ok"
      printf '{
        "apiVersion": "v1",
        "kind": "Node",
        "metadata": {
          "name": "%s",
          "labels": { "environment": "%s" }
        }' "${node_id}" "${env_label}" \
        | /opt/bin/kubectl create -s "${master_url}" -f -

  - path: /etc/kubernetes/manifests/fluentd.manifest
    permissions: '0755'
    owner: root
    content: |
      apiVersion: v1
      kind: Pod
      metadata:
        name: fluentd-elasticsearch
      spec:
        containers:
          - name: fluentd-elasticsearch
            image: gcr.io/google_containers/fluentd-elasticsearch:1.5
            env:
              - name: "FLUENTD_ARGS"
                value: "-qq"
            volumeMounts:
              - name: varlog
                mountPath: /varlog
              - name: containers
                mountPath: /var/lib/docker/containers
        volumes:
          - name: varlog
            hostPath:
              path: /var/log
          - name: containers
            hostPath:
              path: /var/lib/docker/containers

coreos:
  update:
    group: stable
@@ -93,7 +43,7 @@ coreos:
        [Install]
        WantedBy=multi-user.target
        WantedBy=kubernetes-master.target
        WantedBy=kubernetes-minion.target
        WantedBy=kubernetes-node.target

    - name: kubernetes-master.target
      enable: true
@@ -106,24 +56,25 @@ coreos:
        After=weave-network.target
        Requires=weave-network.target
        ConditionHost=kube-00
        Wants=apiserver.service
        Wants=scheduler.service
        Wants=controller-manager.service
        Wants=kube-apiserver.service
        Wants=kube-scheduler.service
        Wants=kube-controller-manager.service
        Wants=kube-proxy.service
        [Install]
        WantedBy=multi-user.target

    - name: kubernetes-minion.target
    - name: kubernetes-node.target
      enable: true
      command: start
      content: |
        [Unit]
        Description=Kubernetes Cluster Minion
        Description=Kubernetes Cluster Node
        Documentation=http://kubernetes.io/
        RefuseManualStart=no
        After=weave-network.target
        Requires=weave-network.target
        ConditionHost=!kube-00
        Wants=proxy.service
        Wants=kube-proxy.service
        Wants=kubelet.service
        [Install]
        WantedBy=multi-user.target
@@ -227,20 +178,20 @@ coreos:
        WantedBy=multi-user.target
        WantedBy=weave-network.target

    - name: download-kubernetes.service
    - name: install-kubernetes.service
      enable: true
      content: |
        [Unit]
        After=network-online.target
        Before=apiserver.service
        Before=controller-manager.service
        Before=kube-apiserver.service
        Before=kube-controller-manager.service
        Before=kubelet.service
        Before=proxy.service
        Before=kube-proxy.service
        Description=Download Kubernetes Binaries
        Documentation=http://kubernetes.io/
        Requires=network-online.target
        [Service]
        Environment=KUBE_RELEASE_TARBALL=https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v0.18.0/kubernetes.tar.gz
        Environment=KUBE_RELEASE_TARBALL=https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v1.0.1/kubernetes.tar.gz
        ExecStartPre=/bin/mkdir -p /opt/
        ExecStart=/opt/bin/curl-retry.sh --silent --location $KUBE_RELEASE_TARBALL --output /tmp/kubernetes.tgz
        ExecStart=/bin/tar xzvf /tmp/kubernetes.tgz -C /tmp/
@@ -250,24 +201,24 @@ coreos:
        ExecStartPost=/bin/mv /tmp/kubernetes/examples/guestbook /home/core/guestbook-example
        ExecStartPost=/bin/chown core. -R /home/core/guestbook-example
        ExecStartPost=/bin/rm -rf /tmp/kubernetes
        ExecStartPost=/bin/sed 's/\("createExternalLoadBalancer":\) true/\1 false/' -i /home/core/guestbook-example/frontend-service.json
        ExecStartPost=/bin/sed 's/# type: LoadBalancer/type: NodePort/' -i /home/core/guestbook-example/frontend-service.yaml
        RemainAfterExit=yes
        Type=oneshot
        [Install]
        WantedBy=kubernetes-master.target
        WantedBy=kubernetes-minion.target
        WantedBy=kubernetes-node.target

    - name: apiserver.service
    - name: kube-apiserver.service
      enable: true
      content: |
        [Unit]
        After=download-kubernetes.service
        Before=controller-manager.service
        Before=scheduler.service
        After=install-kubernetes.service
        Before=kube-controller-manager.service
        Before=kube-scheduler.service
        ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-apiserver
        Description=Kubernetes API Server
        Documentation=http://kubernetes.io/
        Wants=download-kubernetes.service
        Wants=install-kubernetes.service
        ConditionHost=kube-00
        [Service]
        ExecStart=/opt/kubernetes/server/bin/kube-apiserver \
@@ -275,23 +226,22 @@ coreos:
          --port=8080 \
          $ETCD_SERVERS \
          --service-cluster-ip-range=10.1.0.0/16 \
          --cloud_provider=vagrant \
          --logtostderr=true --v=3
        Restart=always
        RestartSec=10
        [Install]
        WantedBy=kubernetes-master.target

    - name: scheduler.service
    - name: kube-scheduler.service
      enable: true
      content: |
        [Unit]
        After=apiserver.service
        After=download-kubernetes.service
        After=kube-apiserver.service
        After=install-kubernetes.service
        ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-scheduler
        Description=Kubernetes Scheduler
        Documentation=http://kubernetes.io/
        Wants=apiserver.service
        Wants=kube-apiserver.service
        ConditionHost=kube-00
        [Service]
        ExecStart=/opt/kubernetes/server/bin/kube-scheduler \
@@ -302,21 +252,20 @@ coreos:
        [Install]
        WantedBy=kubernetes-master.target

    - name: controller-manager.service
    - name: kube-controller-manager.service
      enable: true
      content: |
        [Unit]
        After=download-kubernetes.service
        After=apiserver.service
        After=install-kubernetes.service
        After=kube-apiserver.service
        ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-controller-manager
        Description=Kubernetes Controller Manager
        Documentation=http://kubernetes.io/
        Wants=apiserver.service
        Wants=download-kubernetes.service
        Wants=kube-apiserver.service
        Wants=install-kubernetes.service
        ConditionHost=kube-00
        [Service]
        ExecStart=/opt/kubernetes/server/bin/kube-controller-manager \
          --cloud_provider=vagrant \
          --master=127.0.0.1:8080 \
          --logtostderr=true
        Restart=always
@@ -328,11 +277,11 @@ coreos:
      enable: true
      content: |
        [Unit]
        After=download-kubernetes.service
        After=install-kubernetes.service
        ConditionFileIsExecutable=/opt/kubernetes/server/bin/kubelet
        Description=Kubernetes Kubelet
        Documentation=http://kubernetes.io/
        Wants=download-kubernetes.service
        Wants=install-kubernetes.service
        ConditionHost=!kube-00
        [Service]
        ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests/
@@ -348,18 +297,17 @@ coreos:
        Restart=always
        RestartSec=10
        [Install]
        WantedBy=kubernetes-minion.target
        WantedBy=kubernetes-node.target

    - name: proxy.service
    - name: kube-proxy.service
      enable: true
      content: |
        [Unit]
        After=download-kubernetes.service
        After=install-kubernetes.service
        ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-proxy
        Description=Kubernetes Proxy
        Documentation=http://kubernetes.io/
        Wants=download-kubernetes.service
        ConditionHost=!kube-00
        Wants=install-kubernetes.service
        [Service]
        ExecStart=/opt/kubernetes/server/bin/kube-proxy \
          --master=http://kube-00:8080 \
@@ -367,23 +315,25 @@ coreos:
        Restart=always
        RestartSec=10
        [Install]
        WantedBy=kubernetes-minion.target
        WantedBy=kubernetes-master.target
        WantedBy=kubernetes-node.target

    - name: kubectl-create-minion.service
    - name: kube-create-addons.service
      enable: true
      content: |
        [Unit]
        After=download-kubernetes.service
        Before=proxy.service
        Before=kubelet.service
        After=install-kubernetes.service
        ConditionFileIsExecutable=/opt/kubernetes/server/bin/kubectl
        ConditionFileIsExecutable=/opt/bin/register_minion.sh
        Description=Kubernetes Create Minion
        ConditionPathIsDirectory=/etc/kubernetes/addons/
        ConditionHost=kube-00
        Description=Kubernetes Addons
        Documentation=http://kubernetes.io/
        Wants=download-kubernetes.service
        ConditionHost=!kube-00
        Wants=install-kubernetes.service
        Wants=kube-apiserver.service
        [Service]
        ExecStart=/opt/bin/register_minion.sh %H http://kube-00:8080 production
        Type=oneshot
        RemainAfterExit=no
        ExecStart=/opt/kubernetes/server/bin/kubectl create -f /etc/kubernetes/addons/
        SuccessExitStatus=1
        [Install]
        WantedBy=kubernetes-minion.target
        WantedBy=kubernetes-master.target
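Since `kube-create-addons.service` is a oneshot unit gated on `ConditionHost=kube-00`, a quick way to confirm the addon manifests were actually submitted is to read its journal on the master; for example:

```console
core@kube-00 ~ $ journalctl -u kube-create-addons.service
```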
docs/getting-started-guides/coreos/azure/expose_guestbook_app_port.sh (new executable file, 29 lines)
@@ -0,0 +1,29 @@
#!/bin/bash

# Copyright 2014 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -e

[ ! -z $1 ] || (echo Usage: $0 ssh_conf; exit 1)

fe_port=$(ssh -F $1 kube-00 \
  "/opt/bin/kubectl get -o template --template='{{(index .spec.ports 0).nodePort}}' services frontend -L name=frontend" \
)

echo "Guestbook app is on port $fe_port, will map it to port 80 on kube-00"

./node_modules/.bin/azure vm endpoint create kube-00 80 $fe_port

./node_modules/.bin/azure vm endpoint show kube-00 tcp-80-${fe_port}
@@ -13,9 +13,9 @@ var inspect = require('util').inspect;
var util = require('./util.js');

var coreos_image_ids = {
  'stable': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Stable-647.2.0',
  'beta': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Beta-681.0.0', // untested
  'alpha': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Alpha-695.0.0' // untested
  'stable': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Stable-717.3.0',
  'beta': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Beta-723.3.0', // untested
  'alpha': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Alpha-745.1.0' // untested
};

var conf = {};
@@ -3,7 +3,6 @@ var fs = require('fs');
var yaml = require('js-yaml');
var colors = require('colors/safe');


var write_cloud_config_from_object = function (data, output_file) {
  try {
    fs.writeFileSync(output_file, [
@@ -41,3 +40,19 @@ exports.process_template = function (input_file, output_file, processor) {
  }
  return write_cloud_config_from_object(processor(_.clone(data)), output_file);
};

exports.write_files_from = function (local_dir, remote_dir) {
  try {
    return _.map(fs.readdirSync(local_dir), function (fn) {
      return {
        path: [remote_dir, fn].join('/'),
        owner: 'root',
        permissions: '0640',
        encoding: 'base64',
        content: fs.readFileSync([local_dir, fn].join('/')).toString('base64'),
      };
    });
  } catch (e) {
    console.log(colors.red(e));
  }
};
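Because `write_files_from` base64-embeds whatever sits in the local `addons` directory into the generated cloud-config, the manifests should appear under `/etc/kubernetes/addons/` on the `kube-*` machines after boot; a quick check from the workstation might look like this (the exact file names depend on what is in `addons/`):

```sh
ssh -F ./output/kube_1c1496016083b4_ssh_conf kube-00 ls /etc/kubernetes/addons/
```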
@@ -35,7 +35,7 @@ etcd_initial_cluster_conf_kube = function (conf) {
  };

  return {
    'name': 'apiserver.service',
    'name': 'kube-apiserver.service',
    'drop-ins': [{
      'name': '50-etcd-initial-cluster.conf',
      'content': _.template("[Service]\nEnvironment=ETCD_SERVERS=--etcd_servers=<%= nodes.join(',') %>\n")(data),
@@ -68,8 +68,9 @@ exports.create_node_cloud_config = function (node_count, conf) {
    });
  };

  var write_files_extra = cloud_config.write_files_from('addons', '/etc/kubernetes/addons');
  return cloud_config.process_template(input_file, output_file, function(data) {
    data.write_files = data.write_files.concat(_(node_count).times(make_node_config));
    data.write_files = data.write_files.concat(_(node_count).times(make_node_config), write_files_extra);
    data.coreos.units.push(etcd_initial_cluster_conf_kube(conf));
    return data;
  });
@@ -9,7 +9,7 @@
  "author": "Ilya Dmitrichenko <errordeveloper@gmail.com>",
  "license": "Apache 2.0",
  "dependencies": {
    "azure-cli": "^0.9.2",
    "azure-cli": "^0.9.5",
    "colors": "^1.0.3",
    "js-yaml": "^3.2.5",
    "openssl-wrapper": "^0.2.1",