mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-28 05:57:25 +00:00
This commit is contained in:
parent
a4d871a100
commit
a5915438fc
2
docs/getting-started-guides/coreos/azure/.gitignore
vendored
Normal file
2
docs/getting-started-guides/coreos/azure/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
node_modules/
|
||||
output/*
|
182
docs/getting-started-guides/coreos/azure/README.md
Normal file
182
docs/getting-started-guides/coreos/azure/README.md
Normal file
@ -0,0 +1,182 @@
|
||||
---
|
||||
published: false
|
||||
title: Weaving Kubernetes on Azure
|
||||
tags: azure, coreos, kubernetes, usecase, guide, redis, php, cloud, provisioning
|
||||
---
|
||||
|
||||
## Introduction
|
||||
|
||||
In this tutorial we will demonstrate how to deploy a Kubernetes cluster to Azure cloud. Weave makes networking of containers simple and secure, in a transparent, yet robust way. The focus of this tutorial is to provide an out-of-the-box production-ready implementation with dedicated Kubernetes master and etcd nodes. It will also show how to scale the cluster with ease.
|
||||
|
||||
## Let's go!
|
||||
To get started, you need to checkout the code:
|
||||
|
||||
```
|
||||
git clone https://github.com/errordeveloper/weave-demos
|
||||
cd weave-demos/coreos-azure
|
||||
```
|
||||
|
||||
You will need to have [Node.js installed](http://nodejs.org/download/) on your machine. If you have previously used Azure CLI, you should have it already.
|
||||
|
||||
You first need to install some of the dependencies with
|
||||
|
||||
```
|
||||
npm install
|
||||
```
|
||||
|
||||
Now, all you need to do is:
|
||||
|
||||
```
|
||||
./azure-login.js
|
||||
./create-kubernetes-cluster.js
|
||||
```
|
||||
|
||||
This script will provision a cluster suitable for production use, where there is a ring of 3 dedicated etcd nodes, a Kubernetes master and 2 minions. The `kube-00` VM will be the master; your workloads are only to be deployed on the minion nodes, `kube-01` and `kube-02`. Initially, all VMs are single-core, to ensure a user of the free tier can reproduce it without paying extra. Later we will show how to add more, bigger VMs.
|
||||
|
||||

|
||||
|
||||
Once the creation of Azure VMs has finished, you should see the following:
|
||||
|
||||
```
|
||||
...
|
||||
azure_wrapper/info: Saved SSH config, you can use it like so: `ssh -F ./output/kubernetes_1c1496016083b4_ssh_conf <hostname>`
|
||||
azure_wrapper/info: The hosts in this deployment are:
|
||||
[ 'etcd-00', 'etcd-01', 'etcd-02', 'kube-00', 'kube-01', 'kube-02' ]
|
||||
azure_wrapper/info: Saved state into `./output/kubernetes_1c1496016083b4_deployment.yml`
|
||||
```
|
||||
|
||||
Let's login to the master node like so:
|
||||
```
|
||||
ssh -F ./output/kubernetes_1c1496016083b4_ssh_conf kube-00
|
||||
```
|
||||
> Note: config file name will be different, make sure to use the one you see.
|
||||
|
||||
Check there are 2 minions in the cluster:
|
||||
```
|
||||
core@kube-00 ~ $ kubectl get minions
|
||||
NAME LABELS STATUS
|
||||
kube-01 environment=production Ready
|
||||
kube-02 environment=production Ready
|
||||
```
|
||||
|
||||
## Deploying the workload
|
||||
|
||||
Let's follow the Guestbook example now:
|
||||
```
|
||||
cd guestbook-example
|
||||
kubectl create -f redis-master.json
|
||||
kubectl create -f redis-master-service.json
|
||||
kubectl create -f redis-slave-controller.json
|
||||
kubectl create -f redis-slave-service.json
|
||||
kubectl create -f frontend-controller.json
|
||||
kubectl create -f frontend-service.json
|
||||
```
|
||||
|
||||
You need to wait for the pods to get deployed, run the following and wait for `STATUS` to change from `Unknown`, through `Pending` to `Running`.
|
||||
```
|
||||
kubectl get pods --watch
|
||||
```
|
||||
> Note: most of the time will be spent downloading Docker container images on each of the minions.
|
||||
|
||||
Eventually you should see:
|
||||
```
|
||||
POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS
|
||||
redis-master 10.2.1.4 master dockerfile/redis kube-01/ name=redis-master Running
|
||||
40d8cebd-b679-11e4-b6f6-000d3a20a034 10.2.2.4 slave brendanburns/redis-slave kube-02/ name=redisslave,uses=redis-master Running
|
||||
40dbdcd0-b679-11e4-b6f6-000d3a20a034 10.2.1.5 slave brendanburns/redis-slave kube-01/ name=redisslave,uses=redis-master Running
|
||||
421473f6-b679-11e4-b6f6-000d3a20a034 10.2.2.5 php-redis kubernetes/example-guestbook-php-redis kube-02/ name=frontend,uses=redisslave,redis-master Running
|
||||
4214d4fe-b679-11e4-b6f6-000d3a20a034 10.2.1.6 php-redis kubernetes/example-guestbook-php-redis kube-01/ name=frontend,uses=redisslave,redis-master Running
|
||||
42153c72-b679-11e4-b6f6-000d3a20a034 php-redis kubernetes/example-guestbook-php-redis <unassigned> name=frontend,uses=redisslave,redis-master Pending
|
||||
```
|
||||
|
||||
## Scaling
|
||||
|
||||
Two single-core minions are certainly not enough for a production system of today, and, as you can see we have one _unassigned_ pod. Let's resize the cluster by adding a couple of bigger nodes.
|
||||
|
||||
You will need to open another terminal window on your machine and go to the same working directory (e.g. `~/Workspace/weave-demos/coreos-azure`).
|
||||
|
||||
First, let's set the size of the new VMs:
|
||||
```
|
||||
export AZ_VM_SIZE=Large
|
||||
```
|
||||
Now, run resize script with state file of the previous deployment:
|
||||
```
|
||||
./resize-kubernetes-cluster.js ./output/kubernetes_1c1496016083b4_deployment.yml
|
||||
...
|
||||
azure_wrapper/info: Saved SSH config, you can use it like so: `ssh -F ./output/kubernetes_8f984af944f572_ssh_conf <hostname>`
|
||||
azure_wrapper/info: The hosts in this deployment are:
|
||||
[ 'etcd-00',
|
||||
'etcd-01',
|
||||
'etcd-02',
|
||||
'kube-00',
|
||||
'kube-01',
|
||||
'kube-02',
|
||||
'kube-03',
|
||||
'kube-04' ]
|
||||
azure_wrapper/info: Saved state into `./output/kubernetes_8f984af944f572_deployment.yml`
|
||||
```
|
||||
> Note: this step has created new files in `./output`.
|
||||
|
||||
Back on `kube-00`:
|
||||
```
|
||||
core@kube-00 ~ $ kubectl get minions
|
||||
NAME LABELS STATUS
|
||||
kube-01 environment=production Ready
|
||||
kube-02 environment=production Ready
|
||||
kube-03 environment=production Ready
|
||||
kube-04 environment=production Ready
|
||||
```
|
||||
|
||||
We can see that two more minions joined happily. Let's resize the number of Guestbook instances we have.
|
||||
|
||||
First, double-check how many replication controllers there are:
|
||||
|
||||
```
|
||||
core@kube-00 ~ $ kubectl get rc
|
||||
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
|
||||
frontendController php-redis kubernetes/example-guestbook-php-redis name=frontend 3
|
||||
redisSlaveController slave brendanburns/redis-slave name=redisslave 2
|
||||
```
|
||||
As we have 4 minions, let's resize proportionally:
|
||||
```
|
||||
core@kube-00 ~ $ kubectl resize --replicas=4 rc redisSlaveController
|
||||
resized
|
||||
core@kube-00 ~ $ kubectl resize --replicas=4 rc frontendController
|
||||
resized
|
||||
```
|
||||
Check what we have now:
|
||||
```
|
||||
kubectl get rc
|
||||
CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
|
||||
frontendController php-redis kubernetes/example-guestbook-php-redis name=frontend 4
|
||||
redisSlaveController slave brendanburns/redis-slave name=redisslave 4
|
||||
```
|
||||
|
||||
You will now have more instances of front-end Guestbook apps and Redis slaves; and, if we look up all pods labeled `name=frontend`, we should see one running on each node.
|
||||
|
||||
```
|
||||
core@kube-00 ~/guestbook-example $ kubectl get pods -l name=frontend
|
||||
POD IP CONTAINER(S) IMAGE(S) HOST LABELS STATUS
|
||||
4214d4fe-b679-11e4-b6f6-000d3a20a034 10.2.1.6 php-redis kubernetes/example-guestbook-php-redis kube-01/ name=frontend,uses=redisslave,redis-master Running
|
||||
ae59fa80-b679-11e4-b6f6-000d3a20a034 10.2.4.5 php-redis kubernetes/example-guestbook-php-redis kube-04/ name=frontend,uses=redisslave,redis-master Running
|
||||
421473f6-b679-11e4-b6f6-000d3a20a034 10.2.2.5 php-redis kubernetes/example-guestbook-php-redis kube-02/ name=frontend,uses=redisslave,redis-master Running
|
||||
42153c72-b679-11e4-b6f6-000d3a20a034 10.2.3.4 php-redis kubernetes/example-guestbook-php-redis kube-03/ name=frontend,uses=redisslave,redis-master Running
|
||||
```
|
||||
|
||||
## Exposing the app to the outside world
|
||||
|
||||
To make sure the app is working, we should load it in the browser. For accessing the Guestbook service from the outside world, I had to create an Azure endpoint as shown in the picture below.
|
||||
|
||||

|
||||
|
||||
I was then able to access it from anywhere via the Azure virtual IP for `kube-01`, i.e. `http://104.40.211.194:8000/`.
|
||||
|
||||
## Destroying the VMs
|
||||
|
||||
To delete the cluster run this:
|
||||
```
|
||||
./destroy-cluster.js ./output/kubernetes_8f984af944f572_deployment.yml
|
||||
```
|
||||
|
||||
Make sure to use the latest state file, as after resizing there is a new one. By the way, with the scripts shown, you can deploy multiple clusters, if you like :)
|
||||
|
3
docs/getting-started-guides/coreos/azure/azure-login.js
Executable file
3
docs/getting-started-guides/coreos/azure/azure-login.js
Executable file
@ -0,0 +1,3 @@
|
||||
#!/usr/bin/env node

// Run `azure login` via the locally installed azure-cli, forwarding the
// process's command-line arguments to it.
// NOTE(review): `process.argv` includes the node binary path and this
// script's path as its first two entries; forwarding the whole array passes
// those on to azure-cli as well — confirm this is intentional
// (`process.argv.slice(2)` may be what was meant).
require('child_process').fork('node_modules/azure-cli/bin/azure', ['login'].concat(process.argv));
|
@ -0,0 +1,73 @@
|
||||
## This file is used as input to the deployment script, which amends it as needed.
|
||||
## More specifically, we need to add environment files for as many nodes as we
|
||||
## are going to deploy.
|
||||
|
||||
coreos:
|
||||
units:
|
||||
- name: dhcpcd.service
|
||||
enable: true
|
||||
command: start
|
||||
|
||||
- name: systemd-resolved.service
|
||||
command: stop
|
||||
|
||||
- name: 10-weave.network
|
||||
runtime: false
|
||||
content: |
|
||||
[Match]
|
||||
Type=bridge
|
||||
Name=weave*
|
||||
[Network]
|
||||
|
||||
- name: weave.service
|
||||
content: |
|
||||
[Unit]
|
||||
After=install-weave.service
|
||||
Description=Weave Network
|
||||
Documentation=http://zettio.github.io/weave/
|
||||
Requires=install-weave.service
|
||||
[Service]
|
||||
EnvironmentFile=/etc/weave.%H.env
|
||||
ExecStartPre=/opt/bin/weave launch $WEAVE_PEERS
|
||||
ExecStartPre=/opt/bin/weave launch-dns $WEAVEDNS_ADDR
|
||||
ExecStart=/usr/bin/docker attach weave
|
||||
Restart=on-failure
|
||||
ExecStop=/opt/bin/weave stop
|
||||
ExecStop=/opt/bin/weave stop-dns
|
||||
|
||||
- name: install-weave.service
|
||||
command: start
|
||||
enable: true
|
||||
content: |
|
||||
[Unit]
|
||||
After=network-online.target
|
||||
After=docker.service
|
||||
Description=Install Weave
|
||||
Documentation=http://zettio.github.io/weave/
|
||||
Requires=network-online.target
|
||||
Requires=docker.service
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/usr/bin/docker pull zettio/weave:latest
|
||||
ExecStartPre=/usr/bin/docker pull zettio/weavedns:latest
|
||||
ExecStartPre=/usr/bin/docker pull zettio/weavetools:latest
|
||||
ExecStartPre=/bin/mkdir -p /opt/bin/
|
||||
ExecStartPre=/usr/bin/curl \
|
||||
--silent \
|
||||
--location \
|
||||
https://github.com/zettio/weave/releases/download/latest_release/weave \
|
||||
--output /opt/bin/weave
|
||||
ExecStartPre=/usr/bin/chmod +x /opt/bin/weave
|
||||
ExecStart=/bin/echo Weave Installed
|
||||
|
||||
- name: weave-network.target
|
||||
command: start
|
||||
enable: true
|
||||
content: |
|
||||
[Unit]
|
||||
Requires=weave.service
|
||||
RefuseManualStart=no
|
||||
Wants=weave.service
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
@ -0,0 +1,22 @@
|
||||
## This file is used as input to the deployment script, which amends it as needed.
|
||||
## More specifically, we need to add peer hosts for each but the elected peer.
|
||||
|
||||
coreos:
|
||||
etcd:
|
||||
name: etcd
|
||||
addr: $private_ipv4:4001
|
||||
bind-addr: 0.0.0.0
|
||||
peer-addr: $private_ipv4:7001
|
||||
snapshot: true
|
||||
max-retry-attempts: 50
|
||||
units:
|
||||
- name: dhcpcd.service
|
||||
enable: true
|
||||
command: start
|
||||
- name: systemd-resolved.service
|
||||
command: stop
|
||||
- name: etcd.service
|
||||
command: start
|
||||
update:
|
||||
group: stable
|
||||
reboot-strategy: off
|
@ -0,0 +1,351 @@
|
||||
## This file is used as input to the deployment script, which amends it as needed.
|
||||
## More specifically, we need to add environment files for as many nodes as we
|
||||
## are going to deploy.
|
||||
|
||||
write_files:
|
||||
- path: /opt/bin/register_minion.sh
|
||||
permissions: '0755'
|
||||
owner: root
|
||||
content: |
|
||||
#!/bin/sh -xe
|
||||
minion_id="${1}"
|
||||
master_url="${2}"
|
||||
until healthcheck=$(curl --fail --silent "${master_url}/healthz")
|
||||
do sleep 2
|
||||
done
|
||||
test -n "${healthcheck}"
|
||||
test "${healthcheck}" = "ok"
|
||||
printf '{ "id": "%s", "kind": "Minion", "apiVersion": "v1beta1", "labels": { "environment": "production" } }' "${minion_id}" \
|
||||
| /opt/bin/kubectl create -s "${master_url}" -f -
|
||||
|
||||
coreos:
|
||||
update:
|
||||
group: stable
|
||||
units:
|
||||
- name: docker.service
|
||||
drop-ins:
|
||||
- name: 50-weave-kubernetes.conf
|
||||
content: |
|
||||
[Service]
|
||||
Environment=DOCKER_OPTS='--bridge="weave" -r="false"'
|
||||
|
||||
- name: dhcpcd.service
|
||||
enable: true
|
||||
command: start
|
||||
|
||||
- name: systemd-resolved.service
|
||||
command: stop
|
||||
|
||||
- name: wait-for-network-online.service
|
||||
enable: true
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
After=network-online.target
|
||||
Before=install-weave.service
|
||||
Before=download-kubernetes.service
|
||||
Description=Temporary hack, as the use of dhcpcd.service breaks nework-online.online
|
||||
Documentation=https://github.com/coreos/bugs/issues/267
|
||||
Requires=network-online.target
|
||||
[Service]
|
||||
Type=oneshot
|
||||
#RemainAfterExit=yes
|
||||
ExecStart=/bin/sh -c 'until curl --silent --fail https://status.github.com/api/status.json | grep -q \"good\"; do sleep 2; done'
|
||||
[Install]
|
||||
WantedBy=network-online.target
|
||||
|
||||
- name: weave-network.target
|
||||
enable: true
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Weave Network Setup Complete
|
||||
Documentation=man:systemd.special(7)
|
||||
RefuseManualStart=no
|
||||
After=network-online.target
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
WantedBy=kubernetes-master.target
|
||||
WantedBy=kubernetes-minion.target
|
||||
|
||||
- name: kubernetes-master.target
|
||||
enable: true
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes Cluster Master
|
||||
Documentation=http://kubernetes.io/
|
||||
RefuseManualStart=no
|
||||
After=weave-network.target
|
||||
Requires=weave-network.target
|
||||
ConditionHost=kube-00
|
||||
Wants=apiserver.service
|
||||
Wants=scheduler.service
|
||||
Wants=controller-manager.service
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
- name: kubernetes-minion.target
|
||||
enable: true
|
||||
command: start
|
||||
content: |
|
||||
[Unit]
|
||||
Description=Kubernetes Cluster Minion
|
||||
Documentation=http://kubernetes.io/
|
||||
RefuseManualStart=no
|
||||
After=weave-network.target
|
||||
Requires=weave-network.target
|
||||
ConditionHost=!kube-00
|
||||
Wants=proxy.service
|
||||
Wants=kubelet.service
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
- name: 10-weave.network
|
||||
runtime: false
|
||||
content: |
|
||||
[Match]
|
||||
Type=bridge
|
||||
Name=weave*
|
||||
[Network]
|
||||
|
||||
- name: install-weave.service
|
||||
enable: true
|
||||
content: |
|
||||
[Unit]
|
||||
After=network-online.target
|
||||
Before=weave.service
|
||||
Before=weave-helper.service
|
||||
Before=docker.service
|
||||
Description=Install Weave
|
||||
Documentation=http://zettio.github.io/weave/
|
||||
Requires=network-online.target
|
||||
[Service]
|
||||
Type=oneshot
|
||||
RemainAfterExit=yes
|
||||
ExecStartPre=/bin/mkdir -p /opt/bin/
|
||||
ExecStartPre=/usr/bin/curl \
|
||||
--silent \
|
||||
--location \
|
||||
https://github.com/zettio/weave/releases/download/latest_release/weave \
|
||||
--output /opt/bin/weave
|
||||
ExecStartPre=/usr/bin/curl \
|
||||
--silent \
|
||||
--location \
|
||||
https://raw.github.com/errordeveloper/weave-demos/master/poseidon/weave-helper \
|
||||
--output /opt/bin/weave-helper
|
||||
ExecStartPre=/usr/bin/chmod +x /opt/bin/weave
|
||||
ExecStartPre=/usr/bin/chmod +x /opt/bin/weave-helper
|
||||
ExecStart=/bin/echo Weave Installed
|
||||
[Install]
|
||||
WantedBy=weave-network.target
|
||||
WantedBy=weave.service
|
||||
|
||||
- name: weave-helper.service
|
||||
enable: true
|
||||
content: |
|
||||
[Unit]
|
||||
After=install-weave.service
|
||||
After=docker.service
|
||||
Description=Weave Network Router
|
||||
Documentation=http://zettio.github.io/weave/
|
||||
Requires=docker.service
|
||||
Requires=install-weave.service
|
||||
[Service]
|
||||
ExecStart=/opt/bin/weave-helper
|
||||
[Install]
|
||||
WantedBy=weave-network.target
|
||||
|
||||
- name: weave.service
|
||||
enable: true
|
||||
content: |
|
||||
[Unit]
|
||||
After=install-weave.service
|
||||
After=docker.service
|
||||
Description=Weave Network Router
|
||||
Documentation=http://zettio.github.io/weave/
|
||||
Requires=docker.service
|
||||
Requires=install-weave.service
|
||||
[Service]
|
||||
EnvironmentFile=/etc/weave.%H.env
|
||||
ExecStartPre=/usr/bin/docker pull zettio/weave:latest
|
||||
ExecStartPre=/usr/bin/docker pull zettio/weavetools:latest
|
||||
ExecStartPre=/opt/bin/weave launch $WEAVE_PEERS
|
||||
ExecStart=/usr/bin/docker attach weave
|
||||
Restart=on-failure
|
||||
ExecStop=/opt/bin/weave stop
|
||||
[Install]
|
||||
WantedBy=weave-network.target
|
||||
|
||||
- name: weave-create-bridge.service
|
||||
enable: true
|
||||
content: |
|
||||
[Unit]
|
||||
After=network.target
|
||||
After=install-weave.service
|
||||
Before=weave.service
|
||||
Before=docker.service
|
||||
Description=Docker Application Container Engine
|
||||
Documentation=http://docs.docker.io
|
||||
Requires=network.target
|
||||
Requires=install-weave.service
|
||||
[Service]
|
||||
Type=oneshot
|
||||
EnvironmentFile=/etc/weave.%H.env
|
||||
ExecStart=/opt/bin/weave create-bridge
|
||||
ExecStart=/usr/bin/ip addr add dev weave $BRIDGE_ADDRESS_CIDR
|
||||
ExecStart=/usr/bin/ip route add $BREAKOUT_ROUTE dev weave scope link
|
||||
ExecStart=/usr/bin/ip route add 224.0.0.0/4 dev weave
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
WantedBy=weave-network.target
|
||||
|
||||
- name: download-kubernetes.service
|
||||
enable: true
|
||||
content: |
|
||||
[Unit]
|
||||
After=network-online.target
|
||||
Before=apiserver.service
|
||||
Before=controller-manager.service
|
||||
Before=kubelet.service
|
||||
Before=proxy.service
|
||||
Description=Download Kubernetes Binaries
|
||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
||||
Requires=network-online.target
|
||||
[Service]
|
||||
Environment=KUBE_RELEASE_TARBALL=https://github.com/GoogleCloudPlatform/kubernetes/releases/download/v0.9.3/kubernetes.tar.gz
|
||||
ExecStartPre=/bin/mkdir -p /opt/
|
||||
ExecStart=/bin/bash -c "curl --silent --location $KUBE_RELEASE_TARBALL | tar xzv -C /tmp/"
|
||||
ExecStart=/bin/tar xzvf /tmp/kubernetes/server/kubernetes-server-linux-amd64.tar.gz -C /opt
|
||||
ExecStartPost=/bin/ln -s /opt/kubernetes/server/bin/kubectl /opt/bin/
|
||||
ExecStartPost=/bin/mv /tmp/kubernetes/examples/guestbook /home/core/guestbook-example
|
||||
ExecStartPost=/bin/rm -rf /tmp/kubernetes
|
||||
RemainAfterExit=yes
|
||||
Type=oneshot
|
||||
[Install]
|
||||
WantedBy=kubernetes-master.target
|
||||
WantedBy=kubernetes-minion.target
|
||||
|
||||
- name: apiserver.service
|
||||
enable: true
|
||||
content: |
|
||||
[Unit]
|
||||
After=download-kubernetes.service
|
||||
Before=controller-manager.service
|
||||
Before=scheduler.service
|
||||
ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-apiserver
|
||||
Description=Kubernetes API Server
|
||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
||||
Wants=download-kubernetes.service
|
||||
ConditionHost=kube-00
|
||||
[Service]
|
||||
ExecStart=/opt/kubernetes/server/bin/kube-apiserver \
|
||||
--address=0.0.0.0 \
|
||||
--port=8080 \
|
||||
--etcd_servers=http://etcd-00:4001,http://etcd-01:4001,http://etcd-02:4001 \
|
||||
--portal_net=10.1.0.0/16 \
|
||||
--cloud_provider=vagrant \
|
||||
--logtostderr=true --v=3
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
[Install]
|
||||
WantedBy=kubernetes-master.target
|
||||
|
||||
- name: scheduler.service
|
||||
enable: true
|
||||
content: |
|
||||
[Unit]
|
||||
After=apiserver.service
|
||||
After=download-kubernetes.service
|
||||
ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-scheduler
|
||||
Description=Kubernetes Scheduler
|
||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
||||
Wants=apiserver.service
|
||||
ConditionHost=kube-00
|
||||
[Service]
|
||||
ExecStart=/opt/kubernetes/server/bin/kube-scheduler \
|
||||
--logtostderr=true \
|
||||
--master=127.0.0.1:8080
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
[Install]
|
||||
WantedBy=kubernetes-master.target
|
||||
|
||||
- name: controller-manager.service
|
||||
enable: true
|
||||
content: |
|
||||
[Unit]
|
||||
After=download-kubernetes.service
|
||||
After=apiserver.service
|
||||
ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-controller-manager
|
||||
Description=Kubernetes Controller Manager
|
||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
||||
Wants=apiserver.service
|
||||
Wants=download-kubernetes.service
|
||||
ConditionHost=kube-00
|
||||
[Service]
|
||||
ExecStart=/opt/kubernetes/server/bin/kube-controller-manager \
|
||||
--cloud_provider=vagrant \
|
||||
--master=127.0.0.1:8080 \
|
||||
--logtostderr=true
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
[Install]
|
||||
WantedBy=kubernetes-master.target
|
||||
|
||||
- name: kubelet.service
|
||||
enable: true
|
||||
content: |
|
||||
[Unit]
|
||||
After=download-kubernetes.service
|
||||
ConditionFileIsExecutable=/opt/kubernetes/server/bin/kubelet
|
||||
Description=Kubernetes Kubelet
|
||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
||||
Wants=download-kubernetes.service
|
||||
ConditionHost=!kube-00
|
||||
[Service]
|
||||
ExecStart=/opt/kubernetes/server/bin/kubelet \
|
||||
--address=0.0.0.0 \
|
||||
--port=10250 \
|
||||
--hostname_override=%H \
|
||||
--etcd_servers=http://etcd-00:4001,http://etcd-01:4001,http://etcd-02:4001 \
|
||||
--logtostderr=true
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
[Install]
|
||||
WantedBy=kubernetes-minion.target
|
||||
|
||||
- name: proxy.service
|
||||
enable: true
|
||||
content: |
|
||||
[Unit]
|
||||
After=download-kubernetes.service
|
||||
ConditionFileIsExecutable=/opt/kubernetes/server/bin/kube-proxy
|
||||
Description=Kubernetes Proxy
|
||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
||||
Wants=download-kubernetes.service
|
||||
ConditionHost=!kube-00
|
||||
[Service]
|
||||
ExecStart=/opt/kubernetes/server/bin/kube-proxy \
|
||||
--etcd_servers=http://etcd-00:4001,http://etcd-01:4001,http://etcd-02:4001 \
|
||||
--logtostderr=true
|
||||
Restart=always
|
||||
RestartSec=10
|
||||
[Install]
|
||||
WantedBy=kubernetes-minion.target
|
||||
|
||||
- name: kubectl-create-minion.service
|
||||
enable: true
|
||||
content: |
|
||||
[Unit]
|
||||
After=download-kubernetes.service
|
||||
ConditionFileIsExecutable=/opt/kubernetes/server/bin/kubectl
|
||||
ConditionFileIsExecutable=/opt/bin/register_minion.sh
|
||||
Description=Kubernetes Create Minion
|
||||
Documentation=https://github.com/GoogleCloudPlatform/kubernetes
|
||||
Wants=download-kubernetes.service
|
||||
ConditionHost=!kube-00
|
||||
[Service]
|
||||
ExecStart=/opt/bin/register_minion.sh %H http://kube-00:8080
|
||||
Type=oneshot
|
||||
[Install]
|
||||
WantedBy=kubernetes-minion.target
|
12
docs/getting-started-guides/coreos/azure/create-basic-weave-cluster.js
Executable file
12
docs/getting-started-guides/coreos/azure/create-basic-weave-cluster.js
Executable file
@ -0,0 +1,12 @@
|
||||
#!/usr/bin/env node

var azure = require('./lib/azure_wrapper.js');
var weave = require('./lib/deployment_logic/weave.js');

// Provision a 3-node CoreOS (stable channel) cluster running Weave.
azure.create_config('weave-cluster-example', { 'core': 3 });

// The queue_* calls push tasks onto the wrapper's internal queue and return
// undefined; run_task_queue ignores its argument, so the array literal here
// only sequences the calls.
azure.run_task_queue([
  azure.queue_default_network(),
  azure.queue_machines('core', 'stable',
    weave.create_basic_cloud_config),
]);
|
14
docs/getting-started-guides/coreos/azure/create-kubernetes-cluster.js
Executable file
14
docs/getting-started-guides/coreos/azure/create-kubernetes-cluster.js
Executable file
@ -0,0 +1,14 @@
|
||||
#!/usr/bin/env node

var azure = require('./lib/azure_wrapper.js');
var kube = require('./lib/deployment_logic/kubernetes.js');

// Provision the Kubernetes cluster: 3 etcd nodes plus 3 kube nodes
// (one master, kube-00, and two minions).
azure.create_config('kubernetes', { 'etcd': 3, 'kube': 3 });

// The queue_* calls push tasks onto the wrapper's internal queue and return
// undefined; run_task_queue ignores its argument, so the array literal here
// only sequences the calls.
azure.run_task_queue([
  azure.queue_default_network(),
  azure.queue_machines('etcd', 'stable',
    kube.create_etcd_cloud_config),
  azure.queue_machines('kube', 'stable',
    kube.create_node_cloud_config),
]);
|
7
docs/getting-started-guides/coreos/azure/destroy-cluster.js
Executable file
7
docs/getting-started-guides/coreos/azure/destroy-cluster.js
Executable file
@ -0,0 +1,7 @@
|
||||
#!/usr/bin/env node

// Destroy the cluster described by the deployment state file passed as the
// first command-line argument.
var azure = require('./lib/azure_wrapper.js');

azure.destroy_cluster(process.argv[2]);

// NOTE(review): destroy_cluster kicks off asynchronous azure-cli tasks, so
// this message is printed before the deletion has actually completed.
console.log('The cluster had been destroyed, you can delete the state file now.');
|
250
docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js
Normal file
250
docs/getting-started-guides/coreos/azure/lib/azure_wrapper.js
Normal file
@ -0,0 +1,250 @@
|
||||
var _ = require('underscore');
|
||||
|
||||
var fs = require('fs');
|
||||
var cp = require('child_process');
|
||||
|
||||
var yaml = require('js-yaml');
|
||||
|
||||
var openssl = require('openssl-wrapper');
|
||||
|
||||
var clr = require('colors');
|
||||
var inspect = require('util').inspect;
|
||||
|
||||
var util = require('./util.js');
|
||||
|
||||
// Azure VM image IDs for each CoreOS release channel.
var coreos_image_ids = {
  'stable': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Stable-557.2.0',
  'beta': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Beta-557.2.0',
  'alpha': '2b171e93f07c4903bcad35bda10acf22__CoreOS-Alpha-584.0.0',
};

// Deployment configuration; populated by create_config() or load_state().
var conf = {};

// Hosts provisioned so far; ssh_port_counter hands out a unique public SSH
// port per VM, starting above 2200.
var hosts = {
  collection: [],
  ssh_port_counter: 2200,
};

// FIFO of azure-cli argument vectors, consumed by run_task_queue().
var task_queue = [];
|
||||
|
||||
// Drain the module-level `task_queue`, running each entry as an azure-cli
// invocation (a forked node process), strictly one at a time. When the queue
// is exhausted (and we are not destroying a cluster), write the SSH config
// and persist the deployment state. The `dummy` parameter is unused.
exports.run_task_queue = function (dummy) {
  var tasks = {
    todo: task_queue, // remaining azure-cli argument vectors
    done: [],         // results of completed invocations
  };

  // Take the next task off the queue, logging the current queue state.
  var pop_task = function() {
    console.log(clr.yellow('azure_wrapper/task:'), clr.grey(inspect(tasks)));
    var ret = {};
    ret.current = tasks.todo.shift();
    ret.remaining = tasks.todo.length;
    return ret;
  };

  // Recursive async loop: run one task, then recurse from its exit handler.
  (function iter (task) {
    if (task.current === undefined) {
      // Queue exhausted; persist outputs unless we are tearing the cluster down.
      if (conf.destroying === undefined) {
        create_ssh_conf();
        save_state();
      }
      return;
    } else {
      if (task.current.length !== 0) {
        console.log(clr.yellow('azure_wrapper/exec:'), clr.blue(inspect(task.current)));
        cp.fork('node_modules/azure-cli/bin/azure', task.current)
          .on('exit', function (code, signal) {
            tasks.done.push({
              code: code,
              signal: signal,
              what: task.current.join(' '),
              remaining: task.remaining,
            });
            // Abort on the first failure — except while destroying, where
            // errors are tolerated so cleanup can proceed.
            if (code !== 0 && conf.destroying === undefined) {
              console.log(clr.red('azure_wrapper/fail: Exiting due to an error.'));
              save_state();
              console.log(clr.cyan('azure_wrapper/info: You probably want to destroy and re-run.'));
              process.abort();
            } else {
              iter(pop_task());
            }
          });
      } else {
        // Empty task (e.g. a node skipped during resize) — move on.
        iter(pop_task());
      }
    }
  })(pop_task());
};
|
||||
|
||||
// Persist the current deployment configuration (including the list of
// provisioned hosts) as a YAML state file under the output directory.
// Errors are reported on the console rather than thrown.
var save_state = function () {
  var state_path = util.join_output_file_path(conf.name, 'deployment.yml');
  try {
    conf.hosts = hosts.collection;
    var serialized = yaml.safeDump(conf);
    fs.writeFileSync(state_path, serialized);
    console.log(clr.yellow('azure_wrapper/info: Saved state into `%s`'), state_path);
  } catch (e) {
    console.log(clr.red(e));
  }
};
|
||||
|
||||
// Load a previously saved YAML state file into the module-level `conf`
// and return it. On failure the error is logged and undefined is returned.
var load_state = function (file_name) {
  try {
    var raw = fs.readFileSync(file_name, 'utf8');
    conf = yaml.safeLoad(raw);
    console.log(clr.yellow('azure_wrapper/info: Loaded state from `%s`'), file_name);
    return conf;
  } catch (e) {
    console.log(clr.red(e));
  }
};
|
||||
|
||||
// Generate a self-signed X.509 certificate and RSA private key pair used for
// SSH access to the VMs (Azure accepts the .pem as an SSH cert). Key
// generation runs asynchronously; the output file paths are returned
// immediately. Errors from openssl or chmod are logged, not thrown.
// Fix: terminate the return statement and the function expression with
// semicolons instead of relying on automatic semicolon insertion.
var create_ssh_key = function (prefix) {
  var opts = {
    x509: true,
    nodes: true, // no passphrase on the private key
    newkey: 'rsa:2048',
    subj: '/O=Weaveworks, Inc./L=London/C=GB/CN=weave.works',
    keyout: util.join_output_file_path(prefix, 'ssh.key'),
    out: util.join_output_file_path(prefix, 'ssh.pem'),
  };
  openssl.exec('req', opts, function (err, buffer) {
    if (err) console.log(clr.red(err));
    // Restrict the private key to owner read/write, as ssh requires.
    fs.chmod(opts.keyout, '0600', function (err) {
      if (err) console.log(clr.red(err));
    });
  });
  return {
    key: opts.keyout,
    pem: opts.out,
  };
};
|
||||
|
||||
// Write an ssh_config file for the deployment so users can run
// `ssh -F <file> <hostname>`. All hosts share the cloud service's public
// DNS name; each host gets its own port entry.
var create_ssh_conf = function () {
  var file_name = util.join_output_file_path(conf.name, 'ssh_conf');
  var ssh_conf_head = [
    "Host *",
    "\tHostname " + conf.resources['service'] + ".cloudapp.net",
    "\tUser core",
    "\tCompression yes",
    "\tLogLevel FATAL",
    "\tStrictHostKeyChecking no",
    "\tUserKnownHostsFile /dev/null",
    "\tIdentitiesOnly yes",
    "\tIdentityFile " + conf.resources['ssh_key']['key'],
    "\n",
  ];

  // One `Host`/`Port` stanza per provisioned VM.
  var host_entries = hosts.collection.map(function (host) {
    return "Host " + host.name + "\n\tPort " + host.port + "\n";
  });

  fs.writeFileSync(file_name, ssh_conf_head.concat(host_entries).join('\n'));
  console.log(clr.yellow('azure_wrapper/info:'), clr.green('Saved SSH config, you can use it like so: `ssh -F ', file_name, '<hostname>`'));
  console.log(clr.yellow('azure_wrapper/info:'), clr.green('The hosts in this deployment are:\n'), hosts.collection.map(function (host) { return host.name; }));
};
|
||||
|
||||
// Build the `--location` CLI flag, honoring the AZ_LOCATION environment
// variable override; defaults to 'West Europe'.
var get_location = function () {
  var region = process.env['AZ_LOCATION'];
  if (!region) {
    region = 'West Europe';
  }
  return '--location=' + region;
};
|
||||
// Build the `--vm-size` CLI flag, honoring the AZ_VM_SIZE environment
// variable override; defaults to 'Small'.
var get_vm_size = function () {
  var size = process.env['AZ_VM_SIZE'];
  if (!size) {
    size = 'Small';
  }
  return '--vm-size=' + size;
};
|
||||
|
||||
exports.queue_default_network = function () {
|
||||
task_queue.push([
|
||||
'network', 'vnet', 'create',
|
||||
get_location(),
|
||||
'--address-space=172.16.0.0',
|
||||
conf.resources['vnet'],
|
||||
]);
|
||||
};
|
||||
|
||||
// Queue `azure vm create` invocations for every node of the given name
// prefix (e.g. 'etcd' or 'kube'). `cloud_config_creator` returns either one
// cloud-config file path shared by all nodes, or an array with one path per
// node. During a resize (conf.resizing), already-existing nodes yield empty
// task arrays, which run_task_queue() skips.
exports.queue_machines = function (name_prefix, coreos_update_channel, cloud_config_creator) {
  var x = conf.nodes[name_prefix]; // number of nodes for this prefix
  var vm_create_base_args = [
    'vm', 'create',
    get_location(),
    get_vm_size(),
    '--connect=' + conf.resources['service'],
    '--virtual-network-name=' + conf.resources['vnet'],
    '--no-ssh-password',
    '--ssh-cert=' + conf.resources['ssh_key']['pem'],
  ];

  var cloud_config = cloud_config_creator(x, conf);

  // Register the n-th host (name + next unique SSH port) in the module-level
  // collection and return its per-VM CLI arguments.
  var next_host = function (n) {
    hosts.ssh_port_counter += 1;
    var host = { name: util.hostname(n, name_prefix), port: hosts.ssh_port_counter };
    if (cloud_config instanceof Array) {
      host.cloud_config_file = cloud_config[n];
    } else {
      host.cloud_config_file = cloud_config;
    }
    hosts.collection.push(host);
    return _.map([
      "--vm-name=<%= name %>",
      "--ssh=<%= port %>",
      "--custom-data=<%= cloud_config_file %>",
    ], function (arg) { return _.template(arg)(host); });
  };

  task_queue = task_queue.concat(_(x).times(function (n) {
    if (conf.resizing && n < conf.old_size) {
      return []; // node already exists; emit a no-op task
    } else {
      return vm_create_base_args.concat(next_host(n), [
        coreos_image_ids[coreos_update_channel], 'core',
      ]);
    }
  }));
};
|
||||
|
||||
exports.create_config = function (name, nodes) {
|
||||
conf = {
|
||||
name: name,
|
||||
nodes: nodes,
|
||||
weave_salt: util.rand_string(),
|
||||
resources: {
|
||||
vnet: [name, 'internal-vnet', util.rand_suffix].join('-'),
|
||||
service: [name, 'service-cluster', util.rand_suffix].join('-'),
|
||||
ssh_key: create_ssh_key(name),
|
||||
}
|
||||
};
|
||||
|
||||
};
|
||||
|
||||
exports.destroy_cluster = function (state_file) {
|
||||
load_state(state_file);
|
||||
if (conf.hosts === undefined) {
|
||||
console.log(clr.red('azure_wrapper/fail: Nothing to delete.'));
|
||||
process.abort();
|
||||
}
|
||||
|
||||
conf.destroying = true;
|
||||
task_queue = _.map(conf.hosts, function (host) {
|
||||
return ['vm', 'delete', '--quiet', '--blob-delete', host.name];
|
||||
});
|
||||
|
||||
task_queue.push(['network', 'vnet', 'delete', '--quiet', conf.resources['vnet']]);
|
||||
|
||||
exports.run_task_queue();
|
||||
};
|
||||
|
||||
exports.load_state_for_resizing = function (state_file, node_type, new_nodes) {
|
||||
load_state(state_file);
|
||||
if (conf.hosts === undefined) {
|
||||
console.log(clr.red('azure_wrapper/fail: Nothing to look at.'));
|
||||
process.abort();
|
||||
}
|
||||
conf.resizing = true;
|
||||
conf.old_size = conf.nodes[node_type];
|
||||
conf.old_state_file = state_file;
|
||||
conf.nodes[node_type] += new_nodes;
|
||||
hosts.collection = conf.hosts;
|
||||
hosts.ssh_port_counter += conf.hosts.length;
|
||||
}
|
43
docs/getting-started-guides/coreos/azure/lib/cloud_config.js
Normal file
43
docs/getting-started-guides/coreos/azure/lib/cloud_config.js
Normal file
@ -0,0 +1,43 @@
|
||||
var _ = require('underscore');
|
||||
var fs = require('fs');
|
||||
var yaml = require('js-yaml');
|
||||
var colors = require('colors/safe');
|
||||
|
||||
|
||||
// Serialise `data` as YAML under a '#cloud-config' header and write it to
// `output_file`. Returns the file path on success; on failure the error is
// logged in red and undefined is returned (callers treat this as
// best-effort).
var write_cloud_config_from_object = function (data, output_file) {
  try {
    var body = ['#cloud-config', yaml.safeDump(data)].join("\n");
    fs.writeFileSync(output_file, body);
    return output_file;
  } catch (e) {
    console.log(colors.red(e));
  }
};
|
||||
|
||||
exports.generate_environment_file_entry_from_object = function (hostname, environ) {
|
||||
var data = {
|
||||
hostname: hostname,
|
||||
environ_array: _.map(environ, function (value, key) {
|
||||
return [key.toUpperCase(), JSON.stringify(value.toString())].join('=');
|
||||
}),
|
||||
};
|
||||
|
||||
return {
|
||||
permissions: '0600',
|
||||
owner: 'root',
|
||||
content: _.template("<%= environ_array.join('\\n') %>\n")(data),
|
||||
path: _.template("/etc/weave.<%= hostname %>.env")(data),
|
||||
};
|
||||
};
|
||||
|
||||
exports.process_template = function (input_file, output_file, processor) {
|
||||
var data = {};
|
||||
try {
|
||||
data = yaml.safeLoad(fs.readFileSync(input_file, 'utf8'));
|
||||
} catch (e) {
|
||||
console.log(colors.red(e));
|
||||
}
|
||||
return write_cloud_config_from_object(processor(_.clone(data)), output_file);
|
||||
};
|
@ -0,0 +1,44 @@
|
||||
var _ = require('underscore');
|
||||
|
||||
var util = require('../util.js');
|
||||
var cloud_config = require('../cloud_config.js');
|
||||
|
||||
|
||||
exports.create_etcd_cloud_config = function (node_count, conf) {
|
||||
var elected_node = 0;
|
||||
|
||||
var input_file = './cloud_config_templates/kubernetes-cluster-etcd-node-template.yml';
|
||||
|
||||
return _(node_count).times(function (n) {
|
||||
var output_file = util.join_output_file_path('kubernetes-cluster-etcd-node-' + n, 'generated.yml');
|
||||
|
||||
return cloud_config.process_template(input_file, output_file, function(data) {
|
||||
if (n !== elected_node) {
|
||||
data.coreos.etcd.peers = [
|
||||
util.hostname(elected_node, 'etcd'), 7001
|
||||
].join(':');
|
||||
}
|
||||
return data;
|
||||
});
|
||||
});
|
||||
};
|
||||
|
||||
exports.create_node_cloud_config = function (node_count, conf) {
|
||||
var elected_node = 0;
|
||||
|
||||
var input_file = './cloud_config_templates/kubernetes-cluster-main-nodes-template.yml';
|
||||
var output_file = util.join_output_file_path('kubernetes-cluster-main-nodes', 'generated.yml');
|
||||
|
||||
var make_node_config = function (n) {
|
||||
return cloud_config.generate_environment_file_entry_from_object(util.hostname(n, 'kube'), {
|
||||
weave_password: conf.weave_salt,
|
||||
weave_peers: n === elected_node ? "" : util.hostname(elected_node, 'kube'),
|
||||
breakout_route: util.ipv4([10, 2, 0, 0], 16),
|
||||
bridge_address_cidr: util.ipv4([10, 2, n, 1], 24),
|
||||
});
|
||||
};
|
||||
return cloud_config.process_template(input_file, output_file, function(data) {
|
||||
data.write_files = data.write_files.concat(_(node_count).times(make_node_config));
|
||||
return data;
|
||||
});
|
||||
};
|
@ -0,0 +1,29 @@
|
||||
var _ = require('underscore');
|
||||
|
||||
var util = require('../util.js');
|
||||
var cloud_config = require('../cloud_config.js');
|
||||
|
||||
// Render the basic Weave cluster template with the given per-node
// environment-file entries injected as write_files; returns the path of
// the generated cloud-config file.
var write_basic_weave_cluster_cloud_config = function (env_files) {
  var input_file = './cloud_config_templates/basic-weave-cluster-template.yml';
  var output_file = util.join_output_file_path('basic-weave-cluster', 'generated.yml');

  return cloud_config.process_template(input_file, output_file, function (data) {
    data.write_files = env_files;
    return data;
  });
};
|
||||
|
||||
exports.create_basic_cloud_config = function (node_count, conf) {
|
||||
var elected_node = 0;
|
||||
|
||||
var make_node_config = function (n) {
|
||||
return cloud_config.generate_environment_file_entry_from_object(util.hostname(n), {
|
||||
weavedns_addr: util.ipv4([10, 10, 1, 10+n], 24),
|
||||
weave_password: conf.weave_salt,
|
||||
weave_peers: n === elected_node ? "" : util.hostname(elected_node),
|
||||
});
|
||||
};
|
||||
|
||||
return write_basic_weave_cluster_cloud_config(_(node_count).times(make_node_config));
|
||||
};
|
||||
|
33
docs/getting-started-guides/coreos/azure/lib/util.js
Normal file
33
docs/getting-started-guides/coreos/azure/lib/util.js
Normal file
@ -0,0 +1,33 @@
|
||||
var _ = require('underscore');
|
||||
_.mixin(require('underscore.string').exports());
|
||||
|
||||
exports.ipv4 = function (ocets, prefix) {
|
||||
return {
|
||||
ocets: ocets,
|
||||
prefix: prefix,
|
||||
toString: function () {
|
||||
return [ocets.join('.'), prefix].join('/');
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
exports.hostname = function hostname (n, prefix) {
|
||||
return _.template("<%= pre %>-<%= seq %>")({
|
||||
pre: prefix || 'core',
|
||||
seq: _.pad(n, 2, '0'),
|
||||
});
|
||||
};
|
||||
|
||||
exports.rand_string = function () {
|
||||
var crypto = require('crypto');
|
||||
var shasum = crypto.createHash('sha256');
|
||||
shasum.update(crypto.randomBytes(256));
|
||||
return shasum.digest('hex');
|
||||
};
|
||||
|
||||
|
||||
// Short (14-character) random suffix, fixed once at module load, used to
// make per-deployment resource and output-file names unique.
exports.rand_suffix = exports.rand_string().substring(50);
|
||||
|
||||
exports.join_output_file_path = function(prefix, suffix) {
|
||||
return './output/' + [prefix, exports.rand_suffix, suffix].join('_');
|
||||
};
|
19
docs/getting-started-guides/coreos/azure/package.json
Normal file
19
docs/getting-started-guides/coreos/azure/package.json
Normal file
@ -0,0 +1,19 @@
|
||||
{
|
||||
"name": "coreos-azure-weave",
|
||||
"version": "1.0.0",
|
||||
"description": "Small utility to bring up a woven CoreOS cluster",
|
||||
"main": "index.js",
|
||||
"scripts": {
|
||||
"test": "echo \"Error: no test specified\" && exit 1"
|
||||
},
|
||||
"author": "Ilya Dmitrichenko <errordeveloper@gmail.com>",
|
||||
"license": "Apache-2.0",
|
||||
"dependencies": {
|
||||
"azure-cli": "^0.8.14",
|
||||
"colors": "^1.0.3",
|
||||
"js-yaml": "^3.2.5",
|
||||
"openssl-wrapper": "^0.2.1",
|
||||
"underscore": "^1.7.0",
|
||||
"underscore.string": "^3.0.2"
|
||||
}
|
||||
}
|
10
docs/getting-started-guides/coreos/azure/resize-kubernetes-cluster.js
Executable file
10
docs/getting-started-guides/coreos/azure/resize-kubernetes-cluster.js
Executable file
@ -0,0 +1,10 @@
|
||||
#!/usr/bin/env node

// Resize helper: grows an existing deployment by two more Kubernetes
// minion ('kube') VMs. Usage:
//   ./resize-kubernetes-cluster.js ./output/<name>_deployment.yml

var azure = require('./lib/azure_wrapper.js');
var kube = require('./lib/deployment_logic/kubernetes.js');

// process.argv[2] is the saved state file from the original run; loading it
// marks the 'kube' group to grow by 2, so already-provisioned nodes are
// skipped when machines are queued.
azure.load_state_for_resizing(process.argv[2], 'kube', 2);

// NOTE(review): queue_machines() appends tasks to the wrapper's internal
// queue and appears to have no meaningful return value — confirm that
// run_task_queue() ignores the array argument passed here.
azure.run_task_queue([
  azure.queue_machines('kube', 'stable', kube.create_node_cloud_config),
]);
|
Loading…
Reference in New Issue
Block a user