simplify flannel configuration on rackspace

Ryan Richard 2015-06-08 13:45:39 -05:00
parent 2bb0fc00e5
commit 0b26e4e301
3 changed files with 19 additions and 17 deletions

View File

@@ -83,6 +83,10 @@ coreos:
peer-addr: $private_ipv4:7001
peer-bind-addr: $private_ipv4:7001
flannel:
ip_masq: true
interface: eth2
fleet:
public-ip: $private_ipv4
metadata: kubernetes_role=minion
@@ -96,20 +100,16 @@ coreos:
- name: fleet.service
command: start
- name: flanneld.service
command: start
drop-ins:
- name: 50-network-config.conf
content: |
[Service]
ExecStartPre=/usr/bin/etcdctl set /coreos.com/network/config '{ "Network": "KUBE_NETWORK", "Backend": {"Type": "host-gw"}}'
ExecStart=
ExecStart=/usr/libexec/sdnotify-proxy /run/flannel/sd.sock \
/usr/bin/docker run --net=host --privileged=true --rm \
--volume=/run/flannel:/run/flannel \
--env=NOTIFY_SOCKET=/run/flannel/sd.sock \
--env-file=/run/flannel/options.env \
--volume=${ETCD_SSL_DIR}:/etc/ssl/etcd:ro \
quay.io/coreos/flannel:${FLANNEL_VER} /opt/bin/flanneld -etcd-endpoints http://127.0.0.1:4001 --ip-masq=true --iface=eth2
- name: 50-flannel.conf
content: |
[Unit]
Requires=etcd.service
After=etcd.service
[Service]
ExecStartPre=-/usr/bin/etcdctl mk /coreos.com/network/config '{"Network":"KUBE_NETWORK", "Backend": {"Type": "host-gw"}}'
command: start
- name: docker.service
command: start
drop-ins:
@@ -120,6 +120,7 @@ coreos:
# won't land in flannel's network...
Requires=flanneld.service
After=flanneld.service
Restart=always
- name: download-release.service
command: start
content: |
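
For reference, the net effect of the simplified flannel setup above is roughly the following. This is a sketch, assuming stock CoreOS cloudinit/flanneld behaviour: the flannel: keys are written into the same /run/flannel/options.env the old drop-in mounted by hand, and the flag values match the ones the old ExecStart passed explicitly.

    # Equivalent flanneld invocation (flags taken from the old drop-in above):
    flanneld -etcd-endpoints http://127.0.0.1:4001 --ip-masq=true --iface=eth2

    # The 50-flannel.conf drop-in still seeds the overlay network config once
    # (KUBE_NETWORK is the same placeholder used in the drop-in above):
    etcdctl mk /coreos.com/network/config '{"Network":"KUBE_NETWORK", "Backend": {"Type": "host-gw"}}'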

View File

@@ -321,7 +321,8 @@ kube-up() {
detect-master
# TODO: look for a better way to get the known_tokens to the master. This is needed instead of file injection since the files were too large on a 4-node cluster.
$(scp -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} ${KUBE_TEMP}/known_tokens.csv core@${KUBE_MASTER_IP}:known_tokens.csv)
$(scp -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} ${KUBE_TEMP}/known_tokens.csv core@${KUBE_MASTER_IP}:/home/core/known_tokens.csv)
$(sleep 2)
$(ssh -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} core@${KUBE_MASTER_IP} sudo mv /home/core/known_tokens.csv /var/lib/kube-apiserver/known_tokens.csv)
$(ssh -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} core@${KUBE_MASTER_IP} sudo chown root.root /var/lib/kube-apiserver/known_tokens.csv)
$(ssh -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} core@${KUBE_MASTER_IP} sudo systemctl restart kube-apiserver)
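
Condensed, the token hand-off above amounts to the following sketch (the $( ... ) command substitutions are dropped and the three ssh calls folded into one; variable names are the ones the script already uses):

    # Copy the generated tokens to the master, then move them into place with
    # the ownership kube-apiserver expects and restart the apiserver.
    scp -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} \
        ${KUBE_TEMP}/known_tokens.csv core@${KUBE_MASTER_IP}:/home/core/known_tokens.csv
    ssh -o StrictHostKeyChecking=no -i ~/.ssh/${SSH_KEY_NAME} core@${KUBE_MASTER_IP} \
        "sudo mv /home/core/known_tokens.csv /var/lib/kube-apiserver/known_tokens.csv && \
         sudo chown root:root /var/lib/kube-apiserver/known_tokens.csv && \
         sudo systemctl restart kube-apiserver"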

View File

@@ -1,7 +1,6 @@
# Rackspace
* Supported Version: v0.16.2
* `git checkout v0.16.2`
* Supported Version: v0.18.1
In general, the dev-build-and-up.sh workflow for Rackspace is similar to the GCE workflow. The specific implementation is different due to the use of CoreOS, Rackspace Cloud Files, and the overall network design.
@@ -20,8 +19,9 @@ The current cluster design is inspired by:
## Provider: Rackspace
- To install the latest released version of kubernetes use `export KUBERNETES_PROVIDER=rackspace; wget -q -O - https://get.k8s.io | bash`
- To build your own released version from source use `export KUBERNETES_PROVIDER=rackspace` and run `bash hack/dev-build-and-up.sh`
- Note: The get.k8s.io install method is not working yet for our scripts.
* To install the latest released version of kubernetes use `export KUBERNETES_PROVIDER=rackspace; wget -q -O - https://get.k8s.io | bash`
## Build
1. The kubernetes binaries will be built via the common build scripts in `build/`.
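
As a concrete example, the build-from-source path from the provider section above boils down to the following (run from a checked-out kubernetes tree):

    # Build the binaries and bring up a Rackspace-backed cluster
    export KUBERNETES_PROVIDER=rackspace
    bash hack/dev-build-and-up.sh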