Merge pull request #9904 from jayunit100/ha

[HA] Fully automated implementation of containerized, highly available etcd + apiserver + controller-manager + scheduler
Brendan Burns · 2015-07-10 20:51:38 -07:00 · commit 80e92360ec
14 changed files with 798 additions and 0 deletions

docs/high-availability.md

@@ -200,4 +200,10 @@ restarting the kubelets on each node.
If you are turning up a fresh cluster, you will need to install the kubelet and kube-proxy on each worker node, and
set the ```--apiserver``` flag to your replicated endpoint.
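In concrete terms, that wiring looks roughly like the following on each worker. This is a sketch only: the hostnames are the ones used by the Vagrant example below, and the 0.17.x-era kubelet spells the flag `--api_servers`, so check the spelling against your build.

```sh
# Sketch: point the node daemons at the replicated apiserver endpoints.
kubelet --api_servers=http://kube0.ha:8080,http://kube1.ha:8080,http://kube2.ha:8080 \
        --config=/etc/kubernetes/manifests
kube-proxy --master=http://kube0.ha:8080
```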
## Vagrant up!
An initial proof of concept for this is available [here](../examples/high-availability/).
It implements the major concepts of the podmaster HA design (with a few minor simplifications), alongside a quick smoke test using k8petstore.

examples/high-availability/Vagrantfile

@@ -0,0 +1,46 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
  fedora = 'hansode/fedora-21-server-x86_64'
  script = "provision.sh"

  config.hostmanager.enabled = true

  if Vagrant.has_plugin?("vagrant-cachier")
    config.cache.scope = :box
  end

  config.vm.provider "virtualbox" do |v|
    v.memory = 3000
    v.cpus = 1
  end

  config.vm.define "kube0" do |kube0|
    kube0.vm.box = fedora
    kube0.vm.hostname = "kube0.ha"
    kube0.vm.synced_folder ".", "/vagrant"
    kube0.vm.network :private_network, ip: "192.168.4.100"
    kube0.vm.provision "shell", path: script
  end

  config.vm.define "kube1" do |kube1|
    kube1.vm.box = fedora
    kube1.vm.hostname = "kube1.ha"
    kube1.vm.synced_folder ".", "/vagrant"
    kube1.vm.network :private_network, ip: "192.168.4.101"
    kube1.vm.provision "shell", path: script
  end

  config.vm.define "kube2" do |kube2|
    kube2.vm.box = fedora
    kube2.vm.hostname = "kube2.ha"
    kube2.vm.synced_folder ".", "/vagrant"
    kube2.vm.network :private_network, ip: "192.168.4.102"
    kube2.vm.provision "shell", path: script
  end
end
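Bringing the cluster up is then a single `vagrant up`; machines provision in definition order, so kube0 (the etcd node) is online before kube1 and kube2 look for it. Note that the `config.hostmanager` block assumes the vagrant-hostmanager plugin is installed; a plausible first run, assuming VirtualBox is present:

```sh
# hostmanager maintains the /etc/hosts entries (kube0.ha etc.);
# cachier is optional and only speeds up repeated yum installs.
vagrant plugin install vagrant-hostmanager
vagrant plugin install vagrant-cachier

vagrant up   # boots and provisions kube0, kube1, kube2 in order
```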

examples/high-availability/apiserver.crt

@@ -0,0 +1,18 @@
-----BEGIN CERTIFICATE-----
MIIC+zCCAeWgAwIBAgIBATALBgkqhkiG9w0BAQswHzEdMBsGA1UEAwwUMTAuMC4y
LjE1QDE0MzQ1NTU1NTkwHhcNMTUwNjE3MTUzOTE5WhcNMTYwNjE2MTUzOTE5WjAf
MR0wGwYDVQQDDBQxMC4wLjIuMTVAMTQzNDU1NTU1OTCCASIwDQYJKoZIhvcNAQEB
BQADggEPADCCAQoCggEBAK3OFQlqz04iuOtmSIlbJkeTwecL+p8tdtmG9SRn4Fw6
TeuuoLCiSqjCZGLV1pKiL6fcjPYWsHoUNIzTtb6E/gj9OfGgQuIqZWRjM3blBmik
aZ7N7OwJ5SZy6e5wFtNJ08xRnDZjhOIhtSjPQHk0WsC3hKJav3rGNdh7C53LUiWB
uL3ne8oWaiTI9vlgW0ZWx6LcSa0U4jXftwdzLPLbB5INYrz9chF1hpulYnPWY1UA
GE6wJTEpQM0p88Ye1t8Ey5QRWp6tjxVfxDYScxSP6FS8Dcj36RF9+5zGYcQ1YbRC
Hc1hq7k33H6k5uUp+iPofezG9v4xhWqPkNV6LPxB9k8CAwEAAaNGMEQwDgYDVR0P
AQH/BAQDAgCgMBMGA1UdJQQMMAoGCCsGAQUFBwMBMAwGA1UdEwEB/wQCMAAwDwYD
VR0RBAgwBocECgACDzALBgkqhkiG9w0BAQsDggEBAFAzOeP67fKtHH2t114lSvvD
2wMj7YjSDLyp3xRAMqtAiQ2DXytXvJ0nG8HcI8rnYYx/0RhibpSM565KjMk3VKhV
FMYBU5BgFmR84TmCtLeZe4szr817A1Bbr25djMLQgHtEhtA0NptmjrzSdJICXeXe
ih29/5HCxELlbDl7Alb8C8ITQlWsVQpUyr2W5tPp2w1wUA5OD1jJAdQquOHG/lWn
4JC/4Out213CNCRh9dZFQsIy0oVUIncqspfj7v+xxVmQMeMqu1H92e5NFIqzfKaV
cL5lSqZj2tOKS4fKPqadZ6IBxOZVr28THCUlhbWwDrLEMk8Vu7W+iuhrl8Jthws=
-----END CERTIFICATE-----

examples/high-availability/apiserver.key

@@ -0,0 +1,27 @@
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEArc4VCWrPTiK462ZIiVsmR5PB5wv6ny122Yb1JGfgXDpN666g
sKJKqMJkYtXWkqIvp9yM9hawehQ0jNO1voT+CP058aBC4iplZGMzduUGaKRpns3s
7AnlJnLp7nAW00nTzFGcNmOE4iG1KM9AeTRawLeEolq/esY12HsLnctSJYG4ved7
yhZqJMj2+WBbRlbHotxJrRTiNd+3B3Ms8tsHkg1ivP1yEXWGm6Vic9ZjVQAYTrAl
MSlAzSnzxh7W3wTLlBFanq2PFV/ENhJzFI/oVLwNyPfpEX37nMZhxDVhtEIdzWGr
uTfcfqTm5Sn6I+h97Mb2/jGFao+Q1Xos/EH2TwIDAQABAoIBAAN4BXt2okSQ9Nfd
5ry3EQCEtm4CEzJyGdkllGbxm8y7bSqzBYoA8zSDDLXcRrgOfJ0BDd8rfMIy/QmC
gDIZPWi4hRuJc0xIRFXIlRJeZ85W2bTNr1jWqbgmuvyDSDFXW97MEi4Ttmc8/Pyf
hk3k+Zq3DvKelQvCQPjdG9I+aJlPAQ9jRpseiXADgxgGJRjMrNNvDmAXSy+vD/PA
MzIPcmW48nQ6kvXs6UdRXmfZD8CySQnSMN+pOMzikN9bbyrPHnKxNzImsKSCpN78
Uh8eNItDJmMLWv/SwnVS8/X5wMxRQ2t5vuGRnWCQEc+rLtw2mAkj36GnfFpZvSNd
fVuVbCECgYEAygCErUVxFUgfBgrXDZa9VEiYKnuQBdrgKOH5JQ/Pifp9BuhNtLvH
fHZ16cesZnQ8q+U4nUbnu0s4Gpl0RS96KcwJ3EjGPknclZoVZVPB6Ece/7JaaZcA
OQuRRkYABJRPIcpPCtgeJO+OL6H3BFmvteT8GTrek6RY8wrw65nIPu0CgYEA3EP5
guS3OoclBvFs5o2WyKokc+kq+L0zS9WX/Hv4nK2c2aS628TfhDqWeTeettuq0Jlf
hGvNkNaehQfPpyrJzHwoATMWhi/cKM9sycC9oEFX1tuPAZit2gl+cjXZOX19msp6
Sh1I5VKGM8pxGFrE3gDDq1woRr+Ke+lWOiDz5qsCgYBMhSm9YYLW5v0pks2oTiPm
W6GY5jnGngUwN3njujuKLyNRjIpzHncRBObh6ku6e+nHzAtIOOXrozDbkqni03tZ
fft2QPMoAV7YJQhZ3AKmdNqfTfbF7PeepG0hy76R/YSEbljG6NtybnTUQmyKb1cK
dnWxMQXDtAwl9U0SUqzyeQKBgGANWGpHGMvyESiE8WtcxSs+XuUZAf80aoEgZMXa
veB9KRAT8NRTdvEvp1y274RoKIYMzAVwCVWm8burW1dXpmaAoeVcBO+BQW2efp9A
aLDQreBpIGSe0vlo+HYPm2mhmrt8nnVhbd9q7FD7b/Qh6QWyqaE5b+riLh648zwo
EJQ/AoGBAKpDzePHNptD8zZCi/LEjPGeI2HPSiDs7/s6591l5gbSvfRtWyyRtDk3
jRgbOAqjF3Eow+QOA1GNGaSYWoANBmhKuUwn3ETzsmQ8UFSj/Wmc3IohhYZtrh6h
e0T8VGFcS6bg5OLbYfarzdaI+hL7zlOhjDAgc9E8rjYgBIvb8h9n
-----END RSA PRIVATE KEY-----
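The certificate and key above are throwaway credentials committed for the demo; the cert's CN is bound to 10.0.2.15 (the default VirtualBox NAT address) and its validity window ended in June 2016. Anyone reusing this example would likely mint a fresh self-signed pair, e.g. with stock openssl (the CN below is illustrative):

```sh
# Sketch: regenerate a self-signed serving cert/key pair for the kubelet's
# --tls-cert-file / --tls-private-key-file flags (CN is illustrative).
openssl req -x509 -newkey rsa:2048 -nodes -days 365 \
  -keyout apiserver.key -out apiserver.crt \
  -subj "/CN=192.168.4.101"
```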

examples/high-availability/etc_kubernetes_kubelet

@@ -0,0 +1,21 @@
###
# kubernetes kubelet (minion) config
# The address for the info server to serve on (set to 0.0.0.0 or "" for all interfaces)
KUBELET_ADDRESS=""
# e.g. KUBELET_ADDRESS="--address=0.0.0.0"
# The port for the info server to serve on
# KUBELET_PORT="--port=10250"
# You may leave this blank to use the actual hostname
# KUBELET_HOSTNAME="--hostname_override=0.0.0."
# location of the api-server
KUBELET_API_SERVER="--api_servers=http://0.0.0.0:8080,kube1.ha:8080,kube0.ha:8080 "
# --cert-dir="/var/run/kubernetes": The directory where the TLS certs are located (by default /var/run/kubernetes). If --tls_cert_file and --tls_private_key_file are provided, this flag will be ignored.
# --tls-cert-file="": File containing x509 Certificate for HTTPS. (CA cert, if any, concatenated after server cert). If --tls_cert_file and --tls_private_key_file are not provided, a self-signed certificate and key are generated for the public address and saved to the directory passed to --cert_dir.
# --tls-private-key-file="": File containing x509 private key matching --tls_cert_file.
# We modify kubelet args to do verbose logs + read from manifests dir.
KUBELET_ARGS="--tls-cert-file=/vagrant/apiserver.crt --tls-private-key-file=/vagrant/apiserver.key --register-node=true --v=5 --config=/etc/kubernetes/manifests --kubeconfig=/vagrant/kubeconfig"
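Once the kubelet starts with this file, it should register against the listed apiservers and begin running any static pods dropped into /etc/kubernetes/manifests. A few sanity checks on a node, assuming the systemd units set up by the provisioning script below:

```sh
systemctl status kubelet       # the unit should be active (running)
docker ps                      # containers appear here once manifests land
journalctl -u kubelet -e      # recent kubelet logs; useful if a manifest is rejected
```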

examples/high-availability/etcd.manifest

@@ -0,0 +1,97 @@
{
"apiVersion": "v1beta3",
"kind": "Pod",
"metadata": {"name":"etcd-server"},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "etcd-container",
"image": "quay.io/coreos/etcd",
"command": [
"/etcd","--name","NODE_NAME",
"--initial-advertise-peer-urls", "http://NODE_IP:2380",
"--listen-peer-urls", "http://NODE_IP:2380",
"--advertise-client-urls", "http://NODE_IP:2379",
"-initial-cluster", "kube0.ha=http://192.168.4.100:2380",
"--listen-client-urls", "http://127.0.0.1:2379,http://NODE_IP:2379",
"--data-dir","/var/etcd/data"
],
"ports":[
{
"name": "serverport", "containerPort": 2380, "hostPort": 2380
},
{
"name": "clientport","containerPort": 4001, "hostPort": 4001
}
],
"volumeMounts": [
{ "name": "varetcd",
"mountPath": "/var/etcd",
"readOnly": false},
{ "name": "etcssl",
"mountPath": "/etc/ssl",
"readOnly": true},
{ "name": "usrsharessl",
"mountPath": "/usr/share/ssl",
"readOnly": true},
{ "name": "varssl",
"mountPath": "/var/ssl",
"readOnly": true},
{ "name": "usrssl",
"mountPath": "/usr/ssl",
"readOnly": true},
{ "name": "usrlibssl",
"mountPath": "/usr/lib/ssl",
"readOnly": true},
{ "name": "usrlocalopenssl",
"mountPath": "/usr/local/openssl",
"readOnly": true},
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpkitls",
"mountPath": "/etc/pki/tls",
"readOnly": true}
]
}
],
"volumes":[
{ "name": "varetcd",
"hostPath": {
"path": "/var/etcd/data"}
},
{ "name": "etcssl",
"hostPath": {
"path": "/etc/pki/tls/certs"}
},
{ "name": "usrsharessl",
"hostPath": {
"path": "/usr/share/ssl"}
},
{ "name": "varssl",
"hostPath": {
"path": "/var/ssl"}
},
{ "name": "usrssl",
"hostPath": {
"path": "/usr/ssl"}
},
{ "name": "usrlibssl",
"hostPath": {
"path": "/usr/lib/ssl"}
},
{ "name": "usrlocalopenssl",
"hostPath": {
"path": "/usr/local/openssl"}
},
{ "name": "etcopenssl",
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpkitls",
"hostPath": {
"path": "/etc/pki/tls"}
}
]
}}
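Since the pod uses hostNetwork, the ports stanza is mostly informational; etcd binds whatever the --listen-* URLs say (and etcd accepts both single- and double-dash flags, so the lone `-initial-cluster` is fine). To confirm the single-member cluster is serving, the etcd v2 HTTP API can be probed directly; the hostname assumes the Vagrant /etc/hosts entries:

```sh
curl http://kube0.ha:2379/version      # etcd version info
curl http://kube0.ha:2379/v2/members   # should list the kube0.ha member
```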

examples/high-availability/kube-apiserver.manifest

@@ -0,0 +1,103 @@
{
"apiVersion": "v1beta3",
"kind": "Pod",
"metadata": {"name":"kube-apiserver"},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-apiserver",
"image": "gcr.io/google_containers/kube-apiserver:9680e782e08a1a1c94c656190011bd02",
"command": [
"/bin/sh",
"-c",
"/usr/local/bin/kube-apiserver --address=0.0.0.0 --etcd_servers=http://kube0.ha:2379 --service-cluster-ip-range=10.0.0.0/16 --v=4 --allow_privileged=True 1>>/var/log/kube-apiserver.log 2>&1"
],
"ports":[
{ "name": "https",
"containerPort": 443,
"hostPort": 443},{
"name": "http",
"containerPort": 7080,
"hostPort": 7080},{
"name": "local",
"containerPort": 8080,
"hostPort": 8080}
],
"volumeMounts": [
{ "name": "srvkube",
"mountPath": "/srv/kubernetes",
"readOnly": true},
{ "name": "logfile",
"mountPath": "/var/log/kube-apiserver.log",
"readOnly": false},
{ "name": "etcssl",
"mountPath": "/etc/ssl",
"readOnly": true},
{ "name": "usrsharessl",
"mountPath": "/usr/share/ssl",
"readOnly": true},
{ "name": "varssl",
"mountPath": "/var/ssl",
"readOnly": true},
{ "name": "usrssl",
"mountPath": "/usr/ssl",
"readOnly": true},
{ "name": "usrlibssl",
"mountPath": "/usr/lib/ssl",
"readOnly": true},
{ "name": "usrlocalopenssl",
"mountPath": "/usr/local/openssl",
"readOnly": true},
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpkitls",
"mountPath": "/etc/pki/tls",
"readOnly": true}
]
}
],
"volumes":[
{ "name": "srvkube",
"hostPath": {
"path": "/srv/kubernetes"}
},
{ "name": "logfile",
"hostPath": {
"path": "/var/log/kube-apiserver.log"}
},
{ "name": "etcssl",
"hostPath": {
"path": "/etc/ssl"}
},
{ "name": "usrsharessl",
"hostPath": {
"path": "/usr/share/ssl"}
},
{ "name": "varssl",
"hostPath": {
"path": "/var/ssl"}
},
{ "name": "usrssl",
"hostPath": {
"path": "/usr/ssl"}
},
{ "name": "usrlibssl",
"hostPath": {
"path": "/usr/lib/ssl"}
},
{ "name": "usrlocalopenssl",
"hostPath": {
"path": "/usr/local/openssl"}
},
{ "name": "etcopenssl",
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpkitls",
"hostPath": {
"path": "/etc/pki/tls"}
}
]
}}
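Every non-etcd node runs this manifest, so kube1 and kube2 each expose an unsecured apiserver on port 8080, both backed by the etcd on kube0. A quick smoke test from any host that can resolve the cluster names:

```sh
curl http://kube1.ha:8080/api       # lists the supported API versions
curl http://kube2.ha:8080/healthz   # should print "ok"
```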

examples/high-availability/kube-controller-manager.manifest

@@ -0,0 +1,100 @@
{
"apiVersion": "v1beta3",
"kind": "Pod",
"metadata": {"name":"kube-controller-manager"},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-controller-manager",
"image": "gcr.io/google_containers/kube-controller-manager:fda24638d51a48baa13c35337fcd4793",
"command": [
"/bin/sh",
"-c",
"/usr/local/bin/kube-controller-manager --master=192.168.4.102:8080 --service_account_private_key_file=/srv/kubernetes/server.key --v=4 1>>/var/log/kube-controller-manager.log 2>&1"
],
"livenessProbe": {
"httpGet": {
"path": "/healthz",
"port": 10252
},
"initialDelaySeconds": 15,
"timeoutSeconds": 1
},
"volumeMounts": [
{ "name": "srvkube",
"mountPath": "/srv/kubernetes",
"readOnly": true},
{ "name": "logfile",
"mountPath": "/var/log/kube-controller-manager.log",
"readOnly": false},
{ "name": "etcssl",
"mountPath": "/etc/ssl",
"readOnly": true},
{ "name": "usrsharessl",
"mountPath": "/usr/share/ssl",
"readOnly": true},
{ "name": "varssl",
"mountPath": "/var/ssl",
"readOnly": true},
{ "name": "usrssl",
"mountPath": "/usr/ssl",
"readOnly": true},
{ "name": "usrlibssl",
"mountPath": "/usr/lib/ssl",
"readOnly": true},
{ "name": "usrlocalopenssl",
"mountPath": "/usr/local/openssl",
"readOnly": true},
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpkitls",
"mountPath": "/etc/pki/tls",
"readOnly": true}
]
}
],
"volumes":[
{ "name": "srvkube",
"hostPath": {
"path": "/srv/kubernetes"}
},
{ "name": "logfile",
"hostPath": {
"path": "/var/log/kube-controller-manager.log"}
},
{ "name": "etcssl",
"hostPath": {
"path": "/etc/ssl"}
},
{ "name": "usrsharessl",
"hostPath": {
"path": "/usr/share/ssl"}
},
{ "name": "varssl",
"hostPath": {
"path": "/var/ssl"}
},
{ "name": "usrssl",
"hostPath": {
"path": "/usr/ssl"}
},
{ "name": "usrlibssl",
"hostPath": {
"path": "/usr/lib/ssl"}
},
{ "name": "usrlocalopenssl",
"hostPath": {
"path": "/usr/local/openssl"}
},
{ "name": "etcopenssl",
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpkitls",
"hostPath": {
"path": "/etc/pki/tls"}
}
]
}}

examples/high-availability/kube-scheduler.manifest

@@ -0,0 +1,39 @@
{
"apiVersion": "v1beta3",
"kind": "Pod",
"metadata": {"name":"kube-scheduler"},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-scheduler",
"image": "gcr.io/google_containers/kube-scheduler:34d0b8f8b31e27937327961528739bc9",
"command": [
"/bin/sh",
"-c",
"/usr/local/bin/kube-scheduler --master=127.0.0.1:8080 --v=2 1>>/var/log/kube-scheduler.log 2>&1"
],
"livenessProbe": {
"httpGet": {
"path": "/healthz",
"port": 10251
},
"initialDelaySeconds": 15,
"timeoutSeconds": 1
},
"volumeMounts": [
{
"name": "logfile",
"mountPath": "/var/log/kube-scheduler.log",
"readOnly": false
}
]
}
],
"volumes":[
{ "name": "logfile",
"hostPath": {
"path": "/var/log/kube-scheduler.log"}
}
]
}}
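The scheduler and controller-manager manifests both lean on the components' built-in /healthz endpoints (ports 10251 and 10252 respectively) for their liveness probes. The same endpoints are handy for checking, on a given node, whether it currently holds each podmaster lease:

```sh
curl http://127.0.0.1:10251/healthz   # scheduler: answers only where that pod runs
curl http://127.0.0.1:10252/healthz   # controller-manager: likewise
```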

examples/high-availability/kubeconfig

@@ -0,0 +1,2 @@
{
}

examples/high-availability/podmaster.json

@@ -0,0 +1,57 @@
{
"apiVersion": "v1beta3",
"kind": "Pod",
"metadata": {"name":"scheduler-master"},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "scheduler-elector",
"image": "gcr.io/google_containers/podmaster:1.1",
"command": [
"/podmaster",
"--etcd-servers=http://192.168.4.100:2379,http://192.168.4.101:2379,http://192.168.4.102:2379",
"--key=scheduler",
"--source-file=/kubernetes/kube-scheduler.manifest",
"--dest-file=/manifests/kube-scheduler.manifest"
],
"volumeMounts": [
{ "name": "k8s",
"mountPath": "/kubernetes",
"readOnly": true},
{ "name": "manifests",
"mountPath": "/manifests",
"readOnly": false}
]
},
{
"name": "controller-manager-elector",
"image": "gcr.io/google_containers/podmaster:1.1",
"command": [
"/podmaster",
"--etcd-servers=http://192.168.4.101:2379,http://192.168.4.102:2379,http://192.168.4.100:2379",
"--key=controller",
"--source-file=/kubernetes/kube-controller-manager.manifest",
"--dest-file=/manifests/kube-controller-manager.manifest"
],
"volumeMounts": [
{ "name": "k8s",
"mountPath": "/kubernetes",
"readOnly": true},
{ "name": "manifests",
"mountPath": "/manifests",
"readOnly": false}
]
}
],
"volumes":[
{ "name": "k8s",
"hostPath": {
"path": "/srv/kubernetes"}
},
{ "name": "manifests",
"hostPath": {
"path": "/etc/kubernetes/manifests"}
}
]
}}
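The two podmaster containers implement leader election with an atomic compare-and-swap on one etcd key per component (`--key=scheduler`, `--key=controller`): the winner keeps copying its source manifest into the kubelet's manifests directory, and the losers remove theirs. Assuming podmaster stores each lease under the key name it is given, the current holders can be inspected through the etcd v2 keys API:

```sh
curl http://192.168.4.100:2379/v2/keys/scheduler    # value should name the scheduler leader
curl http://192.168.4.100:2379/v2/keys/controller   # likewise for the controller-manager
```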

@@ -0,0 +1,57 @@
{
"apiVersion": "v1beta3",
"kind": "Pod",
"metadata": {"name":"scheduler-master"},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "scheduler-elector",
"image": "gcr.io/google_containers/podmaster:1.1",
"command": [
"/podmaster",
"--etcd-servers=http://127.0.0.1:4001",
"--key=scheduler",
"--source-file=/kubernetes/kube-scheduler.manifest",
"--dest-file=/manifests/kube-scheduler.manifest"
],
"volumeMounts": [
{ "name": "k8s",
"mountPath": "/kubernetes",
"readOnly": true},
{ "name": "manifests",
"mountPath": "/manifests",
"readOnly": false}
]
},
{
"name": "controller-manager-elector",
"image": "gcr.io/google_containers/podmaster:1.1",
"command": [
"/podmaster",
"--etcd-servers=http://127.0.0.1:4001",
"--key=controller",
"--source-file=/kubernetes/kube-controller-manager.manifest",
"--dest-file=/manifests/kube-controller-manager.manifest"
],
"volumeMounts": [
{ "name": "k8s",
"mountPath": "/kubernetes",
"readOnly": true},
{ "name": "manifests",
"mountPath": "/manifests",
"readOnly": false}
]
}
],
"volumes":[
{ "name": "k8s",
"hostPath": {
"path": "/srv/kubernetes"}
},
{ "name": "manifests",
"hostPath": {
"path": "/etc/kubernetes/manifests"}
}
]
}}

examples/high-availability/provision-flannel.sh

@@ -0,0 +1,44 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
function setup_flannel {
yum install -y flannel
### Write this key/value to etcd; flannel will read it to set up its networking.
curl -s -L http://kube0.ha:2379/v2/keys/coreos.com/network/config -XPUT -d value='{"Network": "172.31.255.0/24", "SubnetLen": 27, "Backend": {"Type": "vxlan"}}'
### Write the flannel etcd config file
cat >> /etc/sysconfig/flanneld << EOF
FLANNEL_ETCD="http://kube0.ha:2379"
FLANNEL_ETCD_KEY="/coreos.com/network"
FLANNEL_OPTIONS="--iface=eth1"
EOF
}
echo "now setting up flannel. Assuming etcd is online!"
setup_flannel
sudo service flanneld restart
sudo ip link delete docker0
sudo service docker restart
### Restarting the kubelet restarts the static pods it manages.
### Docker containers will now get their IPs from the flannel subnet,
### which runs inside of the kube network.
sudo systemctl restart kubelet
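After this script runs, each worker should hold a /27 lease out of 172.31.255.0/24 and docker should be bridging onto it. Two quick checks on a provisioned node (flanneld records its lease in subnet.env):

```sh
cat /run/flannel/subnet.env   # FLANNEL_SUBNET=172.31.255.x/27 once a lease is held
ip addr show flannel.1        # the vxlan device carrying that subnet
```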

examples/high-availability/provision.sh

@@ -0,0 +1,181 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
set -x
# Grep the whole IP because otherwise sometimes vagrant attaches extra mystery dynamic IPs to eth1.
IP=`ip -o addr | grep '192.168.4' | cut -d' ' -f 7 | cut -d'/' -f 1`
echo "Using IP $IP for this machine."
function initialize {
systemctl disable iptables-services firewalld
echo "disabling selinux"
(setenforce 0 || echo "selinux might already be disabled...")
yum install -y docker
### Important: The kube version MUST match the containers in the manifests,
### otherwise you get lots of API errors.
yum install -y http://cbs.centos.org/kojifiles/packages/kubernetes/0.17.1/3.el7/x86_64/kubernetes-node-0.17.1-3.el7.x86_64.rpm
mkdir -p -m 777 /etc/kubernetes/manifests
### Just to make it easy to hack around as a non-root user
groupadd docker
gpasswd -a vagrant docker
systemctl restart docker
}
function start_kubelet {
systemctl enable docker
### We need a custom kubelet config file with the --config option set
cp /vagrant/etc_kubernetes_kubelet /etc/kubernetes/kubelet
systemctl enable kubelet
### Not sure why, but this restart is required?
sleep 2
systemctl restart kubelet
}
### Not the best idea if using flannel. Because of the circular dependency.
function write_etcd_manifest {
### This looks fancy, but it is just filling in the etcd config file
### with this node's name and IP address.
cat /vagrant/etcd.manifest | \
sed "s/NODE_NAME/`hostname`/g" | \
sed "s/NODE_IP/$IP/g" > /etc/kubernetes/manifests/etcd.manifest
}
### Test of ETCD Members.
function test_etcd {
echo "----------- DEBUG ------------ KUBELET LOGS -----------------"
( journalctl -u kubelet | grep -A 20 -B 20 Fail || echo "no failure in logs")
echo "----------- END DEBUG OF KUBELET ----------------------------"
( curl http://kube0.ha:2379 > /tmp/curl_output || echo "failed etcd!!!" )
if [ -s /tmp/curl_output ]; then
echo "etcd success"
else
echo "etcd failure. exit!"
exit 100
fi
}
function k8petstore {
### Run k8petstore. Should work IFF flannel and so on are set up properly.
wget https://raw.githubusercontent.com/GoogleCloudPlatform/kubernetes/release-0.17/examples/k8petstore/k8petstore.sh
chmod 777 k8petstore.sh
./k8petstore.sh
}
function write_api_server_config {
touch /var/log/kube-apiserver.log
mkdir -p -m 777 /srv/kubernetes/
### We will move files back and forth between the /srv/kubernetes directory
### and the kubelet manifest dir. That is how we modulate the leader: each node
### continuously ensures the manifests are either staged here or active in the kubelet manifest dir.
cp /vagrant/kube-scheduler.manifest /vagrant/kube-controller-manager.manifest /srv/kubernetes
### All nodes will run an API server. The API server is stateless, so it's not a problem
### to serve it up everywhere.
cp /vagrant/kube-apiserver.manifest /etc/kubernetes/manifests/
}
function write_podmaster_config {
touch /var/log/kube-scheduler.log
touch /var/log/kube-controller-manager.log
### These DO NOT go in the manifests dir. Instead, we stage them here and
### let podmaster swap them in and out of the manifests directory
### based on its own internal HA logic.
cp /vagrant/kube-controller-manager.manifest /srv/kubernetes/
cp /vagrant/kube-scheduler.manifest /srv/kubernetes/
#### Finally, the podmaster is the mechanism for election
cp /vagrant/podmaster.json /etc/kubernetes/manifests/
}
function poll {
### wait up to 20 minutes (600 attempts x 2s) for kube-apiserver to come online
for i in `seq 1 600`
do
sleep 2
echo $i
### Just testing that the front end comes up. Not sure how to test total entries etc... (yet)
( curl "localhost:8080" > result || echo "failed on attempt $i, retrying again.. api not up yet. " )
( cat result || echo "no result" )
if ( cat result | grep -q api ) ; then
break
else
echo "continue.."
fi
done
if [ $i == 600 ]; then
exit 2
fi
}
function install_components {
### etcd node: this node only runs etcd in a kubelet, no flannel.
### We don't want the circular dependency of docker -> flannel -> etcd -> docker.
if [ "`hostname`" == "kube0.ha" ]; then
write_etcd_manifest
start_kubelet
### precaution to make sure etcd is writable, flush iptables.
iptables -F
### minions: these will each run their own api server.
else
### Make sure etcd is running; flannel needs it.
test_etcd
start_kubelet
### Flannel setup...
### This will restart the kubelet and docker and so on...
/vagrant/provision-flannel.sh
echo "Now pulling down flannel nodes. "
curl -L http://kube0.ha:2379/v2/keys/coreos.com/network/subnets | python -mjson.tool
echo " Inspect the above lines carefully ^."
### All nodes run api server
write_api_server_config
### controller-manager will turn on and off
### and same for kube-scheduler
write_podmaster_config
# Finally, for us to create public IPs for k8petstore etc., we need the proxy running.
service kube-proxy start
service kube-proxy status
fi
}
initialize
install_components
iptables -F
if [ "`hostname`" == "kube2.ha" ]; then
poll
k8petstore
fi
echo "ALL DONE!"