From 7a6743808a34505792be6f452aa9e1ec346c417b Mon Sep 17 00:00:00 2001
From: Joe Beda
Date: Thu, 13 Nov 2014 17:09:59 -0800
Subject: [PATCH 1/2] Make Vagrant CPU usage be more sane on hyperthreaded systems.

Fixes #2359
---
 Vagrantfile | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/Vagrantfile b/Vagrantfile
index 3cb4badfbfc..5be613efe45 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -28,12 +28,13 @@ $kube_box = {
 }
 
 # This stuff is cargo-culted from http://www.stefanwrobel.com/how-to-make-vagrant-performance-not-suck
-# Give access to all cpu cores on the host
+# Give access to half of all cpu cores on the host. We divide by 2 as we assume
+# that users are running with hyperthreads.
 host = RbConfig::CONFIG['host_os']
 if host =~ /darwin/
-  $vm_cpus = `sysctl -n hw.ncpu`.to_i
+  $vm_cpus = (`sysctl -n hw.ncpu`.to_i/2.0).ceil
 elsif host =~ /linux/
-  $vm_cpus = `nproc`.to_i
+  $vm_cpus = (`nproc`.to_i/2.0).ceil
 else # sorry Windows folks, I can't help you
   $vm_cpus = 2
 end
@@ -41,7 +42,6 @@ end
 # Give VM 512MB of RAM
 $vm_mem = 512
 
-
 Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   def customize_vm(config)
     config.vm.box = $kube_box[$kube_os]["name"]

From 5a0159ea00e082bc85bbec18d1ab7ae78d90fa4f Mon Sep 17 00:00:00 2001
From: Joe Beda
Date: Thu, 13 Nov 2014 22:14:56 -0800
Subject: [PATCH 2/2] Get Vagrant to start using TLS client certs.

Also fix up cert generation. It was failing during the first salt highstate
when trying to chown the certs as the apiserver user didn't exist yet. Fix
this by creating a 'kube-cert' group and chgrping the files to that. Then
make the apiserver a member of that group.

Fixes #2365
Fixes #2368
---
 cluster/kubecfg.sh                            |  9 +------
 cluster/kubectl.sh                            |  7 ------
 cluster/saltbase/salt/apiserver/init.sls      |  2 ++
 cluster/saltbase/salt/generate-cert/init.sls  |  6 ++++-
 .../salt/generate-cert/make-ca-cert.sh        |  5 ++--
 .../saltbase/salt/generate-cert/make-cert.sh  |  5 ++--
 cluster/saltbase/salt/nginx/init.sls          | 18 +++++++------
 cluster/vagrant/provision-master.sh           |  2 ++
 cluster/vagrant/util.sh                       | 25 ++++++++++++++++++-
 docs/salt.md                                  |  2 +-
 hack/e2e-suite/goe2e.sh                       | 11 +++++++-
 11 files changed, 61 insertions(+), 31 deletions(-)

diff --git a/cluster/kubecfg.sh b/cluster/kubecfg.sh
index 228d71a494e..72cc2ce75f5 100755
--- a/cluster/kubecfg.sh
+++ b/cluster/kubecfg.sh
@@ -83,16 +83,9 @@ fi
 # When we are using vagrant it has hard coded auth. We repeat that here so that
 # we don't clobber auth that might be used for a publicly facing cluster.
-if [ "$KUBERNETES_PROVIDER" == "vagrant" ]; then - cat >~/.kubernetes_vagrant_auth <~/.kubernetes_vagrant_auth </srv/salt-overlay/pillar/cluster-params.sls portal_net: $PORTAL_NET + cert_ip: $MASTER_IP EOF # Configure the salt-master diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh index a38e40b9368..f7285e11295 100644 --- a/cluster/vagrant/util.sh +++ b/cluster/vagrant/util.sh @@ -43,6 +43,29 @@ function kube-up { get-password vagrant up + local kube_cert=".kubecfg.vagrant.crt" + local kube_key=".kubecfg.vagrant.key" + local ca_cert=".kubernetes.vagrant.ca.crt" + + (umask 077 + vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null + vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null + vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null + + cat << EOF > ~/.kubernetes_vagrant_auth +{ + "User": "$KUBE_USER", + "Password": "$KUBE_PASSWORD", + "CAFile": "$HOME/$ca_cert", + "CertFile": "$HOME/$kube_cert", + "KeyFile": "$HOME/$kube_key" +} +EOF + + chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \ + "${HOME}/${kube_key}" "${HOME}/${ca_cert}" + ) + echo "Each machine instance has been created." echo " Now waiting for the Salt provisioning process to complete on each machine." echo " This can take some time based on your network, disk, and cpu speed." @@ -108,7 +131,7 @@ function kube-up { echo echo " https://${KUBE_MASTER_IP}" echo - echo "The user name and password to use is located in ~/.kubernetes_auth." + echo "The user name and password to use is located in ~/.kubernetes_vagrant_auth." echo } diff --git a/docs/salt.md b/docs/salt.md index 0e23f6de924..ceb796b3315 100644 --- a/docs/salt.md +++ b/docs/salt.md @@ -52,7 +52,7 @@ The following enumerates the set of defined key/value pairs that are supported t Key | Value ------------- | ------------- `cbr-cidr` | (Optional) The minion IP address range used for the docker container bridge. -`cloud` | (Optional) Which IaaS platform is used to host kubernetes, *gce*, *azure* +`cloud` | (Optional) Which IaaS platform is used to host kubernetes, *gce*, *azure*, *aws*, *vagrant* `cloud_provider` | (Optional) The cloud_provider used by apiserver: *gce*, *azure*, *vagrant* `etcd_servers` | (Optional) Comma-delimited list of IP addresses the apiserver and kubelet use to reach etcd. Uses the IP of the first machine in the kubernetes_master role. `hostnamef` | (Optional) The full host name of the machine, i.e. hostname -f diff --git a/hack/e2e-suite/goe2e.sh b/hack/e2e-suite/goe2e.sh index 4716b60e1ef..26082053ce0 100755 --- a/hack/e2e-suite/goe2e.sh +++ b/hack/e2e-suite/goe2e.sh @@ -63,5 +63,14 @@ locations=( ) e2e=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 ) +# When we are using vagrant it has hard coded auth. We repeat that here so that +# we don't clobber auth that might be used for a publicly facing cluster. +if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then + auth_config=( + "--auth_config=$HOME/.kubernetes_vagrant_auth" + ) +else + auth_config=() +fi -"${e2e}" -host="https://${KUBE_MASTER_IP-}" +"${e2e}" "${auth_config[@]:+${auth_config[@]}}" -host="https://${KUBE_MASTER_IP-}"