diff --git a/Vagrantfile b/Vagrantfile
index 3cb4badfbfc..5be613efe45 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -28,12 +28,13 @@ $kube_box = {
 }
 
 # This stuff is cargo-culted from http://www.stefanwrobel.com/how-to-make-vagrant-performance-not-suck
-# Give access to all cpu cores on the host
+# Give access to half of all cpu cores on the host. We divide by 2 as we assume
+# that users are running with hyperthreads.
 host = RbConfig::CONFIG['host_os']
 if host =~ /darwin/
-  $vm_cpus = `sysctl -n hw.ncpu`.to_i
+  $vm_cpus = (`sysctl -n hw.ncpu`.to_i/2.0).ceil
 elsif host =~ /linux/
-  $vm_cpus = `nproc`.to_i
+  $vm_cpus = (`nproc`.to_i/2.0).ceil
 else # sorry Windows folks, I can't help you
   $vm_cpus = 2
 end
@@ -41,7 +42,6 @@ end
 # Give VM 512MB of RAM
 $vm_mem = 512
 
-
 Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   def customize_vm(config)
     config.vm.box = $kube_box[$kube_os]["name"]
diff --git a/cluster/kubecfg.sh b/cluster/kubecfg.sh
index 228d71a494e..72cc2ce75f5 100755
--- a/cluster/kubecfg.sh
+++ b/cluster/kubecfg.sh
@@ -83,16 +83,9 @@ fi
 
 # When we are using vagrant it has hard coded auth. We repeat that here so that
 # we don't clobber auth that might be used for a publicly facing cluster.
-if [ "$KUBERNETES_PROVIDER" == "vagrant" ]; then
-  cat >~/.kubernetes_vagrant_auth <~/.kubernetes_vagrant_auth </srv/salt-overlay/pillar/cluster-params.sls
 portal_net: $PORTAL_NET
+cert_ip: $MASTER_IP
 EOF
 
 # Configure the salt-master
diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh
index a38e40b9368..f7285e11295 100644
--- a/cluster/vagrant/util.sh
+++ b/cluster/vagrant/util.sh
@@ -43,6 +43,29 @@ function kube-up {
   get-password
   vagrant up
 
+  local kube_cert=".kubecfg.vagrant.crt"
+  local kube_key=".kubecfg.vagrant.key"
+  local ca_cert=".kubernetes.vagrant.ca.crt"
+
+  (umask 077
+   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.crt >"${HOME}/${kube_cert}" 2>/dev/null
+   vagrant ssh master -- sudo cat /srv/kubernetes/kubecfg.key >"${HOME}/${kube_key}" 2>/dev/null
+   vagrant ssh master -- sudo cat /srv/kubernetes/ca.crt >"${HOME}/${ca_cert}" 2>/dev/null
+
+   cat << EOF > ~/.kubernetes_vagrant_auth
+{
+  "User": "$KUBE_USER",
+  "Password": "$KUBE_PASSWORD",
+  "CAFile": "$HOME/$ca_cert",
+  "CertFile": "$HOME/$kube_cert",
+  "KeyFile": "$HOME/$kube_key"
+}
+EOF
+
+   chmod 0600 ~/.kubernetes_auth "${HOME}/${kube_cert}" \
+     "${HOME}/${kube_key}" "${HOME}/${ca_cert}"
+  )
+
   echo "Each machine instance has been created."
   echo "  Now waiting for the Salt provisioning process to complete on each machine."
   echo "  This can take some time based on your network, disk, and cpu speed."
@@ -108,7 +131,7 @@ function kube-up {
   echo
   echo "  https://${KUBE_MASTER_IP}"
   echo
-  echo "The user name and password to use is located in ~/.kubernetes_auth."
+  echo "The user name and password to use is located in ~/.kubernetes_vagrant_auth."
   echo
 }
diff --git a/docs/salt.md b/docs/salt.md
index 0e23f6de924..ceb796b3315 100644
--- a/docs/salt.md
+++ b/docs/salt.md
@@ -52,7 +52,7 @@ The following enumerates the set of defined key/value pairs that are supported t
 Key | Value
 ------------- | -------------
 `cbr-cidr` | (Optional) The minion IP address range used for the docker container bridge.
-`cloud` | (Optional) Which IaaS platform is used to host kubernetes, *gce*, *azure*
+`cloud` | (Optional) Which IaaS platform is used to host kubernetes, *gce*, *azure*, *aws*, *vagrant*
 `cloud_provider` | (Optional) The cloud_provider used by apiserver: *gce*, *azure*, *vagrant*
 `etcd_servers` | (Optional) Comma-delimited list of IP addresses the apiserver and kubelet use to reach etcd. Uses the IP of the first machine in the kubernetes_master role.
 `hostnamef` | (Optional) The full host name of the machine, i.e. hostname -f
diff --git a/hack/e2e-suite/goe2e.sh b/hack/e2e-suite/goe2e.sh
index 4716b60e1ef..26082053ce0 100755
--- a/hack/e2e-suite/goe2e.sh
+++ b/hack/e2e-suite/goe2e.sh
@@ -63,5 +63,14 @@ locations=(
 )
 e2e=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
 
+# When we are using vagrant it has hard coded auth. We repeat that here so that
+# we don't clobber auth that might be used for a publicly facing cluster.
+if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
+  auth_config=(
+    "--auth_config=$HOME/.kubernetes_vagrant_auth"
+  )
+else
+  auth_config=()
+fi
 
-"${e2e}" -host="https://${KUBE_MASTER_IP-}"
+"${e2e}" "${auth_config[@]:+${auth_config[@]}}" -host="https://${KUBE_MASTER_IP-}"
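
For reviewers, a minimal sketch of how the auth material that kube-up now writes in cluster/vagrant/util.sh could be exercised by hand. This is not part of the patch: the master address and the v1beta1 minions path are assumptions based on the default vagrant configuration of the time, and KUBE_USER/KUBE_PASSWORD stand in for whatever get-password sets.

# Hypothetical manual check, not part of this diff.
# Assumes the default vagrant MASTER_IP and a v1beta1 apiserver; adjust as needed.
KUBE_MASTER_IP=10.245.1.2
curl --user "${KUBE_USER}:${KUBE_PASSWORD}" \
  --cacert "${HOME}/.kubernetes.vagrant.ca.crt" \
  --cert "${HOME}/.kubecfg.vagrant.crt" \
  --key "${HOME}/.kubecfg.vagrant.key" \
  "https://${KUBE_MASTER_IP}/api/v1beta1/minions"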