From 88e6cac54478b1a9cc49c0b48c8b1803189e6c3a Mon Sep 17 00:00:00 2001
From: Brad Erickson
Date: Sun, 8 Nov 2015 23:08:58 -0800
Subject: [PATCH] Minion->Node rename: cluster/vagrant, docs and Vagrantfile

---
 Vagrantfile                                   | 32 +++++++-------
 cluster/options.md                            |  2 +-
 cluster/vagrant/config-default.sh             |  8 ++--
 cluster/vagrant/pod-ip-test.sh                | 42 +++++++++----------
 cluster/vagrant/provision-master.sh           | 10 ++---
 ...rk-minion.sh => provision-network-node.sh} |  6 +--
 ...{provision-minion.sh => provision-node.sh} | 12 +++---
 cluster/vagrant/util.sh                       | 34 +++++++--------
 docs/devel/developer-guides/vagrant.md        | 42 +++++++++----------
 docs/getting-started-guides/vagrant.md        | 10 ++---
 10 files changed, 99 insertions(+), 99 deletions(-)
 rename cluster/vagrant/{provision-network-minion.sh => provision-network-node.sh} (90%)
 rename cluster/vagrant/{provision-minion.sh => provision-node.sh} (88%)

diff --git a/Vagrantfile b/Vagrantfile
index 55612548fb4..3b3599e7662 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -17,13 +17,13 @@ Calling 'vagrant up' directly is not supported. Instead, please run the followi
 END
 end

-# The number of minions to provision
-$num_minion = (ENV['NUM_NODES'] || 1).to_i
+# The number of nodes to provision
+$num_node = (ENV['NUM_NODES'] || 1).to_i

 # ip configuration
 $master_ip = ENV['MASTER_IP']
-$minion_ip_base = ENV['NODE_IP_BASE'] || ""
-$minion_ips = $num_minion.times.collect { |n| $minion_ip_base + "#{n+3}" }
+$node_ip_base = ENV['NODE_IP_BASE'] || ""
+$node_ips = $num_node.times.collect { |n| $node_ip_base + "#{n+3}" }

 # Determine the OS platform to use
 $kube_os = ENV['KUBERNETES_OS'] || "fedora"
@@ -105,7 +105,7 @@ end
 # When doing Salt provisioning, we copy approximately 200MB of content in /tmp before anything else happens.
 # This causes problems if anything else was in /tmp or the other directories that are bound to tmpfs device (i.e /run, etc.)
 $vm_master_mem = (ENV['KUBERNETES_MASTER_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1280).to_i
-$vm_minion_mem = (ENV['KUBERNETES_NODE_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1024).to_i
+$vm_node_mem = (ENV['KUBERNETES_NODE_MEMORY'] || ENV['KUBERNETES_MEMORY'] || 1024).to_i

 Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
   def setvmboxandurl(config, provider)
@@ -221,21 +221,21 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
     c.vm.network "private_network", ip: "#{$master_ip}"
   end

-  # Kubernetes minion
-  $num_minion.times do |n|
-    minion_vm_name = "minion-#{n+1}"
-    minion_prefix = ENV['INSTANCE_PREFIX'] || 'kubernetes' # must mirror default in cluster/vagrant/config-default.sh
-    minion_hostname = "#{minion_prefix}-#{minion_vm_name}"
+  # Kubernetes node
+  $num_node.times do |n|
+    node_vm_name = "node-#{n+1}"
+    node_prefix = ENV['INSTANCE_PREFIX'] || 'kubernetes' # must mirror default in cluster/vagrant/config-default.sh
+    node_hostname = "#{node_prefix}-#{node_vm_name}"

-    config.vm.define minion_vm_name do |minion|
-      customize_vm minion, $vm_minion_mem
+    config.vm.define node_vm_name do |node|
+      customize_vm node, $vm_node_mem

-      minion_ip = $minion_ips[n]
+      node_ip = $node_ips[n]
       if ENV['KUBE_TEMP'] then
-        script = "#{ENV['KUBE_TEMP']}/minion-start-#{n}.sh"
-        minion.vm.provision "shell", run: "always", path: script
+        script = "#{ENV['KUBE_TEMP']}/node-start-#{n}.sh"
+        node.vm.provision "shell", run: "always", path: script
       end
-      minion.vm.network "private_network", ip: "#{minion_ip}"
+      node.vm.network "private_network", ip: "#{node_ip}"
     end
   end
 end
diff --git a/cluster/options.md b/cluster/options.md
index 3b2fa150fbb..f48d0ebc264 100644
--- a/cluster/options.md
+++ b/cluster/options.md
@@ -10,6 +10,6 @@ This is a work-in-progress; not all options are documented yet!

 **NUM_NODES**

-The number of minion instances to create. Most providers default this to 4.
+The number of node instances to create. Most providers default this to 4.

 [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/options.md?pixel)]()
diff --git a/cluster/vagrant/config-default.sh b/cluster/vagrant/config-default.sh
index d7acde8f18f..fd420c368b4 100755
--- a/cluster/vagrant/config-default.sh
+++ b/cluster/vagrant/config-default.sh
@@ -16,7 +16,7 @@

 ## Contains configuration values for interacting with the Vagrant cluster

-# Number of minions in the cluster
+# Number of nodes in the cluster
 NUM_NODES=${NUM_NODES-"1"}
 export NUM_NODES

@@ -30,7 +30,7 @@ export MASTER_NAME="${INSTANCE_PREFIX}-master"
 # Should the master serve as a node
 REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}

-# Map out the IPs, names and container subnets of each minion
+# Map out the IPs, names and container subnets of each node
 export NODE_IP_BASE=${NODE_IP_BASE-"10.245.1."}
 NODE_CONTAINER_SUBNET_BASE="10.246"
 MASTER_CONTAINER_NETMASK="255.255.255.0"
@@ -39,11 +39,11 @@ MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24"
 CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16"
 for ((i=0; i < NUM_NODES; i++)) do
   NODE_IPS[$i]="${NODE_IP_BASE}$((i+3))"
-  NODE_NAMES[$i]="${INSTANCE_PREFIX}-minion-$((i+1))"
+  NODE_NAMES[$i]="${INSTANCE_PREFIX}-node-$((i+1))"
   NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24"
   NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1"
   NODE_CONTAINER_NETMASKS[$i]="255.255.255.0"
-  VAGRANT_NODE_NAMES[$i]="minion-$((i+1))"
+  VAGRANT_NODE_NAMES[$i]="node-$((i+1))"
 done

 SERVICE_CLUSTER_IP_RANGE=10.247.0.0/16  # formerly PORTAL_NET
diff --git a/cluster/vagrant/pod-ip-test.sh b/cluster/vagrant/pod-ip-test.sh
index f397ab8c438..a22f9c34164 100755
--- a/cluster/vagrant/pod-ip-test.sh
+++ b/cluster/vagrant/pod-ip-test.sh
@@ -58,46 +58,46 @@ cd "${KUBE_ROOT}"

 echo All verbose output will be redirected to $logfile, use --logfile option to change.

-printf "Start the cluster with 2 minions .. "
+printf "Start the cluster with 2 nodes .. "
 export NUM_NODES=2
 export KUBERNETES_PROVIDER=vagrant

 (cluster/kube-up.sh >>"$logfile" 2>&1) || true
 echoOK $?

-printf "Check if minion-1 can reach kubernetes master .. "
-vagrant ssh minion-1 -- ping -c 10 kubernetes-master >>"$logfile" 2>&1
+printf "Check if node-1 can reach kubernetes master .. "
+vagrant ssh node-1 -- ping -c 10 kubernetes-master >>"$logfile" 2>&1
 echoOK $?

-printf "Check if minion-2 can reach kubernetes master .. "
-vagrant ssh minion-2 -- ping -c 10 kubernetes-master >>"$logfile" 2>&1
+printf "Check if node-2 can reach kubernetes master .. "
+vagrant ssh node-2 -- ping -c 10 kubernetes-master >>"$logfile" 2>&1
 echoOK $?

-printf "Pull an image that runs a web server on minion-1 .. "
-vagrant ssh minion-1 -- 'sudo docker pull kubernetes/serve_hostname' >>"$logfile" 2>&1
+printf "Pull an image that runs a web server on node-1 .. "
+vagrant ssh node-1 -- 'sudo docker pull kubernetes/serve_hostname' >>"$logfile" 2>&1
 echoOK $?

-printf "Pull an image that runs a web server on minion-2 .. "
-vagrant ssh minion-2 -- 'sudo docker pull kubernetes/serve_hostname' >>"$logfile" 2>&1
+printf "Pull an image that runs a web server on node-2 .. "
+vagrant ssh node-2 -- 'sudo docker pull kubernetes/serve_hostname' >>"$logfile" 2>&1
 echoOK $?

-printf "Run the server on minion-1 .. "
-vagrant ssh minion-1 -- sudo docker run -d kubernetes/serve_hostname >>"$logfile" 2>&1
+printf "Run the server on node-1 .. "
+vagrant ssh node-1 -- sudo docker run -d kubernetes/serve_hostname >>"$logfile" 2>&1
 echoOK $?

-printf "Run the server on minion-2 .. "
-vagrant ssh minion-2 -- sudo docker run -d kubernetes/serve_hostname >>"$logfile" 2>&1
+printf "Run the server on node-2 .. "
+vagrant ssh node-2 -- sudo docker run -d kubernetes/serve_hostname >>"$logfile" 2>&1
 echoOK $?

-printf "Run ping from minion-1 to docker bridges and to the containers on both minions .. "
-vagrant ssh minion-1 -- 'ping -c 20 10.246.0.1 && ping -c 20 10.246.1.1 && ping -c 20 10.246.0.2 && ping -c 20 10.246.1.2' >>"$logfile" 2>&1
+printf "Run ping from node-1 to docker bridges and to the containers on both nodes .. "
+vagrant ssh node-1 -- 'ping -c 20 10.246.0.1 && ping -c 20 10.246.1.1 && ping -c 20 10.246.0.2 && ping -c 20 10.246.1.2' >>"$logfile" 2>&1
 echoOK $?

-printf "Same pinch from minion-2 .. "
-vagrant ssh minion-2 -- 'ping -c 20 10.246.0.1 && ping -c 20 10.246.1.1 && ping -c 20 10.246.0.2 && ping -c 20 10.246.1.2' >>"$logfile" 2>&1
+printf "Same pinch from node-2 .. "
+vagrant ssh node-2 -- 'ping -c 20 10.246.0.1 && ping -c 20 10.246.1.1 && ping -c 20 10.246.0.2 && ping -c 20 10.246.1.2' >>"$logfile" 2>&1
 echoOK $?

-printf "tcp check, curl to both the running webservers from minion-1 .. "
-vagrant ssh minion-1 -- 'curl -sS 10.246.0.2:9376 && curl -sS 10.246.1.2:9376' >>"$logfile" 2>&1
+printf "tcp check, curl to both the running webservers from node-1 .. "
+vagrant ssh node-1 -- 'curl -sS 10.246.0.2:9376 && curl -sS 10.246.1.2:9376' >>"$logfile" 2>&1
 echoOK $?

-printf "tcp check, curl to both the running webservers from minion-2 .. "
-vagrant ssh minion-2 -- 'curl -sS 10.246.0.2:9376 && curl -sS 10.246.1.2:9376' >>"$logfile" 2>&1
+printf "tcp check, curl to both the running webservers from node-2 .. "
+vagrant ssh node-2 -- 'curl -sS 10.246.0.2:9376 && curl -sS 10.246.1.2:9376' >>"$logfile" 2>&1
 echoOK $?

 printf "All good, destroy the cluster .. "
diff --git a/cluster/vagrant/provision-master.sh b/cluster/vagrant/provision-master.sh
index 366d7731802..586e2ad8921 100755
--- a/cluster/vagrant/provision-master.sh
+++ b/cluster/vagrant/provision-master.sh
@@ -48,13 +48,13 @@ function release_not_found() {
   exit 1
 }

-# Setup hosts file to support ping by hostname to each minion in the cluster from apiserver
+# Setup hosts file to support ping by hostname to each node in the cluster from apiserver
 for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
-  minion=${NODE_NAMES[$i]}
+  node=${NODE_NAMES[$i]}
   ip=${NODE_IPS[$i]}
-  if [ ! "$(cat /etc/hosts | grep $minion)" ]; then
-    echo "Adding $minion to hosts file"
-    echo "$ip $minion" >> /etc/hosts
+  if [ ! "$(cat /etc/hosts | grep $node)" ]; then
+    echo "Adding $node to hosts file"
+    echo "$ip $node" >> /etc/hosts
   fi
 done
 echo "127.0.0.1 localhost" >> /etc/hosts # enables cmds like 'kubectl get pods' on master.
diff --git a/cluster/vagrant/provision-network-minion.sh b/cluster/vagrant/provision-network-node.sh
similarity index 90%
rename from cluster/vagrant/provision-network-minion.sh
rename to cluster/vagrant/provision-network-node.sh
index 9c573c87bbf..fba23ed19cb 100644
--- a/cluster/vagrant/provision-network-minion.sh
+++ b/cluster/vagrant/provision-network-node.sh
@@ -14,10 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-# provision-network-minion configures flannel on the minion
-function provision-network-minion {
+# provision-network-node configures flannel on the node
+function provision-network-node {

-  echo "Provisioning network on minion"
+  echo "Provisioning network on node"

   FLANNEL_ETCD_URL="http://${MASTER_IP}:4379"

diff --git a/cluster/vagrant/provision-minion.sh b/cluster/vagrant/provision-node.sh
similarity index 88%
rename from cluster/vagrant/provision-minion.sh
rename to cluster/vagrant/provision-node.sh
index 916c1d31858..edc6452c337 100755
--- a/cluster/vagrant/provision-minion.sh
+++ b/cluster/vagrant/provision-node.sh
@@ -46,18 +46,18 @@ if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then
 fi
 echo "$NODE_IP $NODE_NAME" >> /etc/hosts

-# Setup hosts file to support ping by hostname to each minion in the cluster
+# Setup hosts file to support ping by hostname to each node in the cluster
 for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
-  minion=${NODE_NAMES[$i]}
+  node=${NODE_NAMES[$i]}
   ip=${NODE_IPS[$i]}
-  if [ ! "$(cat /etc/hosts | grep $minion)" ]; then
-    echo "Adding $minion to hosts file"
-    echo "$ip $minion" >> /etc/hosts
+  if [ ! "$(cat /etc/hosts | grep $node)" ]; then
+    echo "Adding $node to hosts file"
+    echo "$ip $node" >> /etc/hosts
   fi
 done

 # Configure network
-provision-network-minion
+provision-network-node

 write-salt-config kubernetes-pool

diff --git a/cluster/vagrant/util.sh b/cluster/vagrant/util.sh
index 7aeeefc9106..875fa2a2cd7 100755
--- a/cluster/vagrant/util.sh
+++ b/cluster/vagrant/util.sh
@@ -25,7 +25,7 @@ function detect-master () {
   echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2
 }

-# Get minion IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
+# Get node IP addresses and store in KUBE_NODE_IP_ADDRESSES[]
 function detect-nodes {
   echo "Nodes already detected" 1>&2
   KUBE_NODE_IP_ADDRESSES=("${NODE_IPS[@]}")
@@ -114,7 +114,7 @@ function ensure-temp-dir {
   fi
 }

-# Create a set of provision scripts for the master and each of the minions
+# Create a set of provision scripts for the master and each of the nodes
 function create-provision-scripts {
   ensure-temp-dir

@@ -139,9 +139,9 @@ function create-provision-scripts {
       echo "CONTAINER_ADDR='${NODE_CONTAINER_ADDRS[$i]}'"
       echo "CONTAINER_NETMASK='${NODE_CONTAINER_NETMASKS[$i]}'"
       awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-utils.sh"
-      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-minion.sh"
-      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-minion.sh"
-    ) > "${KUBE_TEMP}/minion-start-${i}.sh"
+      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-network-node.sh"
+      awk '!/^#/' "${KUBE_ROOT}/cluster/vagrant/provision-node.sh"
+    ) > "${KUBE_TEMP}/node-start-${i}.sh"
   done
 }

@@ -211,7 +211,7 @@ function verify-cluster {
     done
   done

-  # verify each minion has all required daemons
+  # verify each node has all required daemons
   local i
   for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
     echo "Validating ${VAGRANT_NODE_NAMES[$i]}"
@@ -231,12 +231,12 @@ function verify-cluster {
   done
   echo

-  echo "Waiting for each minion to be registered with cloud provider"
+  echo "Waiting for each node to be registered with cloud provider"
   for (( i=0; i<${#NODE_NAMES[@]}; i++)); do
     local validated="0"
     until [[ "$validated" == "1" ]]; do
-      local minions=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o name --api-version=v1)
-      validated=$(echo $minions | grep -c "${NODE_NAMES[i]}") || {
+      local nodes=$("${KUBE_ROOT}/cluster/kubectl.sh" get nodes -o name --api-version=v1)
+      validated=$(echo $nodes | grep -c "${NODE_NAMES[i]}") || {
        printf "."
        sleep 2
        validated="0"
@@ -324,34 +324,34 @@ function test-teardown {
   kube-down
 }

-# Find the minion name based on the IP address
+# Find the node name based on the IP address
 function find-vagrant-name-by-ip {
   local ip="$1"
   local ip_pattern="${NODE_IP_BASE}(.*)"

-  # This is subtle. We map 10.245.2.2 -> minion-1. We do this by matching a
+  # This is subtle. We map 10.245.2.2 -> node-1. We do this by matching a
   # regexp and using the capture to construct the name.
   [[ $ip =~ $ip_pattern ]] || {
     return 1
   }
-  echo "minion-$((${BASH_REMATCH[1]} - 1))"
+  echo "node-$((${BASH_REMATCH[1]} - 1))"
 }

-# Find the vagrant machine name based on the host name of the minion
-function find-vagrant-name-by-minion-name {
+# Find the vagrant machine name based on the host name of the node
+function find-vagrant-name-by-node-name {
   local ip="$1"
   if [[ "$ip" == "${INSTANCE_PREFIX}-master" ]]; then
     echo "master"
     return $?
   fi
-  local ip_pattern="${INSTANCE_PREFIX}-minion-(.*)"
+  local ip_pattern="${INSTANCE_PREFIX}-node-(.*)"

   [[ $ip =~ $ip_pattern ]] || {
     return 1
   }

-  echo "minion-${BASH_REMATCH[1]}"
+  echo "node-${BASH_REMATCH[1]}"
 }

@@ -362,7 +362,7 @@ function ssh-to-node {
   local machine

   machine=$(find-vagrant-name-by-ip $node) || true
-  [[ -n ${machine-} ]] || machine=$(find-vagrant-name-by-minion-name $node) || true
+  [[ -n ${machine-} ]] || machine=$(find-vagrant-name-by-node-name $node) || true
   [[ -n ${machine-} ]] || {
     echo "Cannot find machine to ssh to: $1"
     return 1
diff --git a/docs/devel/developer-guides/vagrant.md b/docs/devel/developer-guides/vagrant.md
index 2d628abb736..74e29e3a8be 100644
--- a/docs/devel/developer-guides/vagrant.md
+++ b/docs/devel/developer-guides/vagrant.md
@@ -47,7 +47,7 @@ Running kubernetes with Vagrant (and VirtualBox) is an easy way to run/test/deve

 ### Setup

-By default, the Vagrant setup will create a single master VM (called kubernetes-master) and one node (called kubernetes-minion-1). Each VM will take 1 GB, so make sure you have at least 2GB to 4GB of free memory (plus appropriate free disk space). To start your local cluster, open a shell and run:
+By default, the Vagrant setup will create a single master VM (called kubernetes-master) and one node (called kubernetes-node-1). Each VM will take 1 GB, so make sure you have at least 2GB to 4GB of free memory (plus appropriate free disk space). To start your local cluster, open a shell and run:

 ```sh
 cd kubernetes
@@ -74,14 +74,14 @@ To access the master or any node:

 ```sh
 vagrant ssh master
-vagrant ssh minion-1
+vagrant ssh node-1
 ```

 If you are running more than one nodes, you can access the others by:

 ```sh
-vagrant ssh minion-2
-vagrant ssh minion-3
+vagrant ssh node-2
+vagrant ssh node-3
 ```

 To view the service status and/or logs on the kubernetes-master:
@@ -101,11 +101,11 @@ $ vagrant ssh master
 To view the services on any of the nodes:

 ```console
-$ vagrant ssh minion-1
-[vagrant@kubernetes-minion-1] $ sudo systemctl status docker
-[vagrant@kubernetes-minion-1] $ sudo journalctl -r -u docker
-[vagrant@kubernetes-minion-1] $ sudo systemctl status kubelet
-[vagrant@kubernetes-minion-1] $ sudo journalctl -r -u kubelet
+$ vagrant ssh node-1
+[vagrant@kubernetes-node-1] $ sudo systemctl status docker
+[vagrant@kubernetes-node-1] $ sudo journalctl -r -u docker
+[vagrant@kubernetes-node-1] $ sudo systemctl status kubelet
+[vagrant@kubernetes-node-1] $ sudo journalctl -r -u kubelet
 ```

 ### Interacting with your Kubernetes cluster with Vagrant.
@@ -139,9 +139,9 @@ You may need to build the binaries first, you can do this with `make`

 $ ./cluster/kubectl.sh get nodes
 NAME                     LABELS                                           STATUS
-kubernetes-minion-0whl   kubernetes.io/hostname=kubernetes-minion-0whl    Ready
-kubernetes-minion-4jdf   kubernetes.io/hostname=kubernetes-minion-4jdf    Ready
-kubernetes-minion-epbe   kubernetes.io/hostname=kubernetes-minion-epbe    Ready
+kubernetes-node-0whl     kubernetes.io/hostname=kubernetes-node-0whl      Ready
+kubernetes-node-4jdf     kubernetes.io/hostname=kubernetes-node-4jdf      Ready
+kubernetes-node-epbe     kubernetes.io/hostname=kubernetes-node-epbe      Ready
 ```

 ### Interacting with your Kubernetes cluster with the `kube-*` scripts.
@@ -206,9 +206,9 @@ Your cluster is running, you can list the nodes in your cluster:

 $ ./cluster/kubectl.sh get nodes
 NAME                     LABELS                                           STATUS
-kubernetes-minion-0whl   kubernetes.io/hostname=kubernetes-minion-0whl    Ready
-kubernetes-minion-4jdf   kubernetes.io/hostname=kubernetes-minion-4jdf    Ready
-kubernetes-minion-epbe   kubernetes.io/hostname=kubernetes-minion-epbe    Ready
+kubernetes-node-0whl     kubernetes.io/hostname=kubernetes-node-0whl      Ready
+kubernetes-node-4jdf     kubernetes.io/hostname=kubernetes-node-4jdf      Ready
+kubernetes-node-epbe     kubernetes.io/hostname=kubernetes-node-epbe      Ready
 ```

 Now start running some containers!
@@ -245,11 +245,11 @@ my-nginx-kqdjk   1/1       Waiting   0          33s
 my-nginx-nyj3x   1/1       Waiting   0          33s
 ```

-You need to wait for the provisioning to complete, you can monitor the minions by doing:
+You need to wait for the provisioning to complete, you can monitor the nodes by doing:

 ```console
-$ sudo salt '*minion-1' cmd.run 'docker images'
-kubernetes-minion-1:
+$ sudo salt '*node-1' cmd.run 'docker images'
+kubernetes-node-1:
     REPOSITORY          TAG                 IMAGE ID            CREATED             VIRTUAL SIZE
                         96864a7d2df3        26 hours ago        204.4 MB
     kubernetes/pause    latest              6c4579af347b        8 weeks ago         239.8 kB
 ```

 Once the docker image for nginx has been downloaded, the container will start and you can list it:

 ```console
-$ sudo salt '*minion-1' cmd.run 'docker ps'
-kubernetes-minion-1:
+$ sudo salt '*node-1' cmd.run 'docker ps'
+kubernetes-node-1:
     CONTAINER ID   IMAGE                     COMMAND    CREATED          STATUS          PORTS                  NAMES
     dbe79bf6e25b   nginx:latest              "nginx"    21 seconds ago   Up 19 seconds                          k8s--mynginx.8c5b8a3a--7813c8bd_-_3ffe_-_11e4_-_9036_-_0800279696e1.etcd--7813c8bd_-_3ffe_-_11e4_-_9036_-_0800279696e1--fcfa837f
     fa0e29c94501   kubernetes/pause:latest   "/pause"   8 minutes ago    Up 8 minutes    0.0.0.0:8080->80/tcp   k8s--net.a90e7ce4--7813c8bd_-_3ffe_-_11e4_-_9036_-_0800279696e1.etcd--7813c8bd_-_3ffe_-_11e4_-_9036_-_0800279696e1--baf5b21b
@@ -346,7 +346,7 @@ It's very likely you see a build error due to an error in your source files!

 #### I have brought Vagrant up but the nodes won't validate!

-Are you sure you built a release first? Did you install `net-tools`? For more clues, login to one of the nodes (`vagrant ssh minion-1`) and inspect the salt minion log (`sudo cat /var/log/salt/minion`).
+Are you sure you built a release first? Did you install `net-tools`? For more clues, login to one of the nodes (`vagrant ssh node-1`) and inspect the salt minion log (`sudo cat /var/log/salt/minion`).

 #### I want to change the number of nodes!
diff --git a/docs/getting-started-guides/vagrant.md b/docs/getting-started-guides/vagrant.md
index 055070dd1d1..250b23a1c0e 100644
--- a/docs/getting-started-guides/vagrant.md
+++ b/docs/getting-started-guides/vagrant.md
@@ -322,9 +322,9 @@ export KUBERNETES_PROVIDER=vagrant

 During provision of the cluster, you may see the following message:

 ```sh
-Validating minion-1
+Validating node-1
 .............
-Waiting for each minion to be registered with cloud provider
+Waiting for each node to be registered with cloud provider
 error: couldn't read version from server: Get https://10.245.1.2/api: dial tcp 10.245.1.2:443: i/o timeout
 ```
@@ -334,9 +334,9 @@ To debug, first verify that the master is binding to the proper IP address:

 ```
 $ vagrant ssh master
-$ ifconfig | grep eth1 -C 2
-eth1: flags=4163 mtu 1500 inet 10.245.1.2 netmask
- 255.255.255.0 broadcast 10.245.1.255
+$ ifconfig | grep eth1 -C 2
+eth1: flags=4163 mtu 1500 inet 10.245.1.2 netmask
+ 255.255.255.0 broadcast 10.245.1.255
 ```

 Then verify that your host machine has a network connection to a bridge that can serve that address:
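
For reference, the user-facing workflow after this rename (a minimal sketch, assuming the patch is applied and a Kubernetes release has been built as described in docs/devel/developer-guides/vagrant.md) looks like:

```sh
# Bring the Vagrant cluster up with two nodes (formerly "minions")
export KUBERNETES_PROVIDER=vagrant
export NUM_NODES=2
./cluster/kube-up.sh

# Vagrant machine names are now node-N instead of minion-N
vagrant ssh node-1
vagrant ssh node-2

# Registered hostnames follow ${INSTANCE_PREFIX}-node-N,
# e.g. kubernetes-node-1 with the default prefix
./cluster/kubectl.sh get nodes
```

Only the Vagrant-facing names change here; the NODE_* settings in cluster/vagrant/config-default.sh keep the same IP and subnet layout (10.245.1.x for node IPs, 10.246.x.x for container subnets).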