apiserver listen on 0.0.0.0 in vagrant
parent e1998e5a07
commit 10be80295c
```
@@ -9,7 +9,6 @@

{% if grains.etcd_servers is defined %}
{% set etcd_servers = "-etcd_servers=http://" + grains.etcd_servers + ":4001" %}
{% set address = "-address=" + grains.etcd_servers %}
{% else %}
{% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %}
{% set etcd_servers = "-etcd_servers=http://" + ips[0][0] + ":4001" %}
```
```
@@ -3,7 +3,4 @@
{% set daemon_args = "" %}
{% endif %}
{% set master="-master=127.0.0.1:8080" %}
{% if grains.master_ip is defined %}
{% set master="-master=" + grains.master_ip + ":8080" %}
{% endif %}
DAEMON_ARGS="{{daemon_args}} {{master}}"
```
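For context: with the apiserver listening on 0.0.0.0 (per this commit's subject), a component running on the master can always reach it over the loopback address, so the `grains.master_ip` override is presumably the part this hunk drops. A rough sketch of what the rendered default file would then contain, assuming `daemon_args` stays empty (the exact target path varies by distro and is not shown in this hunk):

```
# Hypothetical rendered output of the template above on the master.
# daemon_args is assumed empty; the leading space comes from "{{daemon_args}} {{master}}".
DAEMON_ARGS=" -master=127.0.0.1:8080"
```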
```
@@ -47,7 +47,7 @@ server {
auth_basic_user_file /usr/share/nginx/htpasswd;

# Proxy settings
proxy_pass http://localhost:8080/;
proxy_pass http://127.0.0.1:8080/;
proxy_connect_timeout 159s;
proxy_send_timeout 600s;
proxy_read_timeout 600s;
```
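Since nginx terminates basic auth and proxies to the apiserver on port 8080 locally, the proxy can be sanity-checked from the host with curl. A sketch, assuming the Vagrant defaults seen elsewhere in this commit (master at 10.245.1.2, vagrant/vagrant credentials, a self-signed certificate) and the v1beta1 `minions` endpoint of that era:

```
# Query the apiserver through the nginx basic-auth proxy on the Vagrant master.
# -k accepts the self-signed certificate; -u supplies the basic-auth credentials.
curl -k -u vagrant:vagrant https://10.245.1.2/api/v1beta1/minions
```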
```
@@ -21,7 +21,7 @@ NUM_MINIONS=${KUBERNETES_NUM_MINIONS-"3"}

# IP LOCATIONS FOR INTERACTING WITH THE MASTER
export KUBE_MASTER_IP="10.245.1.2"
export KUBERNETES_MASTER="http://10.245.1.2:8080"
export KUBERNETES_MASTER="https://10.245.1.2"

# IP LOCATIONS FOR INTERACTING WITH THE MINIONS
MINION_IP_BASE="10.245.2."
```
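As the hunk header shows, NUM_MINIONS falls back to 3 unless KUBERNETES_NUM_MINIONS is set, and KUBERNETES_MASTER now points at the https proxy rather than port 8080 directly. A sketch of overriding the minion count before bringing the cluster up (cluster/kube-up.sh and KUBERNETES_PROVIDER are the usual entry points in this tree, though neither appears in this hunk):

```
# Bring up a smaller Vagrant cluster with two minions instead of the default three.
export KUBERNETES_PROVIDER=vagrant
export KUBERNETES_NUM_MINIONS=2
cluster/kube-up.sh
```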
```
@@ -28,6 +28,8 @@ MINION_IP_RANGES=($(eval echo "10.245.{2..${NUM_MINIONS}}.2/24"))
MINION_SCOPES=""

# simplified setup for local vagrant 2 node cluster
MASTER_HTPASSWD=passw0rd

MASTER_USER=vagrant
MASTER_PASSWD=vagrant

# Location to hold temp files for provision process
KUBE_TEMP=/var/kube-temp
```
```
@@ -18,12 +18,11 @@
set -e
source $(dirname $0)/provision-config.sh

# we will run provision to update code each time we test, so we do not want to do salt install each time
if [ ! -f "/var/kube-vagrant-setup" ]; then
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
# Update salt configuration
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf

cat <<EOF >/etc/salt/minion.d/grains.conf
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  master_ip: $MASTER_IP
  etcd_servers: $MASTER_IP
```
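Once master.conf and grains.conf are written, the values can be checked on the master with standard Salt CLI calls; a sketch (the grain names are taken from the heredoc above):

```
# Confirm the minion-side config points at the right salt master.
cat /etc/salt/minion.d/master.conf

# Confirm the grains written by the heredoc are visible to salt.
salt-call grains.item master_ip etcd_servers roles
```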
```
@@ -32,27 +31,38 @@ grains:
    - kubernetes-master
EOF

# Configure the salt-master
# Auto accept all keys from minions that try to join
mkdir -p /etc/salt/master.d
cat <<EOF >/etc/salt/master.d/auto-accept.conf
# Configure the salt-master
# Auto accept all keys from minions that try to join
mkdir -p /etc/salt/master.d
cat <<EOF >/etc/salt/master.d/auto-accept.conf
open_mode: True
auto_accept: True
EOF

cat <<EOF >/etc/salt/master.d/reactor.conf
cat <<EOF >/etc/salt/master.d/reactor.conf
# React to new minions starting by running highstate on them.
reactor:
  - 'salt/minion/*/start':
    - /srv/reactor/start.sls
EOF

cat <<EOF >/etc/salt/master.d/salt-output.conf
cat <<EOF >/etc/salt/master.d/salt-output.conf
# Minimize the amount of output to terminal
state_verbose: False
state_output: mixed
EOF

# Configure nginx authorization
mkdir -p $KUBE_TEMP
mkdir -p /srv/salt/nginx
echo "Using password: $MASTER_USER:$MASTER_PASSWD"
python $(dirname $0)/../../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $MASTER_USER $MASTER_PASSWD
MASTER_HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd)
echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd

# we will run provision to update code each time we test, so we do not want to do salt install each time
if [ ! $(which salt-master) ]; then

# Install Salt
#
# We specify -X to avoid a race condition that can cause minion failure to
```
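The htpasswd.py call above writes a single standard `user:hash` entry, which is what nginx's auth_basic_user_file expects; the script then copies it into the salt tree so it can be distributed to the master. A sketch of inspecting the result (the hash itself differs on every run):

```
# The generated file is one standard htpasswd entry of the form "user:hash",
# e.g. vagrant:<hash>   (the hash value is a placeholder here).
sudo cat /srv/salt/nginx/htpasswd
```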
```
@@ -67,11 +77,6 @@ EOF
# (a new service file needs to be added for salt-api)
curl -sS -L https://raw.githubusercontent.com/saltstack/salt-bootstrap/v2014.06.30/bootstrap-salt.sh | sh -s -- -M

mkdir -p /srv/salt/nginx
echo $MASTER_HTPASSWD > /srv/salt/nginx/htpasswd

# a file we touch to state that base-setup is done
echo "Salt configured" > /var/kube-vagrant-setup
fi

# Build release
```
```
@@ -19,20 +19,19 @@ set -e
source $(dirname $0)/provision-config.sh

MINION_IP=$4
# we will run provision to update code each time we test, so we do not want to do salt install each time
if [ ! -f "/var/kube-vagrant-setup" ]; then

if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then
echo "Adding host entry for $MASTER_NAME"
echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
fi
# make sure each minion has an entry in hosts file for master
if [ ! "$(cat /etc/hosts | grep $MASTER_NAME)" ]; then
echo "Adding host entry for $MASTER_NAME"
echo "$MASTER_IP $MASTER_NAME" >> /etc/hosts
fi

# Prepopulate the name of the Master
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf
# Let the minion know who its master is
mkdir -p /etc/salt/minion.d
echo "master: $MASTER_NAME" > /etc/salt/minion.d/master.conf

# Our minions will have a pool role to distinguish them from the master.
cat <<EOF >/etc/salt/minion.d/grains.conf
# Our minions will have a pool role to distinguish them from the master.
cat <<EOF >/etc/salt/minion.d/grains.conf
grains:
  minion_ip: $MINION_IP
  etcd_servers: $MASTER_IP
```
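Two quick checks confirm the minion wiring set up above: the /etc/hosts entry on the minion, and the key exchange on the master (auto_accept is enabled in the master config earlier in this commit). A sketch:

```
# On a minion: verify the master's host entry was appended.
grep "$MASTER_NAME" /etc/hosts

# On the master: list minion keys; with auto_accept they appear under "Accepted Keys".
sudo salt-key -L
```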
```
@@ -41,6 +40,8 @@ grains:
  cbr-cidr: $MINION_IP_RANGE
EOF

# we will run provision to update code each time we test, so we do not want to do salt install each time
if [ ! $(which salt-minion) ]; then
# Install Salt
#
# We specify -X to avoid a race condition that can cause minion failure to
```
```
@@ -50,8 +51,4 @@ EOF
## TODO this only works on systemd distros, need to find a work-around as removing -X above fails to start the services installed
systemctl enable salt-minion
systemctl start salt-minion

# a file we touch to state that base-setup is done
echo "Salt configured" > /var/kube-vagrant-setup

fi
```
````
@@ -91,6 +91,28 @@ cluster/kube-push.sh => updates a vagrant cluster
cluster/kubecfg.sh => interact with the cluster
```

### Authenticating with your master

To interact with the cluster, you must authenticate with the master when running cluster/kubecfg.sh commands.

If it's your first time using the cluster, your first invocation of cluster/kubecfg.sh will prompt you for credentials:

```
cd kubernetes
cluster/kubecfg.sh list minions
Please enter Username: vagrant
Please enter Password: vagrant
Minion identifier
----------
```

The kubecfg.sh command will cache your credentials in a .kubernetes_auth file so you will not be prompted in the future.
```
cat ~/.kubernetes_auth
{"User":"vagrant","Password":"vagrant"}
```

If you try Kubernetes against multiple cloud providers, make sure this file is correct for your target environment.

### Running a container
````
````
@@ -131,6 +153,21 @@ hack/e2e-test.sh

### Troubleshooting

#### I just created the cluster, but I am getting authorization errors!

You probably have an incorrect ~/.kubernetes_auth file for the cluster you are attempting to contact.

```
rm ~/.kubernetes_auth
```

And when using kubecfg.sh, provide the correct credentials:

```
Please enter Username: vagrant
Please enter Password: vagrant
```

#### I just created the cluster, but I do not see my container running!

If this is your first time creating the cluster, the kubelet on each minion schedules a number of docker pull requests to fetch prerequisite images. This can take some time and as a result may delay your initial pod getting provisioned.
````
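One way to tell whether those prerequisite pulls are still running is to look at docker directly on a minion; a sketch, assuming the Vagrant VM is named `minion-1` (the name comes from the Vagrantfile, not from this doc):

```
# From the kubernetes checkout on the host: inspect docker on the first minion.
vagrant ssh minion-1 -c 'sudo docker images && sudo docker ps'
```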