Vagrant now using pre-built binaries.
This commit is contained in:
parent 69a14a032c
commit 881cf80182
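In outline, the workflow this commit moves to is: build the release tarballs once on the host, then let the Vagrant provisioners install the pre-built binaries inside the VMs. A minimal sketch, with the commands inferred from the hunks below rather than from any documentation in this commit:

    # On the host, from a clone of the repo: build the server and salt tarballs (requires Docker).
    ./build/release.sh
    # The provisioning scripts below then pick up the artifacts from _output/release-tars/
    # (or from ./server/ when running out of a release tarball) instead of compiling
    # Kubernetes inside the master VM.
    vagrant up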
82  Vagrantfile  vendored
@@ -7,45 +7,73 @@ VAGRANTFILE_API_VERSION = "2"
# Require a recent version of vagrant otherwise some have reported errors setting host names on boxes
Vagrant.require_version ">= 1.6.2"

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
# The number of minions to provision
$num_minion = (ENV['KUBERNETES_NUM_MINIONS'] || 3).to_i

# The number of minions to provision
num_minion = (ENV['KUBERNETES_NUM_MINIONS'] || 3).to_i
# ip configuration
$master_ip = "10.245.1.2"
$minion_ip_base = "10.245.2."
$minion_ips = $num_minion.times.collect { |n| $minion_ip_base + "#{n+2}" }
$minion_ips_str = $minion_ips.join(",")

# ip configuration
master_ip = "10.245.1.2"
minion_ip_base = "10.245.2."
minion_ips = num_minion.times.collect { |n| minion_ip_base + "#{n+2}" }
minion_ips_str = minion_ips.join(",")
# Determine the OS platform to use
$kube_os = ENV['KUBERNETES_OS'] || "fedora"

# Determine the OS platform to use
kube_os = ENV['KUBERNETES_OS'] || "fedora"

# OS platform to box information
kube_box = {
"fedora" => {
"name" => "fedora20",
"box_url" => "http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-20_chef-provisionerless.box"
}
# OS platform to box information
$kube_box = {
"fedora" => {
"name" => "fedora20",
"box_url" => "http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_fedora-20_chef-provisionerless.box"
}
}

# This stuff is cargo-culted from http://www.stefanwrobel.com/how-to-make-vagrant-performance-not-suck
# Give access to all cpu cores on the host
host = RbConfig::CONFIG['host_os']
if host =~ /darwin/
$vm_cpus = `sysctl -n hw.ncpu`.to_i
elsif host =~ /linux/
$vm_cpus = `nproc`.to_i
else # sorry Windows folks, I can't help you
$vm_cpus = 2
end

# Give VM 1024 of RAM
$vm_mem = 1024


Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
def customize_vm(config)
config.vm.box = $kube_box[$kube_os]["name"]
config.vm.box_url = $kube_box[$kube_os]["box_url"]

config.vm.provider :virtualbox do |v|
v.customize ["modifyvm", :id, "--memory", $vm_mem]
v.customize ["modifyvm", :id, "--cpus", $vm_cpus]

# Use faster paravirtualized networking
v.customize ["modifyvm", :id, "--nictype1", "virtio"]
v.customize ["modifyvm", :id, "--nictype2", "virtio"]
end
end

# Kubernetes master
config.vm.define "master" do |config|
config.vm.box = kube_box[kube_os]["name"]
config.vm.box_url = kube_box[kube_os]["box_url"]
config.vm.provision "shell", inline: "/vagrant/cluster/vagrant/provision-master.sh #{master_ip} #{num_minion} #{minion_ips_str}"
config.vm.network "private_network", ip: "#{master_ip}"
customize_vm config

config.vm.provision "shell", inline: "/vagrant/cluster/vagrant/provision-master.sh #{$master_ip} #{$num_minion} #{$minion_ips_str}"
config.vm.network "private_network", ip: "#{$master_ip}"
config.vm.hostname = "kubernetes-master"
end
end

# Kubernetes minion
num_minion.times do |n|
$num_minion.times do |n|
config.vm.define "minion-#{n+1}" do |minion|
customize_vm minion

minion_index = n+1
minion_ip = minion_ips[n]
minion.vm.box = kube_box[kube_os]["name"]
minion.vm.box_url = kube_box[kube_os]["box_url"]
minion.vm.provision "shell", inline: "/vagrant/cluster/vagrant/provision-minion.sh #{master_ip} #{num_minion} #{minion_ips_str} #{minion_ip} #{minion_index}"
minion_ip = $minion_ips[n]
minion.vm.provision "shell", inline: "/vagrant/cluster/vagrant/provision-minion.sh #{$master_ip} #{$num_minion} #{$minion_ips_str} #{minion_ip} #{minion_index}"
minion.vm.network "private_network", ip: "#{minion_ip}"
minion.vm.hostname = "kubernetes-minion-#{minion_index}"
end
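Both the old and the new Vagrantfile read the same two environment variables; a usage sketch for those knobs (values are examples only, and "fedora" is the only entry defined in the box table above):

    # Provision two minions instead of the default three, on the default Fedora 20 box.
    KUBERNETES_NUM_MINIONS=2 KUBERNETES_OS=fedora vagrant up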
@@ -28,10 +28,12 @@ ENV GOARCH amd64
# Get the code coverage tool and godep
RUN go get code.google.com/p/go.tools/cmd/cover github.com/tools/godep

RUN mkdir -p /go/src/github.com/coreos/etcd && \
cd /go/src/github.com/coreos/etcd && \
git clone https://github.com/coreos/etcd.git . -b v0.4.6 --depth=1 && \
go install github.com/coreos/etcd
# Download and symlink etcd. We need this for our integration tests.
RUN mkdir -p /usr/local/src/etcd &&\
cd /usr/local/src/etcd &&\
curl -L -O -s https://github.com/coreos/etcd/releases/download/v0.4.6/etcd-v0.4.6-linux-amd64.tar.gz &&\
tar xzf etcd-v0.4.6-linux-amd64.tar.gz &&\
ln -s ../src/etcd/etcd-v0.4.6-linux-amd64/etcd /usr/local/bin/

# Mark this as a kube-build container
RUN touch /kube-build-image
@@ -152,7 +152,8 @@ function kube::build::build_image() {
build
cmd
examples
Godeps
Godeps/Godeps.json
Godeps/_workspace/src
hack
LICENSE
pkg
@@ -203,8 +204,9 @@ function kube::build::docker_build() {

echo "+++ Building Docker image ${image}. This can take a while."
set +e # We are handling the error here manually
local -r docker_output="$(${build_cmd} 2>&1)"
if [ $? -ne 0 ]; then
local -r docker_output
docker_output=$(${build_cmd} 2>&1)
if [[ $? -ne 0 ]]; then
set -e
echo "+++ Docker build command failed for ${image}" >&2
echo >&2
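The new version above splits the declaration of docker_output from the assignment. The likely motivation (my reading, not stated in the hunk) is that combining them makes $? report the exit status of local rather than of the build command, so a failed docker build would slip past the check. A minimal illustration, not taken from the repo:

    masked() {
      local -r out="$(false)"
      echo "$?"    # prints 0: the failure of 'false' is hidden behind 'local'
    }
    visible() {
      local out
      out="$(false)"
      echo "$?"    # prints 1: the assignment preserves the substitution's status
    }
    masked; visible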
@@ -245,7 +247,7 @@ function kube::build::clean_images() {
function kube::build::run_build_command() {
[[ -n "$@" ]] || { echo "Invalid input." >&2; return 4; }

local -r docker="docker run --name=${DOCKER_CONTAINER_NAME} -it ${DOCKER_MOUNT} ${KUBE_BUILD_IMAGE}"
local -r docker="docker run --name=${DOCKER_CONTAINER_NAME} --attach=stdout --attach=stderr --attach=stdin --tty ${DOCKER_MOUNT} ${KUBE_BUILD_IMAGE}"

# Remove the container if it is left over from some previous aborted run
docker rm ${DOCKER_CONTAINER_NAME} >/dev/null 2>&1 || true
@@ -393,9 +395,13 @@ function kube::release::package_full_tarball() {
cp "${RELEASE_DIR}/kubernetes-salt.tar.gz" "${release_stage}/server/"
cp "${RELEASE_DIR}"/kubernetes-server-*.tar.gz "${release_stage}/server/"

mkdir -p "${release_stage}/third_party"
cp -R "${KUBE_REPO_ROOT}/third_party/htpasswd" "${release_stage}/third_party/htpasswd"

cp -R "${KUBE_REPO_ROOT}/examples" "${release_stage}/"
cp "${KUBE_REPO_ROOT}/README.md" "${release_stage}/"
cp "${KUBE_REPO_ROOT}/LICENSE" "${release_stage}/"
cp "${KUBE_REPO_ROOT}/Vagrantfile" "${release_stage}/"

local package_name="${RELEASE_DIR}/kubernetes.tar.gz"
tar czf "${package_name}" -C "${release_stage}/.." .
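Illustrative only: assuming RELEASE_DIR resolves to _output/release-tars (the location the Vagrant provisioning later in this commit looks in) and the staging directory is named kubernetes, the full tarball assembled above could be inspected with:

    tar tzf _output/release-tars/kubernetes.tar.gz
    # Per the cp lines above, expect entries along the lines of:
    #   kubernetes/server/kubernetes-salt.tar.gz
    #   kubernetes/server/kubernetes-server-linux-amd64.tar.gz
    #   kubernetes/third_party/htpasswd/...
    #   kubernetes/examples/...  kubernetes/README.md  kubernetes/LICENSE  kubernetes/Vagrantfile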
@@ -62,7 +62,7 @@ function test-teardown {
echo "TODO"
}

# Set the {user} and {password} environment values required to interact with provider
# Set the {KUBE_USER} and {KUBE_PASSWORD} environment values required to interact with provider
function get-password {
echo "TODO"
}

4  cluster/saltbase/pillar/cluster-params.sls  Normal file
@@ -0,0 +1,4 @@
# This file is meant to be replaced with cluster specific parameters if necessary.

# Examples:
# node_instance_prefix: <base of regex for -minion_regexp to apiserver>
@@ -5,13 +5,17 @@

{% set machines = ""%}
{% set cloud_provider = "" %}
{% set minion_regexp = "-minion_regexp=.*" %}
{% if grains.cloud_provider is defined %}
{% set cloud_provider = "-cloud_provider=" + grains.cloud_provider %}
{% endif %}

{% set minion_regexp = "-minion_regexp='" + pillar['node_instance_prefix'] + ".*'" %}
{% set address = "-address=127.0.0.1" %}

{% if pillar['node_instance_prefix'] is defined %}
{% set minion_regexp = "-minion_regexp='" + pillar['node_instance_prefix'] + ".*'" %}
{% endif %}

{% if grains.etcd_servers is defined %}
{% set etcd_servers = "-etcd_servers=http://" + grains.etcd_servers + ":4001" %}
{% else %}
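The template above only sets -minion_regexp when pillar['node_instance_prefix'] is defined, which is what the new cluster-params.sls pillar file is for. A hypothetical way to fill it in for a provider whose minions are named kubernetes-minion-1, kubernetes-minion-2, ... (the value is an assumption, not part of this commit):

    cat > cluster/saltbase/pillar/cluster-params.sls <<'EOF'
    # Cluster-specific overrides; hypothetical value matching hostnames like kubernetes-minion-1.
    node_instance_prefix: kubernetes-minion
    EOF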
@@ -21,7 +21,6 @@ NUM_MINIONS=${KUBERNETES_NUM_MINIONS-"3"}

# IP LOCATIONS FOR INTERACTING WITH THE MASTER
export KUBE_MASTER_IP="10.245.1.2"
export KUBERNETES_MASTER="https://10.245.1.2"

INSTANCE_PREFIX=kubernetes
MASTER_NAME="${INSTANCE_PREFIX}-master"
@@ -18,6 +18,57 @@
set -e
source $(dirname $0)/provision-config.sh

# # Install Docker on master to run the build. This is a necessary chunk of
# # bootstrapping.
# yum install -y docker-io
# SYSTEMD_LOG_LEVEL=notice systemctl enable docker
# systemctl start docker

# # Build release
# echo "Building release"
# pushd /vagrant
# bash -x ./build/release.sh
# popd

function release_not_found() {
echo "It looks as if you don't have a compiled version of Kubernetes. If you" >&2
echo "are running from a clone of the git repo, please run ./build/release.sh." >&2
echo "Note that this requires having Docker installed. If you are running " >&2
echo "from a release tarball, something is wrong. Look at " >&2
echo "http://kubernetes.io/ for information on how to contact the development team for help." >&2
exit 1
}

# Look for our precompiled binary releases. When running from a source repo,
# these are generated under _output. When running from an release tarball these
# are under ./server.
server_binary_tar="/vagrant/server/kubernetes-server-linux-amd64.tar.gz"
if [[ ! -f "$server_binary_tar" ]]; then
server_binary_tar="/vagrant/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
fi
if [[ ! -f "$server_binary_tar" ]]; then
release_not_found
fi

salt_tar="/vagrant/server/kubernetes-salt.tar.gz"
if [[ ! -f "$salt_tar" ]]; then
salt_tar="/vagrant/_output/release-tars/kubernetes-salt.tar.gz"
fi
if [[ ! -f "$salt_tar" ]]; then
release_not_found
fi


echo "Running release install script"
rm -rf /kube-install
mkdir -p /kube-install
pushd /kube-install
tar xzf "$salt_tar"
cp "$server_binary_tar" .
./kubernetes/saltbase/install.sh "${server_binary_tar##*/}"
popd


# Setup hosts file to support ping by hostname to each minion in the cluster from apiserver
minion_ip_array=(${MINION_IPS//,/ })
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
@@ -103,19 +154,10 @@ EOF
SYSTEMD_LOG_LEVEL=notice systemctl enable salt-api
systemctl start salt-api

else
# Only run highstate when updating the config. In the first-run case, Salt is
# set up to run highstate as new minions join for the first time.
echo "Executing configuration"
salt '*' mine.update
salt --force-color '*' state.highstate
fi

# Build release
echo "Building release"
pushd /vagrant
./release/build-release.sh kubernetes
popd

echo "Running release install script"
pushd /vagrant/_output/release/master-release/src/scripts
./master-release-install.sh
popd

echo "Executing configuration"
salt '*' mine.update
salt --force-color '*' state.highstate
@@ -19,8 +19,7 @@
source $(dirname ${BASH_SOURCE})/${KUBE_CONFIG_FILE-"config-default.sh"}

function detect-master () {
echo "KUBE_MASTER_IP: $KUBE_MASTER_IP"
echo "KUBE_MASTER: $KUBE_MASTER"
echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}"
}

# Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[]
@@ -31,7 +30,7 @@ function detect-minions {
# Verify prereqs on host machine
function verify-prereqs {
for x in vagrant virtualbox; do
if [ "$(which $x)" == "" ]; then
if ! which "$x" >/dev/null; then
echo "Can't find $x in PATH, please fix and retry."
exit 1
fi
@@ -40,7 +39,6 @@ function verify-prereqs {

# Instantiate a kubernetes cluster
function kube-up {

get-password
vagrant up

@@ -51,26 +49,37 @@ function kube-up {

# verify master has all required daemons
echo "Validating master"
MACHINE="master"
REQUIRED_DAEMON=("salt-master" "salt-minion" "apiserver" "nginx" "controller-manager" "scheduler")
VALIDATED="1"
until [ "$VALIDATED" -eq "0" ]; do
VALIDATED="0"
for daemon in ${REQUIRED_DAEMON[@]}; do
vagrant ssh $MACHINE -c "which $daemon" >/dev/null 2>&1 || { printf "."; VALIDATED="1"; sleep 2; }
local machine="master"
local -a required_daemon=("salt-master" "salt-minion" "apiserver" "nginx" "controller-manager" "scheduler")
local validated="1"
until [[ "$validated" == "0" ]]; do
validated="0"
local daemon
for daemon in "${required_daemon[@]}"; do
vagrant ssh "$machine" -c "which '${daemon}'" >/dev/null 2>&1 || {
printf "."
validated="1"
sleep 2
}
done
done

# verify each minion has all required daemons
local i
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
echo "Validating ${VAGRANT_MINION_NAMES[$i]}"
MACHINE=${VAGRANT_MINION_NAMES[$i]}
REQUIRED_DAEMON=("salt-minion" "kubelet" "docker")
VALIDATED="1"
until [ "$VALIDATED" -eq "0" ]; do
VALIDATED="0"
for daemon in ${REQUIRED_DAEMON[@]}; do
vagrant ssh $MACHINE -c "which $daemon" >/dev/null 2>&1 || { printf "."; VALIDATED="1"; sleep 2; }
local machine=${VAGRANT_MINION_NAMES[$i]}
local -a required_daemon=("salt-minion" "kubelet" "docker")
local validated="1"
until [[ "$validated" == "0" ]]; do
validated="0"
local daemon
for daemon in "${required_daemon[@]}"; do
vagrant ssh "$machine" -c "which $daemon" >/dev/null 2>&1 || {
printf "."
validated="1"
sleep 2
}
done
done
done
@@ -78,11 +87,16 @@ function kube-up {
echo
echo "Waiting for each minion to be registered with cloud provider"
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
MACHINE="${MINION_NAMES[$i]}"
COUNT="0"
until [ "$COUNT" -eq "1" ]; do
$(dirname $0)/kubecfg.sh -template '{{range.Items}}{{.ID}}:{{end}}' list minions > /tmp/minions
COUNT=$(grep -c ${MINION_NAMES[i]} /tmp/minions) || { printf "."; sleep 2; COUNT="0"; }
local machine="${MINION_NAMES[$i]}"
local count="0"
until [[ "$count" == "1" ]]; do
local minions
minions=$("$(dirname $0)/cluster/kubecfg.sh" -template '{{range.Items}}{{.ID}}:{{end}}' list minions)
count=$(echo $minions | grep -c "${MINION_NAMES[i]}") || {
printf "."
sleep 2
count="0"
}
done
done

@@ -124,8 +138,7 @@ function test-teardown {

# Set the {user} and {password} environment values required to interact with provider
function get-password {
export user=vagrant
export passwd=vagrant
echo "Using credentials: $user:$passwd"
export KUBE_USER=vagrant
export KUBE_PASSWORD=vagrant
echo "Using credentials: $KUBE_USER:$KUBE_PASSWORD"
}
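Not part of the commit: a sketch of exercising the renamed credentials against the master address exported in config-default.sh. The /api/v1beta1/minions path and the -k flag (assuming the master serves a self-signed certificate) are assumptions about the setup of that era:

    export KUBE_USER=vagrant KUBE_PASSWORD=vagrant
    curl -k -u "${KUBE_USER}:${KUBE_PASSWORD}" https://10.245.1.2/api/v1beta1/minions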