mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-07-22 03:11:40 +00:00

Minion->Node rename: NUM_NODES

This commit is contained in:
parent d1dbeb98a0
commit 53172a5356
Vagrantfile (vendored): 2 lines changed

@@ -18,7 +18,7 @@ END
 end
 
 # The number of minions to provision
-$num_minion = (ENV['NUM_MINIONS'] || 1).to_i
+$num_minion = (ENV['NUM_NODES'] || 1).to_i
 
 # ip configuration
 $master_ip = ENV['MASTER_IP']

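For context, the renamed variable is read from the environment when Vagrant provisions the cluster; a minimal usage sketch (the `vagrant up` invocation is illustrative, not part of the commit):

```sh
# Bring up a Vagrant cluster with 3 nodes instead of the default.
# The Vagrantfile falls back to 1 only when NUM_NODES is unset,
# via (ENV['NUM_NODES'] || 1).to_i.
export NUM_NODES=3
vagrant up
```
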
@@ -17,13 +17,13 @@
 
 ZONE=${KUBE_AWS_ZONE:-us-west-2a}
 MASTER_SIZE=${MASTER_SIZE:-}
 NODE_SIZE=${NODE_SIZE:-}
-NUM_MINIONS=${NUM_MINIONS:-4}
+NUM_NODES=${NUM_NODES:-4}
 
 # Dynamically set node sizes so that Heapster has enough space to run
 if [[ -z ${NODE_SIZE} ]]; then
-  if (( ${NUM_MINIONS} < 50 )); then
+  if (( ${NUM_NODES} < 50 )); then
     NODE_SIZE="t2.micro"
-  elif (( ${NUM_MINIONS} < 150 )); then
+  elif (( ${NUM_NODES} < 150 )); then
     NODE_SIZE="t2.small"
   else
     NODE_SIZE="t2.medium"
@@ -33,9 +33,9 @@ fi
 # Dynamically set the master size by the number of nodes, these are guesses
 # TODO: gather some data
 if [[ -z ${MASTER_SIZE} ]]; then
-  if (( ${NUM_MINIONS} < 50 )); then
+  if (( ${NUM_NODES} < 50 )); then
     MASTER_SIZE="t2.micro"
-  elif (( ${NUM_MINIONS} < 150 )); then
+  elif (( ${NUM_NODES} < 150 )); then
     MASTER_SIZE="t2.small"
   else
     MASTER_SIZE="t2.medium"
@@ -121,7 +121,7 @@ ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
 if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   # TODO: actually configure ASG or similar
   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
-  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
+  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}"
   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
 fi

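The `${NUM_NODES:-4}` form means an exported value from the caller wins and the default only fills the gap. A standalone sketch of the same sizing heuristic, using the thresholds from the hunk above (illustrative, not the actual script):

```sh
#!/usr/bin/env bash
# Pick an instance size from the node count, mirroring the thresholds above.
NUM_NODES=${NUM_NODES:-4}          # caller's exported value wins; else 4
if (( NUM_NODES < 50 )); then
  NODE_SIZE="t2.micro"
elif (( NUM_NODES < 150 )); then
  NODE_SIZE="t2.small"
else
  NODE_SIZE="t2.medium"
fi
echo "NUM_NODES=${NUM_NODES} -> NODE_SIZE=${NODE_SIZE}"
```
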
@@ -18,13 +18,13 @@ ZONE=${KUBE_AWS_ZONE:-us-west-2a}
 
 MASTER_SIZE=${MASTER_SIZE:-}
 NODE_SIZE=${NODE_SIZE:-}
-NUM_MINIONS=${NUM_MINIONS:-2}
+NUM_NODES=${NUM_NODES:-2}
 
 # Dynamically set node sizes so that Heapster has enough space to run
 if [[ -z ${NODE_SIZE} ]]; then
-  if (( ${NUM_MINIONS} < 50 )); then
+  if (( ${NUM_NODES} < 50 )); then
     NODE_SIZE="t2.micro"
-  elif (( ${NUM_MINIONS} < 150 )); then
+  elif (( ${NUM_NODES} < 150 )); then
     NODE_SIZE="t2.small"
   else
     NODE_SIZE="t2.medium"
@@ -34,9 +34,9 @@ fi
 # Dynamically set the master size by the number of nodes, these are guesses
 # TODO: gather some data
 if [[ -z ${MASTER_SIZE} ]]; then
-  if (( ${NUM_MINIONS} < 50 )); then
+  if (( ${NUM_NODES} < 50 )); then
     MASTER_SIZE="t2.micro"
-  elif (( ${NUM_MINIONS} < 150 )); then
+  elif (( ${NUM_NODES} < 150 )); then
     MASTER_SIZE="t2.small"
   else
     MASTER_SIZE="t2.medium"
@@ -117,7 +117,7 @@ ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
 if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   # TODO: actually configure ASG or similar
   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
-  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
+  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}"
   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
 fi

@@ -38,7 +38,7 @@ export MASTER_SIZE=c4.large
 export NODE_SIZE=r3.large
 ```
 
-If you don't specify master and minion sizes, the scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_MINIONS}`.
+If you don't specify master and minion sizes, the scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_NODES}`.
 In particular for clusters less than 50 nodes it will
 use a `t2.micro` for clusters between 50 and 150 nodes it will use a `t2.small` and for clusters with greater than 150 nodes it will use a `t2.medium`.

@@ -40,7 +40,7 @@ network_provider: '$(echo "$NETWORK_PROVIDER")'
 opencontrail_tag: '$(echo "$OPENCONTRAIL_TAG")'
 opencontrail_kubernetes_tag: '$(echo "$OPENCONTRAIL_KUBERNETES_TAG")'
 opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")'
-num_nodes: $(echo "${NUM_MINIONS}")
+num_nodes: $(echo "${NUM_NODES}")
 e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
 EOF

@@ -1010,8 +1010,8 @@ function start-minions() {
   ${AWS_ASG_CMD} create-auto-scaling-group \
       --auto-scaling-group-name ${ASG_NAME} \
       --launch-configuration-name ${ASG_NAME} \
-      --min-size ${NUM_MINIONS} \
-      --max-size ${NUM_MINIONS} \
+      --min-size ${NUM_NODES} \
+      --max-size ${NUM_NODES} \
       --vpc-zone-identifier ${SUBNET_ID} \
       --tags ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=Name,Value=${NODE_INSTANCE_PREFIX} \
              ResourceId=${ASG_NAME},ResourceType=auto-scaling-group,Key=Role,Value=${NODE_TAG} \
@@ -1022,7 +1022,7 @@ function start-minions() {
   attempt=0
   while true; do
     find-running-minions > $LOG
-    if [[ ${#NODE_IDS[@]} == ${NUM_MINIONS} ]]; then
+    if [[ ${#NODE_IDS[@]} == ${NUM_NODES} ]]; then
       echo -e " ${color_green}${#NODE_IDS[@]} minions started; ready${color_norm}"
       break
     fi

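The second hunk is the usual poll-until-count pattern: keep re-querying until the number of running instances matches the requested count. A generic sketch, assuming a hypothetical `count-ready-nodes` helper in place of `find-running-minions`:

```sh
# Poll until the expected number of nodes is running.
# count-ready-nodes is hypothetical; it stands in for find-running-minions.
while true; do
  ready=$(count-ready-nodes)
  if [[ "${ready}" == "${NUM_NODES}" ]]; then
    echo "${ready} nodes started; ready"
    break
  fi
  sleep 5
done
```
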
@@ -27,8 +27,8 @@ export NODES=${NODES:-"centos@172.10.0.12 centos@172.10.0.13"}
 # Number of nodes in your cluster.
 export NUM_NODES=${NUM_NODES:-2}
 
-# Should be removed when NUM_MINIONS is deprecated in validate-cluster.sh
-export NUM_MINIONS=${NUM_NODES}
+# Should be removed when NUM_NODES is deprecated in validate-cluster.sh
+export NUM_NODES=${NUM_NODES}
 
 # By default, the cluster will use the etcd installed on master.
 export ETCD_SERVERS=${ETCD_SERVERS:-"http://$MASTER_IP:4001"}

@@ -20,7 +20,7 @@ GCLOUD=gcloud
 ZONE=${KUBE_GCE_ZONE:-us-central1-b}
 MASTER_SIZE=${MASTER_SIZE:-n1-standard-2}
 NODE_SIZE=${NODE_SIZE:-n1-standard-2}
-NUM_MINIONS=${NUM_MINIONS:-3}
+NUM_NODES=${NUM_NODES:-3}
 MASTER_DISK_TYPE=pd-ssd
 MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB}
 NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
@@ -101,7 +101,7 @@ ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
 ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
 if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
-  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
+  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}"
   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
   ENABLE_CLUSTER_MONITORING=googleinfluxdb
 fi

@@ -20,7 +20,7 @@ GCLOUD=gcloud
 ZONE=${KUBE_GCE_ZONE:-us-central1-b}
 MASTER_SIZE=${MASTER_SIZE:-n1-standard-2}
 NODE_SIZE=${NODE_SIZE:-n1-standard-2}
-NUM_MINIONS=${NUM_MINIONS:-3}
+NUM_NODES=${NUM_NODES:-3}
 MASTER_DISK_TYPE=pd-ssd
 MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB}
 NODE_DISK_TYPE=${NODE_DISK_TYPE:-pd-standard}
@@ -109,7 +109,7 @@ ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"
 ENABLE_NODE_AUTOSCALER="${KUBE_ENABLE_NODE_AUTOSCALER:-false}"
 if [[ "${ENABLE_NODE_AUTOSCALER}" == "true" ]]; then
   AUTOSCALER_MIN_NODES="${KUBE_AUTOSCALER_MIN_NODES:-1}"
-  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_MINIONS}}"
+  AUTOSCALER_MAX_NODES="${KUBE_AUTOSCALER_MAX_NODES:-${NUM_NODES}}"
   TARGET_NODE_UTILIZATION="${KUBE_TARGET_NODE_UTILIZATION:-0.7}"
 fi

@@ -285,7 +285,7 @@ opencontrail_public_subnet: '$(echo "$OPENCONTRAIL_PUBLIC_SUBNET")'
 enable_manifest_url: '$(echo "$ENABLE_MANIFEST_URL" | sed -e "s/'/''/g")'
 manifest_url: '$(echo "$MANIFEST_URL" | sed -e "s/'/''/g")'
 manifest_url_header: '$(echo "$MANIFEST_URL_HEADER" | sed -e "s/'/''/g")'
-num_nodes: $(echo "${NUM_MINIONS}")
+num_nodes: $(echo "${NUM_NODES}")
 e2e_storage_test_environment: '$(echo "$E2E_STORAGE_TEST_ENVIRONMENT" | sed -e "s/'/''/g")'
 EOF

@@ -680,7 +680,7 @@ function kube-up {
       --project "${PROJECT}" \
       --zone "${ZONE}" \
       --base-instance-name "${NODE_INSTANCE_PREFIX}" \
-      --size "${NUM_MINIONS}" \
+      --size "${NUM_NODES}" \
       --template "$template_name" || true;
   gcloud compute instance-groups managed wait-until-stable \
       "${NODE_INSTANCE_PREFIX}-group" \
@@ -1333,7 +1333,7 @@ KUBELET_APISERVER: $(yaml-quote ${KUBELET_APISERVER:-})
 ENABLE_MANIFEST_URL: $(yaml-quote ${ENABLE_MANIFEST_URL:-false})
 MANIFEST_URL: $(yaml-quote ${MANIFEST_URL:-})
 MANIFEST_URL_HEADER: $(yaml-quote ${MANIFEST_URL_HEADER:-})
-NUM_MINIONS: $(yaml-quote ${NUM_MINIONS})
+NUM_NODES: $(yaml-quote ${NUM_NODES})
 EOF
 if [ -n "${APISERVER_TEST_ARGS:-}" ]; then
   cat >>$file <<EOF

@@ -19,7 +19,7 @@
 # - CLUSTER_NAME (the name of the cluster)
 
 ZONE="${ZONE:-us-central1-f}"
-NUM_MINIONS="${NUM_MINIONS:-3}"
+NUM_NODES="${NUM_NODES:-3}"
 CLUSTER_API_VERSION="${CLUSTER_API_VERSION:-}"
 NETWORK="${NETWORK:-default}"
 NETWORK_RANGE="${NETWORK_RANGE:-10.240.0.0/16}"
@@ -112,7 +112,7 @@ function verify-prereqs() {
 #   CLUSTER_NAME
 #   ZONE
 #   CLUSTER_API_VERSION (optional)
-#   NUM_MINIONS
+#   NUM_NODES
 #   NODE_SCOPES
 #   MACHINE_TYPE
 function kube-up() {
@@ -143,7 +143,7 @@ function kube-up() {
   local create_args=(
     "--zone=${ZONE}"
     "--project=${PROJECT}"
-    "--num-nodes=${NUM_MINIONS}"
+    "--num-nodes=${NUM_NODES}"
     "--network=${NETWORK}"
     "--scopes=${NODE_SCOPES}"
     "--cluster-version=${CLUSTER_API_VERSION}"

@@ -14,4 +14,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-NUM_MINIONS=${NUM_MINIONS:-2}
+NUM_NODES=${NUM_NODES:-2}

@@ -71,7 +71,7 @@ function detect-minions() {
   # Strip out the IP addresses
   export KUBE_NODE_IP_ADDRESSES=($(${JUJU_PATH}/return-node-ips.py "${ipoutput}"))
   # echo "Kubernetes minions: " ${KUBE_NODE_IP_ADDRESSES[@]} 1>&2
-  export NUM_MINIONS=${#KUBE_NODE_IP_ADDRESSES[@]}
+  export NUM_NODES=${#KUBE_NODE_IP_ADDRESSES[@]}
 }
 
 function get-password() {

@@ -22,7 +22,7 @@
 GCLOUD=gcloud
 ZONE=${KUBE_GCE_ZONE:-us-central1-b}
 MASTER_SIZE=${MASTER_SIZE:-n1-standard-4}
-NUM_MINIONS=${NUM_MINIONS:-100}
+NUM_NODES=${NUM_NODES:-100}
 MASTER_DISK_TYPE=pd-ssd
 MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-20GB}
 REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}

@@ -17,8 +17,8 @@
 ## Contains configuration values for interacting with the libvirt CoreOS cluster
 
 # Number of minions in the cluster
-NUM_MINIONS=${NUM_MINIONS:-3}
-export NUM_MINIONS
+NUM_NODES=${NUM_NODES:-3}
+export NUM_NODES
 
 # The IP of the master
 export MASTER_IP="192.168.10.1"
@@ -33,18 +33,18 @@ MASTER_CONTAINER_NETMASK="255.255.255.0"
 MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1"
 MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24"
 CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16"
-if [[ "$NUM_MINIONS" -gt 253 ]]; then
+if [[ "$NUM_NODES" -gt 253 ]]; then
   echo "ERROR: Because of how IPs are allocated in ${BASH_SOURCE}, you cannot create more than 253 nodes"
   exit 1
 fi
-for ((i=0; i < NUM_MINIONS; i++)) do
+for ((i=0; i < NUM_NODES; i++)) do
   NODE_IPS[$i]="${NODE_IP_BASE}$((i+2))"
   NODE_NAMES[$i]="${INSTANCE_PREFIX}-node-$((i+1))"
   NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24"
   NODE_CONTAINER_ADDRS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1"
   NODE_CONTAINER_NETMASKS[$i]="255.255.255.0"
 done
-NODE_CONTAINER_SUBNETS[$NUM_MINIONS]=$MASTER_CONTAINER_SUBNET
+NODE_CONTAINER_SUBNETS[$NUM_NODES]=$MASTER_CONTAINER_SUBNET
 
 SERVICE_CLUSTER_IP_RANGE=10.11.0.0/16  # formerly PORTAL_NET

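The 253-node cap in this hunk follows directly from the address scheme: node `i` gets `${NODE_IP_BASE}$((i+2))`, so all nodes share a single /24's host range. A sketch of the allocation (the IP base is an assumption for illustration):

```sh
# Illustrative: node IPs are carved out of one /24, offset by 2.
NODE_IP_BASE="192.168.10."   # assumed base, for illustration only
NUM_NODES=3
for ((i=0; i < NUM_NODES; i++)); do
  echo "node-$((i+1)) -> ${NODE_IP_BASE}$((i+2))"
done
# Prints 192.168.10.2 through 192.168.10.4; 253 nodes exhaust the /24.
```
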
@@ -167,8 +167,8 @@ function wait-cluster-readiness {
   local timeout=120
   while [[ $timeout -ne 0 ]]; do
     nb_ready_nodes=$("${kubectl}" get nodes -o go-template="{{range.items}}{{range.status.conditions}}{{.type}}{{end}}:{{end}}" --api-version=v1 2>/dev/null | tr ':' '\n' | grep -c Ready || true)
-    echo "Nb ready nodes: $nb_ready_nodes / $NUM_MINIONS"
-    if [[ "$nb_ready_nodes" -eq "$NUM_MINIONS" ]]; then
+    echo "Nb ready nodes: $nb_ready_nodes / $NUM_NODES"
+    if [[ "$nb_ready_nodes" -eq "$NUM_NODES" ]]; then
       return 0
     fi
 
@@ -191,8 +191,8 @@ function kube-up {
   readonly kubernetes_dir="$POOL_PATH/kubernetes"
 
   local i
-  for (( i = 0 ; i <= $NUM_MINIONS ; i++ )); do
-    if [[ $i -eq $NUM_MINIONS ]]; then
+  for (( i = 0 ; i <= $NUM_NODES ; i++ )); do
+    if [[ $i -eq $NUM_NODES ]]; then
       etcd2_initial_cluster[$i]="${MASTER_NAME}=http://${MASTER_IP}:2380"
     else
       etcd2_initial_cluster[$i]="${NODE_NAMES[$i]}=http://${NODE_IPS[$i]}:2380"
@@ -201,8 +201,8 @@ function kube-up {
   etcd2_initial_cluster=$(join , "${etcd2_initial_cluster[@]}")
   readonly machines=$(join , "${KUBE_NODE_IP_ADDRESSES[@]}")
 
-  for (( i = 0 ; i <= $NUM_MINIONS ; i++ )); do
-    if [[ $i -eq $NUM_MINIONS ]]; then
+  for (( i = 0 ; i <= $NUM_NODES ; i++ )); do
+    if [[ $i -eq $NUM_NODES ]]; then
       type=master
       name=$MASTER_NAME
       public_ip=$MASTER_IP
@@ -262,7 +262,7 @@ function upload-server-tars {
 function kube-push {
   kube-push-internal
   ssh-to-node "$MASTER_NAME" "sudo systemctl restart kube-apiserver kube-controller-manager kube-scheduler"
-  for ((i=0; i < NUM_MINIONS; i++)); do
+  for ((i=0; i < NUM_NODES; i++)); do
     ssh-to-node "${NODE_NAMES[$i]}" "sudo systemctl restart kubelet kube-proxy"
   done
   wait-cluster-readiness
@@ -317,7 +317,7 @@ function ssh-to-node {
   elif [[ "$node" == "$MASTER_NAME" ]]; then
     machine="$MASTER_IP"
   else
-    for ((i=0; i < NUM_MINIONS; i++)); do
+    for ((i=0; i < NUM_NODES; i++)); do
       if [[ "$node" == "${NODE_NAMES[$i]}" ]]; then
         machine="${NODE_IPS[$i]}"
         break

@@ -16,10 +16,10 @@
 
 ## Contains configuration values for interacting with the mesos/docker cluster
 
-NUM_MINIONS=${NUM_MINIONS:-2}
+NUM_NODES=${NUM_NODES:-2}
 INSTANCE_PREFIX="${INSTANCE_PREFIX:-kubernetes}"
 MASTER_NAME="${INSTANCE_PREFIX}-master"
-NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
+NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}}))
 
 SERVICE_CLUSTER_IP_RANGE=10.10.10.0/24

@@ -15,8 +15,8 @@
 # limitations under the License.
 
 ## Contains configuration values for interacting with the docker-compose cluster in test mode
-#Set NUM_MINIONS to minimum required for testing.
-NUM_MINIONS=2
+#Set NUM_NODES to minimum required for testing.
+NUM_NODES=2
 
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../..
 source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/config-default.sh"

@@ -283,8 +283,8 @@ function kube-up {
 
   echo "Starting ${KUBERNETES_PROVIDER} cluster" 1>&2
   cluster::mesos::docker::docker_compose up -d
-  echo "Scaling ${KUBERNETES_PROVIDER} cluster to ${NUM_MINIONS} slaves"
-  cluster::mesos::docker::docker_compose scale mesosslave=${NUM_MINIONS}
+  echo "Scaling ${KUBERNETES_PROVIDER} cluster to ${NUM_NODES} slaves"
+  cluster::mesos::docker::docker_compose scale mesosslave=${NUM_NODES}
 
   # await-health-check requires GNU timeout
   # apiserver hostname resolved by docker

@@ -8,7 +8,7 @@ These options apply across providers. There are additional documents for options
 
 This is a work-in-progress; not all options are documented yet!
 
-**NUM_MINIONS**
+**NUM_NODES**
 
 The number of minion instances to create. Most providers default this to 4.

@@ -16,7 +16,7 @@
 
 # Sane defaults for dev environments. The following variables can be easily overriden
 # by setting each as a ENV variable ahead of time:
-# KUBE_IMAGE, KUBE_MASTER_FLAVOR, KUBE_NODE_FLAVOR, NUM_MINIONS, NOVA_NETWORK and SSH_KEY_NAME
+# KUBE_IMAGE, KUBE_MASTER_FLAVOR, KUBE_NODE_FLAVOR, NUM_NODES, NOVA_NETWORK and SSH_KEY_NAME
 
 # Shared
 KUBE_IMAGE="${KUBE_IMAGE-f2a71670-ced3-4274-80b6-0efcd0f8f91b}" # CoreOS(Beta)
@@ -32,9 +32,9 @@ MASTER_TAG="tags=${INSTANCE_PREFIX}-master"
 
 # Node
 KUBE_NODE_FLAVOR="${KUBE_NODE_FLAVOR-general1-2}"
-NUM_MINIONS="${NUM_MINIONS-4}"
+NUM_NODES="${NUM_NODES-4}"
 NODE_TAG="tags=${INSTANCE_PREFIX}-node"
-NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-node-{1..${NUM_MINIONS}}))
+NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-node-{1..${NUM_NODES}}))
 KUBE_NETWORK="10.240.0.0/16"
 SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16"  # formerly PORTAL_NET

@@ -27,7 +27,7 @@ role=${role:-"ai i i"}
 export roles=($role)
 
 # Define minion numbers
-export NUM_MINIONS=${NUM_MINIONS:-3}
+export NUM_NODES=${NUM_NODES:-3}
 # define the IP range used for service cluster IPs.
 # according to rfc 1918 ref: https://tools.ietf.org/html/rfc1918 choose a private ip range here.
 export SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-192.168.3.0/24}  # formerly PORTAL_NET

@@ -17,8 +17,8 @@
 ## Contains configuration values for interacting with the Vagrant cluster
 
 # Number of minions in the cluster
-NUM_MINIONS=${NUM_MINIONS-"1"}
-export NUM_MINIONS
+NUM_NODES=${NUM_NODES-"1"}
+export NUM_NODES
 
 # The IP of the master
 export MASTER_IP=${MASTER_IP-"10.245.1.2"}
@@ -37,7 +37,7 @@ MASTER_CONTAINER_NETMASK="255.255.255.0"
 MASTER_CONTAINER_ADDR="${NODE_CONTAINER_SUBNET_BASE}.0.1"
 MASTER_CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.1/24"
 CONTAINER_SUBNET="${NODE_CONTAINER_SUBNET_BASE}.0.0/16"
-for ((i=0; i < NUM_MINIONS; i++)) do
+for ((i=0; i < NUM_NODES; i++)) do
   NODE_IPS[$i]="${NODE_IP_BASE}$((i+3))"
   NODE_NAMES[$i]="${INSTANCE_PREFIX}-minion-$((i+1))"
   NODE_CONTAINER_SUBNETS[$i]="${NODE_CONTAINER_SUBNET_BASE}.$((i+1)).1/24"

@@ -15,8 +15,8 @@
 # limitations under the License.
 
 ## Contains configuration values for interacting with the Vagrant cluster in test mode
-#Set NUM_MINIONS to minimum required for testing.
-NUM_MINIONS=2
+#Set NUM_NODES to minimum required for testing.
+NUM_NODES=2
 
 KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source "${KUBE_ROOT}/cluster/vagrant/config-default.sh"

@@ -59,7 +59,7 @@ cd "${KUBE_ROOT}"
 echo All verbose output will be redirected to $logfile, use --logfile option to change.
 
 printf "Start the cluster with 2 minions .. "
-export NUM_MINIONS=2
+export NUM_NODES=2
 export KUBERNETES_PROVIDER=vagrant
 
 (cluster/kube-up.sh >>"$logfile" 2>&1) || true

@@ -24,7 +24,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
 source "${KUBE_ROOT}/cluster/kube-env.sh"
 source "${KUBE_ROOT}/cluster/kube-util.sh"
 
-EXPECTED_NUM_NODES="${NUM_MINIONS}"
+EXPECTED_NUM_NODES="${NUM_NODES}"
 if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then
   EXPECTED_NUM_NODES=$((EXPECTED_NUM_NODES+1))
 fi

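The `+1` here accounts for the master's kubelet registering as an extra node; a minimal sketch of the expected-count arithmetic:

```sh
# Expected node count: the requested workers, plus one if the master's
# kubelet is itself registered as a node.
NUM_NODES=${NUM_NODES:-3}
EXPECTED_NUM_NODES="${NUM_NODES}"
if [[ "${REGISTER_MASTER_KUBELET:-}" == "true" ]]; then
  EXPECTED_NUM_NODES=$((EXPECTED_NUM_NODES+1))
fi
echo "validating against ${EXPECTED_NUM_NODES} nodes"
```
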
@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-NUM_MINIONS=4
+NUM_NODES=4
 DISK=./kube/kube.vmdk
 GUEST_ID=debian7_64Guest
 
@@ -26,8 +26,8 @@ MASTER_NAME="${INSTANCE_PREFIX}-master"
 MASTER_MEMORY_MB=1024
 MASTER_CPU=1
 
-NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
-NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
+NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}}))
+NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_NODES}}.0/24"))
 NODE_MEMORY_MB=2048
 NODE_CPU=1

@@ -14,7 +14,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-NUM_MINIONS=2
+NUM_NODES=2
 DISK=./kube/kube.vmdk
 GUEST_ID=debian7_64Guest
 
@@ -26,8 +26,8 @@ MASTER_NAME="${INSTANCE_PREFIX}-master"
 MASTER_MEMORY_MB=1024
 MASTER_CPU=1
 
-NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_MINIONS}}))
-NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_MINIONS}}.0/24"))
+NODE_NAMES=($(eval echo ${INSTANCE_PREFIX}-minion-{1..${NUM_NODES}}))
+NODE_IP_RANGES=($(eval echo "10.244.{1..${NUM_NODES}}.0/24"))
 NODE_MEMORY_MB=1024
 NODE_CPU=1

@@ -41,7 +41,7 @@ At v1.0, Kubernetes supports clusters up to 100 nodes with 30 pods per node and
 
 A cluster is a set of nodes (physical or virtual machines) running Kubernetes agents, managed by a "master" (the cluster-level control plane).
 
-Normally the number of nodes in a cluster is controlled by the the value `NUM_MINIONS` in the platform-specific `config-default.sh` file (for example, see [GCE's `config-default.sh`](http://releases.k8s.io/HEAD/cluster/gce/config-default.sh)).
+Normally the number of nodes in a cluster is controlled by the the value `NUM_NODES` in the platform-specific `config-default.sh` file (for example, see [GCE's `config-default.sh`](http://releases.k8s.io/HEAD/cluster/gce/config-default.sh)).
 
 Simply changing that value to something very large, however, may cause the setup script to fail for many cloud providers. A GCE deployment, for example, will run in to quota issues and fail to bring the cluster up.

@@ -250,7 +250,7 @@ cross-AZ-clusters are more convenient.
 * For auto-scaling, on each nodes it creates a launch configuration and group.
 The name for both is <*KUBE_AWS_INSTANCE_PREFIX*>-minion-group. The default
 name is kubernetes-minion-group. The auto-scaling group has a min and max size
-that are both set to NUM_MINIONS. You can change the size of the auto-scaling
+that are both set to NUM_NODES. You can change the size of the auto-scaling
 group to add or remove the total number of nodes from within the AWS API or
 Console. Each nodes self-configures, meaning that they come up; run Salt with
 the stored configuration; connect to the master; are assigned an internal CIDR;

@@ -301,7 +301,7 @@ Congratulations!
 The following will run all of the end-to-end testing scenarios assuming you set your environment in `cluster/kube-env.sh`:
 
 ```sh
-NUM_MINIONS=3 hack/e2e-test.sh
+NUM_NODES=3 hack/e2e-test.sh
 ```
 
 ### Troubleshooting
@@ -350,10 +350,10 @@ Are you sure you built a release first? Did you install `net-tools`? For more cl
 
 #### I want to change the number of nodes!
 
-You can control the number of nodes that are instantiated via the environment variable `NUM_MINIONS` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this, by setting `NUM_MINIONS` to 1 like so:
+You can control the number of nodes that are instantiated via the environment variable `NUM_NODES` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this, by setting `NUM_NODES` to 1 like so:
 
 ```sh
-export NUM_MINIONS=1
+export NUM_NODES=1
 ```
 
 #### I want my VMs to have more memory!

@@ -73,7 +73,7 @@ To start a Kubemark cluster on GCE you need to create an external cluster (it ca
 `make quick-release`) and run `test/kubemark/start-kubemark.sh` script. This script will create a VM for master components, Pods for HollowNodes and do all the setup necessary
 to let them talk to each other. It will use the configuration stored in `cluster/kubemark/config-default.sh` - you can tweak it however you want, but note that some features
 may not be implemented yet, as implementation of Hollow components/mocks will probably be lagging behind ‘real’ one. For performance tests interesting variables are
-`NUM_MINIONS` and `MASTER_SIZE`. After start-kubemark script is finished you’ll have a ready Kubemark cluster, a kubeconfig file for talking to the Kubemark
+`NUM_NODES` and `MASTER_SIZE`. After start-kubemark script is finished you’ll have a ready Kubemark cluster, a kubeconfig file for talking to the Kubemark
 cluster is stored in `test/kubemark/kubeconfig.loc`.
 
 Currently we're running HollowNode with limit of 0.05 a CPU core and ~60MB or memory, which taking into account default cluster addons and fluentD running on an 'external'

@@ -83,7 +83,7 @@ You can override the variables defined in [config-default.sh](http://releases.k8
 
 ```bash
 export KUBE_AWS_ZONE=eu-west-1c
-export NUM_MINIONS=2
+export NUM_NODES=2
 export NODE_SIZE=m3.medium
 export AWS_S3_REGION=eu-west-1
 export AWS_S3_BUCKET=mycompany-kubernetes-artifacts
@@ -91,7 +91,7 @@ export INSTANCE_PREFIX=k8s
 ...
 ```
 
-The scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_MINIONS}`, in particular for clusters less than 50 nodes it will
+The scripts will attempt to guess the correct size of the master and worker nodes based on `${NUM_NODES}`, in particular for clusters less than 50 nodes it will
 use a `t2.micro` for clusters between 50 and 150 nodes it will use a `t2.small` and for clusters with greater than 150 nodes it will use a `t2.medium`.
 
 It will also try to create or reuse a keypair called "kubernetes", and IAM profiles called "kubernetes-master" and "kubernetes-minion".

@@ -167,7 +167,7 @@ cluster/kube-up.sh
 
 The `KUBERNETES_PROVIDER` environment variable tells all of the various cluster management scripts which variant to use. If you forget to set this, the assumption is you are running on Google Compute Engine.
 
-The `NUM_MINIONS` environment variable may be set to specify the number of nodes to start. If it is not set, the number of nodes defaults to 3.
+The `NUM_NODES` environment variable may be set to specify the number of nodes to start. If it is not set, the number of nodes defaults to 3.
 
 The `KUBE_PUSH` environment variable may be set to specify which Kubernetes binaries must be deployed on the cluster. Its possible values are:
 
@@ -225,7 +225,7 @@ export KUBERNETES_PROVIDER=libvirt-coreos
 Bring up a libvirt-CoreOS cluster of 5 nodes
 
 ```sh
-NUM_MINIONS=5 cluster/kube-up.sh
+NUM_NODES=5 cluster/kube-up.sh
 ```
 
 Destroy the libvirt-CoreOS cluster

|
||||
- flanneld uses this network for next hop routing. These routes allow the containers running on each node to communicate with one another on this private network.
|
||||
2. A SSH key will be created and uploaded if needed. This key must be used to ssh into the machines (we do not capture the password).
|
||||
3. The master server and additional nodes will be created via the `nova` CLI. A `cloud-config.yaml` is generated and provided as user-data with the entire configuration for the systems.
|
||||
4. We then boot as many nodes as defined via `$NUM_MINIONS`.
|
||||
4. We then boot as many nodes as defined via `$NUM_NODES`.
|
||||
|
||||
## Some notes
|
||||
|
||||
|
@@ -116,7 +116,7 @@ export nodes="vcap@10.10.103.250 vcap@10.10.103.162 vcap@10.10.103.223"
 
 export role="ai i i"
 
-export NUM_MINIONS=${NUM_MINIONS:-3}
+export NUM_NODES=${NUM_NODES:-3}
 
 export SERVICE_CLUSTER_IP_RANGE=192.168.3.0/24
 
@@ -129,7 +129,7 @@ separated with blank space like `<user_1@ip_1> <user_2@ip_2> <user_3@ip_3> `
 Then the `role` variable defines the role of above machine in the same order, "ai" stands for machine
 acts as both master and node, "a" stands for master, "i" stands for node.
 
-The `NUM_MINIONS` variable defines the total number of nodes.
+The `NUM_NODES` variable defines the total number of nodes.
 
 The `SERVICE_CLUSTER_IP_RANGE` variable defines the kubernetes service IP range. Please make sure
 that you do have a valid private ip range defined here, because some IaaS provider may reserve private ips.

@@ -389,10 +389,10 @@ Log on to one of the nodes (`vagrant ssh node-1`) and inspect the salt minion lo
 
 #### I want to change the number of nodes!
 
-You can control the number of nodes that are instantiated via the environment variable `NUM_MINIONS` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this, by setting `NUM_MINIONS` to 1 like so:
+You can control the number of nodes that are instantiated via the environment variable `NUM_NODES` on your host machine. If you plan to work with replicas, we strongly encourage you to work with enough nodes to satisfy your largest intended replica size. If you do not plan to work with replicas, you can save some system resources by running with a single node. You do this, by setting `NUM_NODES` to 1 like so:
 
 ```sh
-export NUM_MINIONS=1
+export NUM_NODES=1
 ```
 
 #### I want my VMs to have more memory!

@@ -57,7 +57,7 @@ At the end of the example, we will have:
 
 ## Prerequisites
 
-You should already have turned up a Kubernetes cluster. To get the most of this example, ensure that Kubernetes will create more than one node (e.g. by setting your `NUM_MINIONS` environment variable to 2 or more).
+You should already have turned up a Kubernetes cluster. To get the most of this example, ensure that Kubernetes will create more than one node (e.g. by setting your `NUM_NODES` environment variable to 2 or more).
 
 ## Step 1: Start the RabbitMQ service

|
||||
|
||||
```
|
||||
$ export KUBERNETES_PROVIDER=vagrant
|
||||
$ export NUM_MINIONS=1
|
||||
$ export NUM_NODES=1
|
||||
$ export KUBE_ENABLE_CLUSTER_MONITORING=none
|
||||
$ export KUBE_ENABLE_CLUSTER_DNS=false
|
||||
$ export KUBE_ENABLE_CLUSTER_UI=false
|
||||
|
@@ -18,7 +18,7 @@
 # supports key features for Kubernetes version 1.0.
 
 # Instructions:
-#  - Setup a Kubernetes cluster with $NUM_MINIONS nodes (defined below).
+#  - Setup a Kubernetes cluster with $NUM_NODES nodes (defined below).
 #  - Provide a Kubeconfig file whose current context is set to the
 #    cluster to be tested, and with suitable auth setting.
 #  - Specify the location of that kubeconfig with, e.g.:
@@ -78,10 +78,10 @@ echo "Conformance test checking conformance with Kubernetes version 1.0"
 # somewhere in the description (i.e. either in the Describe part or the It part).
 # The list of tagged conformance tests can be retrieved by:
 #
-#   NUM_MINIONS=4 KUBERNETES_CONFORMANCE_TEST="y" \
+#   NUM_NODES=4 KUBERNETES_CONFORMANCE_TEST="y" \
 #     hack/ginkgo-e2e.sh -ginkgo.focus='\[Conformance\]' -ginkgo.dryRun=true
 
 declare -x KUBERNETES_CONFORMANCE_TEST="y"
-declare -x NUM_MINIONS=4
+declare -x NUM_NODES=4
 hack/ginkgo-e2e.sh -ginkgo.focus='\[Conformance\]' -ginkgo.skip='\[Skipped\]'
 exit $?

@@ -102,7 +102,7 @@ export PATH=$(dirname "${e2e_test}"):"${PATH}"
   --cluster-tag="${CLUSTER_ID:-}" \
   --repo-root="${KUBE_VERSION_ROOT}" \
   --node-instance-group="${NODE_INSTANCE_GROUP:-}" \
-  --num-nodes="${NUM_MINIONS:-}" \
+  --num-nodes="${NUM_NODES:-}" \
   --prefix="${KUBE_GCE_INSTANCE_PREFIX:-e2e}" \
   ${E2E_CLEAN_START:+"--clean-start=true"} \
   ${E2E_MIN_STARTUP_PODS:+"--minStartupPods=${E2E_MIN_STARTUP_PODS}"} \

@@ -102,12 +102,12 @@ fi
 if [[ "${KUBERNETES_PROVIDER}" == "aws" ]]; then
   if [[ "${PERFORMANCE:-}" == "true" ]]; then
     : ${MASTER_SIZE:="m3.xlarge"}
-    : ${NUM_MINIONS:="100"}
+    : ${NUM_NODES:="100"}
     : ${GINKGO_TEST_ARGS:="--ginkgo.focus=\[Performance\]"}
   else
     : ${MASTER_SIZE:="m3.large"}
     : ${NODE_SIZE:="m3.large"}
-    : ${NUM_MINIONS:="3"}
+    : ${NUM_NODES:="3"}
   fi
 fi

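The `: ${VAR:=value}` lines throughout this script set a default only when the variable is unset or empty; `:` is the shell's no-op, run purely for the expansion's side effect. A quick demonstration:

```sh
# ':' evaluates its arguments and discards them; ${VAR:=value} assigns
# value to VAR only when VAR is unset or empty.
unset NUM_NODES
: ${NUM_NODES:=5}
echo "${NUM_NODES}"   # 5 (default applied)
NUM_NODES=7
: ${NUM_NODES:=5}
echo "${NUM_NODES}"   # 7 (existing value preserved)
```
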
@@ -354,7 +354,7 @@ case ${JOB_NAME} in
     : ${PROJECT:="kubernetes-jenkins-pull"}
     : ${ENABLE_DEPLOYMENTS:=true}
     # Override GCE defaults
-    NUM_MINIONS=${NUM_NODES_PARALLEL}
+    NUM_NODES=${NUM_NODES_PARALLEL}
     ;;
 
   # Runs all non-flaky tests on GCE in parallel.
@@ -373,7 +373,7 @@ case ${JOB_NAME} in
     : ${PROJECT:="kubernetes-jenkins"}
     : ${ENABLE_DEPLOYMENTS:=true}
     # Override GCE defaults
-    NUM_MINIONS=${NUM_NODES_PARALLEL}
+    NUM_NODES=${NUM_NODES_PARALLEL}
     ;;
 
   # Runs all non-flaky tests on AWS in parallel.
@@ -390,7 +390,7 @@ case ${JOB_NAME} in
       )"}
     : ${ENABLE_DEPLOYMENTS:=true}
     # Override AWS defaults.
-    NUM_MINIONS=${NUM_NODES_PARALLEL}
+    NUM_NODES=${NUM_NODES_PARALLEL}
     ;;
 
   # Runs the flaky tests on GCE in parallel.
@@ -409,7 +409,7 @@ case ${JOB_NAME} in
     : ${PROJECT:="k8s-jkns-e2e-gce-prl-flaky"}
     : ${FAIL_ON_GCP_RESOURCE_LEAK:="true"}
     # Override GCE defaults.
-    NUM_MINIONS=${NUM_NODES_PARALLEL}
+    NUM_NODES=${NUM_NODES_PARALLEL}
     ;;
 
   # Runs only the reboot tests on GCE.

@@ -432,7 +432,7 @@ case ${JOB_NAME} in
     MASTER_SIZE="n1-standard-4"
     NODE_SIZE="n1-standard-2"
     NODE_DISK_SIZE="50GB"
-    NUM_MINIONS="100"
+    NUM_NODES="100"
     # Reduce logs verbosity
     TEST_CLUSTER_LOG_LEVEL="--v=2"
     # Increase resync period to simulate production
@@ -454,7 +454,7 @@ case ${JOB_NAME} in
     MASTER_SIZE="n1-standard-4"
     NODE_SIZE="n1-standard-2"
     NODE_DISK_SIZE="50GB"
-    NUM_MINIONS="100"
+    NUM_NODES="100"
     # Reduce logs verbosity
     TEST_CLUSTER_LOG_LEVEL="--v=2"
     # Increase resync period to simulate production

@@ -1195,7 +1195,7 @@ case ${JOB_NAME} in
     : ${E2E_UP:="true"}
     : ${E2E_TEST:="false"}
     : ${E2E_DOWN:="false"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
     ;;
 
   kubernetes-upgrade-gce-step2-upgrade-master)
@@ -1208,7 +1208,7 @@ case ${JOB_NAME} in
     : ${E2E_TEST:="true"}
     : ${E2E_DOWN:="false"}
     : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-master"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
     : ${KUBE_ENABLE_DEPLOYMENTS:=true}
     : ${KUBE_ENABLE_DAEMONSETS:=true}
     ;;
@@ -1230,7 +1230,7 @@ case ${JOB_NAME} in
       ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
       ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
       )"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
     ;;
 
   kubernetes-upgrade-gce-step4-upgrade-cluster)
@@ -1243,7 +1243,7 @@ case ${JOB_NAME} in
     : ${E2E_TEST:="true"}
     : ${E2E_DOWN:="false"}
     : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-cluster"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
     : ${KUBE_ENABLE_DEPLOYMENTS:=true}
     : ${KUBE_ENABLE_DAEMONSETS:=true}
     ;;
@@ -1263,7 +1263,7 @@ case ${JOB_NAME} in
      ${GCE_DEFAULT_SKIP_TESTS[@]:+${GCE_DEFAULT_SKIP_TESTS[@]}} \
      ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
      )"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
     ;;
 
   kubernetes-upgrade-gce-step6-e2e-new)

@@ -1282,7 +1282,7 @@ case ${JOB_NAME} in
      ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
      ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
      )"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
     ;;
 
   # kubernetes-upgrade-gce-1.0-current-release
@@ -1305,7 +1305,7 @@ case ${JOB_NAME} in
     : ${E2E_TEST:="false"}
     : ${E2E_DOWN:="false"}
     : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
     ;;
 
   kubernetes-upgrade-1.0-current-release-gce-step2-upgrade-master)
@@ -1321,7 +1321,7 @@ case ${JOB_NAME} in
     : ${E2E_DOWN:="false"}
     : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-master --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
     : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
     : ${KUBE_ENABLE_DEPLOYMENTS:=true}
     : ${KUBE_ENABLE_DAEMONSETS:=true}
     ;;
@@ -1342,7 +1342,7 @@ case ${JOB_NAME} in
      ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
      )"}
     : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
     ;;
 
   kubernetes-upgrade-1.0-current-release-gce-step4-upgrade-cluster)
@@ -1358,7 +1358,7 @@ case ${JOB_NAME} in
     : ${E2E_DOWN:="false"}
     : ${GINKGO_TEST_ARGS:="--ginkgo.focus=Cluster\sUpgrade.*upgrade-cluster --upgrade-target=${CURRENT_RELEASE_PUBLISHED_VERSION}"}
     : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
     : ${KUBE_ENABLE_DEPLOYMENTS:=true}
     : ${KUBE_ENABLE_DAEMONSETS:=true}
     ;;
@@ -1379,7 +1379,7 @@ case ${JOB_NAME} in
      ${GCE_FLAKY_TESTS[@]:+${GCE_FLAKY_TESTS[@]}} \
      )"}
     : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
     ;;
 
   kubernetes-upgrade-1.0-current-release-gce-step6-e2e-new)

@@ -1400,7 +1400,7 @@ case ${JOB_NAME} in
      ${GCE_SLOW_TESTS[@]:+${GCE_SLOW_TESTS[@]}} \
      )"}
     : ${KUBE_GCE_INSTANCE_PREFIX:="e2e-upgrade-1-0"}
-    : ${NUM_MINIONS:=5}
+    : ${NUM_NODES:=5}
     ;;
 
   # Run Kubemark test on a fake 100 node cluster to have a comparison
@@ -1415,7 +1415,7 @@ case ${JOB_NAME} in
     : ${USE_KUBEMARK:="true"}
     # Override defaults to be indpendent from GCE defaults and set kubemark parameters
     KUBE_GCE_INSTANCE_PREFIX="kubemark100"
-    NUM_MINIONS="10"
+    NUM_NODES="10"
     MASTER_SIZE="n1-standard-2"
     NODE_SIZE="n1-standard-1"
     KUBEMARK_MASTER_SIZE="n1-standard-4"

@@ -1433,7 +1433,7 @@ case ${JOB_NAME} in
     : ${E2E_TEST:="false"}
     : ${USE_KUBEMARK:="true"}
     # Override defaults to be indpendent from GCE defaults and set kubemark parameters
-    NUM_MINIONS="6"
+    NUM_NODES="6"
     MASTER_SIZE="n1-standard-4"
     NODE_SIZE="n1-standard-8"
     KUBE_GCE_INSTANCE_PREFIX="kubemark500"
@@ -1453,10 +1453,10 @@ case ${JOB_NAME} in
     : ${USE_KUBEMARK:="true"}
     # Override defaults to be indpendent from GCE defaults and set kubemark parameters
     # We need 11 so that we won't hit max-pods limit (set to 100). TODO: do it in a nicer way.
-    NUM_MINIONS="11"
+    NUM_NODES="11"
     MASTER_SIZE="n1-standard-4"
     NODE_SIZE="n1-standard-8"   # Note: can fit about 17 hollow nodes per core
-                                #       so NUM_MINIONS x cores_per_minion should
+                                #       so NUM_NODES x cores_per_minion should
                                 #       be set accordingly.
     KUBE_GCE_INSTANCE_PREFIX="kubemark1000"
     E2E_ZONE="asia-east1-a"

@@ -1502,7 +1502,7 @@ export KUBE_ENABLE_EXPERIMENTAL_API=${ENABLE_EXPERIMENTAL_API:-}
 export MASTER_SIZE=${MASTER_SIZE:-}
 export NODE_SIZE=${NODE_SIZE:-}
 export NODE_DISK_SIZE=${NODE_DISK_SIZE:-}
-export NUM_MINIONS=${NUM_MINIONS:-}
+export NUM_NODES=${NUM_NODES:-}
 export TEST_CLUSTER_LOG_LEVEL=${TEST_CLUSTER_LOG_LEVEL:-}
 export TEST_CLUSTER_RESYNC_PERIOD=${TEST_CLUSTER_RESYNC_PERIOD:-}
 export PROJECT=${PROJECT:-}

@@ -1715,15 +1715,15 @@ fi
 ### Start Kubemark ###
 if [[ "${USE_KUBEMARK:-}" == "true" ]]; then
   export RUN_FROM_DISTRO=true
-  NUM_NODES_BKP=${NUM_MINIONS}
+  NUM_NODES_BKP=${NUM_NODES}
   MASTER_SIZE_BKP=${MASTER_SIZE}
   ./test/kubemark/stop-kubemark.sh
-  NUM_MINIONS=${KUBEMARK_NUM_NODES:-$NUM_MINIONS}
+  NUM_NODES=${KUBEMARK_NUM_NODES:-$NUM_NODES}
   MASTER_SIZE=${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE}
   ./test/kubemark/start-kubemark.sh
   ./test/kubemark/run-e2e-tests.sh --ginkgo.focus="should\sallow\sstarting\s30\spods\sper\snode" --delete-namespace="false" --gather-resource-usage="false"
   ./test/kubemark/stop-kubemark.sh
-  NUM_MINIONS=${NUM_NODES_BKP}
+  NUM_NODES=${NUM_NODES_BKP}
   MASTER_SIZE=${MASTER_SIZE_BKP}
   unset RUN_FROM_DISTRO
   unset NUM_NODES_BKP

@@ -205,7 +205,7 @@ contexts:
 current-context: kubemark-context
 EOF
 
-sed "s/##numreplicas##/${NUM_MINIONS:-10}/g" ${KUBE_ROOT}/test/kubemark/hollow-node_template.json > ${KUBE_ROOT}/test/kubemark/hollow-node.json
+sed "s/##numreplicas##/${NUM_NODES:-10}/g" ${KUBE_ROOT}/test/kubemark/hollow-node_template.json > ${KUBE_ROOT}/test/kubemark/hollow-node.json
 sed -i'' -e "s/##project##/${PROJECT}/g" ${KUBE_ROOT}/test/kubemark/hollow-node.json
 kubectl create -f ${KUBE_ROOT}/test/kubemark/kubemark-ns.json
 kubectl create -f ${KUBECONFIG_SECRET} --namespace="kubemark"
@@ -215,7 +215,7 @@ rm ${KUBECONFIG_SECRET}
 
 echo "Waiting for all HollowNodes to become Running..."
 echo "This can loop forever if something crashed."
-until [[ "$(kubectl --kubeconfig=${KUBE_ROOT}/test/kubemark/kubeconfig.loc get node | grep Ready | wc -l)" == "${NUM_MINIONS}" ]]; do
+until [[ "$(kubectl --kubeconfig=${KUBE_ROOT}/test/kubemark/kubeconfig.loc get node | grep Ready | wc -l)" == "${NUM_NODES}" ]]; do
   echo -n .
   sleep 1
 done

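The first `sed` line is plain token templating: every `##numreplicas##` in the HollowNode template becomes the node count. A self-contained sketch (the template content and path are illustrative, not the real file):

```sh
# Illustrative token templating in the style of the sed line above.
cat > /tmp/hollow-node-template.json <<'EOF'
{"kind": "ReplicationController", "spec": {"replicas": ##numreplicas##}}
EOF
sed "s/##numreplicas##/${NUM_NODES:-10}/g" /tmp/hollow-node-template.json
# -> {"kind": "ReplicationController", "spec": {"replicas": 10}}
```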