diff --git a/cluster/kubemark/iks/config-default.sh b/cluster/kubemark/iks/config-default.sh index 62c041a6faf..779dee1e9df 100644 --- a/cluster/kubemark/iks/config-default.sh +++ b/cluster/kubemark/iks/config-default.sh @@ -25,8 +25,16 @@ CLUSTER_LOCATION="${CLUSTER_LOCATION:-wdc06}" REGISTRY_LOGIN_URL="${REGISTRY_LOGIN_URL:-https://api.ng.bluemix.net}" # User defined +# number of real workers in spawnTester cluster NUM_NODES="${NUM_NODES:-2}" +# spec of real workers in spawnTester cluster +NODE_SIZE=${NODE_SIZE:-u2c.2x4} DESIRED_NODES="${DESIRED_NODES:-10}" +# number of hollow nodes +# TODO: once IKS supports `kubetest`, i.e. the base cluster provisioning implemented in `kubetest` +# we can use NUM_NODES=${KUBEMARK_NUM_NODES:-10} to avoid usage of KUBEMARK_NUM_NODES +# more context, see https://github.com/kubernetes/kubernetes/pull/76909#discussion_r277735942 +KUBEMARK_NUM_NODES="${KUBEMARK_NUM_NODES:-10}" ENABLE_KUBEMARK_CLUSTER_AUTOSCALER="${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER:-true}" ENABLE_KUBEMARK_KUBE_DNS="${ENABLE_KUBEMARK_KUBE_DNS:-false}" KUBELET_TEST_LOG_LEVEL="${KUBELET_TEST_LOG_LEVEL:-"--v=2"}" diff --git a/test/kubemark/iks/startup.sh b/test/kubemark/iks/startup.sh index c5bcd9f3f1a..ea574c5a024 100644 --- a/test/kubemark/iks/startup.sh +++ b/test/kubemark/iks/startup.sh @@ -227,7 +227,7 @@ EOF # Create the replication controller for hollow-nodes. # We allow to override the NUM_REPLICAS when running Cluster Autoscaler. 
- NUM_REPLICAS=${NUM_REPLICAS:-${NUM_NODES}} + NUM_REPLICAS=${NUM_REPLICAS:-${KUBEMARK_NUM_NODES}} sed "s/{{numreplicas}}/${NUM_REPLICAS}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.yaml" > "${RESOURCE_DIRECTORY}/hollow-node.yaml" proxy_cpu=20 if [ "${NUM_NODES}" -gt 1000 ]; then @@ -240,9 +240,8 @@ EOF sed -i'' -e "s'{{kubemark_image_registry}}'${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" sed -i'' -e "s/{{kubemark_image_tag}}/${KUBEMARK_IMAGE_TAG}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" sed -i'' -e "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" - sed -i'' -e "s/{{kubelet_verbosity_level}}/${KUBELET_TEST_LOG_LEVEL}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" - sed -i'' -e "s/{{kubeproxy_verbosity_level}}/${KUBEPROXY_TEST_LOG_LEVEL}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" - sed -i'' -e "s/{{use_real_proxier}}/${USE_REAL_PROXIER}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s/{{hollow_kubelet_params}}/${HOLLOW_KUBELET_TEST_ARGS:-}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" + sed -i'' -e "s/{{hollow_proxy_params}}/${HOLLOW_PROXY_TEST_ARGS:-}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" sed -i'' -e "s'{{kubemark_mig_config}}'${KUBEMARK_MIG_CONFIG:-}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml" "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" --namespace="kubemark" @@ -253,7 +252,8 @@ EOF function wait-for-hollow-nodes-to-run-or-timeout { echo -n "Waiting for all hollow-nodes to become Running" start=$(date +%s) - nodes=$("${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node 2> /dev/null) || true + # IKS uses a real cluster for hollow master, so we need to exclude the real worker nodes + nodes=$("${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node 2> /dev/null | grep hollow-node) || true ready=$(($(echo "${nodes}" | grep -vc "NotReady") - 1)) until [[ "${ready}" -ge "${NUM_REPLICAS}" ]]; do echo -n "." 
diff --git a/test/kubemark/iks/util.sh b/test/kubemark/iks/util.sh index de91618f9a8..b8f74dffb0a 100644 --- a/test/kubemark/iks/util.sh +++ b/test/kubemark/iks/util.sh @@ -20,39 +20,44 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../../.. function create-clusters { # shellcheck disable=SC2154 # Color defined in sourced script echo -e "${color_yellow}CHECKING CLUSTERS${color_norm}" - if bx cs clusters | grep -Fq 'deleting'; then + if ibmcloud ks clusters | grep -Fq 'deleting'; then echo -n "Deleting old clusters" fi - while bx cs clusters | grep -Fq 'deleting' + while ibmcloud ks clusters | grep -Fq 'deleting' do echo -n "." sleep 10 done echo "" - bx cs region-set us-east >/dev/null - bx cs vlans wdc06 >/dev/null - PRIVLAN=$(bx cs vlans wdc06 --json | jq '. | .[] | select(.type == "private") | .id' | sed -e "s/\"//g") - PUBVLAN=$(bx cs vlans wdc06 --json | jq '. | .[] | select(.type == "public") | .id' | sed -e "s/\"//g") - if ! bx cs clusters | grep -Fq 'kubeSpawnTester'; then + ibmcloud ks region-set us-east >/dev/null + ibmcloud ks vlans wdc06 >/dev/null + PRIVLAN=$(ibmcloud ks vlans wdc06 --json | jq '. | .[] | select(.type == "private") | .id' | sed -e "s/\"//g") + PUBVLAN=$(ibmcloud ks vlans wdc06 --json | jq '. | .[] | select(.type == "public") | .id' | sed -e "s/\"//g") + if ! ibmcloud ks clusters | grep -Fq 'kubeSpawnTester'; then echo "Creating spawning cluster" - bx cs cluster-create --location "${CLUSTER_LOCATION}" --public-vlan "${PUBVLAN}" --private-vlan "${PRIVLAN}" --workers 2 --machine-type u2c.2x4 --name kubeSpawnTester + # make number and spec of worker nodes configurable + # otherwise it can't handle tests like kubemark-5000 + # TODO: dynamically adjust the number and spec + ibmcloud ks cluster-create --location "${CLUSTER_LOCATION}" --public-vlan "${PUBVLAN}" --private-vlan "${PRIVLAN}" --workers "${NUM_NODES:-2}" --machine-type "${NODE_SIZE}" --name kubeSpawnTester fi - if ! bx cs clusters | grep -Fq 'kubeMasterTester'; then + if ! 
ibmcloud ks clusters | grep -Fq 'kubeMasterTester'; then echo "Creating master cluster" - bx cs cluster-create --location "${CLUSTER_LOCATION}" --public-vlan "${PUBVLAN}" --private-vlan "${PRIVLAN}" --workers 2 --machine-type u2c.2x4 --name kubeMasterTester + # if we can't make it a bare master (workers = 0) + # then make workers = 1 with the smallest machine spec + ibmcloud ks cluster-create --location "${CLUSTER_LOCATION}" --public-vlan "${PUBVLAN}" --private-vlan "${PRIVLAN}" --workers 1 --machine-type u2c.2x4 --name kubeMasterTester fi push-image - if ! bx cs clusters | grep 'kubeSpawnTester' | grep -Fq 'normal'; then + if ! ibmcloud ks clusters | grep 'kubeSpawnTester' | grep -Fq 'normal'; then # shellcheck disable=SC2154 # Color defined in sourced script echo -e "${color_cyan}Warning: new clusters may take up to 60 minutes to be ready${color_norm}" echo -n "Clusters loading" fi - while ! bx cs clusters | grep 'kubeSpawnTester' | grep -Fq 'normal' + while ! ibmcloud ks clusters | grep 'kubeSpawnTester' | grep -Fq 'normal' do echo -n "." sleep 5 done - while ! bx cs clusters | grep 'kubeMasterTester' | grep -Fq 'normal' + while ! ibmcloud ks clusters | grep 'kubeMasterTester' | grep -Fq 'normal' do echo -n "." sleep 5 @@ -63,17 +68,17 @@ function create-clusters { # Builds and pushes image to registry function push-image { if [[ "${ISBUILD}" = "y" ]]; then - if ! bx cr namespaces | grep -Fq "${KUBE_NAMESPACE}"; then + if ! 
ibmcloud cr namespaces | grep -Fq "${KUBE_NAMESPACE}"; then echo "Creating registry namespace" - bx cr namespace-add "${KUBE_NAMESPACE}" - echo "bx cr namespace-rm ${KUBE_NAMESPACE}" >> "${RESOURCE_DIRECTORY}/iks-namespacelist.sh" + ibmcloud cr namespace-add "${KUBE_NAMESPACE}" + echo "ibmcloud cr namespace-rm ${KUBE_NAMESPACE}" >> "${RESOURCE_DIRECTORY}/iks-namespacelist.sh" fi docker build -t "${KUBEMARK_INIT_TAG}" "${KUBEMARK_IMAGE_LOCATION}" docker tag "${KUBEMARK_INIT_TAG}" "${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}/${PROJECT}:${KUBEMARK_IMAGE_TAG}" docker push "${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}/${PROJECT}:${KUBEMARK_IMAGE_TAG}" echo "Image pushed" else - KUBEMARK_IMAGE_REGISTRY="brandondr96" + KUBEMARK_IMAGE_REGISTRY="${KUBEMARK_IMAGE_REGISTRY:-brandondr96}" KUBE_NAMESPACE="" fi } @@ -122,7 +127,7 @@ function master-config { if [[ "${USE_EXISTING}" = "y" ]]; then export KUBECONFIG=${CUSTOM_MASTER_CONFIG} else - eval "$(bx cs cluster-config kubeMasterTester --admin | grep export)" + eval "$(ibmcloud ks cluster-config kubeMasterTester --admin | grep export)" fi } @@ -133,20 +138,20 @@ function spawn-config { if [[ "${USE_EXISTING}" = "y" ]]; then export KUBECONFIG=${CUSTOM_SPAWN_CONFIG} else - eval "$(bx cs cluster-config kubeSpawnTester --admin | grep export)" + eval "$(ibmcloud ks cluster-config kubeSpawnTester --admin | grep export)" fi } # Deletes existing clusters function delete-clusters { echo "DELETING CLUSTERS" - bx cs cluster-rm kubeSpawnTester - bx cs cluster-rm kubeMasterTester - while ! bx cs clusters | grep 'kubeSpawnTester' | grep -Fq 'deleting' - do - sleep 5 - done - while ! bx cs clusters | grep 'kubeMasterTester' | grep -Fq 'deleting' + ibmcloud ks cluster-rm kubeSpawnTester + ibmcloud ks cluster-rm kubeMasterTester + while ! ibmcloud ks clusters | grep 'kubeSpawnTester' | grep -Fq 'deleting' + do + sleep 5 + done + while ! 
ibmcloud ks clusters | grep 'kubeMasterTester' | grep -Fq 'deleting' do sleep 5 done @@ -159,14 +164,14 @@ function complete-login { echo -n -e "Do you have a federated IBM cloud login? [y/N]${color_cyan}>${color_norm} " read -r ISFED if [[ "${ISFED}" = "y" ]]; then - bx login --sso -a "${REGISTRY_LOGIN_URL}" + ibmcloud login --sso -a "${REGISTRY_LOGIN_URL}" elif [[ "${ISFED}" = "N" ]]; then - bx login -a "${REGISTRY_LOGIN_URL}" + ibmcloud login -a "${REGISTRY_LOGIN_URL}" else echo -e "${color_red}Invalid response, please try again:${color_norm}" complete-login fi - bx cr login + ibmcloud cr login } # Generate values to fill the hollow-node configuration templates.