Merge pull request #73746 from mrbobbytables/kubemark-shellcheck

Fix shellcheck lint errors in Kubemark scripts
Kubernetes Prow Robot 2019-02-25 17:25:13 -08:00 committed by GitHub
commit 0ff7e463ee
15 changed files with 184 additions and 122 deletions

View File

@ -19,6 +19,8 @@
# gce/util.sh script which assumes config filename), but if some things that
# are enabled by default should not run in hollow clusters, they should be disabled here.
# shellcheck disable=SC2034 # Variables sourced in other scripts.
source "${KUBE_ROOT}/cluster/gce/config-common.sh"
GCLOUD=gcloud
@ -115,7 +117,7 @@ ENABLE_KUBEMARK_CLUSTER_AUTOSCALER="${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER:-false}
# (e.g. kubemark master, Heapster) enough resources to handle maximum cluster size.
if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER}" == "true" ]]; then
NUM_REPLICAS=1
if [[ ! -z "$NUM_NODES" ]]; then
if [[ -n "$NUM_NODES" ]]; then
echo "WARNING: Using Cluster Autoscaler, ignoring NUM_NODES parameter. Set KUBEMARK_AUTOSCALER_MAX_NODES to specify maximum size of the cluster."
fi
fi
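
For context, the `! -z` to `-n` change above is ShellCheck SC2236: `[[ -n "$var" ]]` is the direct form of "non-empty" and behaves identically. A minimal sketch with a hypothetical variable:

num_nodes="10"   # hypothetical value
if [[ -n "${num_nodes}" ]]; then
    echo "NUM_NODES is set to ${num_nodes}"
fi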

View File

@ -16,6 +16,7 @@
# Cloud information
RANDGEN=$(dd if=/dev/urandom bs=64 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=16 count=1 2>/dev/null | sed 's/[A-Z]//g')
# shellcheck disable=2034 # Variable sourced in other scripts.
KUBE_NAMESPACE="kubemark_${RANDGEN}"
KUBEMARK_IMAGE_TAG="${KUBEMARK_IMAGE_TAG:-2}"
KUBEMARK_IMAGE_LOCATION="${KUBEMARK_IMAGE_LOCATION:-${KUBE_ROOT}/cluster/images/kubemark}"
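
For context, SC2034 fires when a variable is assigned but never read in the same file; for variables consumed by scripts that source this one, the fix is a targeted directive on the assignment rather than a code change. A minimal sketch with a hypothetical variable:

# shellcheck disable=SC2034  # consumed by scripts that source this file
EXAMPLE_SHARED_VAR="value"   # hypothetical variable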

View File

@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source ${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh
source ${KUBE_ROOT}/cluster/${CLOUD_PROVIDER}/util.sh
source ${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh"
source "${KUBE_ROOT}/cluster/${CLOUD_PROVIDER}/util.sh"
source "${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh"

View File

@ -18,9 +18,6 @@
./cluster/gce/upgrade.sh
./cluster/gce/util.sh
./cluster/images/conformance/run_e2e.sh
./cluster/kubemark/gce/config-default.sh
./cluster/kubemark/iks/config-default.sh
./cluster/kubemark/util.sh
./cluster/log-dump/log-dump.sh
./cluster/pre-existing/util.sh
./cluster/restore-from-backup.sh
@ -133,16 +130,6 @@
./test/images/volume/rbd/create_block.sh
./test/images/volume/rbd/mon.sh
./test/images/volume/rbd/osd.sh
./test/kubemark/common/util.sh
./test/kubemark/gce/util.sh
./test/kubemark/iks/shutdown.sh
./test/kubemark/iks/startup.sh
./test/kubemark/iks/util.sh
./test/kubemark/master-log-dump.sh
./test/kubemark/resources/start-kubemark-master.sh
./test/kubemark/run-e2e-tests.sh
./test/kubemark/start-kubemark.sh
./test/kubemark/stop-kubemark.sh
./third_party/forked/shell2junit/sh2ju.sh
./third_party/intemp/intemp.sh
./third_party/multiarch/qemu-user-static/register/qemu-binfmt-conf.sh

View File

@ -17,7 +17,7 @@
# Running cmd $RETRIES times in case of failures.
function run-cmd-with-retries {
RETRIES="${RETRIES:-3}"
for attempt in $(seq 1 ${RETRIES}); do
for attempt in $(seq 1 "${RETRIES}"); do
local ret_val=0
exec 5>&1 # Duplicate &1 to &5 for use below.
# We don't use 'local' to declare result as then ret_val always gets value 0.
@ -26,19 +26,24 @@ function run-cmd-with-retries {
if [[ "${ret_val:-0}" -ne "0" ]]; then
if [[ $(echo "${result}" | grep -c "already exists") -gt 0 ]]; then
if [[ "${attempt}" == 1 ]]; then
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_red}Failed to $1 $2 ${3:-} as the resource hasn't been deleted from a previous run.${color_norm}" >& 2
exit 1
fi
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_yellow}Succeeded to $1 $2 ${3:-} in the previous attempt, but status response wasn't received.${color_norm}"
return 0
fi
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_yellow}Attempt $attempt failed to $1 $2 ${3:-}. Retrying.${color_norm}" >& 2
sleep $(($attempt * 5))
sleep $((attempt * 5))
else
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_green}Succeeded to $1 $2 ${3:-}.${color_norm}"
return 0
fi
done
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_red}Failed to $1 $2 ${3:-}.${color_norm}" >& 2
exit 1
}
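
For context, `$(($attempt * 5))` triggers SC2004 because `$`/`${}` on plain variables inside `$(( ))` is redundant; the arithmetic context dereferences names itself. A minimal sketch:

attempt=3
sleep_secs=$((attempt * 5))   # no $ needed on attempt inside (( ))
echo "would sleep ${sleep_secs}s before retry ${attempt}"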

View File

@ -14,7 +14,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../..
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../../..
source "${KUBE_ROOT}/test/kubemark/common/util.sh"
@ -47,7 +47,7 @@ function get-or-create-master-ip {
}
function create-master-instance-with-resources {
GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE}"
GCLOUD_COMMON_ARGS=(--project "${PROJECT}" --zone "${ZONE}")
# Override the master image project to cos-cloud for COS images starting with the `cos` string prefix.
DEFAULT_GCI_PROJECT=google-containers
if [[ "${GCI_VERSION}" == "cos"* ]]; then
@ -56,13 +56,13 @@ function create-master-instance-with-resources {
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-${DEFAULT_GCI_PROJECT}}
run-gcloud-compute-with-retries disks create "${MASTER_NAME}-pd" \
${GCLOUD_COMMON_ARGS} \
"${GCLOUD_COMMON_ARGS[@]}" \
--type "${MASTER_DISK_TYPE}" \
--size "${MASTER_DISK_SIZE}" &
if [ "${EVENT_PD:-}" == "true" ]; then
run-gcloud-compute-with-retries disks create "${MASTER_NAME}-event-pd" \
${GCLOUD_COMMON_ARGS} \
"${GCLOUD_COMMON_ARGS[@]}" \
--type "${MASTER_DISK_TYPE}" \
--size "${MASTER_DISK_SIZE}" &
fi
@ -72,7 +72,7 @@ function create-master-instance-with-resources {
wait
run-gcloud-compute-with-retries instances create "${MASTER_NAME}" \
${GCLOUD_COMMON_ARGS} \
"${GCLOUD_COMMON_ARGS[@]}" \
--address "${MASTER_IP}" \
--machine-type "${MASTER_SIZE}" \
--image-project="${MASTER_IMAGE_PROJECT}" \
@ -84,13 +84,13 @@ function create-master-instance-with-resources {
--disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no"
run-gcloud-compute-with-retries instances add-metadata "${MASTER_NAME}" \
${GCLOUD_COMMON_ARGS} \
"${GCLOUD_COMMON_ARGS[@]}" \
--metadata-from-file startup-script="${KUBE_ROOT}/test/kubemark/resources/start-kubemark-master.sh" &
if [ "${EVENT_PD:-}" == "true" ]; then
echo "Attaching ${MASTER_NAME}-event-pd to ${MASTER_NAME}"
run-gcloud-compute-with-retries instances attach-disk "${MASTER_NAME}" \
${GCLOUD_COMMON_ARGS} \
"${GCLOUD_COMMON_ARGS[@]}" \
--disk "${MASTER_NAME}-event-pd" \
--device-name="master-event-pd" &
fi
@ -112,20 +112,20 @@ function execute-cmd-on-master-with-retries() {
}
function copy-files() {
run-gcloud-compute-with-retries scp --recurse --zone="${ZONE}" --project="${PROJECT}" $@
run-gcloud-compute-with-retries scp --recurse --zone="${ZONE}" --project="${PROJECT}" "$@"
}
function delete-master-instance-and-resources {
GCLOUD_COMMON_ARGS="--project ${PROJECT} --zone ${ZONE} --quiet"
GCLOUD_COMMON_ARGS=(--project "${PROJECT}" --zone "${ZONE}" --quiet)
gcloud compute instances delete "${MASTER_NAME}" \
${GCLOUD_COMMON_ARGS} || true
"${GCLOUD_COMMON_ARGS[@]}" || true
gcloud compute disks delete "${MASTER_NAME}-pd" \
${GCLOUD_COMMON_ARGS} || true
"${GCLOUD_COMMON_ARGS[@]}" || true
gcloud compute disks delete "${MASTER_NAME}-event-pd" \
${GCLOUD_COMMON_ARGS} &> /dev/null || true
"${GCLOUD_COMMON_ARGS[@]}" &> /dev/null || true
gcloud compute addresses delete "${MASTER_NAME}-ip" \
--project "${PROJECT}" \
@ -138,9 +138,9 @@ function delete-master-instance-and-resources {
if [ "${SEPARATE_EVENT_MACHINE:-false}" == "true" ]; then
gcloud compute instances delete "${EVENT_STORE_NAME}" \
${GCLOUD_COMMON_ARGS} || true
"${GCLOUD_COMMON_ARGS[@]}" || true
gcloud compute disks delete "${EVENT_STORE_NAME}-pd" \
${GCLOUD_COMMON_ARGS} || true
"${GCLOUD_COMMON_ARGS[@]}" || true
fi
}
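
For context, the repeated change in this file turns `GCLOUD_COMMON_ARGS` from a space-joined string (expanded unquoted, SC2086) into a bash array expanded as `"${GCLOUD_COMMON_ARGS[@]}"`, so each flag and value stays a single word even if it contains spaces. A minimal sketch with hypothetical values:

COMMON_ARGS=(--project "my-project" --zone "us central1-b")   # hypothetical values, one with a space
printf '<%s> ' "${COMMON_ARGS[@]}"; echo
# Each element survives as one word: <--project> <my-project> <--zone> <us central1-b>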

View File

@ -24,6 +24,7 @@ RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"
complete-login
# Remove resources created for kubemark
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_yellow}REMOVING RESOURCES${color_norm}"
spawn-config
"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/addons" &> /dev/null || true
@ -35,8 +36,9 @@ rm -rf "${RESOURCE_DIRECTORY}/addons"
# Remove clusters, namespaces, and deployments
delete-clusters
if [[ -f "${RESOURCE_DIRECTORY}/iks-namespacelist.sh" ]] ; then
bash ${RESOURCE_DIRECTORY}/iks-namespacelist.sh
rm -f ${RESOURCE_DIRECTORY}/iks-namespacelist.sh
bash "${RESOURCE_DIRECTORY}/iks-namespacelist.sh"
rm -f "${RESOURCE_DIRECTORY}/iks-namespacelist.sh"
fi
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_blue}EXECUTION COMPLETE${color_norm}"
exit 0

View File

@ -25,7 +25,8 @@ RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"
# templates, and finally create these resources through kubectl.
function create-kube-hollow-node-resources {
# Create kubeconfig for Kubelet.
KUBELET_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
KUBELET_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: kubelet
@ -42,10 +43,13 @@ contexts:
cluster: kubemark
user: kubelet
name: kubemark-context
current-context: kubemark-context")
current-context: kubemark-context
EOF
)"
# Create kubeconfig for Kubeproxy.
KUBEPROXY_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
KUBEPROXY_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: kube-proxy
@ -62,10 +66,13 @@ contexts:
cluster: kubemark
user: kube-proxy
name: kubemark-context
current-context: kubemark-context")
current-context: kubemark-context
EOF
)"
# Create kubeconfig for Heapster.
HEAPSTER_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
HEAPSTER_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: heapster
@ -82,10 +89,13 @@ contexts:
cluster: kubemark
user: heapster
name: kubemark-context
current-context: kubemark-context")
current-context: kubemark-context
EOF
)"
# Create kubeconfig for Cluster Autoscaler.
CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: cluster-autoscaler
@ -102,10 +112,13 @@ contexts:
cluster: kubemark
user: cluster-autoscaler
name: kubemark-context
current-context: kubemark-context")
current-context: kubemark-context
EOF
)"
# Create kubeconfig for NodeProblemDetector.
NPD_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
NPD_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: node-problem-detector
@ -122,10 +135,13 @@ contexts:
cluster: kubemark
user: node-problem-detector
name: kubemark-context
current-context: kubemark-context")
current-context: kubemark-context
EOF
)"
# Create kubeconfig for Kube DNS.
KUBE_DNS_KUBECONFIG_CONTENTS=$(echo "apiVersion: v1
KUBE_DNS_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: kube-dns
@ -142,7 +158,9 @@ contexts:
cluster: kubemark
user: kube-dns
name: kubemark-context
current-context: kubemark-context")
current-context: kubemark-context
EOF
)"
# Create kubemark namespace.
spawn-config
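
For context, each kubeconfig above moves from `$(echo "multi-line string")` to a `$(cat <<EOF ... EOF)` heredoc, which drops the needless `echo`, keeps the YAML readable, and still performs variable expansion. A minimal sketch with hypothetical fields:

EXAMPLE_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: example-user
EOF
)"
echo "${EXAMPLE_KUBECONFIG_CONTENTS}"
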
@ -173,14 +191,14 @@ current-context: kubemark-context")
mkdir -p "${RESOURCE_DIRECTORY}/addons"
sed "s/{{MASTER_IP}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json"
metrics_mem_per_node=4
metrics_mem=$((200 + ${metrics_mem_per_node}*${NUM_NODES}))
metrics_mem=$((200 + metrics_mem_per_node*NUM_NODES))
sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
metrics_cpu_per_node_numerator=${NUM_NODES}
metrics_cpu_per_node_denominator=2
metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denominator))
sed -i'' -e "s/{{METRICS_CPU}}/${metrics_cpu}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
eventer_mem_per_node=500
eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES}))
eventer_mem=$((200 * 1024 + eventer_mem_per_node*NUM_NODES))
sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
# Cluster Autoscaler.
@ -216,7 +234,7 @@ current-context: kubemark-context")
proxy_cpu=50
fi
proxy_mem_per_node=50
proxy_mem=$((100 * 1024 + ${proxy_mem_per_node}*${NUM_NODES}))
proxy_mem=$((100 * 1024 + proxy_mem_per_node*NUM_NODES))
sed -i'' -e "s/{{HOLLOW_PROXY_CPU}}/${proxy_cpu}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{HOLLOW_PROXY_MEM}}/${proxy_mem}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s'{{kubemark_image_registry}}'${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
@ -236,8 +254,7 @@ function wait-for-hollow-nodes-to-run-or-timeout {
echo -n "Waiting for all hollow-nodes to become Running"
start=$(date +%s)
nodes=$("${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node 2> /dev/null) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
ready=$(($(echo "${nodes}" | grep -vc "NotReady") - 1))
until [[ "${ready}" -ge "${NUM_REPLICAS}" ]]; do
echo -n "."
sleep 1
@ -245,6 +262,7 @@ function wait-for-hollow-nodes-to-run-or-timeout {
# Fail it if it already took more than 30 minutes.
if [ $((now - start)) -gt 1800 ]; then
echo ""
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_red} Timeout waiting for all hollow-nodes to become Running. ${color_norm}"
# Try listing nodes again - if it fails it means that API server is not responding
if "${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node &> /dev/null; then
@ -254,16 +272,17 @@ function wait-for-hollow-nodes-to-run-or-timeout {
fi
spawn-config
pods=$("${KUBECTL}" get pods -l name=hollow-node --namespace=kubemark) || true
running=$(($(echo "${pods}" | grep "Running" | wc -l)))
running=$(($(echo "${pods}" | grep -c "Running")))
echo "${running} hollow-nodes are reported as 'Running'"
not_running=$(($(echo "${pods}" | grep -v "Running" | wc -l) - 1))
not_running=$(($(echo "${pods}" | grep -vc "Running") - 1))
echo "${not_running} hollow-nodes are reported as NOT 'Running'"
echo $(echo "${pods}" | grep -v "Running")
echo "${pods}" | grep -v "Running"
exit 1
fi
nodes=$("${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node 2> /dev/null) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
ready=$(($(echo "${nodes}" | grep -vc "NotReady") - 1))
done
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_green} Done!${color_norm}"
}
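
For context, `grep -c`/`grep -vc` count matching lines directly instead of piping through `wc -l` (SC2126); the trailing `- 1` removes the header line of `kubectl get node` output. A minimal sketch with hypothetical output:

nodes=$'NAME     STATUS\nnode-1   Ready\nnode-2   NotReady'   # hypothetical kubectl output
ready=$(($(echo "${nodes}" | grep -vc "NotReady") - 1))       # header line excluded
echo "${ready} node(s) ready"                                 # prints: 1 node(s) ready
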
@ -281,14 +300,17 @@ set-hollow-master
echo "Creating kube hollow node resources"
create-kube-hollow-node-resources
master-config
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_blue}EXECUTION COMPLETE${color_norm}"
# Check status of Kubemark
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_yellow}CHECKING STATUS${color_norm}"
wait-for-hollow-nodes-to-run-or-timeout
# Celebrate
echo ""
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_blue}SUCCESS${color_norm}"
clean-repo
exit 0
exit 0

View File

@ -14,10 +14,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../..
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../../..
# Creates a new kube-spawn cluster
function create-clusters {
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_yellow}CHECKING CLUSTERS${color_norm}"
if bx cs clusters | grep -Fq 'deleting'; then
echo -n "Deleting old clusters"
@ -34,14 +35,15 @@ function create-clusters {
PUBVLAN=$(bx cs vlans wdc06 --json | jq '. | .[] | select(.type == "public") | .id' | sed -e "s/\"//g")
if ! bx cs clusters | grep -Fq 'kubeSpawnTester'; then
echo "Creating spawning cluster"
bx cs cluster-create --location ${CLUSTER_LOCATION} --public-vlan ${PUBVLAN} --private-vlan ${PRIVLAN} --workers 2 --machine-type u2c.2x4 --name kubeSpawnTester
bx cs cluster-create --location "${CLUSTER_LOCATION}" --public-vlan "${PUBVLAN}" --private-vlan "${PRIVLAN}" --workers 2 --machine-type u2c.2x4 --name kubeSpawnTester
fi
if ! bx cs clusters | grep -Fq 'kubeMasterTester'; then
echo "Creating master cluster"
bx cs cluster-create --location ${CLUSTER_LOCATION} --public-vlan ${PUBVLAN} --private-vlan ${PRIVLAN} --workers 2 --machine-type u2c.2x4 --name kubeMasterTester
bx cs cluster-create --location "${CLUSTER_LOCATION}" --public-vlan "${PUBVLAN}" --private-vlan "${PRIVLAN}" --workers 2 --machine-type u2c.2x4 --name kubeMasterTester
fi
push-image
if ! bx cs clusters | grep 'kubeSpawnTester' | grep -Fq 'normal'; then
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_cyan}Warning: new clusters may take up to 60 minutes to be ready${color_norm}"
echo -n "Clusters loading"
fi
@ -61,17 +63,17 @@ function create-clusters {
# Builds and pushes image to registry
function push-image {
if [[ "${ISBUILD}" = "y" ]]; then
if ! bx cr namespaces | grep -Fq ${KUBE_NAMESPACE}; then
if ! bx cr namespaces | grep -Fq "${KUBE_NAMESPACE}"; then
echo "Creating registry namespace"
bx cr namespace-add ${KUBE_NAMESPACE}
echo "bx cr namespace-rm ${KUBE_NAMESPACE}" >> ${RESOURCE_DIRECTORY}/iks-namespacelist.sh
bx cr namespace-add "${KUBE_NAMESPACE}"
echo "bx cr namespace-rm ${KUBE_NAMESPACE}" >> "${RESOURCE_DIRECTORY}/iks-namespacelist.sh"
fi
docker build -t ${KUBEMARK_INIT_TAG} ${KUBEMARK_IMAGE_LOCATION}
docker tag ${KUBEMARK_INIT_TAG} ${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}/${PROJECT}:${KUBEMARK_IMAGE_TAG}
docker push ${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}/${PROJECT}:${KUBEMARK_IMAGE_TAG}
docker build -t "${KUBEMARK_INIT_TAG}" "${KUBEMARK_IMAGE_LOCATION}"
docker tag "${KUBEMARK_INIT_TAG}" "${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}/${PROJECT}:${KUBEMARK_IMAGE_TAG}"
docker push "${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}/${PROJECT}:${KUBEMARK_IMAGE_TAG}"
echo "Image pushed"
else
KUBEMARK_IMAGE_REGISTRY=$(echo "brandondr96")
KUBEMARK_IMAGE_REGISTRY="brandondr96"
KUBE_NAMESPACE=""
fi
}
@ -79,16 +81,17 @@ function push-image {
# Allow user to use existing clusters if desired
function choose-clusters {
echo -n -e "Do you want to use custom clusters? [y/N]${color_cyan}>${color_norm} "
read USE_EXISTING
read -r USE_EXISTING
if [[ "${USE_EXISTING}" = "y" ]]; then
echo -e "${color_yellow}Enter path for desired hollow-node spawning cluster kubeconfig file:${color_norm}"
read CUSTOM_SPAWN_CONFIG
read -r CUSTOM_SPAWN_CONFIG
echo -e "${color_yellow}Enter path for desired hollow-node hosting cluster kubeconfig file:${color_norm}"
read CUSTOM_MASTER_CONFIG
read -r CUSTOM_MASTER_CONFIG
push-image
elif [[ "${USE_EXISTING}" = "N" ]]; then
create-clusters
else
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_red}Invalid response, please try again:${color_norm}"
choose-clusters
fi
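
For context, plain `read` treats backslashes in input as escape characters (SC2162); `read -r` takes the line literally, which matters for anything resembling a path. A minimal sketch:

printf 'Enter kubeconfig path> '
read -r custom_config    # -r keeps backslashes in the input intact
echo "using: ${custom_config}"
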
@ -102,28 +105,35 @@ function set-registry-secrets {
kubectl -n kubemark get serviceaccounts default -o json | jq 'del(.metadata.resourceVersion)' | jq 'setpath(["imagePullSecrets"];[{"name":"bluemix-kubemark-secret-regional"}])' | kubectl -n kubemark replace serviceaccount default -f -
}
# Sets hollow nodes spawned under master
# Sets the hollow-node master
# Exported variables:
# MASTER_IP - IP Address of the Kubemark master
function set-hollow-master {
echo -e "${color_yellow}CONFIGURING MASTER${color_norm}"
master-config
MASTER_IP=$(cat $KUBECONFIG | grep server | awk -F "/" '{print $3}')
MASTER_IP=$(grep server "$KUBECONFIG" | awk -F "/" '{print $3}')
export MASTER_IP
}
# Set up master cluster environment
# Exported variables:
# KUBECONFIG - Overrides default kube config for the purpose of setting up the Kubemark master components.
function master-config {
if [[ "${USE_EXISTING}" = "y" ]]; then
export KUBECONFIG=${CUSTOM_MASTER_CONFIG}
else
$(bx cs cluster-config kubeMasterTester --admin | grep export)
eval "$(bx cs cluster-config kubeMasterTester --admin | grep export)"
fi
}
# Set up spawn cluster environment
# Exported variables:
# KUBECONFIG - Overrides default kube config for the purpose of setting up the hollow-node cluster.
function spawn-config {
if [[ "${USE_EXISTING}" = "y" ]]; then
export KUBECONFIG=${CUSTOM_SPAWN_CONFIG}
else
$(bx cs cluster-config kubeSpawnTester --admin | grep export)
eval "$(bx cs cluster-config kubeSpawnTester --admin | grep export)"
fi
}
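
For context, executing command output bare via `$( ... )` trips SC2091 and subjects the emitted `export KUBECONFIG=...` line to word splitting, which breaks on paths containing spaces; `eval "$( ... )"` runs the output as shell source text in the current shell. A minimal sketch with a hypothetical emitter standing in for `bx`:

emit_config() { echo 'export EXAMPLE_KUBECONFIG="/tmp/dir with spaces/config"'; }   # hypothetical
eval "$(emit_config)"
echo "${EXAMPLE_KUBECONFIG}"
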
@ -147,11 +157,11 @@ function delete-clusters {
function complete-login {
echo -e "${color_yellow}LOGGING INTO CLOUD SERVICES${color_norm}"
echo -n -e "Do you have a federated IBM cloud login? [y/N]${color_cyan}>${color_norm} "
read ISFED
read -r ISFED
if [[ "${ISFED}" = "y" ]]; then
bx login --sso -a ${REGISTRY_LOGIN_URL}
bx login --sso -a "${REGISTRY_LOGIN_URL}"
elif [[ "${ISFED}" = "N" ]]; then
bx login -a ${REGISTRY_LOGIN_URL}
bx login -a "${REGISTRY_LOGIN_URL}"
else
echo -e "${color_red}Invalid response, please try again:${color_norm}"
complete-login
@ -159,28 +169,34 @@ function complete-login {
bx cr login
}
# Generate values to fill the hollow-node configuration
# Generate values to fill the hollow-node configuration templates.
# Exported variables:
# KUBECTL - The name or path to the kubernetes client binary.
# TEST_CLUSTER_API_CONTENT_TYPE - Defines the content-type of the requests used by the Kubemark components.
function generate-values {
echo "Generating values"
master-config
KUBECTL=kubectl
export KUBECTL
KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark"
RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"
TEST_CLUSTER_API_CONTENT_TYPE="bluemix" #Determine correct usage of this
export TEST_CLUSTER_API_CONTENT_TYPE
CONFIGPATH=${KUBECONFIG%/*}
KUBELET_CERT_BASE64="${KUBELET_CERT_BASE64:-$(cat ${CONFIGPATH}/admin.pem | base64 | tr -d '\r\n')}"
KUBELET_KEY_BASE64="${KUBELET_KEY_BASE64:-$(cat ${CONFIGPATH}/admin-key.pem | base64 | tr -d '\r\n')}"
CA_CERT_BASE64="${CA_CERT_BASE64:-$(cat `find ${CONFIGPATH} -name *ca*` | base64 | tr -d '\r\n')}"
KUBELET_CERT_BASE64="${KUBELET_CERT_BASE64:-$(base64 "${CONFIGPATH}/admin.pem" | tr -d '\r\n')}"
KUBELET_KEY_BASE64="${KUBELET_KEY_BASE64:-$(base64 "${CONFIGPATH}/admin-key.pem" | tr -d '\r\n')}"
CA_CERT_BASE64="${CA_CERT_BASE64:-$( base64 "$(find "${CONFIGPATH}" -name "*ca*" | head -n 1)" | tr -d '\r\n')}"
}
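
For context, `base64` reads files directly, so `cat file | base64` is a useless use of cat (SC2002), and quoting the path guards against spaces; the `find ... | head -n 1` change also pins the `*ca*` glob to a single match. A minimal sketch with a hypothetical cert path:

cert_file="/tmp/example-admin.pem"    # hypothetical path
printf 'fake-cert-bytes' > "${cert_file}"
CERT_B64="$(base64 "${cert_file}" | tr -d '\r\n')"
echo "${CERT_B64}"
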
# Build image for kubemark
function build-kubemark-image {
echo -n -e "Do you want to build the kubemark image? [y/N]${color_cyan}>${color_norm} "
read ISBUILD
read -r ISBUILD
if [[ "${ISBUILD}" = "y" ]]; then
echo -e "${color_yellow}BUILDING IMAGE${color_norm}"
${KUBE_ROOT}/build/run.sh make kubemark
cp ${KUBE_ROOT}/_output/dockerized/bin/linux/amd64/kubemark ${KUBEMARK_IMAGE_LOCATION}
"${KUBE_ROOT}/build/run.sh" make kubemark
cp "${KUBE_ROOT}/_output/dockerized/bin/linux/amd64/kubemark" "${KUBEMARK_IMAGE_LOCATION}"
elif [[ "${ISBUILD}" = "N" ]]; then
echo -n ""
else
@ -192,11 +208,11 @@ function build-kubemark-image {
# Clean up repository
function clean-repo {
echo -n -e "Do you want to remove build output and binary? [y/N]${color_cyan}>${color_norm} "
read ISCLEAN
read -r ISCLEAN
if [[ "${ISCLEAN}" = "y" ]]; then
echo -e "${color_yellow}CLEANING REPO${color_norm}"
rm -rf ${KUBE_ROOT}/_output
rm -f ${KUBEMARK_IMAGE_LOCATION}/kubemark
rm -rf "${KUBE_ROOT}/_output"
rm -f "${KUBEMARK_IMAGE_LOCATION}/kubemark"
elif [[ "${ISCLEAN}" = "N" ]]; then
echo -n ""
else

View File

@ -15,12 +15,12 @@
# limitations under the License.
REPORT_DIR="${1:-_artifacts}"
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source ${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh
source ${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh
source "${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh"
source "${KUBE_ROOT}/cluster/kubemark/${CLOUD_PROVIDER}/config-default.sh"
export KUBEMARK_MASTER_NAME="${MASTER_NAME}"
echo "Dumping logs for kubemark master: ${KUBEMARK_MASTER_NAME}"
DUMP_ONLY_MASTER_LOGS=true ${KUBE_ROOT}/cluster/log-dump/log-dump.sh "${REPORT_DIR}"
DUMP_ONLY_MASTER_LOGS=true "${KUBE_ROOT}/cluster/log-dump/log-dump.sh" "${REPORT_DIR}"

View File

@ -107,7 +107,7 @@ function find-attached-pd() {
if [[ ! -e /dev/disk/by-id/${pd_name} ]]; then
echo ""
fi
device_info=$(ls -l /dev/disk/by-id/${pd_name})
device_info=$(ls -l "/dev/disk/by-id/${pd_name}")
relative_path=${device_info##* }
echo "/dev/disk/by-id/${relative_path}"
}
@ -288,9 +288,9 @@ function start-kubelet {
#
# $1 is the file to create.
function prepare-log-file {
touch $1
chmod 644 $1
chown root:root $1
touch "$1"
chmod 644 "$1"
chown root:root "$1"
}
# A helper function for copying addon manifests and set dir/files
@ -301,10 +301,13 @@ function prepare-log-file {
function setup-addon-manifests {
local -r src_dir="${KUBE_ROOT}/$2"
local -r dst_dir="/etc/kubernetes/$1/$2"
if [[ ! -d "${dst_dir}" ]]; then
mkdir -p "${dst_dir}"
fi
local files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml")
local files
files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml")
if [[ -n "${files}" ]]; then
cp "${src_dir}/"*.yaml "${dst_dir}"
fi
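
For context, `local files=$(find ...)` hides find's exit status because `local` itself returns 0 regardless (SC2155); declaring and assigning on separate lines, as above, keeps the status observable. A minimal sketch:

list_manifests() {
  local files
  files=$(find /etc -maxdepth 1 -name "*.conf")   # $? here reflects find, not local
  if [[ -n "${files}" ]]; then
    echo "${files}"
  fi
}
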
@ -508,7 +511,7 @@ function compute-kube-apiserver-params {
params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv"
params+=" --secure-port=443"
params+=" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv"
params+=" --target-ram-mb=$((${NUM_NODES} * 60))"
params+=" --target-ram-mb=$((NUM_NODES * 60))"
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
params+=" --admission-control=${CUSTOM_ADMISSION_PLUGINS}"
params+=" --authorization-mode=Node,RBAC"
@ -598,7 +601,7 @@ function start-kubemaster-component() {
local -r component=$1
prepare-log-file /var/log/"${component}".log
local -r src_file="${KUBE_ROOT}/${component}.yaml"
local -r params=$(compute-${component}-params)
local -r params=$("compute-${component}-params")
# Evaluate variables.
sed -i -e "s@{{params}}@${params}@g" "${src_file}"
@ -609,18 +612,18 @@ function start-kubemaster-component() {
elif [ "${component}" == "kube-addon-manager" ]; then
setup-addon-manifests "addons" "kubemark-rbac-bindings"
else
local -r component_docker_tag=$(cat ${KUBE_BINDIR}/${component}.docker_tag)
local -r component_docker_tag=$(cat "${KUBE_BINDIR}/${component}.docker_tag")
sed -i -e "s@{{${component}_docker_tag}}@${component_docker_tag}@g" "${src_file}"
if [ "${component}" == "kube-apiserver" ]; then
local audit_policy_config_mount=""
local audit_policy_config_volume=""
if [[ "${ENABLE_APISERVER_ADVANCED_AUDIT:-}" == "true" ]]; then
read -d '' audit_policy_config_mount << EOF
read -r -d '' audit_policy_config_mount << EOF
- name: auditpolicyconfigmount
mountPath: ${audit_policy_file}
readOnly: true
EOF
read -d '' audit_policy_config_volume << EOF
read -r -d '' audit_policy_config_volume << EOF
- name: auditpolicyconfigmount
hostPath:
path: ${audit_policy_file}
@ -669,7 +672,8 @@ fi
main_etcd_mount_point="/mnt/disks/master-pd"
mount-pd "google-master-pd" "${main_etcd_mount_point}"
# Contains all the data stored in etcd.
mkdir -m 700 -p "${main_etcd_mount_point}/var/etcd"
mkdir -p "${main_etcd_mount_point}/var/etcd"
chmod 700 "${main_etcd_mount_point}/var/etcd"
ln -s -f "${main_etcd_mount_point}/var/etcd" /var/etcd
mkdir -p /etc/srv
# Setup the dynamically generated apiserver auth certs and keys to pd.
@ -692,7 +696,8 @@ fi
event_etcd_mount_point="/mnt/disks/master-event-pd"
mount-pd "google-master-event-pd" "${event_etcd_mount_point}"
# Contains all the data stored in event etcd.
mkdir -m 700 -p "${event_etcd_mount_point}/var/etcd/events"
mkdir -p "${event_etcd_mount_point}/var/etcd/events"
chmod 700 "${event_etcd_mount_point}/var/etcd/events"
ln -s -f "${event_etcd_mount_point}/var/etcd/events" /var/etcd/events
fi
}
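
For context, `mkdir -m 700 -p` applies the mode only to the deepest directory created, which ShellCheck flags as misleading (SC2174); splitting into `mkdir -p` plus an explicit `chmod 700` makes the intended permissions unambiguous. A minimal sketch with a hypothetical mount point:

mount_point="/tmp/example-master-pd"   # hypothetical path
mkdir -p "${mount_point}/var/etcd"
chmod 700 "${mount_point}/var/etcd"    # restrict just the etcd directory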

View File

@ -17,10 +17,10 @@
export KUBERNETES_PROVIDER="kubemark"
export KUBE_CONFIG_FILE="config-default.sh"
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
# We need an absolute path to KUBE_ROOT
ABSOLUTE_ROOT=$(readlink -f ${KUBE_ROOT})
ABSOLUTE_ROOT=$(readlink -f "${KUBE_ROOT}")
source "${KUBE_ROOT}/cluster/kubemark/util.sh"
@ -32,21 +32,23 @@ export KUBE_MASTER_URL="https://${KUBE_MASTER_IP}"
export KUBECONFIG="${ABSOLUTE_ROOT}/test/kubemark/resources/kubeconfig.kubemark"
export E2E_MIN_STARTUP_PODS=0
if [[ -z "$@" ]]; then
ARGS='--ginkgo.focus=[Feature:Performance]'
if [[ -z "$*" ]]; then
ARGS=('--ginkgo.focus=[Feature:Performance]')
else
ARGS=$@
ARGS=("$@")
fi
if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER}" == "true" ]]; then
ARGS="${ARGS} --kubemark-external-kubeconfig=${DEFAULT_KUBECONFIG}"
ARGS+=("--kubemark-external-kubeconfig=${DEFAULT_KUBECONFIG}")
fi
if [[ -f /.dockerenv ]]; then
# Running inside a dockerized runner.
go run ./hack/e2e.go -- --check-version-skew=false --test --test_args="--e2e-verify-service-account=false --dump-logs-on-failure=false ${ARGS}"
go run ./hack/e2e.go -- --check-version-skew=false --test --test_args="--e2e-verify-service-account=false --dump-logs-on-failure=false ${ARGS[*]}"
else
# Running locally.
ARGS=$(echo $ARGS | sed 's/\[/\\\[/g' | sed 's/\]/\\\]/g')
${KUBE_ROOT}/hack/ginkgo-e2e.sh "--e2e-verify-service-account=false" "--dump-logs-on-failure=false" $ARGS
for ((i=0; i < ${#ARGS[@]}; i++)); do
ARGS[$i]="$(echo "${ARGS[$i]}" | sed -e 's/\[/\\\[/g' -e 's/\]/\\\]/g' )"
done
"${KUBE_ROOT}/hack/ginkgo-e2e.sh" "--e2e-verify-service-account=false" "--dump-logs-on-failure=false" "${ARGS[@]}"
fi
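
For context, keeping the test arguments in an array means each flag stays a single word through the bracket-escaping loop and the final `"${ARGS[@]}"` expansion; note the loop bound is the element count `${#ARGS[@]}`. A minimal sketch of the escaping pass:

ARGS=('--ginkgo.focus=[Feature:Performance]' '--extra-flag=value')
for ((i = 0; i < ${#ARGS[@]}; i++)); do
  ARGS[$i]="$(echo "${ARGS[$i]}" | sed -e 's/\[/\\\[/g' -e 's/\]/\\\]/g')"
done
printf '%s\n' "${ARGS[@]}"   # brackets now escaped for ginkgo's regex focus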

View File

@ -74,3 +74,20 @@ function copy-files() {
function delete-master-instance-and-resources {
echo "Deleting master instance and its allocated resources" 1>&2
}
# Common colors used throughout the kubemark scripts
if [[ -z "${color_start-}" ]]; then
declare -r color_start="\033["
# shellcheck disable=SC2034
declare -r color_red="${color_start}0;31m"
# shellcheck disable=SC2034
declare -r color_yellow="${color_start}0;33m"
# shellcheck disable=SC2034
declare -r color_green="${color_start}0;32m"
# shellcheck disable=SC2034
declare -r color_blue="${color_start}1;34m"
# shellcheck disable=SC2034
declare -r color_cyan="${color_start}1;36m"
# shellcheck disable=SC2034
declare -r color_norm="${color_start}0m"
fi
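
For context, the `[[ -z "${color_start-}" ]]` guard makes this block idempotent under repeated sourcing (a second `declare -r` on an already-readonly variable would fail), and the per-line SC2034 directives cover variables read only by sourcing scripts. A consumer then uses them as:

# shellcheck disable=SC2154  # colors defined in the sourced skeleton util
echo -e "${color_green}Done!${color_norm}"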

View File

@ -20,7 +20,7 @@ set -o errexit
set -o nounset
set -o pipefail
TMP_ROOT="$(dirname "${BASH_SOURCE}")/../.."
TMP_ROOT="$(dirname "${BASH_SOURCE[@]}")/../.."
KUBE_ROOT=$(readlink -e "${TMP_ROOT}" 2> /dev/null || perl -MCwd -e 'print Cwd::abs_path shift' "${TMP_ROOT}")
source "${KUBE_ROOT}/test/kubemark/skeleton/util.sh"
@ -373,14 +373,14 @@ current-context: kubemark-context"
mkdir -p "${RESOURCE_DIRECTORY}/addons"
sed "s/{{MASTER_IP}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json"
metrics_mem_per_node=4
metrics_mem=$((200 + ${metrics_mem_per_node}*${NUM_NODES}))
metrics_mem=$((200 + metrics_mem_per_node*NUM_NODES))
sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
metrics_cpu_per_node_numerator=${NUM_NODES}
metrics_cpu_per_node_denominator=2
metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denominator))
sed -i'' -e "s/{{METRICS_CPU}}/${metrics_cpu}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
eventer_mem_per_node=500
eventer_mem=$((200 * 1024 + ${eventer_mem_per_node}*${NUM_NODES}))
eventer_mem=$((200 * 1024 + eventer_mem_per_node*NUM_NODES))
sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
# Cluster Autoscaler.
@ -415,7 +415,7 @@ current-context: kubemark-context"
proxy_cpu=50
fi
proxy_mem_per_node=50
proxy_mem=$((100 * 1024 + ${proxy_mem_per_node}*${NUM_NODES}))
proxy_mem=$((100 * 1024 + proxy_mem_per_node*NUM_NODES))
sed -i'' -e "s/{{HOLLOW_PROXY_CPU}}/${proxy_cpu}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s/{{HOLLOW_PROXY_MEM}}/${proxy_mem}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
sed -i'' -e "s'{{kubemark_image_registry}}'${KUBEMARK_IMAGE_REGISTRY}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
@ -434,7 +434,7 @@ function wait-for-hollow-nodes-to-run-or-timeout {
echo -n "Waiting for all hollow-nodes to become Running"
start=$(date +%s)
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
ready=$(($(echo "${nodes}" | grep -vc "NotReady") - 1))
until [[ "${ready}" -ge "${NUM_REPLICAS}" ]]; do
echo -n "."
@ -443,6 +443,7 @@ function wait-for-hollow-nodes-to-run-or-timeout {
# Fail it if it already took more than 30 minutes.
if [ $((now - start)) -gt 1800 ]; then
echo ""
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_red} Timeout waiting for all hollow-nodes to become Running. ${color_norm}"
# Try listing nodes again - if it fails it means that API server is not responding
if "${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node &> /dev/null; then
@ -451,16 +452,17 @@ function wait-for-hollow-nodes-to-run-or-timeout {
echo "Got error while trying to list hollow-nodes. Probably API server is down."
fi
pods=$("${KUBECTL}" get pods -l name=hollow-node --namespace=kubemark) || true
running=$(($(echo "${pods}" | grep "Running" | wc -l)))
running=$(($(echo "${pods}" | grep -c "Running")))
echo "${running} hollow-nodes are reported as 'Running'"
not_running=$(($(echo "${pods}" | grep -v "Running" | wc -l) - 1))
not_running=$(($(echo "${pods}" | grep -vc "Running") - 1))
echo "${not_running} hollow-nodes are reported as NOT 'Running'"
echo "${pods}" | grep -v Running
exit 1
fi
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
ready=$(($(echo "${nodes}" | grep -vc "NotReady") - 1))
done
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_green} Done!${color_norm}"
}
@ -475,6 +477,7 @@ write-local-kubeconfig
# Setup for master.
function start-master {
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_yellow}STARTING SETUP FOR MASTER${color_norm}"
create-master-environment-file
create-master-instance-with-resources

View File

@ -16,7 +16,7 @@
# Script that destroys Kubemark cluster and deletes all master resources.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/test/kubemark/skeleton/util.sh"
source "${KUBE_ROOT}/test/kubemark/cloud-provider-config.sh"