Fix shellcheck failures in test/cmd/l.*.sh

Author: Xiangyang Chu
Date: 2019-06-19 11:09:15 +08:00
Parent: 3a344c7168
Commit: 313044abd7
2 changed files with 65 additions and 66 deletions

hack/.shellcheck_failures

@@ -18,5 +18,4 @@
 ./hack/lib/test.sh
 ./hack/test-integration.sh
 ./hack/verify-test-featuregates.sh
-./test/cmd/legacy-script.sh
 ./test/images/image-util.sh
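
Note: dropping ./test/cmd/legacy-script.sh from this allowlist means the file must now lint cleanly. A minimal way to check that locally, assuming shellcheck is installed (hack/verify-shellcheck.sh is the repo's wrapper around it):

    # Lint the one file directly:
    shellcheck ./test/cmd/legacy-script.sh
    # Or run the repo-wide verifier, which skips only the paths still listed above:
    ./hack/verify-shellcheck.sh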

test/cmd/legacy-script.sh

@@ -23,7 +23,7 @@ set -o pipefail
 # Set locale to ensure english responses from kubectl commands
 export LANG=C
-KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
 # Expects the following has already been done by whatever sources this script
 # source "${KUBE_ROOT}/hack/lib/init.sh"
 # source "${KUBE_ROOT}/hack/lib/test.sh"
@@ -66,14 +66,14 @@ CTLRMGR_PORT=${CTLRMGR_PORT:-10252}
 PROXY_HOST=127.0.0.1 # kubectl only serves on localhost.
 IMAGE_NGINX="k8s.gcr.io/nginx:1.7.9"
-IMAGE_DEPLOYMENT_R1="k8s.gcr.io/nginx:test-cmd" # deployment-revision1.yaml
-IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX" # deployment-revision2.yaml
-IMAGE_PERL="k8s.gcr.io/perl"
-IMAGE_PAUSE_V2="k8s.gcr.io/pause:2.0"
-IMAGE_DAEMONSET_R2="k8s.gcr.io/pause:latest"
-IMAGE_DAEMONSET_R2_2="k8s.gcr.io/nginx:test-cmd" # rollingupdate-daemonset-rv2.yaml
-IMAGE_STATEFULSET_R1="k8s.gcr.io/nginx-slim:0.7"
-IMAGE_STATEFULSET_R2="k8s.gcr.io/nginx-slim:0.8"
+export IMAGE_DEPLOYMENT_R1="k8s.gcr.io/nginx:test-cmd" # deployment-revision1.yaml
+export IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX" # deployment-revision2.yaml
+export IMAGE_PERL="k8s.gcr.io/perl"
+export IMAGE_PAUSE_V2="k8s.gcr.io/pause:2.0"
+export IMAGE_DAEMONSET_R2="k8s.gcr.io/pause:latest"
+export IMAGE_DAEMONSET_R2_2="k8s.gcr.io/nginx:test-cmd" # rollingupdate-daemonset-rv2.yaml
+export IMAGE_STATEFULSET_R1="k8s.gcr.io/nginx-slim:0.7"
+export IMAGE_STATEFULSET_R2="k8s.gcr.io/nginx-slim:0.8"
 # Expose kubectl directly for readability
 PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH
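
Note: the added exports here (and in the two hunks below) address shellcheck SC2034 ("variable appears unused"). These variables are consumed by the test files that source this script, so shellcheck cannot see the usage; exporting marks them as intentionally shared. A toy reproduction of the warning, with a hypothetical name:

    # shellcheck flags DEMO_IMAGE as unused (SC2034) because the only
    # consumer lives in a sourced file it does not follow:
    DEMO_IMAGE="k8s.gcr.io/nginx:1.7.9"
    # Exporting (or a "# shellcheck disable=SC2034" directive) silences it:
    export DEMO_IMAGE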
@@ -83,8 +83,8 @@ clusterroles="clusterroles"
 configmaps="configmaps"
 csr="csr"
 deployments="deployments"
-horizontalpodautoscalers="horizontalpodautoscalers"
-metrics="metrics"
+export horizontalpodautoscalers="horizontalpodautoscalers"
+export metrics="metrics"
 namespaces="namespaces"
 nodes="nodes"
 persistentvolumeclaims="persistentvolumeclaims"
@@ -98,10 +98,10 @@ secrets="secrets"
 serviceaccounts="serviceaccounts"
 services="services"
 statefulsets="statefulsets"
-static="static"
+export static="static"
 storageclass="storageclass"
 subjectaccessreviews="subjectaccessreviews"
-selfsubjectaccessreviews="selfsubjectaccessreviews"
+export selfsubjectaccessreviews="selfsubjectaccessreviews"
 customresourcedefinitions="customresourcedefinitions"
 daemonsets="daemonsets"
 controllerrevisions="controllerrevisions"
@@ -131,9 +131,9 @@ function record_command() {
 local name="$1"
 local output="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
 echo "Recording: ${name}"
-echo "Running command: $@"
-juLog -output="${output}" -class="test-cmd" -name="${name}" "$@"
-if [[ $? -ne 0 ]]; then
+echo "Running command: $*"
+if ! juLog -output="${output}" -class="test-cmd" -name="${name}" "$@"
+then
 echo "Error when running ${name}"
 foundError="${foundError}""${name}"", "
 fi
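
Note: this hunk folds in two fixes. Inside a string, "$*" joins the arguments into one word, which is what an echo wants (SC2145 warns about mixing "$@" with other strings), and testing the command directly with "if !" replaces the separate "$?" check (SC2181). A condensed sketch of the same pattern, using a hypothetical function name:

    run_logged() {
        echo "Running command: $*"   # "$*" joins args for display (SC2145)
        if ! "$@"; then              # branch on the command itself, not $? (SC2181)
            echo "Error when running $1"
        fi
    }
    run_logged ls /tmp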
@@ -147,7 +147,7 @@ function stop-proxy()
 {
 [[ -n "${PROXY_PORT-}" ]] && kube::log::status "Stopping proxy on port ${PROXY_PORT}"
 [[ -n "${PROXY_PID-}" ]] && kill "${PROXY_PID}" 1>&2 2>/dev/null
-[[ -n "${PROXY_PORT_FILE-}" ]] && rm -f ${PROXY_PORT_FILE}
+[[ -n "${PROXY_PORT_FILE-}" ]] && rm -f "${PROXY_PORT_FILE}"
 PROXY_PID=
 PROXY_PORT=
 PROXY_PORT_FILE=
@@ -163,22 +163,22 @@ function start-proxy()
 if [ $# -eq 0 ]; then
-kubectl proxy --port=0 --www=. 1>${PROXY_PORT_FILE} 2>&1 &
+kubectl proxy --port=0 --www=. 1>"${PROXY_PORT_FILE}" 2>&1 &
 else
-kubectl proxy --port=0 --www=. --api-prefix="$1" 1>${PROXY_PORT_FILE} 2>&1 &
+kubectl proxy --port=0 --www=. --api-prefix="$1" 1>"${PROXY_PORT_FILE}" 2>&1 &
 fi
 PROXY_PID=$!
 PROXY_PORT=
 local attempts=0
 while [[ -z ${PROXY_PORT} ]]; do
-if (( ${attempts} > 9 )); then
+if (( attempts > 9 )); then
 kill "${PROXY_PID}"
-kube::log::error_exit "Couldn't start proxy. Failed to read port after ${attempts} tries. Got: $(cat ${PROXY_PORT_FILE})"
+kube::log::error_exit "Couldn't start proxy. Failed to read port after ${attempts} tries. Got: $(cat "${PROXY_PORT_FILE}")"
 fi
 sleep .5
 kube::log::status "Attempt ${attempts} to read ${PROXY_PORT_FILE}..."
-PROXY_PORT=$(sed 's/.*Starting to serve on 127.0.0.1:\([0-9]*\)$/\1/'< ${PROXY_PORT_FILE})
+PROXY_PORT=$(sed 's/.*Starting to serve on 127.0.0.1:\([0-9]*\)$/\1/'< "${PROXY_PORT_FILE}")
 attempts=$((attempts+1))
 done
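
Note: the quoting changes in this hunk are shellcheck SC2086 fixes: unquoted expansions undergo word splitting and glob expansion. Dropping the ${} inside (( )) also clears SC2004, since arithmetic context dereferences variable names on its own. A hypothetical demonstration of why the redirection target needed quotes:

    port_file="/tmp/proxy port"    # a path containing a space
    ls 1>${port_file} 2>&1        # bash: "ambiguous redirect" after word splitting
    ls 1>"${port_file}" 2>&1      # writes to exactly one file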
@@ -204,7 +204,7 @@ function cleanup()
 rm -rf "${KUBE_TEMP}"
 local junit_dir="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
-echo "junit report dir:" ${junit_dir}
+echo "junit report dir:" "${junit_dir}"
 kube::log::status "Clean up complete"
 }
@@ -231,9 +231,9 @@ function kubectl-with-retry()
 ERROR_FILE="${KUBE_TEMP}/kubectl-error"
 preserve_err_file=${PRESERVE_ERR_FILE:-false}
 for count in {0..3}; do
-kubectl "$@" 2> ${ERROR_FILE} || true
+kubectl "$@" 2> "${ERROR_FILE}" || true
 if grep -q "the object has been modified" "${ERROR_FILE}"; then
-kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
+kube::log::status "retry $1, error: $(cat "${ERROR_FILE}")"
 rm "${ERROR_FILE}"
 sleep $((2**count))
 else
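
Note: the inner quotes in "$(cat "${ERROR_FILE}")" are correct even though the expansion sits inside an outer double-quoted string; quoting contexts start fresh inside $( ). A minimal illustration:

    file="/tmp/my log"
    msg="last error: $(cat "${file}")"   # inner quotes do not end the outer string
    echo "${msg}"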
@@ -255,12 +255,12 @@ function wait-for-pods-with-label()
 {
 local i
 for i in $(seq 1 10); do
-kubeout=`kubectl get po -l $1 --output=go-template --template='{{range.items}}{{.metadata.name}}{{end}}' --sort-by metadata.name "${kube_flags[@]}"`
-if [[ $kubeout = $2 ]]; then
+kubeout=$(kubectl get po -l "$1" --output=go-template --template='{{range.items}}{{.metadata.name}}{{end}}' --sort-by metadata.name "${kube_flags[@]}")
+if [[ $kubeout = "$2" ]]; then
 return
 fi
-echo Waiting for pods: $2, found $kubeout
-sleep $i
+echo Waiting for pods: "$2", found "$kubeout"
+sleep "$i"
 done
 kube::log::error_exit "Timeout waiting for pods with label $1"
 }
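
Note: besides the quoting, this hunk swaps legacy backticks for $( ) (shellcheck SC2006). The two are equivalent here, but $( ) nests without escaping and reads unambiguously. A generic before/after, not from the script:

    out=`basename \`pwd\``       # backticks need escaping to nest
    out=$(basename "$(pwd)")     # $( ) nests cleanly and allows inner quoting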
@@ -324,7 +324,7 @@ runTests() {
 # token defined in hack/testdata/auth-tokens.csv
 kube_flags_with_token=(
--s "https://127.0.0.1:${SECURE_API_PORT}" --token=admin-token --insecure-skip-tls-verify=true
+-s "https://127.0.0.1:${SECURE_API_PORT}" "--token=admin-token" "--insecure-skip-tls-verify=true"
 )
 if [[ -z "${ALLOW_SKEW:-}" ]]; then
@@ -336,42 +336,42 @@
 fi
 id_field=".metadata.name"
-labels_field=".metadata.labels"
-annotations_field=".metadata.annotations"
-service_selector_field=".spec.selector"
-rc_replicas_field=".spec.replicas"
-rc_status_replicas_field=".status.replicas"
-rc_container_image_field=".spec.template.spec.containers"
-rs_replicas_field=".spec.replicas"
-port_field="(index .spec.ports 0).port"
-port_name="(index .spec.ports 0).name"
-second_port_field="(index .spec.ports 1).port"
-second_port_name="(index .spec.ports 1).name"
-image_field="(index .spec.containers 0).image"
-pod_container_name_field="(index .spec.containers 0).name"
-container_name_field="(index .spec.template.spec.containers 0).name"
-hpa_min_field=".spec.minReplicas"
-hpa_max_field=".spec.maxReplicas"
-hpa_cpu_field=".spec.targetCPUUtilizationPercentage"
-template_labels=".spec.template.metadata.labels.name"
-statefulset_replicas_field=".spec.replicas"
-statefulset_observed_generation=".status.observedGeneration"
-job_parallelism_field=".spec.parallelism"
-deployment_replicas=".spec.replicas"
-secret_data=".data"
-secret_type=".type"
-change_cause_annotation='.*kubernetes.io/change-cause.*'
-pdb_min_available=".spec.minAvailable"
-pdb_max_unavailable=".spec.maxUnavailable"
-generation_field=".metadata.generation"
-container_len="(len .spec.template.spec.containers)"
-image_field0="(index .spec.template.spec.containers 0).image"
-image_field1="(index .spec.template.spec.containers 1).image"
+export labels_field=".metadata.labels"
+export annotations_field=".metadata.annotations"
+export service_selector_field=".spec.selector"
+export rc_replicas_field=".spec.replicas"
+export rc_status_replicas_field=".status.replicas"
+export rc_container_image_field=".spec.template.spec.containers"
+export rs_replicas_field=".spec.replicas"
+export port_field="(index .spec.ports 0).port"
+export port_name="(index .spec.ports 0).name"
+export second_port_field="(index .spec.ports 1).port"
+export second_port_name="(index .spec.ports 1).name"
+export image_field="(index .spec.containers 0).image"
+export pod_container_name_field="(index .spec.containers 0).name"
+export container_name_field="(index .spec.template.spec.containers 0).name"
+export hpa_min_field=".spec.minReplicas"
+export hpa_max_field=".spec.maxReplicas"
+export hpa_cpu_field=".spec.targetCPUUtilizationPercentage"
+export template_labels=".spec.template.metadata.labels.name"
+export statefulset_replicas_field=".spec.replicas"
+export statefulset_observed_generation=".status.observedGeneration"
+export job_parallelism_field=".spec.parallelism"
+export deployment_replicas=".spec.replicas"
+export secret_data=".data"
+export secret_type=".type"
+export change_cause_annotation='.*kubernetes.io/change-cause.*'
+export pdb_min_available=".spec.minAvailable"
+export pdb_max_unavailable=".spec.maxUnavailable"
+export generation_field=".metadata.generation"
+export container_len="(len .spec.template.spec.containers)"
+export image_field0="(index .spec.template.spec.containers 0).image"
+export image_field1="(index .spec.template.spec.containers 1).image"
 # Make sure "default" namespace exists.
 if kube::test::if_supports_resource "${namespaces}" ; then
 output_message=$(kubectl get "${kube_flags[@]}" namespaces)
-if [[ ! $(echo "${output_message}" | grep "default") ]]; then
+if ! grep -q "default" <<< "${output_message}"; then
 # Create default namespace
 kubectl create "${kube_flags[@]}" ns default
 fi
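
Note: the grep change follows shellcheck SC2143: instead of capturing grep's output and testing the string for emptiness, branch on grep -q's exit status. The <<< here-string also drops the echo | grep pipeline. A standalone sketch of the pattern:

    output=$(kubectl get namespaces)
    if ! grep -q "default" <<< "${output}"; then   # -q: no output, status only
        kubectl create ns default
    fi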
@@ -395,10 +395,10 @@ runTests() {
 if [[ -n "${WHAT-}" ]]; then
 for pkg in ${WHAT}
-do
+do
 # running of kubeadm is captured in hack/make-targets/test-cmd.sh
-if [[ "${pkg}" != "kubeadm" ]]; then
-record_command run_${pkg}_tests
+if [[ "${pkg}" != "kubeadm" ]]; then
+record_command run_"${pkg}"_tests
 fi
 done
 cleanup_tests
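
Note: quoting only the expansion in run_"${pkg}"_tests protects it from word splitting while bash still concatenates the quoted and unquoted parts into a single word:

    pkg="core"
    record_command run_"${pkg}"_tests   # invoked as the one word: run_core_tests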