Merge pull request #79195 from xychu/fix-shellchecks-d

Fix shellcheck failures in test/cmd/{d,l}.*.sh
Kubernetes Prow Robot 2019-08-01 05:46:14 -07:00 committed by GitHub
commit a1727472b2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
6 changed files with 79 additions and 83 deletions

View File

@@ -18,7 +18,4 @@
 ./hack/lib/test.sh
 ./hack/test-integration.sh
 ./hack/verify-test-featuregates.sh
-./test/cmd/diff.sh
-./test/cmd/discovery.sh
-./test/cmd/legacy-script.sh
 ./test/images/image-util.sh
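The three test/cmd scripts above are dropped from the shellcheck failure whitelist, so CI now lints them instead of skipping them. As a rough local sketch (not part of this commit; the repository normally drives this through its own verify scripts), the same checks can be reproduced with the shellcheck CLI:

    # Illustrative only: lint the scripts removed from the whitelist;
    # -x follows files pulled in via `source`, which these scripts rely on.
    shellcheck -x test/cmd/diff.sh test/cmd/discovery.sh test/cmd/legacy-script.sh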

View File

@@ -640,7 +640,7 @@ run_rs_tests() {
   # Post-condition: no replica set exists
   kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
-  if kube::test::if_supports_resource "${horizontalpodautoscalers:?}" ; then
+  if kube::test::if_supports_resource "horizontalpodautoscalers" ; then
     ### Auto scale replica set
     # Pre-condition: no replica set exists
     kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
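The helper variable horizontalpodautoscalers is deleted from test/cmd/legacy-script.sh later in this diff, so this caller passes the literal resource name instead. The motivation is ShellCheck SC2034, which flags variables that are assigned but never read in the defining file. A minimal sketch of the warning and the two usual ways around it (illustrative, not from the commit):

    #!/usr/bin/env bash
    # SC2034: assigned but apparently never read in this file.
    horizontalpodautoscalers="horizontalpodautoscalers"
    # Either export it (other sourced test files read it) ...
    export horizontalpodautoscalers
    # ... or drop the variable entirely and pass the literal resource name.
    echo "supports resource: horizontalpodautoscalers"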

View File

@@ -48,7 +48,7 @@ run_kubectl_diff_same_names() {
   create_and_use_new_namespace
   kube::log::status "Test kubectl diff with multiple resources with the same name"
-  output_message=$(KUBECTL_EXTERNAL_DIFF=find kubectl diff -Rf hack/testdata/diff/)
+  output_message=$(KUBECTL_EXTERNAL_DIFF="find" kubectl diff -Rf hack/testdata/diff/)
   kube::test::if_has_string "${output_message}" 'v1\.Pod\..*\.test'
   kube::test::if_has_string "${output_message}" 'apps\.v1\.Deployment\..*\.test'
   kube::test::if_has_string "${output_message}" 'v1\.ConfigMap\..*\.test'

View File

@@ -30,11 +30,11 @@ run_RESTMapper_evaluation_tests() {
   ### Non-existent resource type should give a recognizeable error
   # Pre-condition: None
   # Command
-  kubectl get "${kube_flags[@]}" unknownresourcetype 2>${RESTMAPPER_ERROR_FILE} || true
+  kubectl get "${kube_flags[@]:?}" unknownresourcetype 2>"${RESTMAPPER_ERROR_FILE}" || true
   if grep -q "the server doesn't have a resource type" "${RESTMAPPER_ERROR_FILE}"; then
-    kube::log::status "\"kubectl get unknownresourcetype\" returns error as expected: $(cat ${RESTMAPPER_ERROR_FILE})"
+    kube::log::status "\"kubectl get unknownresourcetype\" returns error as expected: $(cat "${RESTMAPPER_ERROR_FILE}")"
   else
-    kube::log::status "\"kubectl get unknownresourcetype\" returns unexpected error or non-error: $(cat ${RESTMAPPER_ERROR_FILE})"
+    kube::log::status "\"kubectl get unknownresourcetype\" returns unexpected error or non-error: $(cat "${RESTMAPPER_ERROR_FILE}")"
     exit 1
   fi
   rm "${RESTMAPPER_ERROR_FILE}"
@@ -121,9 +121,9 @@ run_swagger_tests() {
   # Verify schema
   file="${KUBE_TEMP}/schema.json"
   curl -s "http://127.0.0.1:${API_PORT}/openapi/v2" > "${file}"
-  [[ "$(grep "list of returned" "${file}")" ]]
-  [[ "$(grep "List of services" "${file}")" ]]
-  [[ "$(grep "Watch for changes to the described resources" "${file}")" ]]
+  grep -q "list of returned" "${file}"
+  grep -q "List of services" "${file}"
+  grep -q "Watch for changes to the described resources" "${file}"

   set +o nounset
   set +o errexit
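This is ShellCheck SC2143: instead of capturing grep output and testing whether the string is non-empty, call grep -q and use its exit status. Under the scripts' errexit setting either form still fails the test when the pattern is missing, so behavior is preserved. A minimal standalone sketch (the temporary file and pattern are invented for illustration):

    #!/usr/bin/env bash
    set -o errexit
    schema_file=$(mktemp)
    printf 'List of services\nother line\n' > "${schema_file}"
    # grep -q exits 0 on a match and 1 otherwise, so errexit aborts on a miss.
    grep -q "List of services" "${schema_file}"
    echo "schema contains the expected text"
    rm -f "${schema_file}"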

View File

@@ -23,7 +23,7 @@ set -o pipefail
 # Set locale to ensure english responses from kubectl commands
 export LANG=C

-KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
+KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
 # Expects the following has already been done by whatever sources this script
 # source "${KUBE_ROOT}/hack/lib/init.sh"
 # source "${KUBE_ROOT}/hack/lib/test.sh"
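BASH_SOURCE is an array, and referencing it without an index (ShellCheck SC2128) only works because bash falls back to element 0; ${BASH_SOURCE[0]} states that explicitly and stays correct when the file is sourced rather than executed. A self-contained sketch of the usual "where am I" idiom:

    #!/usr/bin/env bash
    # Resolve the directory containing this script, whether executed or sourced.
    script_dir=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)
    echo "script lives in: ${script_dir}"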
@@ -66,14 +66,14 @@ CTLRMGR_PORT=${CTLRMGR_PORT:-10252}
 PROXY_HOST=127.0.0.1 # kubectl only serves on localhost.

 IMAGE_NGINX="k8s.gcr.io/nginx:1.7.9"
-IMAGE_DEPLOYMENT_R1="k8s.gcr.io/nginx:test-cmd" # deployment-revision1.yaml
-IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX" # deployment-revision2.yaml
-IMAGE_PERL="k8s.gcr.io/perl"
-IMAGE_PAUSE_V2="k8s.gcr.io/pause:2.0"
-IMAGE_DAEMONSET_R2="k8s.gcr.io/pause:latest"
-IMAGE_DAEMONSET_R2_2="k8s.gcr.io/nginx:test-cmd" # rollingupdate-daemonset-rv2.yaml
-IMAGE_STATEFULSET_R1="k8s.gcr.io/nginx-slim:0.7"
-IMAGE_STATEFULSET_R2="k8s.gcr.io/nginx-slim:0.8"
+export IMAGE_DEPLOYMENT_R1="k8s.gcr.io/nginx:test-cmd" # deployment-revision1.yaml
+export IMAGE_DEPLOYMENT_R2="$IMAGE_NGINX" # deployment-revision2.yaml
+export IMAGE_PERL="k8s.gcr.io/perl"
+export IMAGE_PAUSE_V2="k8s.gcr.io/pause:2.0"
+export IMAGE_DAEMONSET_R2="k8s.gcr.io/pause:latest"
+export IMAGE_DAEMONSET_R2_2="k8s.gcr.io/nginx:test-cmd" # rollingupdate-daemonset-rv2.yaml
+export IMAGE_STATEFULSET_R1="k8s.gcr.io/nginx-slim:0.7"
+export IMAGE_STATEFULSET_R2="k8s.gcr.io/nginx-slim:0.8"

 # Expose kubectl directly for readability
 PATH="${KUBE_OUTPUT_HOSTBIN}":$PATH
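These image variables are only read by the other test/cmd scripts, so within this file ShellCheck sees them as assigned-but-unused (SC2034). Exporting them documents that they are intentionally consumed beyond this file and silences the warning; it also makes them visible to child processes. A tiny sketch of the effect, with a made-up variable name:

    #!/usr/bin/env bash
    # Without export, SC2034 would flag this as assigned but unused here.
    export DEMO_IMAGE="k8s.gcr.io/pause:3.1"
    bash -c 'echo "child process sees DEMO_IMAGE=${DEMO_IMAGE}"'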
@@ -83,8 +83,6 @@ clusterroles="clusterroles"
 configmaps="configmaps"
 csr="csr"
 deployments="deployments"
-horizontalpodautoscalers="horizontalpodautoscalers"
-metrics="metrics"
 namespaces="namespaces"
 nodes="nodes"
 persistentvolumeclaims="persistentvolumeclaims"
@@ -98,10 +96,8 @@ secrets="secrets"
 serviceaccounts="serviceaccounts"
 services="services"
 statefulsets="statefulsets"
-static="static"
 storageclass="storageclass"
 subjectaccessreviews="subjectaccessreviews"
-selfsubjectaccessreviews="selfsubjectaccessreviews"
 customresourcedefinitions="customresourcedefinitions"
 daemonsets="daemonsets"
 controllerrevisions="controllerrevisions"
@@ -131,9 +127,9 @@ function record_command() {
     local name="$1"
     local output="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
     echo "Recording: ${name}"
-    echo "Running command: $@"
-    juLog -output="${output}" -class="test-cmd" -name="${name}" "$@"
-    if [[ $? -ne 0 ]]; then
+    echo "Running command: $*"
+    if ! juLog -output="${output}" -class="test-cmd" -name="${name}" "$@"
+    then
       echo "Error when running ${name}"
       foundError="${foundError}""${name}"", "
     fi
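Two ShellCheck findings are addressed in record_command: SC2145 (mixing "$@" into a larger string; "$*" joins the arguments into one word for logging) and SC2181 (test the command itself with if ! ... rather than inspecting $? afterwards). A standalone sketch of the same shape, where run_logged is a hypothetical helper and not the function from this file:

    #!/usr/bin/env bash
    run_logged() {
      local name="$1"; shift
      echo "Running command: $*"    # "$*" joins the args into a single log string
      if ! "$@"; then               # test the command directly instead of $?
        echo "Error when running ${name}" >&2
        return 1
      fi
    }
    run_logged list-tmp ls /tmp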
@@ -147,7 +143,7 @@ function stop-proxy()
 {
   [[ -n "${PROXY_PORT-}" ]] && kube::log::status "Stopping proxy on port ${PROXY_PORT}"
   [[ -n "${PROXY_PID-}" ]] && kill "${PROXY_PID}" 1>&2 2>/dev/null
-  [[ -n "${PROXY_PORT_FILE-}" ]] && rm -f ${PROXY_PORT_FILE}
+  [[ -n "${PROXY_PORT_FILE-}" ]] && rm -f "${PROXY_PORT_FILE}"
   PROXY_PID=
   PROXY_PORT=
   PROXY_PORT_FILE=
@@ -163,22 +159,22 @@ function start-proxy()
   if [ $# -eq 0 ]; then
-    kubectl proxy --port=0 --www=. 1>${PROXY_PORT_FILE} 2>&1 &
+    kubectl proxy --port=0 --www=. 1>"${PROXY_PORT_FILE}" 2>&1 &
   else
-    kubectl proxy --port=0 --www=. --api-prefix="$1" 1>${PROXY_PORT_FILE} 2>&1 &
+    kubectl proxy --port=0 --www=. --api-prefix="$1" 1>"${PROXY_PORT_FILE}" 2>&1 &
   fi

   PROXY_PID=$!
   PROXY_PORT=

   local attempts=0
   while [[ -z ${PROXY_PORT} ]]; do
-    if (( ${attempts} > 9 )); then
+    if (( attempts > 9 )); then
       kill "${PROXY_PID}"
-      kube::log::error_exit "Couldn't start proxy. Failed to read port after ${attempts} tries. Got: $(cat ${PROXY_PORT_FILE})"
+      kube::log::error_exit "Couldn't start proxy. Failed to read port after ${attempts} tries. Got: $(cat "${PROXY_PORT_FILE}")"
     fi
     sleep .5
     kube::log::status "Attempt ${attempts} to read ${PROXY_PORT_FILE}..."
-    PROXY_PORT=$(sed 's/.*Starting to serve on 127.0.0.1:\([0-9]*\)$/\1/'< ${PROXY_PORT_FILE})
+    PROXY_PORT=$(sed 's/.*Starting to serve on 127.0.0.1:\([0-9]*\)$/\1/'< "${PROXY_PORT_FILE}")
     attempts=$((attempts+1))
   done
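Besides the usual quoting of ${PROXY_PORT_FILE}, the counter check drops the dollar sign: inside an arithmetic context (( ... )) variable names are expanded automatically, so ${attempts} is redundant (SC2004). A minimal sketch of a bounded retry loop in that style; the file path and limits are illustrative only:

    #!/usr/bin/env bash
    attempts=0
    until [[ -f "/tmp/demo-ready" ]] || (( attempts > 3 )); do   # plain name inside (( ))
      echo "attempt ${attempts}: not ready yet"
      sleep 0.1
      attempts=$((attempts+1))
    done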
@@ -204,7 +200,7 @@ function cleanup()
   rm -rf "${KUBE_TEMP}"

   local junit_dir="${KUBE_JUNIT_REPORT_DIR:-/tmp/junit-results}"
-  echo "junit report dir:" ${junit_dir}
+  echo "junit report dir:" "${junit_dir}"
   kube::log::status "Clean up complete"
 }
@@ -231,9 +227,9 @@ function kubectl-with-retry()
   ERROR_FILE="${KUBE_TEMP}/kubectl-error"
   preserve_err_file=${PRESERVE_ERR_FILE:-false}
   for count in {0..3}; do
-    kubectl "$@" 2> ${ERROR_FILE} || true
+    kubectl "$@" 2> "${ERROR_FILE}" || true
     if grep -q "the object has been modified" "${ERROR_FILE}"; then
-      kube::log::status "retry $1, error: $(cat ${ERROR_FILE})"
+      kube::log::status "retry $1, error: $(cat "${ERROR_FILE}")"
       rm "${ERROR_FILE}"
       sleep $((2**count))
     else
@@ -255,12 +251,12 @@ function wait-for-pods-with-label()
 {
   local i
   for i in $(seq 1 10); do
-    kubeout=`kubectl get po -l $1 --output=go-template --template='{{range.items}}{{.metadata.name}}{{end}}' --sort-by metadata.name "${kube_flags[@]}"`
-    if [[ $kubeout = $2 ]]; then
+    kubeout=$(kubectl get po -l "$1" --output=go-template --template='{{range.items}}{{.metadata.name}}{{end}}' --sort-by metadata.name "${kube_flags[@]}")
+    if [[ $kubeout = "$2" ]]; then
       return
     fi
-    echo Waiting for pods: $2, found $kubeout
-    sleep $i
+    echo Waiting for pods: "$2", found "$kubeout"
+    sleep "$i"
   done
   kube::log::error_exit "Timeout waiting for pods with label $1"
 }
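Three idioms land in this hunk: command substitution moves from backticks to $(...) (SC2006), the positional parameters are quoted so label selectors with spaces or glob characters are not mangled (SC2086), and the right-hand side of = inside [[ ]] is quoted so it is compared literally rather than as a pattern (SC2053). A self-contained sketch of that last pitfall, with invented values:

    #!/usr/bin/env bash
    expected="pod-*"              # imagine this arrived as "$2"
    actual="pod-123"
    [[ $actual = $expected ]]   && echo "unquoted RHS: treated as a glob, so this matches"
    [[ $actual = "$expected" ]] || echo "quoted RHS: compared literally, so this does not"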
@@ -319,59 +315,62 @@ runTests() {
   }

   kube_flags=(
-    -s "http://127.0.0.1:${API_PORT}"
+    '-s' "http://127.0.0.1:${API_PORT}"
   )

   # token defined in hack/testdata/auth-tokens.csv
   kube_flags_with_token=(
-    -s "https://127.0.0.1:${SECURE_API_PORT}" --token=admin-token --insecure-skip-tls-verify=true
+    '-s' "https://127.0.0.1:${SECURE_API_PORT}" '--token=admin-token' '--insecure-skip-tls-verify=true'
   )

   if [[ -z "${ALLOW_SKEW:-}" ]]; then
-    kube_flags+=("--match-server-version")
-    kube_flags_with_token+=("--match-server-version")
+    kube_flags+=('--match-server-version')
+    kube_flags_with_token+=('--match-server-version')
   fi
   if kube::test::if_supports_resource "${nodes}" ; then
     [ "$(kubectl get nodes -o go-template='{{ .apiVersion }}' "${kube_flags[@]}")" == "v1" ]
   fi

-  id_field=".metadata.name"
-  labels_field=".metadata.labels"
-  annotations_field=".metadata.annotations"
-  service_selector_field=".spec.selector"
-  rc_replicas_field=".spec.replicas"
-  rc_status_replicas_field=".status.replicas"
-  rc_container_image_field=".spec.template.spec.containers"
-  rs_replicas_field=".spec.replicas"
-  port_field="(index .spec.ports 0).port"
-  port_name="(index .spec.ports 0).name"
-  second_port_field="(index .spec.ports 1).port"
-  second_port_name="(index .spec.ports 1).name"
-  image_field="(index .spec.containers 0).image"
-  pod_container_name_field="(index .spec.containers 0).name"
-  container_name_field="(index .spec.template.spec.containers 0).name"
-  hpa_min_field=".spec.minReplicas"
-  hpa_max_field=".spec.maxReplicas"
-  hpa_cpu_field=".spec.targetCPUUtilizationPercentage"
-  template_labels=".spec.template.metadata.labels.name"
-  statefulset_replicas_field=".spec.replicas"
-  statefulset_observed_generation=".status.observedGeneration"
-  job_parallelism_field=".spec.parallelism"
-  deployment_replicas=".spec.replicas"
-  secret_data=".data"
-  secret_type=".type"
-  change_cause_annotation='.*kubernetes.io/change-cause.*'
-  pdb_min_available=".spec.minAvailable"
-  pdb_max_unavailable=".spec.maxUnavailable"
-  generation_field=".metadata.generation"
-  container_len="(len .spec.template.spec.containers)"
-  image_field0="(index .spec.template.spec.containers 0).image"
-  image_field1="(index .spec.template.spec.containers 1).image"
+  # Define helper variables for fields to prevent typos.
+  # They will be used in some other files under test/cmd,
+  # Let's export them as https://github.com/koalaman/shellcheck/wiki/SC2034 suggested.
+  export id_field=".metadata.name"
+  export labels_field=".metadata.labels"
+  export annotations_field=".metadata.annotations"
+  export service_selector_field=".spec.selector"
+  export rc_replicas_field=".spec.replicas"
+  export rc_status_replicas_field=".status.replicas"
+  export rc_container_image_field=".spec.template.spec.containers"
+  export rs_replicas_field=".spec.replicas"
+  export port_field="(index .spec.ports 0).port"
+  export port_name="(index .spec.ports 0).name"
+  export second_port_field="(index .spec.ports 1).port"
+  export second_port_name="(index .spec.ports 1).name"
+  export image_field="(index .spec.containers 0).image"
+  export pod_container_name_field="(index .spec.containers 0).name"
+  export container_name_field="(index .spec.template.spec.containers 0).name"
+  export hpa_min_field=".spec.minReplicas"
+  export hpa_max_field=".spec.maxReplicas"
+  export hpa_cpu_field=".spec.targetCPUUtilizationPercentage"
+  export template_labels=".spec.template.metadata.labels.name"
+  export statefulset_replicas_field=".spec.replicas"
+  export statefulset_observed_generation=".status.observedGeneration"
+  export job_parallelism_field=".spec.parallelism"
+  export deployment_replicas=".spec.replicas"
+  export secret_data=".data"
+  export secret_type=".type"
+  export change_cause_annotation='.*kubernetes.io/change-cause.*'
+  export pdb_min_available=".spec.minAvailable"
+  export pdb_max_unavailable=".spec.maxUnavailable"
+  export generation_field=".metadata.generation"
+  export container_len="(len .spec.template.spec.containers)"
+  export image_field0="(index .spec.template.spec.containers 0).image"
+  export image_field1="(index .spec.template.spec.containers 1).image"

   # Make sure "default" namespace exists.
   if kube::test::if_supports_resource "${namespaces}" ; then
     output_message=$(kubectl get "${kube_flags[@]}" namespaces)
-    if [[ ! $(echo "${output_message}" | grep "default") ]]; then
+    if ! grep -q "default" <<< "${output_message}"; then
       # Create default namespace
       kubectl create "${kube_flags[@]}" ns default
     fi
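The block of go-template field helpers is exported for the same SC2034 reason as the image variables above, and the namespace check swaps [[ ! $(echo ... | grep ...) ]] for grep -q fed by a here-string, which keeps the exit status and drops the extra echo/pipeline (SC2143). A small sketch of the here-string form, with invented sample data:

    #!/usr/bin/env bash
    output_message="kube-system kube-public"
    # grep -q reads the variable via a here-string; no echo | grep pipeline needed.
    if ! grep -q "default" <<< "${output_message}"; then
      echo "default namespace missing; a real script would create it here"
    fi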
@@ -395,10 +394,10 @@ runTests() {
   if [[ -n "${WHAT-}" ]]; then
     for pkg in ${WHAT}
     do
       # running of kubeadm is captured in hack/make-targets/test-cmd.sh
       if [[ "${pkg}" != "kubeadm" ]]; then
-        record_command run_${pkg}_tests
+        record_command "run_${pkg}_tests"
       fi
     done
     cleanup_tests

View File

@@ -27,10 +27,10 @@ run_kubectl_local_proxy_tests() {
   start-proxy
   check-curl-proxy-code /api/kubernetes 404
   check-curl-proxy-code /api/v1/namespaces 200
-  if kube::test::if_supports_resource "${metrics:?}" ; then
+  if kube::test::if_supports_resource "metrics" ; then
     check-curl-proxy-code /metrics 200
   fi
-  if kube::test::if_supports_resource "${static:?}" ; then
+  if kube::test::if_supports_resource "static" ; then
     check-curl-proxy-code /static/ 200
   fi
   stop-proxy
@@ -45,7 +45,7 @@ run_kubectl_local_proxy_tests() {
   start-proxy /custom
   check-curl-proxy-code /custom/api/kubernetes 404
   check-curl-proxy-code /custom/api/v1/namespaces 200
-  if kube::test::if_supports_resource "${metrics}" ; then
+  if kube::test::if_supports_resource "metrics" ; then
     check-curl-proxy-code /custom/metrics 200
   fi
   check-curl-proxy-code /custom/api/v1/namespaces 200