From 99b2184993050cdbb99089a494e066e2b8b06828 Mon Sep 17 00:00:00 2001
From: Xiangyang Chu
Date: Mon, 17 Jun 2019 10:04:27 +0800
Subject: [PATCH] Fix shellcheck failures in test/cmd

Including files after test/cmd/n*
---
 hack/.shellcheck_failures   |  7 -------
 test/cmd/node-management.sh |  4 ++--
 test/cmd/old-print.sh       |  6 +++---
 test/cmd/proxy.sh           |  4 ++--
 test/cmd/rbac.sh            |  4 ++--
 test/cmd/request-timeout.sh |  4 ++--
 test/cmd/run.sh             |  4 ++--
 test/cmd/save-config.sh     | 22 +++++++++++-----------
 8 files changed, 24 insertions(+), 31 deletions(-)

diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures
index 791c036a21b..8e3cbaa1ddd 100644
--- a/hack/.shellcheck_failures
+++ b/hack/.shellcheck_failures
@@ -37,13 +37,6 @@
 ./test/cmd/generic-resources.sh
 ./test/cmd/get.sh
 ./test/cmd/legacy-script.sh
-./test/cmd/node-management.sh
-./test/cmd/old-print.sh
-./test/cmd/proxy.sh
-./test/cmd/rbac.sh
-./test/cmd/request-timeout.sh
-./test/cmd/run.sh
-./test/cmd/save-config.sh
 ./test/e2e_node/conformance/run_test.sh
 ./test/e2e_node/gubernator.sh
 ./test/images/image-util.sh
diff --git a/test/cmd/node-management.sh b/test/cmd/node-management.sh
index 9b42b940b6d..8b9635c3cb7 100755
--- a/test/cmd/node-management.sh
+++ b/test/cmd/node-management.sh
@@ -24,10 +24,10 @@ run_cluster_management_tests() {

   kube::log::status "Testing cluster-management commands"

-  kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
+  kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'

   # create test pods we can work with
-  kubectl create -f - "${kube_flags[@]}" << __EOF__
+  kubectl create -f - "${kube_flags[@]:?}" << __EOF__
 {
   "kind": "Pod",
   "apiVersion": "v1",
diff --git a/test/cmd/old-print.sh b/test/cmd/old-print.sh
index f881e50e85d..6bdcbe9db91 100755
--- a/test/cmd/old-print.sh
+++ b/test/cmd/old-print.sh
@@ -26,14 +26,14 @@ run_kubectl_old_print_tests() {
   kube::log::status "Testing kubectl get --server-print=false"
   ### Test retrieval of all types in discovery
   # Pre-condition: no resources exist
-  output_message=$(kubectl get pods --server-print=false 2>&1 "${kube_flags[@]}")
+  output_message=$(kubectl get pods --server-print=false 2>&1 "${kube_flags[@]:?}")
   # Post-condition: Expect text indicating no resources were found
   kube::test::if_has_string "${output_message}" 'No resources found.'

   ### Test retrieval of pods against server-side printing
   kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
   # Post-condition: valid-pod POD is created
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" 'valid-pod:'
   # Compare "old" output with experimental output and ensure both are the same
   # remove the last column, as it contains the object's AGE, which could cause a mismatch.
   expected_output=$(kubectl get pod "${kube_flags[@]}" | awk 'NF{NF--};1')
@@ -97,7 +97,7 @@ run_kubectl_old_print_tests() {
   kube::test::if_has_string "${actual_output}" "${expected_output}"

   ### Test retrieval of crds against server-side printing
-  kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
+  kubectl "${kube_flags_with_token[@]:?}" create -f - << __EOF__
 {
   "kind": "CustomResourceDefinition",
   "apiVersion": "apiextensions.k8s.io/v1beta1",
diff --git a/test/cmd/proxy.sh b/test/cmd/proxy.sh
index b2e504967f7..29eee0b1da1 100755
--- a/test/cmd/proxy.sh
+++ b/test/cmd/proxy.sh
@@ -27,10 +27,10 @@
   start-proxy
   check-curl-proxy-code /api/kubernetes 404
   check-curl-proxy-code /api/v1/namespaces 200
-  if kube::test::if_supports_resource "${metrics}" ; then
+  if kube::test::if_supports_resource "${metrics:?}" ; then
     check-curl-proxy-code /metrics 200
   fi
-  if kube::test::if_supports_resource "${static}" ; then
+  if kube::test::if_supports_resource "${static:?}" ; then
     check-curl-proxy-code /static/ 200
   fi
   stop-proxy
diff --git a/test/cmd/rbac.sh b/test/cmd/rbac.sh
index 7df349909e2..52fb55e3e93 100755
--- a/test/cmd/rbac.sh
+++ b/test/cmd/rbac.sh
@@ -30,7 +30,7 @@ run_clusterroles_tests() {
   kube::test::get_object_assert clusterrolebindings/cluster-admin "{{.metadata.name}}" 'cluster-admin'

   # test `kubectl create clusterrole`
-  kubectl create "${kube_flags[@]}" clusterrole pod-admin --verb=* --resource=pods
+  kubectl create "${kube_flags[@]:?}" clusterrole pod-admin --verb=* --resource=pods
   kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
   output_message=$(kubectl delete clusterrole pod-admin -n test 2>&1 "${kube_flags[@]}")
   kube::test::if_has_string "${output_message}" 'warning: deleting cluster-scoped resources'
@@ -53,7 +53,7 @@
   kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:'
   kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}" '/logs/\*:/healthz/\*:'
   kubectl create "${kube_flags[@]}" clusterrole aggregation-reader --aggregation-rule="foo1=foo2"
-  kube::test::get_object_assert clusterrole/aggregation-reader "{{$id_field}}" 'aggregation-reader'
+  kube::test::get_object_assert clusterrole/aggregation-reader "{{${id_field:?}}}" 'aggregation-reader'

   # test `kubectl create clusterrolebinding`
   # test `kubectl set subject clusterrolebinding`
diff --git a/test/cmd/request-timeout.sh b/test/cmd/request-timeout.sh
index 9712d367c1c..de5838e7511 100755
--- a/test/cmd/request-timeout.sh
+++ b/test/cmd/request-timeout.sh
@@ -26,9 +26,9 @@ run_kubectl_request_timeout_tests() {
   ### Test global request timeout option
   # Pre-condition: no POD exists
   create_and_use_new_namespace
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command
-  kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
+  kubectl create "${kube_flags[@]:?}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
   # Post-condition: valid-pod POD is created
   kubectl get "${kube_flags[@]}" pods -o json
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
diff --git a/test/cmd/run.sh b/test/cmd/run.sh
index ae877af323c..f4402815c89 100755
--- a/test/cmd/run.sh
+++ b/test/cmd/run.sh
@@ -26,9 +26,9 @@ run_kubectl_run_tests() {
   kube::log::status "Testing kubectl run"
   ## kubectl run should create deployments, jobs or cronjob
   # Pre-Condition: no Job exists
-  kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert jobs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command
-  kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
+  kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}"
   # Post-Condition: Job "pi" is created
   kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
   # Describe command (resource only) should print detailed information
diff --git a/test/cmd/save-config.sh b/test/cmd/save-config.sh
index aac07d9cf13..25e524a6082 100755
--- a/test/cmd/save-config.sh
+++ b/test/cmd/save-config.sh
@@ -28,11 +28,11 @@ run_save_config_tests() {
   ## 1. kubectl create --save-config should generate configuration annotation
   # Pre-Condition: no POD exists
   create_and_use_new_namespace
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command: create a pod "test-pod"
-  kubectl create -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
+  kubectl create -f hack/testdata/pod.yaml --save-config "${kube_flags[@]:?}"
   # Post-Condition: pod "test-pod" has configuration annotation
-  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
   # Clean up
   kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
   ## 2. kubectl edit --save-config should generate configuration annotation
@@ -40,14 +40,14 @@
   create_and_use_new_namespace
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
   kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
-  ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  ! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
   # Command: edit the pod "test-pod"
   temp_editor="${KUBE_TEMP}/tmp-editor.sh"
   echo -e "#!/usr/bin/env bash\n${SED} -i \"s/test-pod-label/test-pod-label-edited/g\" \$@" > "${temp_editor}"
   chmod +x "${temp_editor}"
   EDITOR=${temp_editor} kubectl edit pod test-pod --save-config "${kube_flags[@]}"
   # Post-Condition: pod "test-pod" has configuration annotation
-  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
   # Clean up
   kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
   ## 3. kubectl replace --save-config should generate configuration annotation
@@ -55,11 +55,11 @@
   create_and_use_new_namespace
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
   kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
-  ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  ! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")" # Command: replace the pod "test-pod" kubectl replace -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}" # Post-Condition: pod "test-pod" has configuration annotation - [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] + grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")" # Clean up kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}" ## 4. kubectl run --save-config should generate configuration annotation @@ -68,25 +68,25 @@ run_save_config_tests() { # Command: create the rc "nginx" with image nginx kubectl run nginx "--image=$IMAGE_NGINX" --save-config --generator=run/v1 "${kube_flags[@]}" # Post-Condition: rc "nginx" has configuration annotation - [[ "$(kubectl get rc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] + grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get rc nginx -o yaml "${kube_flags[@]}")" ## 5. kubectl expose --save-config should generate configuration annotation # Pre-Condition: no service exists kube::test::get_object_assert svc "{{range.items}}{{$id_field}}:{{end}}" '' # Command: expose the rc "nginx" kubectl expose rc nginx --save-config --port=80 --target-port=8000 "${kube_flags[@]}" # Post-Condition: service "nginx" has configuration annotation - [[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] + grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get svc nginx -o yaml "${kube_flags[@]}")" # Clean up kubectl delete rc,svc nginx ## 6. kubectl autoscale --save-config should generate configuration annotation # Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" '' kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}" - ! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] + ! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get rc frontend -o yaml "${kube_flags[@]}")" # Command: autoscale rc "frontend" kubectl autoscale -f hack/testdata/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2 # Post-Condition: hpa "frontend" has configuration annotation - [[ "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]] + grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}")" # Ensure we can interact with HPA objects in lists through autoscaling/v1 APIs output_message=$(kubectl get hpa -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}") kube::test::if_has_string "${output_message}" 'autoscaling/v1'