Merge pull request #79097 from xychu/fix-shellchecks-n
Fix shellcheck failures in test/cmd
commit 1545b28b58
@@ -37,13 +37,6 @@
 ./test/cmd/generic-resources.sh
 ./test/cmd/get.sh
 ./test/cmd/legacy-script.sh
-./test/cmd/node-management.sh
-./test/cmd/old-print.sh
-./test/cmd/proxy.sh
-./test/cmd/rbac.sh
-./test/cmd/request-timeout.sh
-./test/cmd/run.sh
-./test/cmd/save-config.sh
 ./test/e2e_node/conformance/run_test.sh
 ./test/e2e_node/gubernator.sh
 ./test/images/image-util.sh
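The hunk above drops the seven test/cmd scripts fixed by this PR from the shellcheck failure whitelist (in the Kubernetes tree that list is kept in hack/.shellcheck_failures; the file path is not shown in this excerpt, so treat it as an assumption). Once a script is off the list it has to pass shellcheck cleanly. A minimal sketch of verifying that locally with a plain shellcheck invocation; the repository's own verify script may pin a specific shellcheck version:

    # Check a single fixed script; -x lets shellcheck follow source'd helpers.
    shellcheck -x test/cmd/save-config.sh

    # Or check all seven fixed scripts in one pass; a non-zero exit status
    # means at least one file still has findings.
    shellcheck -x test/cmd/{node-management,old-print,proxy,rbac,request-timeout,run,save-config}.sh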
@@ -24,10 +24,10 @@ run_cluster_management_tests() {
 
   kube::log::status "Testing cluster-management commands"
 
-  kube::test::get_object_assert nodes "{{range.items}}{{$id_field}}:{{end}}" '127.0.0.1:'
+  kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
 
   # create test pods we can work with
-  kubectl create -f - "${kube_flags[@]}" << __EOF__
+  kubectl create -f - "${kube_flags[@]:?}" << __EOF__
 {
   "kind": "Pod",
   "apiVersion": "v1",
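This hunk (from run_cluster_management_tests, likely test/cmd/node-management.sh given the file list above) shows the first of the two recurring fixes: switching ${id_field} and ${kube_flags[@]} to the ${...:?} form. With :?, bash aborts with an error message if the parameter is unset or empty instead of silently expanding to nothing, which is what shellcheck warns about for variables it never sees assigned. A small self-contained sketch of the behaviour, with illustrative values rather than the ones set by the test harness:

    #!/usr/bin/env bash
    # With a value present, ${id_field:?} expands exactly like ${id_field}.
    id_field=".metadata.name"
    echo "{{${id_field:?}}}"      # prints: {{.metadata.name}}

    # When the variable is unset or empty, ':?' prints a diagnostic and exits
    # instead of substituting an empty string.  Run it in a subshell so this
    # demo keeps going after the failure.
    unset id_field
    ( echo "{{${id_field:?id_field must be set}}}" ) || echo "expansion failed as expected"

    # The same guard applies to arrays such as kube_flags:
    kube_flags=("--request-timeout=1m")
    printf '%s\n' "${kube_flags[@]:?}"   # prints the flags, or aborts if the array is empty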
@@ -26,14 +26,14 @@ run_kubectl_old_print_tests() {
   kube::log::status "Testing kubectl get --server-print=false"
   ### Test retrieval of all types in discovery
   # Pre-condition: no resources exist
-  output_message=$(kubectl get pods --server-print=false 2>&1 "${kube_flags[@]}")
+  output_message=$(kubectl get pods --server-print=false 2>&1 "${kube_flags[@]:?}")
   # Post-condition: Expect text indicating no resources were found
   kube::test::if_has_string "${output_message}" 'No resources found.'
 
   ### Test retrieval of pods against server-side printing
   kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
   # Post-condition: valid-pod POD is created
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" 'valid-pod:'
   # Compare "old" output with experimental output and ensure both are the same
   # remove the last column, as it contains the object's AGE, which could cause a mismatch.
   expected_output=$(kubectl get pod "${kube_flags[@]}" | awk 'NF{NF--};1')
@@ -97,7 +97,7 @@ run_kubectl_old_print_tests() {
   kube::test::if_has_string "${actual_output}" "${expected_output}"
 
   ### Test retrieval of crds against server-side printing
-  kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
+  kubectl "${kube_flags_with_token[@]:?}" create -f - << __EOF__
 {
   "kind": "CustomResourceDefinition",
   "apiVersion": "apiextensions.k8s.io/v1beta1",
@@ -27,10 +27,10 @@ run_kubectl_local_proxy_tests() {
   start-proxy
   check-curl-proxy-code /api/kubernetes 404
   check-curl-proxy-code /api/v1/namespaces 200
-  if kube::test::if_supports_resource "${metrics}" ; then
+  if kube::test::if_supports_resource "${metrics:?}" ; then
     check-curl-proxy-code /metrics 200
   fi
-  if kube::test::if_supports_resource "${static}" ; then
+  if kube::test::if_supports_resource "${static:?}" ; then
     check-curl-proxy-code /static/ 200
   fi
   stop-proxy
@@ -30,7 +30,7 @@ run_clusterroles_tests() {
   kube::test::get_object_assert clusterrolebindings/cluster-admin "{{.metadata.name}}" 'cluster-admin'
 
   # test `kubectl create clusterrole`
-  kubectl create "${kube_flags[@]}" clusterrole pod-admin --verb=* --resource=pods
+  kubectl create "${kube_flags[@]:?}" clusterrole pod-admin --verb=* --resource=pods
   kube::test::get_object_assert clusterrole/pod-admin "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" '\*:'
   output_message=$(kubectl delete clusterrole pod-admin -n test 2>&1 "${kube_flags[@]}")
   kube::test::if_has_string "${output_message}" 'warning: deleting cluster-scoped resources'
@@ -53,7 +53,7 @@ run_clusterroles_tests() {
   kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.verbs}}{{.}}:{{end}}{{end}}" 'get:'
   kube::test::get_object_assert clusterrole/url-reader "{{range.rules}}{{range.nonResourceURLs}}{{.}}:{{end}}{{end}}" '/logs/\*:/healthz/\*:'
   kubectl create "${kube_flags[@]}" clusterrole aggregation-reader --aggregation-rule="foo1=foo2"
-  kube::test::get_object_assert clusterrole/aggregation-reader "{{$id_field}}" 'aggregation-reader'
+  kube::test::get_object_assert clusterrole/aggregation-reader "{{${id_field:?}}}" 'aggregation-reader'
 
   # test `kubectl create clusterrolebinding`
   # test `kubectl set subject clusterrolebinding`
@@ -26,9 +26,9 @@ run_kubectl_request_timeout_tests() {
   ### Test global request timeout option
   # Pre-condition: no POD exists
   create_and_use_new_namespace
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command
-  kubectl create "${kube_flags[@]}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
+  kubectl create "${kube_flags[@]:?}" -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml
   # Post-condition: valid-pod POD is created
   kubectl get "${kube_flags[@]}" pods -o json
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
@@ -26,9 +26,9 @@ run_kubectl_run_tests() {
   kube::log::status "Testing kubectl run"
   ## kubectl run should create deployments, jobs or cronjob
   # Pre-Condition: no Job exists
-  kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert jobs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command
-  kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]}"
+  kubectl run pi --generator=job/v1 "--image=$IMAGE_PERL" --restart=OnFailure -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}"
   # Post-Condition: Job "pi" is created
   kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}:{{end}}" 'pi:'
   # Describe command (resource only) should print detailed information
@@ -28,11 +28,11 @@ run_save_config_tests() {
   ## 1. kubectl create --save-config should generate configuration annotation
   # Pre-Condition: no POD exists
   create_and_use_new_namespace
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command: create a pod "test-pod"
-  kubectl create -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
+  kubectl create -f hack/testdata/pod.yaml --save-config "${kube_flags[@]:?}"
   # Post-Condition: pod "test-pod" has configuration annotation
-  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
   # Clean up
   kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
   ## 2. kubectl edit --save-config should generate configuration annotation
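The save-config hunks also replace the annotation checks: instead of capturing grep output inside [[ ... ]], the new code asks grep itself with -q and feeds it the captured YAML through a here-string, so the test is grep's exit status rather than whether any text was printed (the pattern shellcheck's SC2143-style warnings suggest). A standalone sketch of the equivalence, using canned YAML in place of a live kubectl call:

    #!/usr/bin/env bash
    # Stand-in for "$(kubectl get pods test-pod -o yaml ...)" -- no cluster needed.
    pod_yaml='metadata:
      annotations:
        kubectl.kubernetes.io/last-applied-configuration: "{}"'

    # Old style: test whether grep printed anything.
    if [[ "$(echo "${pod_yaml}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]; then
      echo "old style: annotation found"
    fi

    # New style: let grep's exit status decide; -q suppresses the output.
    if grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "${pod_yaml}"; then
      echo "new style: annotation found"
    fi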
@@ -40,14 +40,14 @@ run_save_config_tests() {
   create_and_use_new_namespace
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
   kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
-  ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  ! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
   # Command: edit the pod "test-pod"
   temp_editor="${KUBE_TEMP}/tmp-editor.sh"
   echo -e "#!/usr/bin/env bash\n${SED} -i \"s/test-pod-label/test-pod-label-edited/g\" \$@" > "${temp_editor}"
   chmod +x "${temp_editor}"
   EDITOR=${temp_editor} kubectl edit pod test-pod --save-config "${kube_flags[@]}"
   # Post-Condition: pod "test-pod" has configuration annotation
-  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
   # Clean up
   kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
   ## 3. kubectl replace --save-config should generate configuration annotation
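The negated form in the hunks above and below, ! grep -q ... <<< "$(...)", is the matching pre-condition check: it asserts that the annotation is not present yet. grep -q exits 0 when the pattern is found and 1 when it is not, so the leading ! turns "no match" into success. A short sketch with canned YAML instead of a real kubectl call:

    #!/usr/bin/env bash
    # YAML of a pod created without --save-config: no last-applied annotation yet.
    pod_yaml='metadata:
      labels:
        name: test-pod-label'

    # grep -q returns 1 (no match); the leading ! flips that into success,
    # so the assertion passes exactly when the annotation is absent.
    if ! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "${pod_yaml}"; then
      echo "annotation absent, as expected before --save-config is used"
    fi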
@@ -55,11 +55,11 @@ run_save_config_tests() {
   create_and_use_new_namespace
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
   kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
-  ! [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  ! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
   # Command: replace the pod "test-pod"
   kubectl replace -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
   # Post-Condition: pod "test-pod" has configuration annotation
-  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
   # Clean up
   kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
   ## 4. kubectl run --save-config should generate configuration annotation
@@ -68,25 +68,25 @@ run_save_config_tests() {
   # Command: create the rc "nginx" with image nginx
   kubectl run nginx "--image=$IMAGE_NGINX" --save-config --generator=run/v1 "${kube_flags[@]}"
   # Post-Condition: rc "nginx" has configuration annotation
-  [[ "$(kubectl get rc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get rc nginx -o yaml "${kube_flags[@]}")"
   ## 5. kubectl expose --save-config should generate configuration annotation
   # Pre-Condition: no service exists
   kube::test::get_object_assert svc "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command: expose the rc "nginx"
   kubectl expose rc nginx --save-config --port=80 --target-port=8000 "${kube_flags[@]}"
   # Post-Condition: service "nginx" has configuration annotation
-  [[ "$(kubectl get svc nginx -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get svc nginx -o yaml "${kube_flags[@]}")"
   # Clean up
   kubectl delete rc,svc nginx
   ## 6. kubectl autoscale --save-config should generate configuration annotation
   # Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
   kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
-  ! [[ "$(kubectl get rc frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  ! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get rc frontend -o yaml "${kube_flags[@]}")"
   # Command: autoscale rc "frontend"
   kubectl autoscale -f hack/testdata/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
   # Post-Condition: hpa "frontend" has configuration annotation
-  [[ "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get hpa frontend -o yaml "${kube_flags[@]}")"
   # Ensure we can interact with HPA objects in lists through autoscaling/v1 APIs
   output_message=$(kubectl get hpa -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
   kube::test::if_has_string "${output_message}" 'autoscaling/v1'