Update shellcheck version (0.7.2 -> 0.8.0) and fix findings

Oscar Utbult 2022-11-02 10:40:47 +01:00
parent 3efd107eb2
commit 61cd37b806
14 changed files with 89 additions and 76 deletions

View File

@@ -134,7 +134,7 @@ function kube::release::package_client_tarballs() {
for platform_long in "${long_platforms[@]}"; do
local platform
local platform_tag
-platform=${platform_long##${LOCAL_OUTPUT_BINPATH}/} # Strip LOCAL_OUTPUT_BINPATH
+platform=${platform_long##"${LOCAL_OUTPUT_BINPATH}"/} # Strip LOCAL_OUTPUT_BINPATH
platform_tag=${platform/\//-} # Replace a "/" for a "-"
kube::log::status "Starting tarball: client $platform_tag"
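The finding here is SC2295, new in shellcheck 0.8.0: an expansion inside ${var##...} is treated as a glob pattern unless quoted, so glob characters in LOCAL_OUTPUT_BINPATH could strip more than the literal prefix. A minimal sketch, assuming a hypothetical path that contains a glob character:

    path='/tmp/out*/bin/linux/amd64'   # hypothetical value; note the '*'
    prefix='/tmp/out*'
    echo "${path##${prefix}/}"         # unquoted: '*' globs, longest match leaves 'amd64'
    echo "${path##"${prefix}"/}"       # quoted: '*' is literal, leaves 'bin/linux/amd64'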

View File

@@ -82,8 +82,8 @@ kube::test::object_assert() {
for j in $(seq 1 "${tries}"); do
# shellcheck disable=SC2086
-# Disabling because "args" needs to allow for expansion here
-res=$(eval kubectl get "${kube_flags[@]}" ${args} "${object}" -o go-template=\""${request}"\")
+# Disabling because to allow for expansion here
+res=$(kubectl get "${kube_flags[@]}" ${args} ${object} -o go-template="${request}")
if [[ "${res}" =~ ^$expected$ ]]; then
echo -n "${green}"
echo "$(kube::test::get_caller 3): Successful get ${object} ${request}: ${res}"
@@ -110,7 +110,9 @@ kube::test::get_object_jsonpath_assert() {
local request=$2
local expected=$3
-res=$(eval kubectl get "${kube_flags[@]}" "${object}" -o jsonpath=\""${request}"\")
+# shellcheck disable=SC2086
+# Disabling to allow for expansion here
+res=$(kubectl get "${kube_flags[@]}" ${object} -o jsonpath=${request})
if [[ "${res}" =~ ^$expected$ ]]; then
echo -n "${green}"
@@ -135,7 +137,9 @@ kube::test::describe_object_assert() {
local object=$2
local matches=( "${@:3}" )
-result=$(eval kubectl describe "${kube_flags[@]}" "${resource}" "${object}")
+# shellcheck disable=SC2086
+# Disabling to allow for expansion here
+result=$(kubectl describe "${kube_flags[@]}" ${resource} ${object})
for match in "${matches[@]}"; do
if grep -q "${match}" <<< "${result}"; then
@@ -166,10 +170,12 @@ kube::test::describe_object_events_assert() {
local object=$2
local showevents=${3:-"true"}
+# shellcheck disable=SC2086
+# Disabling to allow for expansion here
if [[ -z "${3:-}" ]]; then
-result=$(eval kubectl describe "${kube_flags[@]}" "${resource}" "${object}")
+result=$(kubectl describe "${kube_flags[@]}" ${resource} ${object})
else
-result=$(eval kubectl describe "${kube_flags[@]}" "--show-events=${showevents}" "${resource}" "${object}")
+result=$(kubectl describe "${kube_flags[@]}" "--show-events=${showevents}" ${resource} ${object})
fi
if grep -q "No events.\|Events:" <<< "${result}"; then
@@ -203,7 +209,9 @@ kube::test::describe_resource_assert() {
local resource=$1
local matches=( "${@:2}" )
-result=$(eval kubectl describe "${kube_flags[@]}" "${resource}")
+# shellcheck disable=SC2086
+# Disabling to allow for expansion here
+result=$(kubectl describe "${kube_flags[@]}" ${resource})
for match in "${matches[@]}"; do
if grep -q "${match}" <<< "${result}"; then
@@ -233,7 +241,9 @@ kube::test::describe_resource_events_assert() {
local resource=$1
local showevents=${2:-"true"}
-result=$(eval kubectl describe "${kube_flags[@]}" "--show-events=${showevents}" "${resource}")
+# shellcheck disable=SC2086
+# Disabling to allow for expansion here
+result=$(kubectl describe "${kube_flags[@]}" "--show-events=${showevents}" ${resource})
if grep -q "No events.\|Events:" <<< "${result}"; then
local has_events="true"
@@ -273,8 +283,9 @@ kube::test::describe_resource_chunk_size_assert() {
local expectLists
IFS="," read -r -a expectLists <<< "${resource},${additionalResources}"
# Default chunk size
-defaultResult=$(eval kubectl describe "${resource}" --show-events=true -v=6 "${args}" "${kube_flags[@]}" 2>&1 >/dev/null)
+# shellcheck disable=SC2086
+# Disabling to allow for expansion here
+defaultResult=$(kubectl describe ${resource} --show-events=true -v=6 ${args} "${kube_flags[@]}" 2>&1 >/dev/null)
for r in "${expectLists[@]}"; do
if grep -q "${r}?.*limit=500" <<< "${defaultResult}"; then
echo "query for ${r} had limit param"
@@ -292,8 +303,10 @@ kube::test::describe_resource_chunk_size_assert() {
fi
done
+# shellcheck disable=SC2086
+# Disabling to allow for expansion here
# Try a non-default chunk size
-customResult=$(eval kubectl describe "${resource}" --show-events=false --chunk-size=10 -v=6 "${args}" "${kube_flags[@]}" 2>&1 >/dev/null)
+customResult=$(kubectl describe ${resource} --show-events=false --chunk-size=10 -v=6 ${args} "${kube_flags[@]}" 2>&1 >/dev/null)
if grep -q "${resource}?limit=10" <<< "${customResult}"; then
echo "query for ${resource} had user-specified limit param"
else
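The recurring fix in this file: the eval indirection is dropped, and the variables that intentionally hold multiple words (args, object, resource) stay unquoted behind a targeted SC2086 disable. A condensed sketch of the pattern, with a hypothetical helper name rather than the verbatim function:

    # Before: eval re-parsed the whole command line, so callers had to pre-escape quotes.
    # After: a single parse; word-splitting of ${args} is deliberate and documented.
    get_with_args() {
      local object=$1 args=$2
      # shellcheck disable=SC2086
      # Disabling to allow for expansion here
      kubectl get ${args} "${object}"
    }
    get_with_args pods '--namespace=default -o name'   # hypothetical usage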

View File

@@ -30,8 +30,8 @@ CLEAN_PATTERNS=(
for pattern in "${CLEAN_PATTERNS[@]}"; do
while IFS=$'\n' read -r match; do
-echo "Removing ${match#${KUBE_ROOT}\/} .."
-rm -rf "${match#${KUBE_ROOT}\/}"
+echo "Removing ${match#"${KUBE_ROOT}"\/} .."
+rm -rf "${match#"${KUBE_ROOT}"\/}"
done < <(find "${KUBE_ROOT}" -iregex "^${KUBE_ROOT}/${pattern}$")
done

View File

@@ -30,8 +30,8 @@ DOCKER="${DOCKER:-docker}"
# required version for this script, if not installed on the host we will
# use the official docker image instead. keep this in sync with SHELLCHECK_IMAGE
-SHELLCHECK_VERSION="0.7.2"
-SHELLCHECK_IMAGE="docker.io/koalaman/shellcheck-alpine:v0.7.2@sha256:ce6fd9cc808a47d1d121ba92c203ecc02e8ed78e0e4c412f7fca54c2e954526d"
+SHELLCHECK_VERSION="0.8.0"
+SHELLCHECK_IMAGE="docker.io/koalaman/shellcheck-alpine:v0.8.0@sha256:f42fde76d2d14a645a848826e54a4d650150e151d9c81057c898da89a82c8a56"
# disabled lints
disabled=(
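Pinning the image by digest keeps CI on the exact shellcheck binary even if the tag is re-pushed; SHELLCHECK_VERSION has to match it for the host-binary fast path. A condensed sketch of that fallback, assuming a hypothetical scripts array (not the verbatim script):

    if shellcheck --version 2>/dev/null | grep -q "^version: ${SHELLCHECK_VERSION}$"; then
      shellcheck "${scripts[@]}"   # host binary matches the pinned version
    else
      "${DOCKER}" run --rm -v "$(pwd):/mnt:ro" -w /mnt \
        "${SHELLCHECK_IMAGE}" shellcheck "${scripts[@]}"   # fall back to the pinned image
    fi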

View File

@@ -306,14 +306,14 @@ run_deployment_tests() {
# Pre-condition: no deployment exists
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Pre-condition: no hpa exists
-kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \\\"nginx-deployment\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \"nginx-deployment\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]:?}"
kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:'
# Dry-run autoscale
kubectl-with-retry autoscale deployment nginx-deployment --dry-run=client "${kube_flags[@]:?}" --min=2 --max=3
kubectl-with-retry autoscale deployment nginx-deployment --dry-run=server "${kube_flags[@]:?}" --min=2 --max=3
-kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \\\"nginx-deployment\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'hpa' "{{range.items}}{{ if eq $id_field \"nginx-deployment\" }}found{{end}}{{end}}:" ':'
# autoscale 2~3 pods, no CPU utilization specified
kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]:?}" --min=2 --max=3
kube::test::get_object_assert 'hpa nginx-deployment' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80'
@@ -648,7 +648,7 @@ run_rs_tests() {
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]:?}"
# Post-condition: no pods from frontend replica set
-kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+kube::test::wait_object_assert "pods -l tier=frontend" "{{range.items}}{{${id_field:?}}}:{{end}}" ''
### Create and then delete a replica set with cascading strategy set to orphan, make sure it doesn't delete pods.
# Pre-condition: no replica set exists
@@ -656,13 +656,13 @@ run_rs_tests() {
# Command
kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
# wait for all 3 pods to be set up
-kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{${pod_container_name_field:?}}}:{{end}}" 'php-redis:php-redis:php-redis:'
+kube::test::wait_object_assert "pods -l tier=frontend" "{{range.items}}{{${pod_container_name_field:?}}}:{{end}}" 'php-redis:php-redis:php-redis:'
kube::log::status "Deleting rs"
kubectl delete rs frontend "${kube_flags[@]:?}" --cascade=orphan
# Wait for the rs to be deleted.
kube::test::wait_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
# Post-condition: All 3 pods still remain from frontend replica set
-kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
+kube::test::get_object_assert "pods -l tier=frontend" "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
# Cleanup
kubectl delete pods -l "tier=frontend" "${kube_flags[@]:?}"
kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
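The \\\" -> \" changes across these test files follow from the helpers above losing their eval: the go-template argument is now parsed once, so a single level of escaping yields a literal double quote inside the template. In isolation, with a hypothetical field:

    id_field='.metadata.name'
    tmpl="{{ if eq ${id_field} \"nginx-deployment\" }}found{{end}}"
    echo "$tmpl"   # {{ if eq .metadata.name "nginx-deployment" }}found{{end}}
    # under the old eval-based helpers the same quote had to be written as \\\"

The label selectors lose their inner quotes for the same reason; see the note after the resource-aliasing hunk below.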

View File

@@ -27,18 +27,18 @@ run_job_tests() {
### Create a new namespace
# Pre-condition: the test-jobs namespace does not exist
-kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq ${id_field:?} \\\"test-jobs\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq ${id_field:?} \"test-jobs\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create namespace test-jobs
# Post-condition: namespace 'test-jobs' is created.
kube::test::get_object_assert 'namespaces/test-jobs' "{{$id_field}}" 'test-jobs'
# Pre-condition: cronjob does not exist
-kube::test::get_object_assert 'cronjob --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \\\"pi\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'cronjob --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \"pi\" }}found{{end}}{{end}}:" ':'
# Dry-run create CronJob
kubectl create cronjob pi --dry-run=client --schedule="59 23 31 2 *" --namespace=test-jobs "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}"
kubectl create cronjob pi --dry-run=server --schedule="59 23 31 2 *" --namespace=test-jobs "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}"
-kube::test::get_object_assert 'cronjob' "{{range.items}}{{ if eq $id_field \\\"pi\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'cronjob' "{{range.items}}{{ if eq $id_field \"pi\" }}found{{end}}{{end}}:" ':'
### Create a cronjob in a specific namespace
kubectl create cronjob pi --schedule="59 23 31 2 *" --namespace=test-jobs "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}"
# Post-Condition: assertion object exists
@@ -56,11 +56,11 @@ run_job_tests() {
kube::test::get_object_assert jobs "{{range.items}}{{$id_field}}{{end}}" ''
# Pre-condition: job does not exist
-kube::test::get_object_assert 'job --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \\\"test-jobs\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'job --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \"test-jobs\" }}found{{end}}{{end}}:" ':'
### Dry-run create a job in a specific namespace
kubectl create job test-job --from=cronjob/pi --namespace=test-jobs --dry-run=client
kubectl create job test-job --from=cronjob/pi --namespace=test-jobs --dry-run=server
-kube::test::get_object_assert 'job --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \\\"test-jobs\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'job --namespace=test-jobs' "{{range.items}}{{ if eq $id_field \"test-jobs\" }}found{{end}}{{end}}:" ':'
### Create a job in a specific namespace
kubectl create job test-job --from=cronjob/pi --namespace=test-jobs
# Post-Condition: assertion object exists

View File

@@ -30,7 +30,7 @@ run_configmap_tests() {
### Create a new namespace
# Pre-condition: the test-configmaps namespace does not exist
-kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \\\"test-configmaps\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \"test-configmaps\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create namespace test-configmaps
# Post-condition: namespace 'test-configmaps' is created.
@@ -38,12 +38,12 @@ run_configmap_tests() {
### Create a generic configmap in a specific namespace
# Pre-condition: configmap test-configmap and test-binary-configmap does not exist
-kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \\\"test-configmap\\\" }}found{{end}}{{end}}:" ':'
-kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \\\"test-binary-configmap\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \"test-configmap\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \"test-binary-configmap\" }}found{{end}}{{end}}:" ':'
# Dry-run command
kubectl create configmap test-configmap --dry-run=client --from-literal=key1=value1 --namespace=test-configmaps
kubectl create configmap test-configmap --dry-run=server --from-literal=key1=value1 --namespace=test-configmaps
-kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \\\"test-configmap\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \"test-configmap\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create configmap test-configmap --from-literal=key1=value1 --namespace=test-configmaps
kubectl create configmap test-binary-configmap --from-file <( head -c 256 /dev/urandom ) --namespace=test-configmaps
@@ -163,15 +163,15 @@ run_pod_tests() {
# Command
kubectl create -f test/fixtures/doc-yaml/admin/limitrange/valid-pod.yaml "${kube_flags[@]}"
# Post-condition: valid-pod POD is created
-kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
+kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" "valid-pod:"
### Delete POD valid-pod with label
# Pre-condition: valid-pod POD exists
-kube::test::get_object_assert "pods -l'name in (valid-pod)'" "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
+kube::test::get_object_assert "pods -lname=valid-pod" "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
-kubectl delete pods -l'name in (valid-pod)' "${kube_flags[@]}" --grace-period=0 --force
+kubectl delete pods -lname=valid-pod "${kube_flags[@]}" --grace-period=0 --force
# Post-condition: valid-pod POD doesn't exist
-kube::test::get_object_assert "pods -l'name in (valid-pod)'" "{{range.items}}{{$id_field}}:{{end}}" ''
+kube::test::get_object_assert "pods -lname=valid-pod" "{{range.items}}{{$id_field}}:{{end}}" ''
### Create POD valid-pod from YAML
# Pre-condition: no POD exists
@@ -201,7 +201,7 @@ run_pod_tests() {
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
-! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}" || exit 1
+! kubectl delete --all pods -lname=valid-pod "${kube_flags[@]}" || exit 1
# Post-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
@@ -211,12 +211,12 @@ run_pod_tests() {
# Command
kubectl delete --all pods "${kube_flags[@]}" --grace-period=0 --force # --all remove all the pods
# Post-condition: no POD exists
-kube::test::get_object_assert "pods -l'name in (valid-pod)'" "{{range.items}}{{$id_field}}:{{end}}" ''
+kube::test::get_object_assert "pods -lname=valid-pod" "{{range.items}}{{$id_field}}:{{end}}" ''
# Detailed tests for describe pod output
### Create a new namespace
# Pre-condition: the test-secrets namespace does not exist
-kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \\\"test-kubectl-describe-pod\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \"test-kubectl-describe-pod\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create namespace test-kubectl-describe-pod
# Post-condition: namespace 'test-secrets' is created.
@@ -238,7 +238,7 @@ run_pod_tests() {
### Create a generic configmap
# Pre-condition: CONFIGMAP test-configmap does not exist
#kube::test::get_object_assert 'configmap/test-configmap --namespace=test-kubectl-describe-pod' "{{$id_field}}" ''
-kube::test::get_object_assert 'configmaps --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \\\"test-configmap\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'configmaps --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \"test-configmap\" }}found{{end}}{{end}}:" ':'
#kube::test::get_object_assert 'configmaps --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
@@ -248,11 +248,11 @@ run_pod_tests() {
### Create a pod disruption budget with minAvailable
# Pre-condition: pdb does not exist
-kube::test::get_object_assert 'pdb --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \\\"test-pdb-1\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'pdb --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \"test-pdb-1\" }}found{{end}}{{end}}:" ':'
# Dry-run command
kubectl create pdb test-pdb-1 --dry-run=client --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
kubectl create pdb test-pdb-1 --dry-run=server --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
-kube::test::get_object_assert 'pdb --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \\\"test-pdb-1\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'pdb --namespace=test-kubectl-describe-pod' "{{range.items}}{{ if eq $id_field \"test-pdb-1\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create pdb test-pdb-1 --selector=app=rails --min-available=2 --namespace=test-kubectl-describe-pod
# Post-condition: pdb exists and has expected values
@@ -293,14 +293,14 @@ run_pod_tests() {
kubectl delete namespace test-kubectl-describe-pod
### Priority Class
-kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \\\"test-priorityclass\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \"test-priorityclass\" }}found{{end}}{{end}}:" ':'
# Dry-run command
kubectl create priorityclass test-priorityclass --dry-run=client
kubectl create priorityclass test-priorityclass --dry-run=server
-kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \\\"test-priorityclass\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \"test-priorityclass\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create priorityclass test-priorityclass
-kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \\\"test-priorityclass\\\" }}found{{end}}{{end}}:" 'found:'
+kube::test::get_object_assert 'priorityclasses' "{{range.items}}{{ if eq $id_field \"test-priorityclass\" }}found{{end}}{{end}}:" 'found:'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert priorityclasses events
kubectl delete priorityclass test-priorityclass
@@ -706,7 +706,7 @@ __EOF__
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
-kubectl delete pods -l'name in (valid-pod-super-sayan)' --grace-period=0 --force "${kube_flags[@]}"
+kubectl delete pods -lname=valid-pod-super-sayan --grace-period=0 --force "${kube_flags[@]}"
# Post-condition: valid-pod POD doesn't exist
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
@@ -828,7 +828,7 @@ run_secrets_test() {
### Create a new namespace
# Pre-condition: the test-secrets namespace does not exist
-kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \\\"test-secrets\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \"test-secrets\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create namespace test-secrets
# Post-condition: namespace 'test-secrets' is created.
@@ -948,7 +948,7 @@ run_service_accounts_tests() {
### Create a new namespace
# Pre-condition: the test-service-accounts namespace does not exist
-kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \\\"test-service-accounts\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \"test-service-accounts\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create namespace test-service-accounts
# Post-condition: namespace 'test-service-accounts' is created.
@@ -956,11 +956,11 @@ run_service_accounts_tests() {
### Create a service account in a specific namespace
# Pre-condition: service account does not exist
-kube::test::get_object_assert 'serviceaccount --namespace=test-service-accounts' "{{range.items}}{{ if eq $id_field \\\"test-service-account\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'serviceaccount --namespace=test-service-accounts' "{{range.items}}{{ if eq $id_field \"test-service-account\" }}found{{end}}{{end}}:" ':'
# Dry-run command
kubectl create serviceaccount test-service-account --dry-run=client --namespace=test-service-accounts
kubectl create serviceaccount test-service-account --dry-run=server --namespace=test-service-accounts
-kube::test::get_object_assert 'serviceaccount --namespace=test-service-accounts' "{{range.items}}{{ if eq $id_field \\\"test-service-account\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'serviceaccount --namespace=test-service-accounts' "{{range.items}}{{ if eq $id_field \"test-service-account\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create serviceaccount test-service-account --namespace=test-service-accounts
# Post-condition: secret exists and has expected values
@@ -1207,7 +1207,7 @@ run_rc_tests() {
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
kubectl delete rc frontend "${kube_flags[@]}"
# Post-condition: no pods from frontend controller
-kube::test::wait_object_assert 'pods -l "name=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
+kube::test::wait_object_assert "pods -l name=frontend" "{{range.items}}{{$id_field}}:{{end}}" ''
### Create replication controller frontend from JSON
# Pre-condition: no replication controller exists
@@ -1487,14 +1487,14 @@ run_namespace_tests() {
### Quota
kubectl create namespace quotas
kube::test::get_object_assert 'namespaces/quotas' "{{$id_field}}" 'quotas'
-kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \\\"test-quota\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \"test-quota\" }}found{{end}}{{end}}:" ':'
# Dry-run command
kubectl create quota test-quota --dry-run=client --namespace=quotas
kubectl create quota test-quota --dry-run=server --namespace=quotas
-kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \\\"test-quota\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \"test-quota\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create quota test-quota --namespace=quotas
-kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \\\"test-quota\\\" }}found{{end}}{{end}}:" 'found:'
+kube::test::get_object_assert 'quota --namespace=quotas' "{{range.items}}{{ if eq $id_field \"test-quota\" }}found{{end}}{{end}}:" 'found:'
# Describe command should respect the chunk size parameter
kube::test::describe_resource_chunk_size_assert resourcequotas "" "--namespace=quotas"
# Clean up
@@ -1508,7 +1508,7 @@ run_namespace_tests() {
if kube::test::if_supports_resource "${pods:?}" ; then
### Create a new namespace
# Pre-condition: the other namespace does not exist
-kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \\\"other\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq $id_field \"other\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create namespace other
# Post-condition: namespace 'other' is created.

View File

@@ -70,7 +70,7 @@ run_crd_tests() {
__EOF__
# Post-Condition: assertion object exist
-kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq ${id_field:?} \\\"foos.company.com\\\"}}{{$id_field}}:{{end}}{{end}}" 'foos.company.com:'
+kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq ${id_field:?} \"foos.company.com\"}}{{$id_field}}:{{end}}{{end}}" 'foos.company.com:'
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
{
@@ -104,7 +104,7 @@ __EOF__
__EOF__
# Post-Condition: assertion object exist
-kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \\\"foos.company.com\\\" \\\"bars.company.com\\\"}}{{$id_field}}:{{end}}{{end}}" 'bars.company.com:foos.company.com:'
+kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \"foos.company.com\" \"bars.company.com\"}}{{$id_field}}:{{end}}{{end}}" 'bars.company.com:foos.company.com:'
# This test ensures that the name printer is able to output a resource
# in the proper "kind.group/resource_name" format, and that the
@@ -143,7 +143,7 @@ __EOF__
__EOF__
# Post-Condition: assertion crd with non-matching kind and resource exists
-kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \\\"foos.company.com\\\" \\\"bars.company.com\\\" \\\"resources.mygroup.example.com\\\"}}{{$id_field}}:{{end}}{{end}}" 'bars.company.com:foos.company.com:resources.mygroup.example.com:'
+kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \"foos.company.com\" \"bars.company.com\" \"resources.mygroup.example.com\"}}{{$id_field}}:{{end}}{{end}}" 'bars.company.com:foos.company.com:resources.mygroup.example.com:'
# This test ensures that we can create complex validation without client-side validation complaining
kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
@@ -185,7 +185,7 @@ __EOF__
__EOF__
# Post-Condition: assertion crd with non-matching kind and resource exists
-kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \\\"foos.company.com\\\" \\\"bars.company.com\\\" \\\"resources.mygroup.example.com\\\" \\\"validfoos.company.com\\\"}}{{$id_field}}:{{end}}{{end}}" 'bars.company.com:foos.company.com:resources.mygroup.example.com:validfoos.company.com:'
+kube::test::get_object_assert customresourcedefinitions "{{range.items}}{{if eq $id_field \"foos.company.com\" \"bars.company.com\" \"resources.mygroup.example.com\" \"validfoos.company.com\"}}{{$id_field}}:{{end}}{{end}}" 'bars.company.com:foos.company.com:resources.mygroup.example.com:validfoos.company.com:'
run_non_native_resource_tests

View File

@@ -30,10 +30,10 @@ run_kubectl_diff_tests() {
output_message=$(! kubectl diff -f hack/testdata/pod.yaml)
kube::test::if_has_string "${output_message}" 'test-pod'
# Ensure diff only dry-runs and doesn't persist change
-kube::test::get_object_assert 'pod' "{{range.items}}{{ if eq ${id_field:?} \\\"test-pod\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'pod' "{{range.items}}{{ if eq ${id_field:?} \"test-pod\" }}found{{end}}{{end}}:" ':'
kubectl apply -f hack/testdata/pod.yaml
-kube::test::get_object_assert 'pod' "{{range.items}}{{ if eq ${id_field:?} \\\"test-pod\\\" }}found{{end}}{{end}}:" 'found:'
+kube::test::get_object_assert 'pod' "{{range.items}}{{ if eq ${id_field:?} \"test-pod\" }}found{{end}}{{end}}:" 'found:'
initialResourceVersion=$(kubectl get "${kube_flags[@]:?}" -f hack/testdata/pod.yaml -o go-template='{{ .metadata.resourceVersion }}')
# Make sure that diffing the resource right after returns nothing (0 exit code).
@@ -73,11 +73,11 @@ run_kubectl_diff_tests() {
output_message=$(! kubectl diff --server-side -f hack/testdata/pod.yaml)
kube::test::if_has_string "${output_message}" 'test-pod'
# Ensure diff --server-side only dry-runs and doesn't persist change
-kube::test::get_object_assert 'pod' "{{range.items}}{{ if eq ${id_field:?} \\\"test-pod\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'pod' "{{range.items}}{{ if eq ${id_field:?} \"test-pod\" }}found{{end}}{{end}}:" ':'
# Server-side apply the Pod
kubectl apply --server-side -f hack/testdata/pod.yaml
-kube::test::get_object_assert 'pod' "{{range.items}}{{ if eq ${id_field:?} \\\"test-pod\\\" }}found{{end}}{{end}}:" 'found:'
+kube::test::get_object_assert 'pod' "{{range.items}}{{ if eq ${id_field:?} \"test-pod\" }}found{{end}}{{end}}:" 'found:'
# Make sure that --server-side diffing the resource right after returns nothing (0 exit code).
kubectl diff --server-side -f hack/testdata/pod.yaml

View File

@@ -82,7 +82,7 @@ run_resource_aliasing_tests() {
kubectl create -f test/e2e/testing-manifests/statefulset/cassandra/controller.yaml "${kube_flags[@]}"
kubectl create -f test/e2e/testing-manifests/statefulset/cassandra/service.yaml "${kube_flags[@]}"
-object="all -l'app=cassandra'"
+object="all -l app=cassandra"
request="{{range.items}}{{range .metadata.labels}}{{.}}:{{end}}{{end}}"
# all 4 cassandra's might not be in the request immediately...
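Same root cause as the template escaping: ${object} is now word-split by the shell instead of re-parsed by eval, so quotes embedded in the string would reach kubectl as literal characters. Roughly:

    object="all -l app=cassandra"
    # shellcheck disable=SC2086   # word-splitting of ${object} is intentional
    kubectl get ${object}         # runs: kubectl get all -l app=cassandra
    # with the old value "all -l'app=cassandra'", the selector would arrive as
    # -l'app=cassandra', single quotes included, and match nothing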

View File

@@ -28,7 +28,7 @@ run_kubectl_events_tests() {
### Create a new namespace
# Pre-condition: the test-events namespace does not exist
-kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq ${id_field:?} \\\"test-events\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'namespaces' "{{range.items}}{{ if eq ${id_field:?} \"test-events\" }}found{{end}}{{end}}:" ':'
# Command
kubectl create namespace test-events
# Post-condition: namespace 'test-events' is created.
@@ -39,7 +39,7 @@ run_kubectl_events_tests() {
kube::test::if_has_not_string "${output_message}" "Warning" "InvalidSchedule" "Cronjob/pi"
# Pre-condition: cronjob does not exist in test-events namespace
-kube::test::get_object_assert 'cronjob --namespace=test-events' "{{range.items}}{{ if eq $id_field \\\"pi\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'cronjob --namespace=test-events' "{{range.items}}{{ if eq $id_field \"pi\" }}found{{end}}{{end}}:" ':'
### Create a cronjob in a specific namespace
kubectl create cronjob pi --schedule="59 23 31 2 *" --namespace=test-events "--image=$IMAGE_PERL" -- perl -Mbignum=bpi -wle 'print bpi(20)' "${kube_flags[@]:?}"
# Post-Condition: assertion object exists

View File

@@ -134,9 +134,9 @@ run_kubectl_get_tests() {
### Test kubectl get chunk size does not result in a --watch error when resource list is served in multiple chunks
# Pre-condition: ConfigMap one two tree does not exist
-kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \\\"one\\\" }}found{{end}}{{end}}:" ':'
-kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \\\"two\\\" }}found{{end}}{{end}}:" ':'
-kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \\\"three\\\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \"one\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \"two\" }}found{{end}}{{end}}:" ':'
+kube::test::get_object_assert 'configmaps' "{{range.items}}{{ if eq $id_field \"three\" }}found{{end}}{{end}}:" ':'
# Post-condition: Create three configmaps and ensure that we can --watch them with a --chunk-size of 1
kubectl create cm one "${kube_flags[@]}"
@@ -405,7 +405,7 @@ run_kubectl_all_namespace_tests() {
kube::log::status "Testing kubectl --all-namespace"
# Pre-condition: the "default" namespace exists
-kube::test::get_object_assert namespaces "{{range.items}}{{if eq $id_field \\\"default\\\"}}{{$id_field}}:{{end}}{{end}}" 'default:'
+kube::test::get_object_assert namespaces "{{range.items}}{{if eq $id_field \"default\"}}{{$id_field}}:{{end}}{{end}}" 'default:'
### Create POD
# Pre-condition: no POD exists

View File

@@ -88,30 +88,30 @@ run_cluster_management_tests() {
# taint/untaint
# Pre-condition: node doesn't have dedicated=foo:PreferNoSchedule taint
-kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "" # expect no output
+kube::test::get_object_assert "nodes 127.0.0.1" "{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}" "" # expect no output
# Dry-run
kubectl taint node 127.0.0.1 --dry-run=client dedicated=foo:PreferNoSchedule
kubectl taint node 127.0.0.1 --dry-run=server dedicated=foo:PreferNoSchedule
-kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "" # expect no output
+kube::test::get_object_assert "nodes 127.0.0.1" "{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}" "" # expect no output
# taint can add a taint (<key>=<value>:<effect>)
kubectl taint node 127.0.0.1 dedicated=foo:PreferNoSchedule
-kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "dedicated=foo:PreferNoSchedule"
+kube::test::get_object_assert "nodes 127.0.0.1" "{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}" "dedicated=foo:PreferNoSchedule"
# taint can remove a taint
kubectl taint node 127.0.0.1 dedicated-
# taint can add a taint (<key>:<effect>)
kubectl taint node 127.0.0.1 dedicated:PreferNoSchedule
-kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "dedicated=<no value>:PreferNoSchedule"
+kube::test::get_object_assert "nodes 127.0.0.1" "{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}" "dedicated=<no value>:PreferNoSchedule"
# Node has field manager for kubectl taint
output_message=$(kubectl get node 127.0.0.1 --show-managed-fields -o=jsonpath='{.metadata.managedFields[*].manager}' "${kube_flags[@]:?}" 2>&1)
kube::test::if_has_string "${output_message}" 'kubectl-taint'
# Dry-run remove a taint
kubectl taint node 127.0.0.1 --dry-run=client dedicated-
kubectl taint node 127.0.0.1 --dry-run=server dedicated-
-kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "dedicated=<no value>:PreferNoSchedule"
+kube::test::get_object_assert "nodes 127.0.0.1" "{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}" "dedicated=<no value>:PreferNoSchedule"
# taint can remove a taint
kubectl taint node 127.0.0.1 dedicated-
# Post-condition: node doesn't have dedicated=foo:PreferNoSchedule taint
-kube::test::get_object_assert "nodes 127.0.0.1" '{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}' "" # expect no output
+kube::test::get_object_assert "nodes 127.0.0.1" "{{range .spec.taints}}{{if eq .key \"dedicated\"}}{{.key}}={{.value}}:{{.effect}}{{end}}{{end}}" "" # expect no output
### kubectl cordon update with --dry-run does not mark node unschedulable
# Pre-condition: node is schedulable
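These taint assertions flip from single to double quotes because \" inside single quotes is two literal characters: the old eval pass consumed the backslash, and with eval gone the shell itself must do it, which only double quotes allow:

    printf '%s\n' '{{if eq .key \"dedicated\"}}'   # single quotes: backslash kept verbatim
    printf '%s\n' "{{if eq .key \"dedicated\"}}"   # double quotes: prints {{if eq .key "dedicated"}}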

View File

@@ -284,8 +284,8 @@ if [[ "${WHAT}" == "all-conformance" ]]; then
shift
conformance_images=("busybox" "agnhost" "jessie-dnsutils" "kitten" "nautilus" "nonewprivs" "resource-consumer" "sample-apiserver")
for image in "${conformance_images[@]}"; do
-eval "${TASK}" "${image}" "$@"
+"${TASK}" "${image}" "$@"
done
else
-eval "${TASK}" "$@"
+"${TASK}" "$@"
fi
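Since the task name and its arguments are already separate words, calling them directly preserves each argument exactly; eval would join and re-parse everything, which shellcheck 0.8.0 flags as SC2294 (eval negates the benefit of arrays). A small demonstration with a hypothetical task function:

    build_image() { printf 'building <%s>\n' "$@"; }   # hypothetical task
    TASK=build_image
    "${TASK}" "name with spaces"        # one argument, passed verbatim
    eval "${TASK}" "name with spaces"   # re-parsed: three separate arguments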