Fix shellcheck SC2251 failures

https://github.com/koalaman/shellcheck/wiki/SC2251

This may be masking some test failures.

We have a bunch of test code like this:

set -o errexit
[...]
! kubectl get pod wrong-pod
[...]

This test will succeed no matter what the result (return code) of kubectl is: with "set -o errexit" in effect, a command negated with "!" never causes the script to exit. Appending "|| exit 1" makes the assertion abort the test when the command it negates unexpectedly succeeds.
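
A minimal standalone sketch of the failure mode and of the fix, with "true" standing in for a kubectl call that unexpectedly succeeds:

set -o errexit
! true               # wrong assertion: its status is 1, but errexit skips commands negated with "!"
echo "still running" # this line is reached; the failed check was silently swallowed
! true || exit 1     # the pattern applied in this commit: the script now aborts with status 1

A correct assertion is unaffected by the fix: "! false || exit 1" has status 0, so "exit 1" never runs.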
Odin Ugedal 2019-10-23 21:26:02 +02:00
parent d467b8ea14
commit 6a73c0899a
7 changed files with 35 additions and 35 deletions

View File

@@ -55,9 +55,9 @@ run_kubectl_apply_tests() {
[[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]:?}")" ]]
# Post-Condition: deployment "test-deployment-retainkeys" has updated fields
grep -q Recreate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
-! grep -q RollingUpdate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
+! grep -q RollingUpdate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" || exit 1
grep -q hostPath <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
-! grep -q emptyDir <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
+! grep -q emptyDir <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" || exit 1
# Clean up
kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]:?}"
@@ -119,7 +119,7 @@ __EOF__
# Dry-run create the CR
kubectl "${kube_flags[@]:?}" apply --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
# Make sure that the CR doesn't exist
! kubectl "${kube_flags[@]:?}" get resource/myobj
! kubectl "${kube_flags[@]:?}" get resource/myobj || exit 1
# clean-up
kubectl "${kube_flags[@]:?}" delete customresourcedefinition resources.mygroup.example.com
@@ -317,7 +317,7 @@ __EOF__
# Dry-run create the CR
kubectl "${kube_flags[@]:?}" apply --server-side --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
# Make sure that the CR doesn't exist
! kubectl "${kube_flags[@]:?}" get resource/myobj
! kubectl "${kube_flags[@]:?}" get resource/myobj || exit 1
# clean-up
kubectl "${kube_flags[@]:?}" delete customresourcedefinition resources.mygroup.example.com

View File

@@ -296,7 +296,7 @@ run_deployment_tests() {
sleep 1
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to revision 1000000 - should be no-op
-! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]:?}"
+! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]:?}" || exit 1
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
# Rollback to last revision
kubectl rollout undo deployment nginx "${kube_flags[@]:?}"
@@ -305,9 +305,9 @@ run_deployment_tests() {
# Pause the deployment
kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]:?}"
# A paused deployment cannot be rolled back
-! kubectl rollout undo deployment nginx "${kube_flags[@]:?}"
+! kubectl rollout undo deployment nginx "${kube_flags[@]:?}" || exit 1
# A paused deployment cannot be restarted
-! kubectl rollout restart deployment nginx "${kube_flags[@]:?}"
+! kubectl rollout restart deployment nginx "${kube_flags[@]:?}" || exit 1
# Resume the deployment
kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]:?}"
# The resumed deployment can now be rolled back
@@ -316,7 +316,7 @@ run_deployment_tests() {
newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
# Check that trying to watch the status of a superseded revision returns an error
-! kubectl rollout status deployment/nginx --revision=3
+! kubectl rollout status deployment/nginx --revision=3 || exit 1
# Restarting the deployment creates a new replicaset
kubectl rollout restart deployment/nginx
sleep 1
@@ -342,7 +342,7 @@ run_deployment_tests() {
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
# Set non-existing container should fail
-! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]:?}"
+! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]:?}" || exit 1
# Set image of deployments without specifying name
kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}"
kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
@@ -656,7 +656,7 @@ run_rs_tests() {
kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]:?}"
# autoscale without specifying --max should fail
-! kubectl autoscale rs frontend "${kube_flags[@]:?}"
+! kubectl autoscale rs frontend "${kube_flags[@]:?}" || exit 1
# Clean up
kubectl delete rs frontend "${kube_flags[@]:?}"
fi

View File

@@ -185,7 +185,7 @@ run_pod_tests() {
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
-! kubectl delete pods "${kube_flags[@]}"
+! kubectl delete pods "${kube_flags[@]}" || exit 1
# Post-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
@@ -193,7 +193,7 @@ run_pod_tests() {
# Pre-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
# Command
-! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}"
+! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}" || exit 1
# Post-condition: valid-pod POD exists
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
@@ -255,7 +255,7 @@ run_pod_tests() {
kube::test::get_object_assert 'pdb/test-pdb-4 --namespace=test-kubectl-describe-pod' "{{$pdb_max_unavailable}}" '50%'
### Fail creating a pod disruption budget if both maxUnavailable and minAvailable specified
-! kubectl create pdb test-pdb --selector=app=rails --min-available=2 --max-unavailable=3 --namespace=test-kubectl-describe-pod
+! kubectl create pdb test-pdb --selector=app=rails --min-available=2 --max-unavailable=3 --namespace=test-kubectl-describe-pod || exit 1
# Create a pod that consumes secret, configmap, and downward API keys as envs
kube::test::get_object_assert 'pods --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
@@ -567,7 +567,7 @@ __EOF__
grep -q 'Edit cancelled' <<< "$(EDITOR="cat" kubectl edit pod/valid-pod 2>&1)"
grep -q 'name: valid-pod' <<< "$(EDITOR="cat" kubectl edit pod/valid-pod)"
grep -q CRLF <<< "$(EDITOR="cat" kubectl edit --windows-line-endings pod/valid-pod | file - )"
-! grep -q CRLF <<< "$(EDITOR="cat" kubectl edit --windows-line-endings=false pod/valid-pod | file - )"
+! grep -q CRLF <<< "$(EDITOR="cat" kubectl edit --windows-line-endings=false pod/valid-pod | file - )" || exit 1
grep -q 'kind: List' <<< "$(EDITOR="cat" kubectl edit ns)"
### Label POD YAML file locally without effecting the live pod.
@@ -584,7 +584,7 @@ __EOF__
# Pre-condition: name is valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
# Command
-! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}"
+! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}" || exit 1
# Post-condition: name is still valid-pod
kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
@@ -631,7 +631,7 @@ __EOF__
# Post-Condition: pod "test-pod" is created
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
-! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" )"
+! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" )" || exit 1
## 2. kubectl replace doesn't set the annotation
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | ${SED} 's/test-pod-label/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
# Command: replace the pod "test-pod"
@@ -639,7 +639,7 @@ __EOF__
# Post-Condition: pod "test-pod" is replaced
kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
# Post-Condition: pod "test-pod" doesn't have configuration annotation
-! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
+! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")" || exit 1
## 3. kubectl apply does set the annotation
# Command: apply the pod "test-pod"
kubectl apply -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
@@ -657,7 +657,7 @@ __EOF__
# Post-Condition: pod "test-pod" has configuration annotation, and it's updated (different from the annotation when it's applied)
grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" )"
kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration-replaced
-! [[ $(diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced > /dev/null) ]]
+! [[ $(diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced > /dev/null) ]] || exit 1
# Clean up
rm "${KUBE_TEMP}"/test-pod-replace.yaml "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced
kubectl delete pods test-pod "${kube_flags[@]}"
@@ -883,7 +883,7 @@ run_service_tests() {
# Set selector of a local file without talking to the server
kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --local -o yaml "${kube_flags[@]}"
-! kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --dry-run -o yaml "${kube_flags[@]}"
+! kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --dry-run -o yaml "${kube_flags[@]}" || exit 1
# Set command to change the selector.
kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan
# prove role=padawan
@@ -894,7 +894,7 @@ run_service_tests() {
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
# Show dry-run works on running selector
kubectl set selector services redis-master role=padawan --dry-run -o yaml "${kube_flags[@]}"
-! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}"
+! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}" || exit 1
kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
### Dump current redis-master service
@@ -1086,7 +1086,7 @@ run_rc_tests() {
# Pre-condition: 2 replicas
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
# Command
-! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
+! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}" || exit 1
# Post-condition: nothing changed
kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
@@ -1250,7 +1250,7 @@ run_rc_tests() {
kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
kubectl delete hpa frontend "${kube_flags[@]}"
# autoscale without specifying --max should fail
-! kubectl autoscale rc frontend "${kube_flags[@]}"
+! kubectl autoscale rc frontend "${kube_flags[@]}" || exit 1
# Clean up
kubectl delete rc frontend "${kube_flags[@]}"
@@ -1259,7 +1259,7 @@ run_rc_tests() {
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Set resources of a local file without talking to the server
kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --local -o yaml "${kube_flags[@]}"
-! kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run -o yaml "${kube_flags[@]}"
+! kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run -o yaml "${kube_flags[@]}" || exit 1
# Create a deployment
kubectl create -f hack/testdata/deployment-multicontainer-resources.yaml "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment-resources:'
@@ -1270,7 +1270,7 @@ run_rc_tests() {
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "100m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
# Set a non-existing container should fail
-! kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m
+! kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m || exit 1
# Set the limit of a specific container in deployment
kubectl set resources deployment nginx-deployment-resources -c=nginx --limits=cpu=200m "${kube_flags[@]}"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
@@ -1282,7 +1282,7 @@ run_rc_tests() {
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
# Show dry-run works on running deployments
kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --dry-run -o yaml "${kube_flags[@]}"
-! kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --local -o yaml "${kube_flags[@]}"
+! kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --local -o yaml "${kube_flags[@]}" || exit 1
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"

View File

@@ -243,7 +243,7 @@ run_non_native_resource_tests() {
kubectl "${kube_flags[@]}" get foos/test -o json > "${CRD_RESOURCE_FILE}"
# cannot apply strategic patch locally
CRD_PATCH_ERROR_FILE="${KUBE_TEMP}/crd-foos-test-error"
! kubectl "${kube_flags[@]}" patch --local -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' 2> "${CRD_PATCH_ERROR_FILE}"
! kubectl "${kube_flags[@]}" patch --local -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' 2> "${CRD_PATCH_ERROR_FILE}" || exit 1
if grep -q "try --type merge" "${CRD_PATCH_ERROR_FILE}"; then
kube::log::status "\"kubectl patch --local\" returns error as expected for CustomResource: $(cat "${CRD_PATCH_ERROR_FILE}")"
else

View File

@@ -50,7 +50,7 @@ run_kubectl_create_error_tests() {
kube::log::status "Testing kubectl create with error"
# Passing no arguments to create is an error
-! kubectl create
+! kubectl create || exit 1
## kubectl create should not panic on empty string lists in a template
ERROR_FILE="${KUBE_TEMP}/validation-error"

View File

@@ -327,7 +327,7 @@ run_recursive_resources_tests() {
# Pre-condition: no replication controller exists
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
-! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
+! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" || exit 1
# Post-condition: frontend replication controller is created
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
@@ -387,7 +387,7 @@ run_recursive_resources_tests() {
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create deployments (revision 1) recursively from directory of YAML files
-! kubectl create -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
+! kubectl create -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}" || exit 1
kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx0-deployment:nginx1-deployment:'
kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
## Rollback the deployments to revision 1 recursively
@@ -418,7 +418,7 @@ run_recursive_resources_tests() {
# Clean up
unset PRESERVE_ERR_FILE
rm "${ERROR_FILE}"
-! kubectl delete -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}" --grace-period=0 --force
+! kubectl delete -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}" --grace-period=0 --force || exit 1
sleep 1
### Rollout on multiple replication controllers recursively - these tests ensure that rollouts cannot be performed on resources that don't support it
@@ -426,7 +426,7 @@ run_recursive_resources_tests() {
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
# Command
# Create replication controllers recursively from directory of YAML files
-! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
+! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" || exit 1
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
# Command
## Attempt to rollback the replication controllers to revision 1 recursively
@@ -447,7 +447,7 @@ run_recursive_resources_tests() {
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" resuming is not supported'
kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox1" resuming is not supported'
# Clean up
-! kubectl delete -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" --grace-period=0 --force
+! kubectl delete -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" --grace-period=0 --force || exit 1
sleep 1
set +o nounset

View File

@@ -40,7 +40,7 @@ run_save_config_tests() {
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")" || exit 1
# Command: edit the pod "test-pod"
temp_editor="${KUBE_TEMP}/tmp-editor.sh"
echo -e "#!/usr/bin/env bash\n${SED} -i \"s/test-pod-label/test-pod-label-edited/g\" \$@" > "${temp_editor}"
@@ -55,7 +55,7 @@ run_save_config_tests() {
create_and_use_new_namespace
kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")" || exit 1
# Command: replace the pod "test-pod"
kubectl replace -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
# Post-Condition: pod "test-pod" has configuration annotation
@@ -82,7 +82,7 @@ run_save_config_tests() {
# Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get rc frontend -o yaml "${kube_flags[@]}")"
! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get rc frontend -o yaml "${kube_flags[@]}")" || exit 1
# Command: autoscale rc "frontend"
kubectl autoscale -f hack/testdata/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
# Post-Condition: hpa "frontend" has configuration annotation