From 65a66b9d7f08897b92dc6f9789f4f2fe4fc85e97 Mon Sep 17 00:00:00 2001
From: Odin Ugedal
Date: Wed, 23 Oct 2019 19:55:44 +0200
Subject: [PATCH 1/5] Bump shellcheck to v0.7.0

Changelog: https://github.com/koalaman/shellcheck/blob/master/CHANGELOG.md

We have some new errors due to the version bump.

- SC2034: VARIABLE_XYZ appears unused. Verify use (or export if used
  externally).
  - Applies to all scripts we source in other scripts
- SC2039: In POSIX sh, set option pipefail is undefined.
  - Applies to files using it with "sh" instead of "bash" in the shebang
- SC2054: Use spaces, not commas, to separate array elements.
  - Fixing; should make no difference in the code
- SC2128: Expanding an array without an index only gives the first element.
  - Fixing; should make no difference in the code
- SC2251: This ! is not on a condition and skips errexit. Use `&& exit 1`
  instead, or make sure $? is checked.
  - Not 100% sure if we can swap to `&& exit 1`. Applies to a lot of test code.

All changes should be straightforward to fix, but will be done in a
separate PR.
---
 hack/.shellcheck_failures | 13 +++++++++++++
 hack/verify-shellcheck.sh |  6 +++---
 2 files changed, 16 insertions(+), 3 deletions(-)

diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures
index 82638a92176..783ee0f7eb1 100644
--- a/hack/.shellcheck_failures
+++ b/hack/.shellcheck_failures
@@ -1,3 +1,4 @@
+./build/common.sh
 ./build/lib/release.sh
 ./cluster/common.sh
 ./cluster/gce/config-default.sh
@@ -6,7 +7,19 @@
 ./cluster/gce/gci/configure.sh
 ./cluster/gce/gci/health-monitor.sh
 ./cluster/gce/gci/master-helper.sh
+./cluster/gce/gci/mounter/stage-upload.sh
 ./cluster/gce/upgrade.sh
 ./cluster/gce/util.sh
 ./cluster/log-dump/log-dump.sh
 ./cluster/pre-existing/util.sh
+./hack/lib/golang.sh
+./hack/lib/test.sh
+./test/cmd/apply.sh
+./test/cmd/apps.sh
+./test/cmd/core.sh
+./test/cmd/crd.sh
+./test/cmd/create.sh
+./test/cmd/generic-resources.sh
+./test/cmd/save-config.sh
+./test/images/pets/redis-installer/on-start.sh
+./test/images/pets/zookeeper-installer/on-start.sh
diff --git a/hack/verify-shellcheck.sh b/hack/verify-shellcheck.sh
index 2852c66df2b..9a87f4ca3c1 100755
--- a/hack/verify-shellcheck.sh
+++ b/hack/verify-shellcheck.sh
@@ -24,9 +24,9 @@ source "${KUBE_ROOT}/hack/lib/util.sh"
 
 # required version for this script, if not installed on the host we will
 # use the official docker image instead. keep this in sync with SHELLCHECK_IMAGE
-SHELLCHECK_VERSION="0.6.0"
-# upstream shellcheck latest stable image as of January 10th, 2019
-SHELLCHECK_IMAGE="koalaman/shellcheck-alpine:v0.6.0@sha256:7d4d712a2686da99d37580b4e2f45eb658b74e4b01caf67c1099adc294b96b52"
+SHELLCHECK_VERSION="0.7.0"
+# upstream shellcheck latest stable image as of October 23rd, 2019
+SHELLCHECK_IMAGE="koalaman/shellcheck-alpine:v0.7.0@sha256:24bbf52aae6eaa27accc9f61de32d30a1498555e6ef452966d0702ff06f38ecb"
 
 # fixed name for the shellcheck docker container so we can reliably clean it up
 SHELLCHECK_CONTAINER="k8s-shellcheck"
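
For context, a minimal sketch (hypothetical code, not from the tree) that trips the first three bash-applicable checks listed above; SC2039 cannot be shown in the same file, since it only fires when a script whose shebang is "sh" uses a bash-only feature such as `set -o pipefail`, and SC2251 is illustrated in the message of patch 4 below.

#!/usr/bin/env bash

# SC2034: assigned but never referenced in this file; shellcheck cannot
# see the scripts that source this file and read the variable.
SOURCED_ELSEWHERE="value"

# SC2054: the comma is part of the first element, not a separator, so
# shellcheck suspects a typo for a space.
targets=(kube-apiserver,"registry/image" kube-proxy)

# SC2128: expanding an array without an index yields only element 0.
echo "${targets}"      # prints only kube-apiserver,registry/image
echo "${targets[@]}"   # prints both elements
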
From cce1f32ea5b030f5f31b8ae19f1fffe1671b9a38 Mon Sep 17 00:00:00 2001
From: Odin Ugedal
Date: Wed, 23 Oct 2019 20:29:55 +0200
Subject: [PATCH 2/5] Fix shellcheck failures SC2034

---
 build/common.sh                         | 10 ++++++----
 cluster/gce/gci/mounter/stage-upload.sh |  2 +-
 hack/.shellcheck_failures               |  4 ----
 hack/lib/golang.sh                      |  2 ++
 hack/lib/test.sh                        |  2 ++
 5 files changed, 11 insertions(+), 9 deletions(-)

diff --git a/build/common.sh b/build/common.sh
index 9c16c571ac9..a68e0f26d47 100755
--- a/build/common.sh
+++ b/build/common.sh
@@ -14,6 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# shellcheck disable=SC2034 # Variables sourced in other scripts.
+
 # Common utilities, variables and checks for all build scripts.
 set -o errexit
 set -o nounset
@@ -97,10 +99,10 @@ kube::build::get_docker_wrapped_binaries() {
   ### If you change any of these lists, please also update DOCKERIZED_BINARIES
   ### in build/BUILD. And kube::golang::server_image_targets
   local targets=(
-    kube-apiserver,"${KUBE_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}"
-    kube-controller-manager,"${KUBE_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}"
-    kube-scheduler,"${KUBE_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}"
-    kube-proxy,"${KUBE_BASE_IMAGE_REGISTRY}/debian-iptables-${arch}:${debian_iptables_version}"
+    "kube-apiserver,${KUBE_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}"
+    "kube-controller-manager,${KUBE_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}"
+    "kube-scheduler,${KUBE_BASE_IMAGE_REGISTRY}/debian-base-${arch}:${debian_base_version}"
+    "kube-proxy,${KUBE_BASE_IMAGE_REGISTRY}/debian-iptables-${arch}:${debian_iptables_version}"
   )
 
   echo "${targets[@]}"
diff --git a/cluster/gce/gci/mounter/stage-upload.sh b/cluster/gce/gci/mounter/stage-upload.sh
index ae1657e283a..37af6a2bf0a 100755
--- a/cluster/gce/gci/mounter/stage-upload.sh
+++ b/cluster/gce/gci/mounter/stage-upload.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/usr/bin/env bash
 
 # Copyright 2016 The Kubernetes Authors.
 #
diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures
index 783ee0f7eb1..1f0086d1628 100644
--- a/hack/.shellcheck_failures
+++ b/hack/.shellcheck_failures
@@ -1,4 +1,3 @@
-./build/common.sh
 ./build/lib/release.sh
 ./cluster/common.sh
 ./cluster/gce/config-default.sh
@@ -7,13 +6,10 @@
 ./cluster/gce/gci/configure.sh
 ./cluster/gce/gci/health-monitor.sh
 ./cluster/gce/gci/master-helper.sh
-./cluster/gce/gci/mounter/stage-upload.sh
 ./cluster/gce/upgrade.sh
 ./cluster/gce/util.sh
 ./cluster/log-dump/log-dump.sh
 ./cluster/pre-existing/util.sh
-./hack/lib/golang.sh
-./hack/lib/test.sh
 ./test/cmd/apply.sh
 ./test/cmd/apps.sh
 ./test/cmd/core.sh
diff --git a/hack/lib/golang.sh b/hack/lib/golang.sh
index d39f2df3e23..e9a5af98547 100755
--- a/hack/lib/golang.sh
+++ b/hack/lib/golang.sh
@@ -14,6 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# shellcheck disable=SC2034 # Variables sourced in other scripts.
+
 # The golang package that we are building.
 readonly KUBE_GO_PACKAGE=k8s.io/kubernetes
 readonly KUBE_GOPATH="${KUBE_OUTPUT}/go"
diff --git a/hack/lib/test.sh b/hack/lib/test.sh
index b04a82905b9..23a7563ff43 100644
--- a/hack/lib/test.sh
+++ b/hack/lib/test.sh
@@ -14,6 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# shellcheck disable=SC2034 # Variables sourced in other scripts.
+
 # A set of helpers for tests
 readonly reset=$(tput sgr0)
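
Two details of this patch are worth noting. First, as the diffs above rely on, a `# shellcheck disable=...` directive placed before the first command of a file (here, right after the license header) applies to the entire file. Second, the requoting of the array elements in build/common.sh is behavior-neutral; a quick check, with hypothetical image names:

#!/usr/bin/env bash
# Both spellings produce the same two-element array; only the quote
# placement differs, which is what satisfies SC2054.
a=(kube-proxy,"registry/debian-iptables:v1" other)
b=("kube-proxy,registry/debian-iptables:v1" other)
[[ "${a[0]}" == "${b[0]}" && "${#a[@]}" -eq "${#b[@]}" ]] && echo "identical arrays"
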
From d467b8ea142756e052c2c28ed0c6d6227a493ce2 Mon Sep 17 00:00:00 2001
From: Odin Ugedal
Date: Wed, 23 Oct 2019 21:09:47 +0200
Subject: [PATCH 3/5] Fix shellcheck failures SC2128

---
 hack/.shellcheck_failures                        | 9 ---------
 test/images/pets/redis-installer/on-start.sh     | 8 ++++----
 test/images/pets/zookeeper-installer/on-start.sh | 2 +-
 3 files changed, 5 insertions(+), 14 deletions(-)

diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures
index 1f0086d1628..82638a92176 100644
--- a/hack/.shellcheck_failures
+++ b/hack/.shellcheck_failures
@@ -10,12 +10,3 @@
 ./cluster/gce/util.sh
 ./cluster/log-dump/log-dump.sh
 ./cluster/pre-existing/util.sh
-./test/cmd/apply.sh
-./test/cmd/apps.sh
-./test/cmd/core.sh
-./test/cmd/crd.sh
-./test/cmd/create.sh
-./test/cmd/generic-resources.sh
-./test/cmd/save-config.sh
-./test/images/pets/redis-installer/on-start.sh
-./test/images/pets/zookeeper-installer/on-start.sh
diff --git a/test/images/pets/redis-installer/on-start.sh b/test/images/pets/redis-installer/on-start.sh
index cc8f8084abb..77a50625eb0 100755
--- a/test/images/pets/redis-installer/on-start.sh
+++ b/test/images/pets/redis-installer/on-start.sh
@@ -25,11 +25,11 @@ PORT=6379
 # Ping everyone but ourself to see if there's a master. Only one pet starts at
 # a time, so if we don't see a master we can assume the position is ours.
 while read -ra LINE; do
-  if [[ "${LINE}" == *"${HOSTNAME}"* ]]; then
-    sed -i -e "s|^bind.*$|bind ${LINE}|" ${CFG}
-  elif [ "$(/opt/redis/redis-cli -h "${LINE}" info | grep role | sed 's,\r$,,')" = "role:master" ]; then
+  if [[ "${LINE[0]}" == *"${HOSTNAME}"* ]]; then
+    sed -i -e "s|^bind.*$|bind ${LINE[0]}|" ${CFG}
+  elif [ "$(/opt/redis/redis-cli -h "${LINE[0]}" info | grep role | sed 's,\r$,,')" = "role:master" ]; then
     # TODO: More restrictive regex?
-    sed -i -e "s|^# slaveof.*$|slaveof ${LINE} ${PORT}|" ${CFG}
+    sed -i -e "s|^# slaveof.*$|slaveof ${LINE[0]} ${PORT}|" ${CFG}
   fi
 done
 
diff --git a/test/images/pets/zookeeper-installer/on-start.sh b/test/images/pets/zookeeper-installer/on-start.sh
index 92eadee009b..b478a5a1f5e 100755
--- a/test/images/pets/zookeeper-installer/on-start.sh
+++ b/test/images/pets/zookeeper-installer/on-start.sh
@@ -36,7 +36,7 @@ MY_ID_FILE=/tmp/zookeeper/myid
 HOSTNAME=$(hostname)
 
 while read -ra LINE; do
-  PEERS=("${PEERS[@]}" "$LINE")
+  PEERS=("${PEERS[@]}" "${LINE[0]}")
 done
 
 # Don't add the first member as an observer
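
The SC2128 rewrites above are behavior-preserving: `read -ra` fills an array, and a bare "$LINE" was already expanding to "${LINE[0]}". A self-contained sketch (hypothetical peer names) makes that concrete:

#!/usr/bin/env bash
while read -ra LINE; do
  # A bare "$LINE" is shorthand for "${LINE[0]}", so spelling the index
  # out changes nothing for single-field input such as a hostname list.
  echo "first field: ${LINE[0]}  all fields: ${LINE[*]}"
done <<'EOF'
peer-0.example.local
peer-1.example.local extra-field
EOF
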
From 6a73c0899aed737217a448e187c6c7fbbedc102d Mon Sep 17 00:00:00 2001
From: Odin Ugedal
Date: Wed, 23 Oct 2019 21:26:02 +0200
Subject: [PATCH 4/5] Fix shellcheck failures SC2251

https://github.com/koalaman/shellcheck/wiki/SC2251

This may be masking some test failures. We have a bunch of test code
like this:

set -o errexit
[...]
! kubectl get pod wrong-pod
[...]

This test will succeed no matter what the result (return code) of
kubectl is.
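
In a minimal sketch, with `true` standing in for the kubectl call:

set -o errexit

! true          # status 1, but errexit ignores commands negated with !
echo "reached"  # still runs, so the failed assertion goes unnoticed

! true || exit 1     # appending || exit 1 turns it back into an assertion
echo "never printed"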
---
 test/cmd/apply.sh             |  8 ++++----
 test/cmd/apps.sh              | 12 ++++++------
 test/cmd/core.sh              | 30 +++++++++++++++---------------
 test/cmd/crd.sh               |  2 +-
 test/cmd/create.sh            |  2 +-
 test/cmd/generic-resources.sh | 10 +++++-----
 test/cmd/save-config.sh       |  6 +++---
 7 files changed, 35 insertions(+), 35 deletions(-)

diff --git a/test/cmd/apply.sh b/test/cmd/apply.sh
index 85b036567d5..2a4d0bbd3cb 100755
--- a/test/cmd/apply.sh
+++ b/test/cmd/apply.sh
@@ -55,9 +55,9 @@ run_kubectl_apply_tests() {
   [[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]:?}")" ]]
   # Post-Condition: deployment "test-deployment-retainkeys" has updated fields
   grep -q Recreate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
-  ! grep -q RollingUpdate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
+  ! grep -q RollingUpdate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" || exit 1
   grep -q hostPath <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
-  ! grep -q emptyDir <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
+  ! grep -q emptyDir <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")" || exit 1
 
   # Clean up
   kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]:?}"
@@ -119,7 +119,7 @@ __EOF__
   # Dry-run create the CR
   kubectl "${kube_flags[@]:?}" apply --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
   # Make sure that the CR doesn't exist
-  ! kubectl "${kube_flags[@]:?}" get resource/myobj
+  ! kubectl "${kube_flags[@]:?}" get resource/myobj || exit 1
 
   # clean-up
   kubectl "${kube_flags[@]:?}" delete customresourcedefinition resources.mygroup.example.com
@@ -317,7 +317,7 @@ __EOF__
   # Dry-run create the CR
   kubectl "${kube_flags[@]:?}" apply --server-side --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
   # Make sure that the CR doesn't exist
-  ! kubectl "${kube_flags[@]:?}" get resource/myobj
+  ! kubectl "${kube_flags[@]:?}" get resource/myobj || exit 1
 
   # clean-up
   kubectl "${kube_flags[@]:?}" delete customresourcedefinition resources.mygroup.example.com
diff --git a/test/cmd/apps.sh b/test/cmd/apps.sh
index 2000453449a..0c3b8262232 100755
--- a/test/cmd/apps.sh
+++ b/test/cmd/apps.sh
@@ -296,7 +296,7 @@ run_deployment_tests() {
   sleep 1
   kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
   # Rollback to revision 1000000 - should be no-op
-  ! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]:?}"
+  ! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]:?}" || exit 1
   kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
   # Rollback to last revision
   kubectl rollout undo deployment nginx "${kube_flags[@]:?}"
@@ -305,9 +305,9 @@ run_deployment_tests() {
   # Pause the deployment
   kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]:?}"
   # A paused deployment cannot be rolled back
-  ! kubectl rollout undo deployment nginx "${kube_flags[@]:?}"
+  ! kubectl rollout undo deployment nginx "${kube_flags[@]:?}" || exit 1
   # A paused deployment cannot be restarted
-  ! kubectl rollout restart deployment nginx "${kube_flags[@]:?}"
+  ! kubectl rollout restart deployment nginx "${kube_flags[@]:?}" || exit 1
   # Resume the deployment
   kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]:?}"
   # The resumed deployment can now be rolled back
@@ -316,7 +316,7 @@ run_deployment_tests() {
   newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')"
   kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3"
   # Check that trying to watch the status of a superseded revision returns an error
-  ! kubectl rollout status deployment/nginx --revision=3
+  ! kubectl rollout status deployment/nginx --revision=3 || exit 1
   # Restarting the deployment creates a new replicaset
   kubectl rollout restart deployment/nginx
   sleep 1
@@ -342,7 +342,7 @@ run_deployment_tests() {
   kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:"
   kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:"
   # Set non-existing container should fail
-  ! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]:?}"
+  ! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]:?}" || exit 1
   # Set image of deployments without specifying name
   kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}"
   kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:"
@@ -656,7 +656,7 @@ run_rs_tests() {
     kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80'
     kubectl delete hpa frontend "${kube_flags[@]:?}"
     # autoscale without specifying --max should fail
-    ! kubectl autoscale rs frontend "${kube_flags[@]:?}"
+    ! kubectl autoscale rs frontend "${kube_flags[@]:?}" || exit 1
     # Clean up
     kubectl delete rs frontend "${kube_flags[@]:?}"
   fi
diff --git a/test/cmd/core.sh b/test/cmd/core.sh
index 6235a531486..64ff8a8ab81 100755
--- a/test/cmd/core.sh
+++ b/test/cmd/core.sh
@@ -185,7 +185,7 @@ run_pod_tests() {
   # Pre-condition: valid-pod POD exists
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
   # Command
-  ! kubectl delete pods "${kube_flags[@]}"
+  ! kubectl delete pods "${kube_flags[@]}" || exit 1
   # Post-condition: valid-pod POD exists
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
 
@@ -193,7 +193,7 @@ run_pod_tests() {
   # Pre-condition: valid-pod POD exists
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
   # Command
-  ! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}"
+  ! kubectl delete --all pods -l'name in (valid-pod)' "${kube_flags[@]}" || exit 1
   # Post-condition: valid-pod POD exists
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" 'valid-pod:'
 
@@ -255,7 +255,7 @@ run_pod_tests() {
   kube::test::get_object_assert 'pdb/test-pdb-4 --namespace=test-kubectl-describe-pod' "{{$pdb_max_unavailable}}" '50%'
 
   ### Fail creating a pod disruption budget if both maxUnavailable and minAvailable specified
-  ! kubectl create pdb test-pdb --selector=app=rails --min-available=2 --max-unavailable=3 --namespace=test-kubectl-describe-pod
+  ! kubectl create pdb test-pdb --selector=app=rails --min-available=2 --max-unavailable=3 --namespace=test-kubectl-describe-pod || exit 1
 
   # Create a pod that consumes secret, configmap, and downward API keys as envs
   kube::test::get_object_assert 'pods --namespace=test-kubectl-describe-pod' "{{range.items}}{{$id_field}}:{{end}}" ''
@@ -567,7 +567,7 @@ __EOF__
   grep -q 'Edit cancelled' <<< "$(EDITOR="cat" kubectl edit pod/valid-pod 2>&1)"
   grep -q 'name: valid-pod' <<< "$(EDITOR="cat" kubectl edit pod/valid-pod)"
   grep -q CRLF <<< "$(EDITOR="cat" kubectl edit --windows-line-endings pod/valid-pod | file - )"
-  ! grep -q CRLF <<< "$(EDITOR="cat" kubectl edit --windows-line-endings=false pod/valid-pod | file - )"
+  ! grep -q CRLF <<< "$(EDITOR="cat" kubectl edit --windows-line-endings=false pod/valid-pod | file - )" || exit 1
   grep -q 'kind: List' <<< "$(EDITOR="cat" kubectl edit ns)"
 
   ### Label POD YAML file locally without effecting the live pod.
@@ -584,7 +584,7 @@ __EOF__
   # Pre-condition: name is valid-pod
   kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
   # Command
-  ! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}"
+  ! kubectl label pods valid-pod name=valid-pod-super-sayan "${kube_flags[@]}" || exit 1
   # Post-condition: name is still valid-pod
   kube::test::get_object_assert 'pod valid-pod' "{{${labels_field}.name}}" 'valid-pod'
 
@@ -631,7 +631,7 @@ __EOF__
   # Post-Condition: pod "test-pod" is created
   kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
   # Post-Condition: pod "test-pod" doesn't have configuration annotation
-  ! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" )"
+  ! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" )" || exit 1
   ## 2. kubectl replace doesn't set the annotation
   kubectl get pods test-pod -o yaml "${kube_flags[@]}" | ${SED} 's/test-pod-label/test-pod-replaced/g' > "${KUBE_TEMP}"/test-pod-replace.yaml
   # Command: replace the pod "test-pod"
@@ -639,7 +639,7 @@ __EOF__
   # Post-Condition: pod "test-pod" is replaced
   kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-replaced'
   # Post-Condition: pod "test-pod" doesn't have configuration annotation
-  ! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
+  ! grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")" || exit 1
   ## 3. kubectl apply does set the annotation
   # Command: apply the pod "test-pod"
   kubectl apply -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
@@ -657,7 +657,7 @@ __EOF__
   # Post-Condition: pod "test-pod" has configuration annotation, and it's updated (different from the annotation when it's applied)
   grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" )"
   kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration > "${KUBE_TEMP}"/annotation-configuration-replaced
-  ! [[ $(diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced > /dev/null) ]]
+  ! [[ $(diff -q "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced > /dev/null) ]] || exit 1
   # Clean up
   rm "${KUBE_TEMP}"/test-pod-replace.yaml "${KUBE_TEMP}"/annotation-configuration "${KUBE_TEMP}"/annotation-configuration-replaced
   kubectl delete pods test-pod "${kube_flags[@]}"
@@ -883,7 +883,7 @@ run_service_tests() {
 
   # Set selector of a local file without talking to the server
   kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --local -o yaml "${kube_flags[@]}"
-  ! kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --dry-run -o yaml "${kube_flags[@]}"
+  ! kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --dry-run -o yaml "${kube_flags[@]}" || exit 1
   # Set command to change the selector.
   kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan
   # prove role=padawan
@@ -894,7 +894,7 @@ run_service_tests() {
   kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
   # Show dry-run works on running selector
   kubectl set selector services redis-master role=padawan --dry-run -o yaml "${kube_flags[@]}"
-  ! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}"
+  ! kubectl set selector services redis-master role=padawan --local -o yaml "${kube_flags[@]}" || exit 1
   kube::test::get_object_assert 'services redis-master' "{{range$service_selector_field}}{{.}}:{{end}}" "redis:master:backend:"
 
   ### Dump current redis-master service
@@ -1086,7 +1086,7 @@ run_rc_tests() {
   # Pre-condition: 2 replicas
   kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
   # Command
-  ! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}"
+  ! kubectl scale --current-replicas=3 --replicas=2 replicationcontrollers frontend "${kube_flags[@]}" || exit 1
   # Post-condition: nothing changed
   kube::test::get_object_assert 'rc frontend' "{{$rc_replicas_field}}" '2'
 
@@ -1250,7 +1250,7 @@ run_rc_tests() {
   kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80'
   kubectl delete hpa frontend "${kube_flags[@]}"
   # autoscale without specifying --max should fail
-  ! kubectl autoscale rc frontend "${kube_flags[@]}"
+  ! kubectl autoscale rc frontend "${kube_flags[@]}" || exit 1
 
   # Clean up
   kubectl delete rc frontend "${kube_flags[@]}"
@@ -1259,7 +1259,7 @@ run_rc_tests() {
   kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
   # Set resources of a local file without talking to the server
   kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --local -o yaml "${kube_flags[@]}"
-  ! kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run -o yaml "${kube_flags[@]}"
+  ! kubectl set resources -f hack/testdata/deployment-multicontainer-resources.yaml -c=perl --limits=cpu=300m --requests=cpu=300m --dry-run -o yaml "${kube_flags[@]}" || exit 1
   # Create a deployment
   kubectl create -f hack/testdata/deployment-multicontainer-resources.yaml "${kube_flags[@]}"
   kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment-resources:'
@@ -1270,7 +1270,7 @@ run_rc_tests() {
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "100m:"
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "100m:"
   # Set a non-existing container should fail
-  ! kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m
+  ! kubectl set resources deployment nginx-deployment-resources -c=redis --limits=cpu=100m || exit 1
   # Set the limit of a specific container in deployment
   kubectl set resources deployment nginx-deployment-resources -c=nginx --limits=cpu=200m "${kube_flags[@]}"
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
@@ -1282,7 +1282,7 @@ run_rc_tests() {
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
   # Show dry-run works on running deployments
   kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --dry-run -o yaml "${kube_flags[@]}"
-  ! kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --local -o yaml "${kube_flags[@]}"
+  ! kubectl set resources deployment nginx-deployment-resources -c=perl --limits=cpu=400m --requests=cpu=400m --local -o yaml "${kube_flags[@]}" || exit 1
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 0).resources.limits.cpu}}:{{end}}" "200m:"
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.limits.cpu}}:{{end}}" "300m:"
   kube::test::get_object_assert deployment "{{range.items}}{{(index .spec.template.spec.containers 1).resources.requests.cpu}}:{{end}}" "300m:"
diff --git a/test/cmd/crd.sh b/test/cmd/crd.sh
index 9c54c63a993..9a3ffe5d48c 100755
--- a/test/cmd/crd.sh
+++ b/test/cmd/crd.sh
@@ -243,7 +243,7 @@ run_non_native_resource_tests() {
   kubectl "${kube_flags[@]}" get foos/test -o json > "${CRD_RESOURCE_FILE}"
   # cannot apply strategic patch locally
   CRD_PATCH_ERROR_FILE="${KUBE_TEMP}/crd-foos-test-error"
-  ! kubectl "${kube_flags[@]}" patch --local -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' 2> "${CRD_PATCH_ERROR_FILE}"
+  ! kubectl "${kube_flags[@]}" patch --local -f "${CRD_RESOURCE_FILE}" -p '{"patched":"value3"}' 2> "${CRD_PATCH_ERROR_FILE}" || exit 1
   if grep -q "try --type merge" "${CRD_PATCH_ERROR_FILE}"; then
     kube::log::status "\"kubectl patch --local\" returns error as expected for CustomResource: $(cat "${CRD_PATCH_ERROR_FILE}")"
   else
diff --git a/test/cmd/create.sh b/test/cmd/create.sh
index 8ffa71b21bd..b027f140f69 100755
--- a/test/cmd/create.sh
+++ b/test/cmd/create.sh
@@ -50,7 +50,7 @@ run_kubectl_create_error_tests() {
   kube::log::status "Testing kubectl create with error"
 
   # Passing no arguments to create is an error
-  ! kubectl create
+  ! kubectl create || exit 1
 
   ## kubectl create should not panic on empty string lists in a template
   ERROR_FILE="${KUBE_TEMP}/validation-error"
diff --git a/test/cmd/generic-resources.sh b/test/cmd/generic-resources.sh
index 2904833eba7..c3bb11fa5e0 100755
--- a/test/cmd/generic-resources.sh
+++ b/test/cmd/generic-resources.sh
@@ -327,7 +327,7 @@ run_recursive_resources_tests() {
   # Pre-condition: no replication controller exists
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command
-  ! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
+  ! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" || exit 1
   # Post-condition: frontend replication controller is created
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
 
@@ -387,7 +387,7 @@ run_recursive_resources_tests() {
   kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command
   # Create deployments (revision 1) recursively from directory of YAML files
-  ! kubectl create -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}"
+  ! kubectl create -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}" || exit 1
   kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx0-deployment:nginx1-deployment:'
   kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_NGINX}:${IMAGE_NGINX}:"
   ## Rollback the deployments to revision 1 recursively
@@ -418,7 +418,7 @@ run_recursive_resources_tests() {
   # Clean up
   unset PRESERVE_ERR_FILE
   rm "${ERROR_FILE}"
-  ! kubectl delete -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}" --grace-period=0 --force
+  ! kubectl delete -f hack/testdata/recursive/deployment --recursive "${kube_flags[@]}" --grace-period=0 --force || exit 1
   sleep 1
@@ -426,7 +426,7 @@ run_recursive_resources_tests() {
   ### Rollout on multiple replication controllers recursively - these tests ensure that rollouts cannot be performed on resources that don't support it
   # Pre-condition: no replication controller exists
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
   # Command
   # Create replication controllers recursively from directory of YAML files
-  ! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}"
+  ! kubectl create -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" || exit 1
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" 'busybox0:busybox1:'
   # Command
   ## Attempt to rollback the replication controllers to revision 1 recursively
@@ -447,7 +447,7 @@ run_recursive_resources_tests() {
   kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox0" resuming is not supported'
   kube::test::if_has_string "${output_message}" 'replicationcontrollers "busybox1" resuming is not supported'
   # Clean up
-  ! kubectl delete -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" --grace-period=0 --force
+  ! kubectl delete -f hack/testdata/recursive/rc --recursive "${kube_flags[@]}" --grace-period=0 --force || exit 1
   sleep 1
 
   set +o nounset
diff --git a/test/cmd/save-config.sh b/test/cmd/save-config.sh
index 25e524a6082..5a35637643e 100755
--- a/test/cmd/save-config.sh
+++ b/test/cmd/save-config.sh
@@ -40,7 +40,7 @@ run_save_config_tests() {
   create_and_use_new_namespace
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
   kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
-  ! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
+  ! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")" || exit 1
   # Command: edit the pod "test-pod"
   temp_editor="${KUBE_TEMP}/tmp-editor.sh"
   echo -e "#!/usr/bin/env bash\n${SED} -i \"s/test-pod-label/test-pod-label-edited/g\" \$@" > "${temp_editor}"
@@ -55,7 +55,7 @@ run_save_config_tests() {
   create_and_use_new_namespace
   kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
   kubectl create -f hack/testdata/pod.yaml "${kube_flags[@]}"
-  ! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")"
+  ! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}")" || exit 1
   # Command: replace the pod "test-pod"
   kubectl replace -f hack/testdata/pod.yaml --save-config "${kube_flags[@]}"
   # Post-Condition: pod "test-pod" has configuration annotation
@@ -82,7 +82,7 @@ run_save_config_tests() {
   # Pre-Condition: no RC exists, then create the rc "frontend", which shouldn't have configuration annotation
   kube::test::get_object_assert rc "{{range.items}}{{$id_field}}:{{end}}" ''
   kubectl create -f hack/testdata/frontend-controller.yaml "${kube_flags[@]}"
-  ! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get rc frontend -o yaml "${kube_flags[@]}")"
+  ! grep -q "kubectl.kubernetes.io/last-applied-configuration" <<< "$(kubectl get rc frontend -o yaml "${kube_flags[@]}")" || exit 1
   # Command: autoscale rc "frontend"
   kubectl autoscale -f hack/testdata/frontend-controller.yaml --save-config "${kube_flags[@]}" --max=2
   # Post-Condition: hpa "frontend" has configuration annotation

From 4e44407fa9fd2b72ed6b55569182664df7d65180 Mon Sep 17 00:00:00 2001
From: Odin Ugedal
Date: Wed, 23 Oct 2019 23:23:22 +0200
Subject: [PATCH 5/5] Fix failing service test

---
 test/cmd/core.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/cmd/core.sh b/test/cmd/core.sh
index 64ff8a8ab81..f7d69849216 100755
--- a/test/cmd/core.sh
+++ b/test/cmd/core.sh
@@ -883,7 +883,7 @@ run_service_tests() {
 
   # Set selector of a local file without talking to the server
   kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --local -o yaml "${kube_flags[@]}"
-  ! kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --dry-run -o yaml "${kube_flags[@]}" || exit 1
+  kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan --dry-run -o yaml "${kube_flags[@]}"
   # Set command to change the selector.
   kubectl set selector -f test/e2e/testing-manifests/guestbook/redis-master-service.yaml role=padawan
   # prove role=padawan