diff --git a/hack/.shellcheck_failures b/hack/.shellcheck_failures
index 1e9e6d22723..f804dc20101 100644
--- a/hack/.shellcheck_failures
+++ b/hack/.shellcheck_failures
@@ -30,9 +30,6 @@
 ./hack/test-integration.sh
 ./hack/update-vendor.sh
 ./hack/verify-test-featuregates.sh
-./test/cmd/apply.sh
-./test/cmd/apps.sh
-./test/cmd/authorization.sh
 ./test/cmd/batch.sh
 ./test/cmd/certificate.sh
 ./test/cmd/core.sh
diff --git a/test/cmd/apply.sh b/test/cmd/apply.sh
index 9158118098f..8b6acec44a1 100755
--- a/test/cmd/apply.sh
+++ b/test/cmd/apply.sh
@@ -27,75 +27,75 @@ run_kubectl_apply_tests() {
   kube::log::status "Testing kubectl apply"
   ## kubectl apply should create the resource that doesn't exist yet
   # Pre-Condition: no POD exists
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command: apply a pod "test-pod" (doesn't exist) should create this pod
-  kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
+  kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
   # Post-Condition: pod "test-pod" is created
-  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
+  kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
   # Post-Condition: pod "test-pod" has configuration annotation
-  [[ "$(kubectl get pods test-pod -o yaml "${kube_flags[@]}" | grep kubectl.kubernetes.io/last-applied-configuration)" ]]
+  grep -q kubectl.kubernetes.io/last-applied-configuration <<< "$(kubectl get pods test-pod -o yaml "${kube_flags[@]:?}")"
   # Clean up
-  kubectl delete pods test-pod "${kube_flags[@]}"
+  kubectl delete pods test-pod "${kube_flags[@]:?}"
   ## kubectl apply should be able to clear defaulted fields.
   # Pre-Condition: no deployment exists
-  kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command: apply a deployment "test-deployment-retainkeys" (doesn't exist) should create this deployment
-  kubectl apply -f hack/testdata/retainKeys/deployment/deployment-before.yaml "${kube_flags[@]}"
+  kubectl apply -f hack/testdata/retainKeys/deployment/deployment-before.yaml "${kube_flags[@]:?}"
   # Post-Condition: deployment "test-deployment-retainkeys" created
-  kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}{{end}}" 'test-deployment-retainkeys'
+  kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}{{end}}" 'test-deployment-retainkeys'
   # Post-Condition: deployment "test-deployment-retainkeys" has defaulted fields
-  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
-  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxSurge)" ]]
-  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep maxUnavailable)" ]]
-  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
+  grep -q RollingUpdate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
+  grep -q maxSurge <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
+  grep -q maxUnavailable <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
+  grep -q emptyDir <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
   # Command: apply a deployment "test-deployment-retainkeys" should clear
   # defaulted fields and successfully update the deployment
-  [[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]}")" ]]
+  [[ "$(kubectl apply -f hack/testdata/retainKeys/deployment/deployment-after.yaml "${kube_flags[@]:?}")" ]]
   # Post-Condition: deployment "test-deployment-retainkeys" has updated fields
-  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep Recreate)" ]]
-  ! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep RollingUpdate)" ]]
-  [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep hostPath)" ]]
-  ! [[ "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]}" | grep emptyDir)" ]]
+  grep -q Recreate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
+  ! grep -q RollingUpdate <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
+  grep -q hostPath <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
+  ! grep -q emptyDir <<< "$(kubectl get deployments test-deployment-retainkeys -o yaml "${kube_flags[@]:?}")"
   # Clean up
-  kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]}"
+  kubectl delete deployments test-deployment-retainkeys "${kube_flags[@]:?}"
   ## kubectl apply -f with label selector should only apply matching objects
   # Pre-Condition: no POD exists
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # apply
-  kubectl apply -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]}"
+  kubectl apply -l unique-label=bingbang -f hack/testdata/filter "${kube_flags[@]:?}"
   # check right pod exists
-  kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field}.name}}" 'selector-test-pod'
+  kube::test::get_object_assert 'pods selector-test-pod' "{{${labels_field:?}.name}}" 'selector-test-pod'
   # check wrong pod doesn't exist
-  output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]}")
+  output_message=$(! kubectl get pods selector-test-pod-dont-apply 2>&1 "${kube_flags[@]:?}")
   kube::test::if_has_string "${output_message}" 'pods "selector-test-pod-dont-apply" not found'
   # cleanup
   kubectl delete pods selector-test-pod
   ## kubectl apply --server-dry-run
   # Pre-Condition: no POD exists
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # apply dry-run
-  kubectl apply --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]}"
+  kubectl apply --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
   # No pod exists
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # apply non dry-run creates the pod
-  kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]}"
+  kubectl apply -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
   # apply changes
-  kubectl apply --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
+  kubectl apply --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}"
   # Post-Condition: label still has initial value
-  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
+  kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
   # clean-up
-  kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
+  kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
   ## kubectl apply dry-run on CR
   # Create CRD
-  kubectl "${kube_flags_with_token[@]}" create -f - << __EOF__
+  kubectl "${kube_flags_with_token[@]:?}" create -f - << __EOF__
 {
   "kind": "CustomResourceDefinition",
   "apiVersion": "apiextensions.k8s.io/v1beta1",
@@ -117,31 +117,31 @@ run_kubectl_apply_tests() {
 __EOF__
   # Dry-run create the CR
-  kubectl "${kube_flags[@]}" apply --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]}"
+  kubectl "${kube_flags[@]:?}" apply --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
   # Make sure that the CR doesn't exist
-  ! kubectl "${kube_flags[@]}" get resource/myobj
+  ! kubectl "${kube_flags[@]:?}" get resource/myobj
   # clean-up
-  kubectl "${kube_flags[@]}" delete customresourcedefinition resources.mygroup.example.com
+  kubectl "${kube_flags[@]:?}" delete customresourcedefinition resources.mygroup.example.com
   ## kubectl apply --prune
   # Pre-Condition: no POD exists
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # apply a
-  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
+  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]:?}"
   # check right pod exists
-  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
+  kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
   # check wrong pod doesn't exist
-  output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
+  output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]:?}")
   kube::test::if_has_string "${output_message}" 'pods "b" not found'
   # apply b
-  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
+  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]:?}"
   # check right pod exists
-  kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
+  kube::test::get_object_assert 'pods b' "{{${id_field:?}}}" 'b'
   # check wrong pod doesn't exist
-  output_message=$(! kubectl get pods a 2>&1 "${kube_flags[@]}")
+  output_message=$(! kubectl get pods a 2>&1 "${kube_flags[@]:?}")
   kube::test::if_has_string "${output_message}" 'pods "a" not found'
   # cleanup
@@ -149,79 +149,79 @@ __EOF__
   # same thing without prune for a sanity check
   # Pre-Condition: no POD exists
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # apply a
-  kubectl apply -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
+  kubectl apply -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]:?}"
   # check right pod exists
-  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
+  kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
   # check wrong pod doesn't exist
-  output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]}")
+  output_message=$(! kubectl get pods b 2>&1 "${kube_flags[@]:?}")
   kube::test::if_has_string "${output_message}" 'pods "b" not found'
   # apply b
-  kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]}"
+  kubectl apply -l prune-group=true -f hack/testdata/prune/b.yaml "${kube_flags[@]:?}"
   # check both pods exist
-  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
-  kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
+  kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
+  kube::test::get_object_assert 'pods b' "{{${id_field:?}}}" 'b'
   # check wrong pod doesn't exist
   # cleanup
   kubectl delete pod/a pod/b
   ## kubectl apply --prune requires a --all flag to select everything
-  output_message=$(! kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]}")
+  output_message=$(! kubectl apply --prune -f hack/testdata/prune 2>&1 "${kube_flags[@]:?}")
   kube::test::if_has_string "${output_message}" \
     'all resources selected for prune without explicitly passing --all'
   # should apply everything
   kubectl apply --all --prune -f hack/testdata/prune
-  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
-  kube::test::get_object_assert 'pods b' "{{${id_field}}}" 'b'
+  kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
+  kube::test::get_object_assert 'pods b' "{{${id_field:?}}}" 'b'
   kubectl delete pod/a pod/b
   ## kubectl apply --prune should fallback to delete for non reapable types
-  kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]}"
-  kube::test::get_object_assert 'pvc a-pvc' "{{${id_field}}}" 'a-pvc'
-  kubectl apply --all --prune -f hack/testdata/prune-reap/b.yml 2>&1 "${kube_flags[@]}"
-  kube::test::get_object_assert 'pvc b-pvc' "{{${id_field}}}" 'b-pvc'
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
-  kubectl delete pvc b-pvc 2>&1 "${kube_flags[@]}"
+  kubectl apply --all --prune -f hack/testdata/prune-reap/a.yml 2>&1 "${kube_flags[@]:?}"
+  kube::test::get_object_assert 'pvc a-pvc' "{{${id_field:?}}}" 'a-pvc'
+  kubectl apply --all --prune -f hack/testdata/prune-reap/b.yml 2>&1 "${kube_flags[@]:?}"
+  kube::test::get_object_assert 'pvc b-pvc' "{{${id_field:?}}}" 'b-pvc'
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+  kubectl delete pvc b-pvc 2>&1 "${kube_flags[@]:?}"
   ## kubectl apply --prune --prune-whitelist
   # Pre-Condition: no POD exists
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # apply pod a
-  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]}"
+  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/a.yaml "${kube_flags[@]:?}"
   # check right pod exists
-  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
+  kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
   # apply svc and don't prune pod a by overwriting whitelist
-  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml --prune-whitelist core/v1/Service 2>&1 "${kube_flags[@]}"
-  kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
-  kube::test::get_object_assert 'pods a' "{{${id_field}}}" 'a'
+  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml --prune-whitelist core/v1/Service 2>&1 "${kube_flags[@]:?}"
+  kube::test::get_object_assert 'service prune-svc' "{{${id_field:?}}}" 'prune-svc'
+  kube::test::get_object_assert 'pods a' "{{${id_field:?}}}" 'a'
   # apply svc and prune pod a with default whitelist
-  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml 2>&1 "${kube_flags[@]}"
-  kube::test::get_object_assert 'service prune-svc' "{{${id_field}}}" 'prune-svc'
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kubectl apply --prune -l prune-group=true -f hack/testdata/prune/svc.yaml 2>&1 "${kube_flags[@]:?}"
+  kube::test::get_object_assert 'service prune-svc' "{{${id_field:?}}}" 'prune-svc'
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # cleanup
-  kubectl delete svc prune-svc 2>&1 "${kube_flags[@]}"
+  kubectl delete svc prune-svc 2>&1 "${kube_flags[@]:?}"
   ## kubectl apply -f some.yml --force
   # Pre-condition: no service exists
-  kube::test::get_object_assert services "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert services "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # apply service a
-  kubectl apply -f hack/testdata/service-revision1.yaml "${kube_flags[@]}"
+  kubectl apply -f hack/testdata/service-revision1.yaml "${kube_flags[@]:?}"
   # check right service exists
-  kube::test::get_object_assert 'services a' "{{${id_field}}}" 'a'
+  kube::test::get_object_assert 'services a' "{{${id_field:?}}}" 'a'
   # change immutable field and apply service a
-  output_message=$(! kubectl apply -f hack/testdata/service-revision2.yaml 2>&1 "${kube_flags[@]}")
+  output_message=$(! kubectl apply -f hack/testdata/service-revision2.yaml 2>&1 "${kube_flags[@]:?}")
   kube::test::if_has_string "${output_message}" 'field is immutable'
   # apply --force to recreate resources for immutable fields
-  kubectl apply -f hack/testdata/service-revision2.yaml --force "${kube_flags[@]}"
+  kubectl apply -f hack/testdata/service-revision2.yaml --force "${kube_flags[@]:?}"
   # check immutable field exists
   kube::test::get_object_assert 'services a' "{{.spec.clusterIP}}" '10.0.0.12'
   # cleanup
-  kubectl delete -f hack/testdata/service-revision2.yaml "${kube_flags[@]}"
+  kubectl delete -f hack/testdata/service-revision2.yaml "${kube_flags[@]:?}"
   ## kubectl apply -k somedir
   kubectl apply -k hack/testdata/kustomize
@@ -252,31 +252,31 @@ run_kubectl_apply_tests() {
   kube::log::status "Testing kubectl apply --experimental-server-side"
   ## kubectl apply should create the resource that doesn't exist yet
   # Pre-Condition: no POD exists
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command: apply a pod "test-pod" (doesn't exist) should create this pod
-  kubectl apply --experimental-server-side -f hack/testdata/pod.yaml "${kube_flags[@]}"
+  kubectl apply --experimental-server-side -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
   # Post-Condition: pod "test-pod" is created
-  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
+  kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
   # Clean up
-  kubectl delete pods test-pod "${kube_flags[@]}"
+  kubectl delete pods test-pod "${kube_flags[@]:?}"
   ## kubectl apply --server-dry-run
   # Pre-Condition: no POD exists
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # apply dry-run
-  kubectl apply --experimental-server-side --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]}"
+  kubectl apply --experimental-server-side --server-dry-run -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
   # No pod exists
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # apply non dry-run creates the pod
-  kubectl apply --experimental-server-side -f hack/testdata/pod.yaml "${kube_flags[@]}"
+  kubectl apply --experimental-server-side -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
   # apply changes
-  kubectl apply --experimental-server-side --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]}"
+  kubectl apply --experimental-server-side --server-dry-run -f hack/testdata/pod-apply.yaml "${kube_flags[@]:?}"
   # Post-Condition: label still has initial value
-  kube::test::get_object_assert 'pods test-pod' "{{${labels_field}.name}}" 'test-pod-label'
+  kube::test::get_object_assert 'pods test-pod' "{{${labels_field:?}.name}}" 'test-pod-label'
   # clean-up
-  kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]}"
+  kubectl delete -f hack/testdata/pod.yaml "${kube_flags[@]:?}"
   ## kubectl apply dry-run on CR
   # Create CRD
@@ -302,12 +302,12 @@ run_kubectl_apply_tests() {
 __EOF__
   # Dry-run create the CR
-  kubectl "${kube_flags[@]}" apply --experimental-server-side --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]}"
+  kubectl "${kube_flags[@]:?}" apply --experimental-server-side --server-dry-run -f hack/testdata/CRD/resource.yaml "${kube_flags[@]:?}"
   # Make sure that the CR doesn't exist
-  ! kubectl "${kube_flags[@]}" get resource/myobj
+  ! kubectl "${kube_flags[@]:?}" get resource/myobj
   # clean-up
-  kubectl "${kube_flags[@]}" delete customresourcedefinition resources.mygroup.example.com
+  kubectl "${kube_flags[@]:?}" delete customresourcedefinition resources.mygroup.example.com
   set +o nounset
   set +o errexit
diff --git a/test/cmd/apps.sh b/test/cmd/apps.sh
index 1f947d2e59d..4390a3947ca 100755
--- a/test/cmd/apps.sh
+++ b/test/cmd/apps.sh
@@ -27,24 +27,24 @@ run_daemonset_tests() {
   ### Create a rolling update DaemonSet
   # Pre-condition: no DaemonSet exists
-  kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert daemonsets "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command
-  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
+  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
   # Template Generation should be 1
-  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
-  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
+  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '1'
+  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
   # Template Generation should stay 1
-  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '1'
+  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '1'
   # Test set commands
-  kubectl set image daemonsets/bind "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
-  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '2'
-  kubectl set env daemonsets/bind "${kube_flags[@]}" foo=bar
-  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '3'
-  kubectl set resources daemonsets/bind "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
-  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field}}}" '4'
+  kubectl set image daemonsets/bind "${kube_flags[@]:?}" "*=k8s.gcr.io/pause:test-cmd"
+  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '2'
+  kubectl set env daemonsets/bind "${kube_flags[@]:?}" foo=bar
+  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '3'
+  kubectl set resources daemonsets/bind "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
+  kube::test::get_object_assert 'daemonsets bind' "{{${template_generation_field:?}}}" '4'
   # Clean up
-  kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
+  kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
   set +o nounset
   set +o errexit
@@ -59,42 +59,42 @@ run_daemonset_history_tests() {
   ### Test rolling back a DaemonSet
   # Pre-condition: no DaemonSet or its pods exists
-  kube::test::get_object_assert daemonsets "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert daemonsets "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command
   # Create a DaemonSet (revision 1)
-  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record "${kube_flags[@]}"
-  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*"
+  kubectl apply -f hack/testdata/rollingupdate-daemonset.yaml --record "${kube_flags[@]:?}"
+  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-daemonset.yaml --record.*"
   # Rollback to revision 1 - should be no-op
-  kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
-  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
-  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
+  kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]:?}"
+  kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
+  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "1"
   # Update the DaemonSet (revision 2)
-  kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]}"
-  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
-  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
-  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
-  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"
+  kubectl apply -f hack/testdata/rollingupdate-daemonset-rv2.yaml --record "${kube_flags[@]:?}"
+  kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
+  kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
+  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
+  kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-daemonset-rv2.yaml --record.*"
   # Rollback to revision 1 with dry-run - should be no-op
-  kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]}"
-  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
-  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
-  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
+  kubectl rollout undo daemonset --dry-run=true "${kube_flags[@]:?}"
+  kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
+  kube::test::get_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
+  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
   # Rollback to revision 1
-  kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]}"
-  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
-  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
+  kubectl rollout undo daemonset --to-revision=1 "${kube_flags[@]:?}"
+  kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
+  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "1"
   # Rollback to revision 1000000 - should fail
-  output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]}" 2>&1)
+  output_message=$(! kubectl rollout undo daemonset --to-revision=1000000 "${kube_flags[@]:?}" 2>&1)
   kube::test::if_has_string "${output_message}" "unable to find specified revision"
-  kube::test::get_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_PAUSE_V2}:"
-  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "1"
+  kube::test::get_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:"
+  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "1"
   # Rollback to last revision
-  kubectl rollout undo daemonset "${kube_flags[@]}"
-  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
-  kube::test::wait_object_assert daemonset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
-  kube::test::get_object_assert daemonset "{{range.items}}{{$container_len}}{{end}}" "2"
+  kubectl rollout undo daemonset "${kube_flags[@]:?}"
+  kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2}:"
+  kube::test::wait_object_assert daemonset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DAEMONSET_R2_2}:"
+  kube::test::get_object_assert daemonset "{{range.items}}{{${container_len:?}}}{{end}}" "2"
   # Clean up
-  kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]}"
+  kubectl delete -f hack/testdata/rollingupdate-daemonset.yaml "${kube_flags[@]:?}"
   set +o nounset
   set +o errexit
@@ -108,20 +108,20 @@ run_kubectl_apply_deployments_tests() {
   kube::log::status "Testing kubectl apply deployments"
   ## kubectl apply should propagate user defined null values
   # Pre-Condition: no Deployments, ReplicaSets, Pods exist
-  kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
-  kube::test::get_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+  kube::test::get_object_assert replicasets "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # apply base deployment
-  kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]}"
+  kubectl apply -f hack/testdata/null-propagation/deployment-l1.yaml "${kube_flags[@]:?}"
   # check right deployment exists
-  kube::test::get_object_assert 'deployments my-depl' "{{${id_field}}}" 'my-depl'
+  kube::test::get_object_assert 'deployments my-depl' "{{${id_field:?}}}" 'my-depl'
   # check right labels exists
   kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" 'l1'
   kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" 'l1'
   kube::test::get_object_assert 'deployments my-depl' "{{.metadata.labels.l1}}" 'l1'
   # apply new deployment with new template labels
-  kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]}"
+  kubectl apply -f hack/testdata/null-propagation/deployment-l2.yaml "${kube_flags[@]:?}"
   # check right labels exists
   kube::test::get_object_assert 'deployments my-depl' "{{.spec.template.metadata.labels.l1}}" ''
   kube::test::get_object_assert 'deployments my-depl' "{{.spec.selector.matchLabels.l1}}" ''
@@ -134,24 +134,24 @@ run_kubectl_apply_deployments_tests() {
   # need to explicitly remove replicasets and pods because we changed the deployment selector and orphaned things
   kubectl delete deployments,rs,pods --all --cascade=false --grace-period=0
   # Post-Condition: no Deployments, ReplicaSets, Pods exist
-  kube::test::wait_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
-  kube::test::wait_object_assert replicasets "{{range.items}}{{$id_field}}:{{end}}" ''
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::wait_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+  kube::test::wait_object_assert replicasets "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # kubectl apply deployment --overwrite=true --force=true
   # Pre-Condition: no deployment exists
-  kube::test::get_object_assert deployments "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert deployments "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # apply deployment nginx
-  kubectl apply -f hack/testdata/deployment-label-change1.yaml "${kube_flags[@]}"
+  kubectl apply -f hack/testdata/deployment-label-change1.yaml "${kube_flags[@]:?}"
   # check right deployment exists
-  kube::test::get_object_assert 'deployment nginx' "{{${id_field}}}" 'nginx'
+  kube::test::get_object_assert 'deployment nginx' "{{${id_field:?}}}" 'nginx'
   # apply deployment with new labels and a conflicting resourceVersion
-  output_message=$(! kubectl apply -f hack/testdata/deployment-label-change2.yaml 2>&1 "${kube_flags[@]}")
+  output_message=$(! kubectl apply -f hack/testdata/deployment-label-change2.yaml 2>&1 "${kube_flags[@]:?}")
   kube::test::if_has_string "${output_message}" 'Error from server (Conflict)'
   # apply deployment with --force and --overwrite will succeed
   kubectl apply -f hack/testdata/deployment-label-change2.yaml --overwrite=true --force=true --grace-period=10
   # check the changed deployment
-  output_message=$(kubectl apply view-last-applied deploy/nginx -o json 2>&1 "${kube_flags[@]}" |grep nginx2)
+  output_message=$(kubectl apply view-last-applied deploy/nginx -o json 2>&1 "${kube_flags[@]:?}" |grep nginx2)
   kube::test::if_has_string "${output_message}" '"name": "nginx2"'
   # applying a resource (with --force) that is both conflicting and invalid will
   # cause the server to only return a "Conflict" error when we attempt to patch.
@@ -161,10 +161,10 @@ run_kubectl_apply_deployments_tests() {
   # invalid, we will receive an invalid error when we attempt to create it, after
   # having deleted the old resource. Ensure that when this case is reached, the
   # old resource is restored once again, and the validation error is printed.
-  output_message=$(! kubectl apply -f hack/testdata/deployment-label-change3.yaml --force 2>&1 "${kube_flags[@]}")
+  output_message=$(! kubectl apply -f hack/testdata/deployment-label-change3.yaml --force 2>&1 "${kube_flags[@]:?}")
   kube::test::if_has_string "${output_message}" 'Invalid value'
   # Ensure that the old object has been restored
-  kube::test::get_object_assert 'deployment nginx' "{{${template_labels}}}" 'nginx2'
+  kube::test::get_object_assert 'deployment nginx' "{{${template_labels:?}}}" 'nginx2'
   # cleanup
   kubectl delete deployments --all --grace-period=10
@@ -181,140 +181,140 @@ run_deployment_tests() {
   # Test kubectl create deployment (using default - old generator)
   kubectl create deployment test-nginx-extensions --image=k8s.gcr.io/nginx:test-cmd
   # Post-Condition: Deployment "nginx" is created.
-  kube::test::get_object_assert 'deploy test-nginx-extensions' "{{$container_name_field}}" 'nginx'
+  kube::test::get_object_assert 'deploy test-nginx-extensions' "{{${container_name_field:?}}}" 'nginx'
   # and old generator was used, iow. old defaults are applied
   output_message=$(kubectl get deployment.apps/test-nginx-extensions -o jsonpath='{.spec.revisionHistoryLimit}')
   kube::test::if_has_not_string "${output_message}" '2'
   # Ensure we can interact with deployments through extensions and apps endpoints
-  output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
+  output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
   kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
-  output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
+  output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
   kube::test::if_has_string "${output_message}" 'apps/v1'
   # Clean up
-  kubectl delete deployment test-nginx-extensions "${kube_flags[@]}"
+  kubectl delete deployment test-nginx-extensions "${kube_flags[@]:?}"
   # Test kubectl create deployment
   kubectl create deployment test-nginx-apps --image=k8s.gcr.io/nginx:test-cmd --generator=deployment-basic/apps.v1beta1
   # Post-Condition: Deployment "nginx" is created.
-  kube::test::get_object_assert 'deploy test-nginx-apps' "{{$container_name_field}}" 'nginx'
+  kube::test::get_object_assert 'deploy test-nginx-apps' "{{${container_name_field:?}}}" 'nginx'
   # and new generator was used, iow. new defaults are applied
   output_message=$(kubectl get deployment/test-nginx-apps -o jsonpath='{.spec.revisionHistoryLimit}')
   kube::test::if_has_string "${output_message}" '2'
   # Ensure we can interact with deployments through extensions and apps endpoints
-  output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
+  output_message=$(kubectl get deployment.extensions -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
   kube::test::if_has_string "${output_message}" 'extensions/v1beta1'
-  output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]}")
+  output_message=$(kubectl get deployment.apps -o=jsonpath='{.items[0].apiVersion}' 2>&1 "${kube_flags[@]:?}")
   kube::test::if_has_string "${output_message}" 'apps/v1'
   # Describe command (resource only) should print detailed information
   kube::test::describe_resource_assert rs "Name:" "Pod Template:" "Labels:" "Selector:" "Controlled By" "Replicas:" "Pods Status:" "Volumes:"
   # Describe command (resource only) should print detailed information
   kube::test::describe_resource_assert pods "Name:" "Image:" "Node:" "Labels:" "Status:" "Controlled By"
   # Clean up
-  kubectl delete deployment test-nginx-apps "${kube_flags[@]}"
+  kubectl delete deployment test-nginx-apps "${kube_flags[@]:?}"
   ### Test kubectl create deployment with image and command
   # Pre-Condition: No deployment exists.
-  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command
   kubectl create deployment nginx-with-command --image=k8s.gcr.io/nginx:test-cmd -- /bin/sleep infinity
   # Post-Condition: Deployment "nginx" is created.
-  kube::test::get_object_assert 'deploy nginx-with-command' "{{$container_name_field}}" 'nginx'
+  kube::test::get_object_assert 'deploy nginx-with-command' "{{${container_name_field:?}}}" 'nginx'
   # Clean up
-  kubectl delete deployment nginx-with-command "${kube_flags[@]}"
+  kubectl delete deployment nginx-with-command "${kube_flags[@]:?}"
   ### Test kubectl create deployment should not fail validation
   # Pre-Condition: No deployment exists.
-  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command
-  kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml "${kube_flags[@]}"
+  kubectl create -f hack/testdata/deployment-with-UnixUserID.yaml "${kube_flags[@]:?}"
   # Post-Condition: Deployment "deployment-with-unixuserid" is created.
-  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'deployment-with-unixuserid:'
+  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'deployment-with-unixuserid:'
   # Clean up
-  kubectl delete deployment deployment-with-unixuserid "${kube_flags[@]}"
+  kubectl delete deployment deployment-with-unixuserid "${kube_flags[@]:?}"
   ### Test cascading deletion
   ## Test that rs is deleted when deployment is deleted.
   # Pre-condition: no deployment exists
-  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Create deployment
-  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}"
+  kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]:?}"
   # Wait for rs to come up.
-  kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '3'
+  kube::test::wait_object_assert rs "{{range.items}}{{${rs_replicas_field:?}}}{{end}}" '3'
   # Deleting the deployment should delete the rs.
-  kubectl delete deployment nginx-deployment "${kube_flags[@]}"
-  kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
+  kubectl delete deployment nginx-deployment "${kube_flags[@]:?}"
+  kube::test::wait_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   ## Test that rs is not deleted when deployment is deleted with cascade set to false.
   # Pre-condition: no deployment and rs exist
-  kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Create deployment
   kubectl create deployment nginx-deployment --image=k8s.gcr.io/nginx:test-cmd
   # Wait for rs to come up.
-  kube::test::wait_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
+  kube::test::wait_object_assert rs "{{range.items}}{{${rs_replicas_field:?}}}{{end}}" '1'
   # Delete the deployment with cascade set to false.
-  kubectl delete deployment nginx-deployment "${kube_flags[@]}" --cascade=false
+  kubectl delete deployment nginx-deployment "${kube_flags[@]:?}" --cascade=false
   # Wait for the deployment to be deleted and then verify that rs is not
   # deleted.
-  kube::test::wait_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" ''
-  kube::test::get_object_assert rs "{{range.items}}{{$rs_replicas_field}}{{end}}" '1'
+  kube::test::wait_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" ''
+  kube::test::get_object_assert rs "{{range.items}}{{${rs_replicas_field:?}}}{{end}}" '1'
   # Cleanup
   # Find the name of the rs to be deleted.
- output_message=$(kubectl get rs "${kube_flags[@]}" -o template --template={{range.items}}{{$id_field}}{{end}}) - kubectl delete rs ${output_message} "${kube_flags[@]}" + output_message=$(kubectl get rs "${kube_flags[@]:?}" -o template --template="{{range.items}}{{${id_field:?}}}{{end}}") + kubectl delete rs "${output_message}" "${kube_flags[@]:?}" ### Auto scale deployment # Pre-condition: no deployment exists - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command - kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:' + kubectl create -f test/fixtures/doc-yaml/user-guide/deployment.yaml "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:' # autoscale 2~3 pods, no CPU utilization specified - kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]}" --min=2 --max=3 - kube::test::get_object_assert 'hpa nginx-deployment' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80' + kubectl-with-retry autoscale deployment nginx-deployment "${kube_flags[@]:?}" --min=2 --max=3 + kube::test::get_object_assert 'hpa nginx-deployment' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80' # Clean up # Note that we should delete hpa first, otherwise it may fight with the deployment reaper. - kubectl delete hpa nginx-deployment "${kube_flags[@]}" - kubectl delete deployment.apps nginx-deployment "${kube_flags[@]}" + kubectl delete hpa nginx-deployment "${kube_flags[@]:?}" + kubectl delete deployment.apps nginx-deployment "${kube_flags[@]:?}" ### Rollback a deployment # Pre-condition: no deployment exists - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command # Create a deployment (revision 1) - kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx:' - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kubectl create -f hack/testdata/deployment-revision1.yaml "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx:' + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" # Rollback to revision 1 - should be no-op - kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" # Update the deployment (revision 2) - kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]}" - kube::test::get_object_assert deployment.apps "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + kubectl apply -f hack/testdata/deployment-revision2.yaml "${kube_flags[@]:?}" + kube::test::get_object_assert deployment.apps "{{range.items}}{{${image_field0:?}}}:{{end}}" 
"${IMAGE_DEPLOYMENT_R2}:" # Rollback to revision 1 with dry-run - should be no-op - kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]}" | grep "test-cmd" - kube::test::get_object_assert deployment.apps "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + kubectl rollout undo deployment nginx --dry-run=true "${kube_flags[@]:?}" | grep "test-cmd" + kube::test::get_object_assert deployment.apps "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" # Rollback to revision 1 - kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]}" + kubectl rollout undo deployment nginx --to-revision=1 "${kube_flags[@]:?}" sleep 1 - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" # Rollback to revision 1000000 - should be no-op - ! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + ! kubectl rollout undo deployment nginx --to-revision=1000000 "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" # Rollback to last revision - kubectl rollout undo deployment nginx "${kube_flags[@]}" + kubectl rollout undo deployment nginx "${kube_flags[@]:?}" sleep 1 - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" # Pause the deployment - kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]}" + kubectl-with-retry rollout pause deployment nginx "${kube_flags[@]:?}" # A paused deployment cannot be rolled back - ! kubectl rollout undo deployment nginx "${kube_flags[@]}" + ! kubectl rollout undo deployment nginx "${kube_flags[@]:?}" # A paused deployment cannot be restarted - ! kubectl rollout restart deployment nginx "${kube_flags[@]}" + ! 
kubectl rollout restart deployment nginx "${kube_flags[@]:?}" # Resume the deployment - kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]}" + kubectl-with-retry rollout resume deployment nginx "${kube_flags[@]:?}" # The resumed deployment can now be rolled back - kubectl rollout undo deployment nginx "${kube_flags[@]}" + kubectl rollout undo deployment nginx "${kube_flags[@]:?}" # Check that the new replica set has all old revisions stored in an annotation newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')" kubectl get rs "${newrs}" -o yaml | grep "deployment.kubernetes.io/revision-history: 1,3" @@ -326,84 +326,84 @@ run_deployment_tests() { newrs="$(kubectl describe deployment nginx | grep NewReplicaSet | awk '{print $2}')" rs="$(kubectl get rs "${newrs}" -o yaml)" kube::test::if_has_string "${rs}" "deployment.kubernetes.io/revision: \"6\"" - cat hack/testdata/deployment-revision1.yaml | ${SED} "s/name: nginx$/name: nginx2/" | kubectl create -f - "${kube_flags[@]}" + ${SED} "s/name: nginx$/name: nginx2/" hack/testdata/deployment-revision1.yaml | kubectl create -f - "${kube_flags[@]:?}" # Deletion of both deployments should not be blocked - kubectl delete deployment nginx2 "${kube_flags[@]}" + kubectl delete deployment nginx2 "${kube_flags[@]:?}" # Clean up - kubectl delete deployment nginx "${kube_flags[@]}" + kubectl delete deployment nginx "${kube_flags[@]:?}" ### Set image of a deployment # Pre-condition: no deployment exists - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Create a deployment - kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:' - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:" + kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:' + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:" # Set the deployment's image - kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:" + kubectl set image deployment nginx-deployment nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:" # Set non-existing container should fail - ! kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]}" + ! 
kubectl set image deployment nginx-deployment redis=redis "${kube_flags[@]:?}" # Set image of deployments without specifying name - kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:" + kubectl set image deployments --all nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:" # Set image of a deployment specified by file - kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:" + kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R2}" "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:" # Set image of a local file without talking to the server - kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" --local -o yaml - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PERL}:" + kubectl set image -f hack/testdata/deployment-multicontainer.yaml nginx="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}" --local -o yaml + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R2}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PERL}:" # Set image of all containers of the deployment - kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kubectl set image deployment nginx-deployment "*=${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" # Set image of all containners of the deployment again when image not change - kubectl set image deployment nginx-deployment "*"="${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" - kube::test::get_object_assert deployment "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + kubectl set image deployment nginx-deployment "*=${IMAGE_DEPLOYMENT_R1}" "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" + 
kube::test::get_object_assert deployment "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_DEPLOYMENT_R1}:" # Clean up - kubectl delete deployment nginx-deployment "${kube_flags[@]}" + kubectl delete deployment nginx-deployment "${kube_flags[@]:?}" ### Set env of a deployment # Pre-condition: no deployment exists - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Create a deployment - kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]}" - kubectl create -f hack/testdata/configmap.yaml "${kube_flags[@]}" - kubectl create -f hack/testdata/secret.yaml "${kube_flags[@]}" - kube::test::get_object_assert deployment "{{range.items}}{{$id_field}}:{{end}}" 'nginx-deployment:' + kubectl create -f hack/testdata/deployment-multicontainer.yaml "${kube_flags[@]:?}" + kubectl create -f hack/testdata/configmap.yaml "${kube_flags[@]:?}" + kubectl create -f hack/testdata/secret.yaml "${kube_flags[@]:?}" + kube::test::get_object_assert deployment "{{range.items}}{{${id_field:?}}}:{{end}}" 'nginx-deployment:' #configmap is special here due to controller will create kube-root-ca.crt for each namespace automatically - kube::test::get_object_assert 'configmaps/test-set-env-config' "{{$id_field}}" 'test-set-env-config' - kube::test::get_object_assert secret "{{range.items}}{{$id_field}}:{{end}}" 'test-set-env-secret:' + kube::test::get_object_assert 'configmaps/test-set-env-config' "{{${id_field:?}}}" 'test-set-env-config' + kube::test::get_object_assert secret "{{range.items}}{{${id_field:?}}}:{{end}}" 'test-set-env-secret:' # Set env of deployments by configmap from keys - kubectl set env deployment nginx-deployment --keys=key-2 --from=configmap/test-set-env-config "${kube_flags[@]}" + kubectl set env deployment nginx-deployment --keys=key-2 --from=configmap/test-set-env-config "${kube_flags[@]:?}" # Assert correct value in deployment env kube::test::get_object_assert 'deploy nginx-deployment' "{{ (index (index .spec.template.spec.containers 0).env 0).name}}" 'KEY_2' # Assert single value in deployment env kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '1' # Set env of deployments by configmap - kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]}" + kubectl set env deployment nginx-deployment --from=configmap/test-set-env-config "${kube_flags[@]:?}" # Assert all values in deployment env kube::test::get_object_assert 'deploy nginx-deployment' "{{ len (index .spec.template.spec.containers 0).env }}" '2' # Set env of deployments for all container - kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]}" + kubectl set env deployment nginx-deployment env=prod "${kube_flags[@]:?}" # Set env of deployments for specific container - kubectl set env deployment nginx-deployment superenv=superprod -c=nginx "${kube_flags[@]}" + kubectl set env deployment nginx-deployment superenv=superprod -c=nginx "${kube_flags[@]:?}" # Set env of deployments by secret from keys - kubectl set env deployment nginx-deployment --keys=username --from=secret/test-set-env-secret "${kube_flags[@]}" + kubectl set env deployment nginx-deployment --keys=username --from=secret/test-set-env-secret "${kube_flags[@]:?}" # Set env of deployments by secret - kubectl set env deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]}" + kubectl set env 
deployment nginx-deployment --from=secret/test-set-env-secret "${kube_flags[@]:?}" # Remove specific env of deployment kubectl set env deployment nginx-deployment env- # Clean up - kubectl delete deployment nginx-deployment "${kube_flags[@]}" - kubectl delete configmap test-set-env-config "${kube_flags[@]}" - kubectl delete secret test-set-env-secret "${kube_flags[@]}" + kubectl delete deployment nginx-deployment "${kube_flags[@]:?}" + kubectl delete configmap test-set-env-config "${kube_flags[@]:?}" + kubectl delete secret test-set-env-secret "${kube_flags[@]:?}" set +o nounset set +o errexit @@ -418,42 +418,42 @@ run_statefulset_history_tests() { ### Test rolling back a StatefulSet # Pre-condition: no statefulset or its pods exists - kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert statefulset "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command # Create a StatefulSet (revision 1) - kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]}" - kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*" + kubectl apply -f hack/testdata/rollingupdate-statefulset.yaml --record "${kube_flags[@]:?}" + kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-statefulset.yaml --record.*" # Rollback to revision 1 - should be no-op - kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}" - kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" - kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1" + kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]:?}" + kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" + kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "1" # Update the statefulset (revision 2) - kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]}" - kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:" - kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:" - kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2" - kube::test::wait_object_assert controllerrevisions "{{range.items}}{{$annotations_field}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*" + kubectl apply -f hack/testdata/rollingupdate-statefulset-rv2.yaml --record "${kube_flags[@]:?}" + kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:" + kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:" + kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2" + kube::test::wait_object_assert controllerrevisions "{{range.items}}{{${annotations_field:?}}}:{{end}}" ".*rollingupdate-statefulset-rv2.yaml --record.*" # Rollback to revision 1 with dry-run - should be no-op - kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]}" - kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:" - kube::test::get_object_assert statefulset 
"{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:" - kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2" + kubectl rollout undo statefulset --dry-run=true "${kube_flags[@]:?}" + kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:" + kube::test::get_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:" + kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2" # Rollback to revision 1 - kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]}" - kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" - kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1" + kubectl rollout undo statefulset --to-revision=1 "${kube_flags[@]:?}" + kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" + kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "1" # Rollback to revision 1000000 - should fail - output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]}" 2>&1) + output_message=$(! kubectl rollout undo statefulset --to-revision=1000000 "${kube_flags[@]:?}" 2>&1) kube::test::if_has_string "${output_message}" "unable to find specified revision" - kube::test::get_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" - kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "1" + kube::test::get_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R1}:" + kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "1" # Rollback to last revision - kubectl rollout undo statefulset "${kube_flags[@]}" - kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field0}}:{{end}}" "${IMAGE_STATEFULSET_R2}:" - kube::test::wait_object_assert statefulset "{{range.items}}{{$image_field1}}:{{end}}" "${IMAGE_PAUSE_V2}:" - kube::test::get_object_assert statefulset "{{range.items}}{{$container_len}}{{end}}" "2" + kubectl rollout undo statefulset "${kube_flags[@]:?}" + kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field0:?}}}:{{end}}" "${IMAGE_STATEFULSET_R2}:" + kube::test::wait_object_assert statefulset "{{range.items}}{{${image_field1:?}}}:{{end}}" "${IMAGE_PAUSE_V2}:" + kube::test::get_object_assert statefulset "{{range.items}}{{${container_len:?}}}{{end}}" "2" # Clean up - delete newest configuration - kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]}" + kubectl delete -f hack/testdata/rollingupdate-statefulset-rv2.yaml "${kube_flags[@]:?}" # Post-condition: no pods from statefulset controller wait-for-pods-with-label "app=nginx-statefulset" "" @@ -470,26 +470,26 @@ run_stateful_set_tests() { ### Create and stop statefulset, make sure it doesn't leak pods # Pre-condition: no statefulset exists - kube::test::get_object_assert statefulset "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert statefulset "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command: create statefulset - kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]}" + kubectl create -f hack/testdata/rollingupdate-statefulset.yaml "${kube_flags[@]:?}" ### Scale statefulset test with 
@@ -507,40 +507,40 @@ run_rs_tests() {
   ### Create and stop a replica set, make sure it doesn't leak pods
   # Pre-condition: no replica set exists
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command
-  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
+  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
   kube::log::status "Deleting rs"
-  kubectl delete rs frontend "${kube_flags[@]}"
+  kubectl delete rs frontend "${kube_flags[@]:?}"
   # Post-condition: no pods from frontend replica set
-  kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   ### Create and then delete a replica set with cascade=false, make sure it doesn't delete pods.
   # Pre-condition: no replica set exists
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command
   #TODO(mortent): Remove this workaround when ReplicaSet bug described in issue #69376 is fixed
   local replicaset_name="frontend-no-cascade"
-  sed -r 's/^(\s*)(name\s*:\s*frontend\s*$)/\1name: '"${replicaset_name}"'/' hack/testdata/frontend-replicaset.yaml | kubectl create "${kube_flags[@]}" -f -
+  sed -r 's/^(\s*)(name\s*:\s*frontend\s*$)/\1name: '"${replicaset_name:?}"'/' hack/testdata/frontend-replicaset.yaml | kubectl create "${kube_flags[@]:?}" -f -
   # wait for all 3 pods to be set up
-  kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
+  kube::test::wait_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{${pod_container_name_field:?}}}:{{end}}" 'php-redis:php-redis:php-redis:'
   kube::log::status "Deleting rs"
-  kubectl delete rs "${replicaset_name}" "${kube_flags[@]}" --cascade=false
+  kubectl delete rs "${replicaset_name}" "${kube_flags[@]:?}" --cascade=false
   # Wait for the rs to be deleted.
-  kube::test::wait_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::wait_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Post-condition: All 3 pods still remain from frontend replica set
   kube::test::get_object_assert 'pods -l "tier=frontend"' "{{range.items}}{{$pod_container_name_field}}:{{end}}" 'php-redis:php-redis:php-redis:'
   # Cleanup
-  kubectl delete pods -l "tier=frontend" "${kube_flags[@]}"
-  kube::test::get_object_assert pods "{{range.items}}{{$id_field}}:{{end}}" ''
+  kubectl delete pods -l "tier=frontend" "${kube_flags[@]:?}"
+  kube::test::get_object_assert pods "{{range.items}}{{${id_field:?}}}:{{end}}" ''
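`--cascade=false` (spelled `--cascade=orphan` in kubectl 1.20 and later) deletes only the owning object and orphans its dependents, which is why all three pods must still exist afterwards. A sketch using the test's own names:

    # Delete the ReplicaSet but leave its pods running, now ownerless.
    kubectl delete rs frontend-no-cascade --cascade=false

    # The pods are still there until removed by label.
    kubectl get pods -l tier=frontend
    kubectl delete pods -l tier=frontend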
   ### Create replica set frontend from YAML
   # Pre-condition: no replica set exists
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command
-  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
+  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
   # Post-condition: frontend replica set is created
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
+  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:'
   # Describe command should print detailed information
   kube::test::describe_object_assert rs 'frontend' "Name:" "Pod Template:" "Labels:" "Selector:" "Replicas:" "Pods Status:" "Volumes:"
   # Describe command should print events information by default
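The assertion helpers are thin wrappers around `kubectl get` with a Go template: render one field per item, join with colons, and string-compare against the expected literal. Roughly what a passing check reduces to, assuming `id_field` is `.metadata.name` as the harness conventionally sets it:

    id_field=".metadata.name"   # assumed harness value

    actual=$(kubectl get rs -o go-template="{{range .items}}{{${id_field:?}}}:{{end}}")
    [[ "${actual}" == "frontend:" ]] || { echo "expected 'frontend:', got '${actual}'"; exit 1; }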
@@ -562,16 +562,16 @@ run_rs_tests() {
   ### Scale replica set frontend with current-replicas and replicas
   # Pre-condition: 3 replicas
-  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
+  kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '3'
   # Command
-  kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]}"
+  kubectl scale --current-replicas=3 --replicas=2 replicasets frontend "${kube_flags[@]:?}"
   # Post-condition: 2 replicas
-  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '2'
+  kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '2'
   # Set up three deploy, two deploy have same label
-  kubectl create -f hack/testdata/scale-deploy-1.yaml "${kube_flags[@]}"
-  kubectl create -f hack/testdata/scale-deploy-2.yaml "${kube_flags[@]}"
-  kubectl create -f hack/testdata/scale-deploy-3.yaml "${kube_flags[@]}"
+  kubectl create -f hack/testdata/scale-deploy-1.yaml "${kube_flags[@]:?}"
+  kubectl create -f hack/testdata/scale-deploy-2.yaml "${kube_flags[@]:?}"
+  kubectl create -f hack/testdata/scale-deploy-3.yaml "${kube_flags[@]:?}"
   kube::test::get_object_assert 'deploy scale-1' "{{.spec.replicas}}" '1'
   kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '1'
   kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '1'
@@ -586,78 +586,78 @@ run_rs_tests() {
   kube::test::get_object_assert 'deploy scale-2' "{{.spec.replicas}}" '3'
   kube::test::get_object_assert 'deploy scale-3' "{{.spec.replicas}}" '3'
   # Clean-up
-  kubectl delete rs frontend "${kube_flags[@]}"
-  kubectl delete deploy scale-1 scale-2 scale-3 "${kube_flags[@]}"
+  kubectl delete rs frontend "${kube_flags[@]:?}"
+  kubectl delete deploy scale-1 scale-2 scale-3 "${kube_flags[@]:?}"
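Note the gap between the two hunks: the unchanged lines that actually take `scale-2` and `scale-3` from 1 to 3 replicas are not part of the diff, only their before and after assertions are. For reference, `kubectl scale` accepts several targets in one invocation, so the elided step plausibly has this shape (illustrative, not the test's literal command):

    # One scale call can resize several workloads to the same replica count.
    kubectl scale --replicas=3 deployment/scale-2 deployment/scale-3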
   ### Expose replica set as service
-  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
+  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
   # Pre-condition: 3 replicas
-  kube::test::get_object_assert 'rs frontend' "{{$rs_replicas_field}}" '3'
+  kube::test::get_object_assert 'rs frontend' "{{${rs_replicas_field:?}}}" '3'
   # Command
-  kubectl expose rs frontend --port=80 "${kube_flags[@]}"
+  kubectl expose rs frontend --port=80 "${kube_flags[@]:?}"
   # Post-condition: service exists and the port is unnamed
-  kube::test::get_object_assert 'service frontend' "{{$port_name}} {{$port_field}}" ' 80'
+  kube::test::get_object_assert 'service frontend' "{{${port_name:?}}} {{${port_field:?}}}" ' 80'
   # Create a service using service/v1 generator
-  kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]}"
+  kubectl expose rs frontend --port=80 --name=frontend-2 --generator=service/v1 "${kube_flags[@]:?}"
   # Post-condition: service exists and the port is named default.
-  kube::test::get_object_assert 'service frontend-2' "{{$port_name}} {{$port_field}}" 'default 80'
+  kube::test::get_object_assert 'service frontend-2' "{{${port_name:?}}} {{${port_field:?}}}" 'default 80'
   # Cleanup services
-  kubectl delete service frontend{,-2} "${kube_flags[@]}"
+  kubectl delete service frontend{,-2} "${kube_flags[@]:?}"
   # Test set commands
   # Pre-condition: frontend replica set exists at generation 1
-  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '1'
-  kubectl set image rs/frontend "${kube_flags[@]}" *=k8s.gcr.io/pause:test-cmd
-  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '2'
-  kubectl set env rs/frontend "${kube_flags[@]}" foo=bar
-  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '3'
-  kubectl set resources rs/frontend "${kube_flags[@]}" --limits=cpu=200m,memory=512Mi
-  kube::test::get_object_assert 'rs frontend' "{{${generation_field}}}" '4'
+  kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '1'
+  kubectl set image rs/frontend "${kube_flags[@]:?}" "*=k8s.gcr.io/pause:test-cmd"
+  kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '2'
+  kubectl set env rs/frontend "${kube_flags[@]:?}" foo=bar
+  kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '3'
+  kubectl set resources rs/frontend "${kube_flags[@]:?}" --limits=cpu=200m,memory=512Mi
+  kube::test::get_object_assert 'rs frontend' "{{${generation_field:?}}}" '4'
   ### Delete replica set with id
   # Pre-condition: frontend replica set exists
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:'
+  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:'
   # Command
-  kubectl delete rs frontend "${kube_flags[@]}"
+  kubectl delete rs frontend "${kube_flags[@]:?}"
   # Post-condition: no replica set exists
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   ### Create two replica sets
   # Pre-condition: no replica set exists
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
   # Command
-  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}"
-  kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]}"
+  kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}"
+  kubectl create -f hack/testdata/redis-slave-replicaset.yaml "${kube_flags[@]:?}"
   # Post-condition: frontend and redis-slave
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
+  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:redis-slave:'
   ### Delete multiple replica sets at once
   # Pre-condition: frontend and redis-slave
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:redis-slave:'
+  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:redis-slave:'
   # Command
-  kubectl delete rs frontend redis-slave "${kube_flags[@]}" # delete multiple replica sets at once
+  kubectl delete rs frontend redis-slave "${kube_flags[@]:?}" # delete multiple replica sets at once
   # Post-condition: no replica set exists
-  kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" ''
+  kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" ''
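Two details in the set-commands block are worth calling out. Quoting `"*=k8s.gcr.io/pause:test-cmd"` keeps the shell from glob-expanding `*` before kubectl parses it, one of the shellcheck findings this PR fixes, and the generation assertions work because the API server increments `.metadata.generation` on every spec change. A sketch of that invariant, assuming the `frontend` ReplicaSet exists:

    # Read the current generation of the ReplicaSet.
    gen() { kubectl get rs frontend -o go-template='{{.metadata.generation}}'; }

    before=$(gen)
    # "*" retargets every container; quoting it defeats shell globbing.
    kubectl set image rs/frontend "*=k8s.gcr.io/pause:test-cmd"
    after=$(gen)
    echo "generation bumped: ${before} -> ${after}"   # expect after == before + 1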
kube::test::if_supports_resource "${horizontalpodautoscalers}" ; then + if kube::test::if_supports_resource "${horizontalpodautoscalers:?}" ; then ### Auto scale replica set # Pre-condition: no replica set exists - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" '' + kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" '' # Command - kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" - kube::test::get_object_assert rs "{{range.items}}{{$id_field}}:{{end}}" 'frontend:' + kubectl create -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}" + kube::test::get_object_assert rs "{{range.items}}{{${id_field:?}}}:{{end}}" 'frontend:' # autoscale 1~2 pods, CPU utilization 70%, replica set specified by file - kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]}" --max=2 --cpu-percent=70 - kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '1 2 70' - kubectl delete hpa frontend "${kube_flags[@]}" + kubectl autoscale -f hack/testdata/frontend-replicaset.yaml "${kube_flags[@]:?}" --max=2 --cpu-percent=70 + kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '1 2 70' + kubectl delete hpa frontend "${kube_flags[@]:?}" # autoscale 2~3 pods, no CPU utilization specified, replica set specified by name - kubectl autoscale rs frontend "${kube_flags[@]}" --min=2 --max=3 - kube::test::get_object_assert 'hpa frontend' "{{$hpa_min_field}} {{$hpa_max_field}} {{$hpa_cpu_field}}" '2 3 80' - kubectl delete hpa frontend "${kube_flags[@]}" + kubectl autoscale rs frontend "${kube_flags[@]:?}" --min=2 --max=3 + kube::test::get_object_assert 'hpa frontend' "{{${hpa_min_field:?}}} {{${hpa_max_field:?}}} {{${hpa_cpu_field:?}}}" '2 3 80' + kubectl delete hpa frontend "${kube_flags[@]:?}" # autoscale without specifying --max should fail - ! kubectl autoscale rs frontend "${kube_flags[@]}" + ! kubectl autoscale rs frontend "${kube_flags[@]:?}" # Clean up - kubectl delete rs frontend "${kube_flags[@]}" + kubectl delete rs frontend "${kube_flags[@]:?}" fi set +o nounset diff --git a/test/cmd/authorization.sh b/test/cmd/authorization.sh index 895ccd2a55e..e271fb1a546 100755 --- a/test/cmd/authorization.sh +++ b/test/cmd/authorization.sh @@ -59,21 +59,21 @@ run_impersonation_tests() { kube::log::status "Testing impersonation" - output_message=$(! kubectl get pods "${kube_flags_with_token[@]}" --as-group=foo 2>&1) + output_message=$(! 
kubectl get pods "${kube_flags_with_token[@]:?}" --as-group=foo 2>&1) kube::test::if_has_string "${output_message}" 'without impersonating a user' - if kube::test::if_supports_resource "${csr}" ; then + if kube::test::if_supports_resource "${csr:?}" ; then # --as - kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1 + kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]:?}" --as=user1 kube::test::get_object_assert 'csr/foo' '{{.spec.username}}' 'user1' kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}}{{end}}' 'system:authenticated' - kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" + kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]:?}" # --as-group - kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" --as=user1 --as-group=group2 --as-group=group1 --as-group=,,,chameleon + kubectl create -f hack/testdata/csr.yml "${kube_flags_with_token[@]:?}" --as=user1 --as-group=group2 --as-group=group1 --as-group=,,,chameleon kube::test::get_object_assert 'csr/foo' '{{len .spec.groups}}' '3' kube::test::get_object_assert 'csr/foo' '{{range .spec.groups}}{{.}} {{end}}' 'group2 group1 ,,,chameleon ' - kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]}" + kubectl delete -f hack/testdata/csr.yml "${kube_flags_with_token[@]:?}" fi set +o nounset