From 177905302e90aac854d40dc7f4102fe9fcb7391c Mon Sep 17 00:00:00 2001
From: Shang Ding
Date: Thu, 9 Feb 2023 20:32:35 -0600
Subject: [PATCH] add integration tests for debug profiles general & baseline

---
 test/cmd/debug.sh         | 160 +++++++++++++++++++++++++++++++++++++-
 test/cmd/legacy-script.sh |   4 +
 2 files changed, 162 insertions(+), 2 deletions(-)

diff --git a/test/cmd/debug.sh b/test/cmd/debug.sh
index 5efbdc8efdc..f53b17a0b9d 100755
--- a/test/cmd/debug.sh
+++ b/test/cmd/debug.sh
@@ -50,7 +50,7 @@ run_kubectl_debug_pod_tests() {
   kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.image}}:{{end}}' "${IMAGE_NGINX:?}:busybox:"
   # Clean up
   kubectl delete pod target target-copy "${kube_flags[@]:?}"
-  
+
   # Pre-Condition: Pod "nginx" is created
   kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
   kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
@@ -120,7 +120,163 @@ run_kubectl_debug_node_tests() {
   # pod.spec.nodeName is set by kubectl debug node which causes the delete to hang,
   # presumably waiting for a kubelet that's not present. Force the delete.
   kubectl delete --force pod "${debugger:?}" "${kube_flags[@]:?}"
-  
+
+  set +o nounset
+  set +o errexit
+}
+
+run_kubectl_debug_general_tests() {
+  set -o nounset
+  set -o errexit
+
+  create_and_use_new_namespace
+  kube::log::status "Testing kubectl debug profile general"
+
+  ### Debug by pod copy
+  ### probes are removed, sets SYS_PTRACE in debugging container, sets shareProcessNamespace
+
+  # Pre-Condition: Pod "nginx" is created
+  kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
+  kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
+  # Command: create a copy of target with a new debug container
+  kubectl debug --profile general target -it --copy-to=target-copy --image=busybox --container=debug-container --attach=false "${kube_flags[@]:?}"
+  # Post-Conditions
+  kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:target-copy:'
+  kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.name}}:{{end}}' 'target:debug-container:'
+  kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.image}}:{{end}}' "${IMAGE_NGINX:?}:busybox:"
+  kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{if (index . "livenessProbe")}}:{{end}}{{end}}' ''
+  kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{if (index . "readinessProbe")}}:{{end}}{{end}}' ''
+  kube::test::get_object_assert pod/target-copy '{{(index (index .spec.containers 1).securityContext.capabilities.add 0)}}' 'SYS_PTRACE'
+  kube::test::get_object_assert pod/target-copy '{{.spec.shareProcessNamespace}}' 'true'
+  # Clean up
+  kubectl delete pod target target-copy "${kube_flags[@]:?}"
+
+  ### Debug by EC
+  ### sets SYS_PTRACE in ephemeral container
+
+  # Pre-Condition: Pod "nginx" is created
+  kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
+  kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
+  # Command: add a new debug container to target
+  kubectl debug --profile general target -it --image=busybox --container=debug-container --attach=false "${kube_flags[@]:?}"
+  # Post-Conditions
+  kube::test::get_object_assert pod/target '{{range.spec.ephemeralContainers}}{{.name}}:{{.image}}{{end}}' 'debug-container:busybox'
+  kube::test::get_object_assert pod/target '{{(index (index .spec.ephemeralContainers 0).securityContext.capabilities.add 0)}}' 'SYS_PTRACE'
+  # Clean up
+  kubectl delete pod target "${kube_flags[@]:?}"
+
+  set +o nounset
+  set +o errexit
+}
+
+run_kubectl_debug_general_node_tests() {
+  set -o nounset
+  set -o errexit
+
+  create_and_use_new_namespace
+  kube::log::status "Testing kubectl debug profile general (node)"
+
+  ### Debug node
+  ### empty securityContext, uses host namespaces, mounts root partition
+
+  # Pre-Condition: node exists
+  kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
+  # Command: create a new node debugger pod
+  output_message=$(kubectl debug --profile general node/127.0.0.1 --image=busybox --attach=false "${kube_flags[@]:?}" -- true)
+  # Post-Conditions
+  kube::test::get_object_assert pod "{{(len .items)}}" '1'
+  debugger=$(kubectl get pod -o go-template="{{(index .items 0)${id_field:?}}}")
+  kube::test::if_has_string "${output_message:?}" "${debugger:?}"
+  kube::test::get_object_assert "pod/${debugger:?}" "{{${image_field:?}}}" 'busybox'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.nodeName}}' '127.0.0.1'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostIPC}}' 'true'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostNetwork}}' 'true'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostPID}}' 'true'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{(index (index .spec.containers 0).volumeMounts 0).mountPath}}' '/host'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{(index .spec.volumes 0).hostPath.path}}' '/'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{if (index (index .spec.containers 0) "securityContext")}}:{{end}}' ''
+  # Clean up
+  # pod.spec.nodeName is set by kubectl debug node which causes the delete to hang,
+  # presumably waiting for a kubelet that's not present. Force the delete.
+  kubectl delete --force pod "${debugger:?}" "${kube_flags[@]:?}"
+
+  set +o nounset
+  set +o errexit
+}
+
+run_kubectl_debug_baseline_tests() {
+  set -o nounset
+  set -o errexit
+
+  create_and_use_new_namespace
+  kube::log::status "Testing kubectl debug profile baseline"
+
+  ### Debug by pod copy
+  ### probes are removed, empty securityContext, sets shareProcessNamespace
+
+  # Pre-Condition: Pod "nginx" is created
+  kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
+  kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
+  # Command: create a copy of target with a new debug container
+  kubectl debug --profile baseline target -it --copy-to=target-copy --image=busybox --container=debug-container --attach=false "${kube_flags[@]:?}"
+  # Post-Conditions
+  kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:target-copy:'
+  kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.name}}:{{end}}' 'target:debug-container:'
+  kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.image}}:{{end}}' "${IMAGE_NGINX:?}:busybox:"
+  kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{if (index . "livenessProbe")}}:{{end}}{{end}}' ''
+  kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{if (index . "readinessProbe")}}:{{end}}{{end}}' ''
+  kube::test::get_object_assert pod/target-copy '{{if (index (index .spec.containers 0) "securityContext")}}:{{end}}' ''
+  kube::test::get_object_assert pod/target-copy '{{.spec.shareProcessNamespace}}' 'true'
+  # Clean up
+  kubectl delete pod target target-copy "${kube_flags[@]:?}"
+
+  ### Debug by EC
+  ### empty securityContext
+
+  # Pre-Condition: Pod "nginx" is created
+  kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
+  kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
+  # Command: add a new debug container to target
+  kubectl debug --profile baseline target -it --image=busybox --container=debug-container --attach=false "${kube_flags[@]:?}"
+  # Post-Conditions
+  kube::test::get_object_assert pod/target '{{range.spec.ephemeralContainers}}{{.name}}:{{.image}}{{end}}' 'debug-container:busybox'
+  kube::test::get_object_assert pod/target '{{if (index (index .spec.ephemeralContainers 0) "securityContext")}}:{{end}}' ''
+  # Clean up
+  kubectl delete pod target "${kube_flags[@]:?}"
+
+  set +o nounset
+  set +o errexit
+}
+
+run_kubectl_debug_baseline_node_tests() {
+  set -o nounset
+  set -o errexit
+
+  create_and_use_new_namespace
+  kube::log::status "Testing kubectl debug profile baseline (node)"
+
+  ### Debug node
+  ### empty securityContext, uses isolated namespaces
+
+  # Pre-Condition: node exists
+  kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
+  # Command: create a new node debugger pod
+  output_message=$(kubectl debug --profile baseline node/127.0.0.1 --image=busybox --attach=false "${kube_flags[@]:?}" -- true)
+  # Post-Conditions
+  kube::test::get_object_assert pod "{{(len .items)}}" '1'
+  debugger=$(kubectl get pod -o go-template="{{(index .items 0)${id_field:?}}}")
+  kube::test::if_has_string "${output_message:?}" "${debugger:?}"
+  kube::test::get_object_assert "pod/${debugger:?}" "{{${image_field:?}}}" 'busybox'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.nodeName}}' '127.0.0.1'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostIPC}}' ''
+  kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostNetwork}}' ''
+  kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostPID}}' ''
+  kube::test::get_object_assert "pod/${debugger:?}" '{{if (index (index .spec.containers 0) "securityContext")}}:{{end}}' ''
+  # Clean up
+  # pod.spec.nodeName is set by kubectl debug node which causes the delete to hang,
+  # presumably waiting for a kubelet that's not present. Force the delete.
+  kubectl delete --force pod "${debugger:?}" "${kube_flags[@]:?}"
+
   set +o nounset
   set +o errexit
 }
diff --git a/test/cmd/legacy-script.sh b/test/cmd/legacy-script.sh
index 7ac43358393..38d6e0a4c40 100755
--- a/test/cmd/legacy-script.sh
+++ b/test/cmd/legacy-script.sh
@@ -1011,9 +1011,13 @@ runTests() {
   ####################
   if kube::test::if_supports_resource "${pods}" ; then
     record_command run_kubectl_debug_pod_tests
+    record_command run_kubectl_debug_general_tests
+    record_command run_kubectl_debug_baseline_tests
   fi
   if kube::test::if_supports_resource "${nodes}" ; then
     record_command run_kubectl_debug_node_tests
+    record_command run_kubectl_debug_general_node_tests
+    record_command run_kubectl_debug_baseline_node_tests
   fi
 
   cleanup_tests