From e22f8ed553c0c2ed86fa22c5b281ddf13577b8b1 Mon Sep 17 00:00:00 2001
From: mochizuki875
Date: Tue, 13 Jun 2023 13:08:06 +0900
Subject: [PATCH] add CAP_NET_RAW to netadmin profile and remove privileged

---
 .../kubectl/pkg/cmd/debug/debug_test.go       |  8 +-
 .../k8s.io/kubectl/pkg/cmd/debug/profiles.go  | 23 ++----
 .../kubectl/pkg/cmd/debug/profiles_test.go    | 15 ++--
 test/cmd/debug.sh                             | 80 ++++++++++++++++++-
 test/cmd/legacy-script.sh                     |  2 +
 5 files changed, 95 insertions(+), 33 deletions(-)

diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go
index 17a04803225..e3e72f2196f 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/debug/debug_test.go
@@ -310,7 +310,7 @@ func TestGenerateDebugContainer(t *testing.T) {
                 TerminationMessagePolicy: corev1.TerminationMessageReadFile,
                 SecurityContext: &corev1.SecurityContext{
                     Capabilities: &corev1.Capabilities{
-                        Add: []corev1.Capability{"NET_ADMIN"},
+                        Add: []corev1.Capability{"NET_ADMIN", "NET_RAW"},
                     },
                 },
             },
@@ -1323,11 +1323,12 @@ func TestGeneratePodCopyWithDebugContainer(t *testing.T) {
                         ImagePullPolicy: corev1.PullIfNotPresent,
                         SecurityContext: &corev1.SecurityContext{
                             Capabilities: &corev1.Capabilities{
-                                Add: []corev1.Capability{"NET_ADMIN"},
+                                Add: []corev1.Capability{"NET_ADMIN", "NET_RAW"},
                             },
                         },
                     },
                 },
+                ShareProcessNamespace: pointer.Bool(true),
             },
         },
     },
@@ -1694,9 +1695,8 @@ func TestGenerateNodeDebugPod(t *testing.T) {
                 TerminationMessagePolicy: corev1.TerminationMessageReadFile,
                 VolumeMounts:             nil,
                 SecurityContext: &corev1.SecurityContext{
-                    Privileged: pointer.Bool(true),
                     Capabilities: &corev1.Capabilities{
-                        Add: []corev1.Capability{"NET_ADMIN"},
+                        Add: []corev1.Capability{"NET_ADMIN", "NET_RAW"},
                     },
                 },
             },
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles.go b/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles.go
index 603970fcb87..ee3383250bb 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles.go
@@ -201,9 +201,11 @@ func (p *netadminProfile) Apply(pod *corev1.Pod, containerName string, target ru
     switch style {
     case node:
         useHostNamespaces(pod)
-        setPrivileged(pod, containerName)
 
-    case podCopy, ephemeral:
+    case podCopy:
+        shareProcessNamespace(pod)
+
+    case ephemeral:
         // no additional modifications needed
     }
 
@@ -269,20 +271,6 @@ func clearSecurityContext(p *corev1.Pod, containerName string) {
     })
 }
 
-// setPrivileged configures the containers as privileged.
-func setPrivileged(p *corev1.Pod, containerName string) {
-    podutils.VisitContainers(&p.Spec, podutils.AllContainers, func(c *corev1.Container, _ podutils.ContainerType) bool {
-        if c.Name != containerName {
-            return true
-        }
-        if c.SecurityContext == nil {
-            c.SecurityContext = &corev1.SecurityContext{}
-        }
-        c.SecurityContext.Privileged = pointer.Bool(true)
-        return false
-    })
-}
-
 // disallowRoot configures the container to run as a non-root user.
 func disallowRoot(p *corev1.Pod, containerName string) {
     podutils.VisitContainers(&p.Spec, podutils.AllContainers, func(c *corev1.Container, _ podutils.ContainerType) bool {
@@ -326,13 +314,14 @@ func allowProcessTracing(p *corev1.Pod, containerName string) {
     })
 }
 
-// allowNetadminCapability grants NET_ADMIN capability to the container.
+// allowNetadminCapability grants the NET_ADMIN and NET_RAW capabilities to the container.
 func allowNetadminCapability(p *corev1.Pod, containerName string) {
     podutils.VisitContainers(&p.Spec, podutils.AllContainers, func(c *corev1.Container, _ podutils.ContainerType) bool {
         if c.Name != containerName {
             return true
         }
         addCapability(c, "NET_ADMIN")
+        addCapability(c, "NET_RAW")
         return false
     })
}
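Note on the profile change above: with this patch the netadmin profile stops marking the debug container as privileged and relies on explicit capabilities instead, granting NET_ADMIN and NET_RAW (host namespaces for node debugging and a shared process namespace for pod copies are handled separately in the switch). The standalone Go sketch below illustrates the SecurityContext shape this produces; the addCapability helper is re-implemented here for illustration only (its real body lives in profiles.go and is not part of this diff), and the container name and image are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

// addCapability mirrors the helper the netadmin profile calls: it appends a
// capability to the container's SecurityContext, creating the nested structs
// on first use. (Illustrative re-implementation, not the kubectl source.)
func addCapability(c *corev1.Container, capability corev1.Capability) {
	if c.SecurityContext == nil {
		c.SecurityContext = &corev1.SecurityContext{}
	}
	if c.SecurityContext.Capabilities == nil {
		c.SecurityContext.Capabilities = &corev1.Capabilities{}
	}
	c.SecurityContext.Capabilities.Add = append(c.SecurityContext.Capabilities.Add, capability)
}

func main() {
	// A debug container as the netadmin profile shapes it after this change:
	// capabilities only, no Privileged flag.
	dbg := corev1.Container{Name: "debugger", Image: "busybox"}
	addCapability(&dbg, "NET_ADMIN")
	addCapability(&dbg, "NET_RAW")

	// Prints [NET_ADMIN NET_RAW], the same value the CLI tests assert on.
	fmt.Println(dbg.SecurityContext.Capabilities.Add)
}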
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles_test.go
index fa45f510553..7b44a9f3a73 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles_test.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/debug/profiles_test.go
@@ -495,7 +495,7 @@ func TestNetAdminProfile(t *testing.T) {
                     Name: "dbg", Image: "dbgimage",
                     SecurityContext: &corev1.SecurityContext{
                         Capabilities: &corev1.Capabilities{
-                            Add: []corev1.Capability{"NET_ADMIN"},
+                            Add: []corev1.Capability{"NET_ADMIN", "NET_RAW"},
                         },
                     },
                 },
@@ -526,6 +526,7 @@
             expectPod: &corev1.Pod{
                 ObjectMeta: metav1.ObjectMeta{Name: "podcopy"},
                 Spec: corev1.PodSpec{
+                    ShareProcessNamespace: pointer.Bool(true),
                     Containers: []corev1.Container{
                         {Name: "app", Image: "appimage"},
                         {
@@ -533,7 +534,7 @@
                             Image: "dbgimage",
                             SecurityContext: &corev1.SecurityContext{
                                 Capabilities: &corev1.Capabilities{
-                                    Add: []corev1.Capability{"NET_ADMIN"},
+                                    Add: []corev1.Capability{"NET_ADMIN", "NET_RAW"},
                                 },
                             },
                         },
@@ -572,6 +573,7 @@
             expectPod: &corev1.Pod{
                 ObjectMeta: metav1.ObjectMeta{Name: "podcopy"},
                 Spec: corev1.PodSpec{
+                    ShareProcessNamespace: pointer.Bool(true),
                     Containers: []corev1.Container{
                         {Name: "app", Image: "appimage"},
                         {
@@ -579,7 +581,7 @@
                             Image: "dbgimage",
                             SecurityContext: &corev1.SecurityContext{
                                 Capabilities: &corev1.Capabilities{
-                                    Add: []corev1.Capability{"SYS_PTRACE", "NET_ADMIN"},
+                                    Add: []corev1.Capability{"SYS_PTRACE", "NET_ADMIN", "NET_RAW"},
                                 },
                             },
                         },
@@ -610,9 +612,8 @@
                     Name:  "dbg",
                     Image: "dbgimage",
                     SecurityContext: &corev1.SecurityContext{
-                        Privileged: pointer.BoolPtr(true),
                         Capabilities: &corev1.Capabilities{
-                            Add: []corev1.Capability{"NET_ADMIN"},
+                            Add: []corev1.Capability{"NET_ADMIN", "NET_RAW"},
                         },
                     },
                 },
@@ -630,7 +631,6 @@
                     Name: "dbg", Image: "dbgimage",
                     SecurityContext: &corev1.SecurityContext{
-                        Privileged: pointer.BoolPtr(true),
                         Capabilities: &corev1.Capabilities{
                             Add: []corev1.Capability{"SYS_PTRACE"},
                         },
                     },
@@ -652,9 +652,8 @@
                     Name:  "dbg",
                     Image: "dbgimage",
                     SecurityContext: &corev1.SecurityContext{
-                        Privileged: pointer.BoolPtr(true),
                         Capabilities: &corev1.Capabilities{
-                            Add: []corev1.Capability{"SYS_PTRACE", "NET_ADMIN"},
+                            Add: []corev1.Capability{"SYS_PTRACE", "NET_ADMIN", "NET_RAW"},
                         },
                     },
                 },
diff --git a/test/cmd/debug.sh b/test/cmd/debug.sh
index afa84b83a58..73cde0bfd6b 100755
--- a/test/cmd/debug.sh
+++ b/test/cmd/debug.sh
@@ -291,7 +291,6 @@ run_kubectl_debug_restricted_tests() {
   kube::log::status "Testing kubectl debug profile restricted"
 
   ### Pod Troubleshooting by ephemeral containers with restricted profile
-
   # Pre-Condition: Pod "nginx" is created
   kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
   kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
@@ -304,6 +303,7 @@
   # Clean up
   kubectl delete pod target "${kube_flags[@]:?}"
 
+  ### Pod Troubleshooting by pod copy with restricted profile
   # Pre-Condition: Pod "nginx" is created
   kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
   kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
@@ -324,6 +324,7 @@
   output_message=$(kubectl get namespaces "${ns_name}" --show-labels)
   kube::test::if_has_string "${output_message}" 'pod-security.kubernetes.io/enforce=restricted'
 
+  ### Pod Troubleshooting by ephemeral containers with restricted profile (restricted namespace)
   # Pre-Condition: Pod "busybox" is created that complies with the restricted policy
   kubectl create -f hack/testdata/pod-restricted-runtime-default.yaml -n "${ns_name}"
   kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
@@ -336,6 +337,7 @@
   # Clean up
   kubectl delete pod target -n "${ns_name}" "${kube_flags[@]:?}"
 
+  ### Pod Troubleshooting by pod copy with restricted profile (restricted namespace)
   # Pre-Condition: Pod "nginx" is created
   kubectl create -f hack/testdata/pod-restricted-runtime-default.yaml -n "${ns_name}"
   kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
@@ -349,6 +351,7 @@
   # Clean up
   kubectl delete pod target target-copy -n "${ns_name}" "${kube_flags[@]:?}"
 
+  ### Pod Troubleshooting by ephemeral containers with restricted profile (restricted namespace)
   # Pre-Condition: Pod "busybox" is created that complies with the restricted policy
   kubectl create -f hack/testdata/pod-restricted-localhost.yaml -n "${ns_name}"
   kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
@@ -361,6 +364,7 @@
   # Clean up
   kubectl delete pod target -n ${ns_name} "${kube_flags[@]:?}"
 
+  ### Pod Troubleshooting by pod copy with restricted profile (restricted namespace)
   # Pre-Condition: Pod "nginx" is created
   kubectl create -f hack/testdata/pod-restricted-localhost.yaml -n "${ns_name}"
   kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
@@ -388,8 +392,7 @@ run_kubectl_debug_restricted_node_tests() {
   create_and_use_new_namespace
   kube::log::status "Testing kubectl debug profile restricted (node)"
 
-  ### Debug node with restrected profile
-
+  ### Debug node with restricted profile
   # Pre-Condition: node exists
   kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
   # Restricted profile just works in not restricted namespace
@@ -422,6 +425,7 @@
   output_message=$(kubectl get namespaces "${ns_name}" --show-labels)
   kube::test::if_has_string "${output_message}" 'pod-security.kubernetes.io/enforce=restricted'
 
+  ### Debug node with restricted profile (restricted namespace)
   # Pre-Condition: node exists
   kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
   # Restricted profile works in restricted namespace
@@ -452,4 +456,72 @@ run_kubectl_debug_restricted_node_tests() {
 
   set +o nounset
   set +o errexit
-}
\ No newline at end of file
+}
+
+run_kubectl_debug_netadmin_tests() {
+  set -o nounset
+  set -o errexit
+
+  create_and_use_new_namespace
+  kube::log::status "Testing kubectl debug profile netadmin"
+
+  ### Pod Troubleshooting by ephemeral containers with netadmin profile
+  # Pre-Condition: Pod "nginx" is created
+  kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
+  kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
+  # Command: add a new debug container with netadmin profile
+  output_message=$(kubectl debug target -it --image=busybox --attach=false -c debug-container --profile=netadmin "${kube_flags[@]:?}")
+  # Post-Conditions
+  kube::test::get_object_assert pod/target '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:'
+  kube::test::get_object_assert pod/target '{{(index (index .spec.ephemeralContainers 0).securityContext.capabilities.add)}}' '\[NET_ADMIN NET_RAW\]'
+  # Clean up
+  kubectl delete pod target "${kube_flags[@]:?}"
+
+  ### Pod Troubleshooting by pod copy with netadmin profile
+  # Pre-Condition: Pod "nginx" is created
+  kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
+  kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
+  # Command: create a copy of target with a new debug container
+  kubectl debug target -it --copy-to=target-copy --image=busybox --container=debug-container --attach=false --profile=netadmin "${kube_flags[@]:?}"
+  # Post-Conditions
+  kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:target-copy:'
+  kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.name}}:{{end}}' 'target:debug-container:'
+  kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.image}}:{{end}}' "${IMAGE_NGINX:?}:busybox:"
+  kube::test::get_object_assert pod/target-copy '{{.spec.shareProcessNamespace}}' 'true'
+  kube::test::get_object_assert pod/target-copy '{{(index (index .spec.containers 1).securityContext.capabilities.add)}}' '\[NET_ADMIN NET_RAW\]'
+  # Clean up
+  kubectl delete pod target target-copy "${kube_flags[@]:?}"
+
+  set +o nounset
+  set +o errexit
+}
+
+run_kubectl_debug_netadmin_node_tests() {
+  set -o nounset
+  set -o errexit
+
+  create_and_use_new_namespace
+  kube::log::status "Testing kubectl debug profile netadmin (node)"
+
+  ### Debug node with netadmin profile
+  # Pre-Condition: node exists
+  kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
+  # Command: create a new node debugger pod
+  output_message=$(kubectl debug --profile netadmin node/127.0.0.1 --image=busybox --attach=false "${kube_flags[@]:?}" -- true)
+  # Post-Conditions
+  kube::test::get_object_assert pod "{{(len .items)}}" '1'
+  debugger=$(kubectl get pod -o go-template="{{(index .items 0)${id_field:?}}}")
+  kube::test::if_has_string "${output_message:?}" "${debugger:?}"
+  kube::test::get_object_assert "pod/${debugger:?}" "{{${image_field:?}}}" 'busybox'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.nodeName}}' '127.0.0.1'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostNetwork}}' 'true'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostPID}}' 'true'
+  kube::test::get_object_assert "pod/${debugger:?}" '{{index .spec.containers 0 "securityContext" "capabilities" "add"}}' '\[NET_ADMIN NET_RAW\]'
+  # Clean up
+  # pod.spec.nodeName is set by kubectl debug node which causes the delete to hang,
+  # presumably waiting for a kubelet that's not present. Force the delete.
+  kubectl delete --force pod "${debugger:?}" "${kube_flags[@]:?}"
+
+  set +o nounset
+  set +o errexit
+}
diff --git a/test/cmd/legacy-script.sh b/test/cmd/legacy-script.sh
index d3e0220be25..0987eab0e02 100755
--- a/test/cmd/legacy-script.sh
+++ b/test/cmd/legacy-script.sh
@@ -1029,12 +1029,14 @@ runTests() {
     record_command run_kubectl_debug_general_tests
     record_command run_kubectl_debug_baseline_tests
     record_command run_kubectl_debug_restricted_tests
+    record_command run_kubectl_debug_netadmin_tests
   fi
   if kube::test::if_supports_resource "${nodes}" ; then
     record_command run_kubectl_debug_node_tests
     record_command run_kubectl_debug_general_node_tests
     record_command run_kubectl_debug_baseline_node_tests
     record_command run_kubectl_debug_restricted_node_tests
+    record_command run_kubectl_debug_netadmin_node_tests
   fi
 
   cleanup_tests
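The CLI tests added above pin down the end state rather than the implementation: the node debugger pod keeps hostNetwork and hostPID but carries no privileged flag, and a pod copy gets spec.shareProcessNamespace=true alongside the two capabilities. Below is a rough Go sketch of the pod spec shapes those assertions expect; the pod and container names are placeholders (kubectl generates its own node-debugger pod name), only the fields the tests check are filled in, and this is illustrative rather than output captured from kubectl.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/pointer"
)

func main() {
	// Node debugger: host namespaces stay, no Privileged flag, capabilities carry the access.
	nodeDebugger := corev1.PodSpec{
		HostNetwork: true,
		HostPID:     true,
		Containers: []corev1.Container{{
			Name:  "debugger",
			Image: "busybox",
			SecurityContext: &corev1.SecurityContext{
				Capabilities: &corev1.Capabilities{
					Add: []corev1.Capability{"NET_ADMIN", "NET_RAW"},
				},
			},
		}},
	}

	// Pod copy: same capability set plus a shared process namespace.
	podCopy := corev1.PodSpec{
		ShareProcessNamespace: pointer.Bool(true),
	}

	// Mirrors what the shell tests check on .spec of the generated pods.
	fmt.Println(nodeDebugger.HostNetwork, nodeDebugger.HostPID,
		nodeDebugger.Containers[0].SecurityContext.Capabilities.Add)
	fmt.Println(*podCopy.ShareProcessNamespace)
}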