Fix: Restricted profile complies with PSS (#117543)

* restricted profile complies with PSA v1.27

* add test case

* Reflect review comments

* Reflect review comments 2

* Reflect review comments 3
This commit is contained in:
Keita Mochizuki
2023-05-24 20:16:49 +09:00
committed by GitHub
parent b2522655b3
commit 0813904404
7 changed files with 263 additions and 1 deletions

View File

@@ -0,0 +1,23 @@
apiVersion: v1
kind: Pod
metadata:
labels:
run: target
name: target
spec:
securityContext:
seccompProfile:
type: Localhost
localhostProfile: dummy.json
containers:
- image: busybox
name: target
command: ["/bin/sh", "-c", "sleep 100"]
securityContext:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- "ALL"

View File

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
labels:
run: target
name: target
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
containers:
- image: busybox
name: target
command: ["/bin/sh", "-c", "sleep 100"]
securityContext:
runAsUser: 1000
runAsGroup: 1000
runAsNonRoot: true
allowPrivilegeEscalation: false
capabilities:
drop:
- "ALL"

View File

@@ -289,6 +289,8 @@ func TestGenerateDebugContainer(t *testing.T) {
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
AllowPrivilegeEscalation: pointer.Bool(false),
SeccompProfile: &corev1.SeccompProfile{Type: "RuntimeDefault"},
},
},
},
@@ -1274,10 +1276,12 @@ func TestGeneratePodCopyWithDebugContainer(t *testing.T) {
Image: "busybox",
ImagePullPolicy: corev1.PullIfNotPresent,
SecurityContext: &corev1.SecurityContext{
RunAsNonRoot: pointer.Bool(true),
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
RunAsNonRoot: pointer.Bool(true),
AllowPrivilegeEscalation: pointer.Bool(false),
SeccompProfile: &corev1.SeccompProfile{Type: "RuntimeDefault"},
},
},
},
@@ -1646,6 +1650,8 @@ func TestGenerateNodeDebugPod(t *testing.T) {
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
AllowPrivilegeEscalation: pointer.Bool(false),
SeccompProfile: &corev1.SeccompProfile{Type: "RuntimeDefault"},
},
},
},

View File

@@ -176,6 +176,8 @@ func (p *restrictedProfile) Apply(pod *corev1.Pod, containerName string, target
clearSecurityContext(pod, containerName)
disallowRoot(pod, containerName)
dropCapabilities(pod, containerName)
disallowPrivilegeEscalation(pod, containerName)
setSeccompProfile(pod, containerName)
switch style {
case podCopy:
@@ -343,3 +345,31 @@ func addCapability(c *corev1.Container, capability corev1.Capability) {
}
c.SecurityContext.Capabilities.Add = append(c.SecurityContext.Capabilities.Add, capability)
}
// disallowPrivilegeEscalation configures the containers not allowed PrivilegeEscalation
func disallowPrivilegeEscalation(p *corev1.Pod, containerName string) {
podutils.VisitContainers(&p.Spec, podutils.AllContainers, func(c *corev1.Container, _ podutils.ContainerType) bool {
if c.Name != containerName {
return true
}
if c.SecurityContext == nil {
c.SecurityContext = &corev1.SecurityContext{}
}
c.SecurityContext.AllowPrivilegeEscalation = pointer.Bool(false)
return false
})
}
// setSeccompProfile apply SeccompProfile to the containers
func setSeccompProfile(p *corev1.Pod, containerName string) {
podutils.VisitContainers(&p.Spec, podutils.AllContainers, func(c *corev1.Container, _ podutils.ContainerType) bool {
if c.Name != containerName {
return true
}
if c.SecurityContext == nil {
c.SecurityContext = &corev1.SecurityContext{}
}
c.SecurityContext.SeccompProfile = &corev1.SeccompProfile{Type: "RuntimeDefault"}
return false
})
}

View File

@@ -347,6 +347,8 @@ func TestRestrictedProfile(t *testing.T) {
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
AllowPrivilegeEscalation: pointer.Bool(false),
SeccompProfile: &corev1.SeccompProfile{Type: "RuntimeDefault"},
},
},
},
@@ -386,6 +388,8 @@ func TestRestrictedProfile(t *testing.T) {
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
AllowPrivilegeEscalation: pointer.Bool(false),
SeccompProfile: &corev1.SeccompProfile{Type: "RuntimeDefault"},
},
},
},
@@ -404,6 +408,8 @@ func TestRestrictedProfile(t *testing.T) {
Capabilities: &corev1.Capabilities{
Add: []corev1.Capability{"ALL"},
},
AllowPrivilegeEscalation: pointer.Bool(false),
SeccompProfile: &corev1.SeccompProfile{Type: "RuntimeDefault"},
},
},
},
@@ -423,6 +429,8 @@ func TestRestrictedProfile(t *testing.T) {
Capabilities: &corev1.Capabilities{
Drop: []corev1.Capability{"ALL"},
},
AllowPrivilegeEscalation: pointer.Bool(false),
SeccompProfile: &corev1.SeccompProfile{Type: "RuntimeDefault"},
},
},
},

View File

@@ -280,3 +280,174 @@ run_kubectl_debug_baseline_node_tests() {
set +o nounset
set +o errexit
}
run_kubectl_debug_restricted_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl debug profile restricted"
### Pod Troubleshooting by ephemeral containers with restricted profile
# Pre-Condition: Pod "nginx" is created
kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
# Restricted profile just works in not restricted namespace
# Command: add a new debug container with restricted profile
output_message=$(kubectl debug target -it --image=busybox --attach=false -c debug-container --profile=restricted "${kube_flags[@]:?}")
kube::test::if_has_not_string "${output_message}" 'forbidden: violates PodSecurity'
# Post-Conditions
kube::test::get_object_assert pod/target '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:'
# Clean up
kubectl delete pod target "${kube_flags[@]:?}"
# Pre-Condition: Pod "nginx" is created
kubectl run target "--image=${IMAGE_NGINX:?}" "${kube_flags[@]:?}"
kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
# Restricted profile just works in not restricted namespace
# Command: create a copy of target with a new debug container
kubectl debug target -it --copy-to=target-copy --image=busybox --container=debug-container --attach=false --profile=restricted "${kube_flags[@]:?}"
# Post-Conditions
kube::test::get_object_assert pod "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:target-copy:'
kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.name}}:{{end}}' 'target:debug-container:'
kube::test::get_object_assert pod/target-copy '{{range.spec.containers}}{{.image}}:{{end}}' "${IMAGE_NGINX:?}:busybox:"
# Clean up
kubectl delete pod target target-copy "${kube_flags[@]:?}"
ns_name="namespace-restricted"
# Command: create namespace and add a label
kubectl create namespace "${ns_name}"
kubectl label namespace "${ns_name}" pod-security.kubernetes.io/enforce=restricted
output_message=$(kubectl get namespaces "${ns_name}" --show-labels)
kube::test::if_has_string "${output_message}" 'pod-security.kubernetes.io/enforce=restricted'
# Pre-Condition: Pod "busybox" is created that complies with the restricted policy
kubectl create -f hack/testdata/pod-restricted-runtime-default.yaml -n "${ns_name}"
kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
# Restricted profile works when pod's seccompProfile is RuntimeDefault
# Command: add a new debug container with restricted profile
output_message=$(kubectl debug target -it --image=busybox --attach=false -c debug-container --profile=restricted -n "${ns_name}" "${kube_flags[@]:?}")
kube::test::if_has_not_string "${output_message}" 'forbidden: violates PodSecurity'
# Post-Conditions
kube::test::get_object_assert "pod/target -n ${ns_name}" '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:'
# Clean up
kubectl delete pod target -n "${ns_name}" "${kube_flags[@]:?}"
# Pre-Condition: Pod "nginx" is created
kubectl create -f hack/testdata/pod-restricted-runtime-default.yaml -n "${ns_name}"
kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
# Restricted profile works when pod's seccompProfile is RuntimeDefault
# Command: create a copy of target with a new debug container
kubectl debug target -it --copy-to=target-copy --image=busybox --container=debug-container --attach=false --profile=restricted -n ${ns_name} "${kube_flags[@]:?}"
# Post-Conditions
kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:target-copy:'
kube::test::get_object_assert "pod/target-copy -n ${ns_name}" '{{range.spec.containers}}{{.name}}:{{end}}' 'target:debug-container:'
kube::test::get_object_assert "pod/target-copy -n ${ns_name}" '{{range.spec.containers}}{{.image}}:{{end}}' "busybox:busybox:"
# Clean up
kubectl delete pod target target-copy -n "${ns_name}" "${kube_flags[@]:?}"
# Pre-Condition: Pod "busybox" is created that complies with the restricted policy
kubectl create -f hack/testdata/pod-restricted-localhost.yaml -n "${ns_name}"
kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
# Restricted profile works when pod's seccompProfile is Localhost
# Command: add a new debug container with restricted profile
output_message=$(kubectl debug target -it --image=busybox --attach=false -c debug-container --profile=restricted -n ${ns_name} "${kube_flags[@]:?}")
kube::test::if_has_not_string "${output_message}" 'forbidden: violates PodSecurity'
# Post-Conditions
kube::test::get_object_assert "pod/target -n ${ns_name}" '{{range.spec.ephemeralContainers}}{{.name}}:{{end}}' 'debug-container:'
# Clean up
kubectl delete pod target -n ${ns_name} "${kube_flags[@]:?}"
# Pre-Condition: Pod "nginx" is created
kubectl create -f hack/testdata/pod-restricted-localhost.yaml -n "${ns_name}"
kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:'
# Restricted profile works when pod's seccompProfile is Localhost
# Command: create a copy of target with a new debug container
kubectl debug target -it --copy-to=target-copy --image=busybox --container=debug-container --attach=false --profile=restricted -n ${ns_name} "${kube_flags[@]:?}"
# Post-Conditions
kube::test::get_object_assert "pod -n ${ns_name}" "{{range.items}}{{${id_field:?}}}:{{end}}" 'target:target-copy:'
kube::test::get_object_assert "pod/target-copy -n ${ns_name}" '{{range.spec.containers}}{{.name}}:{{end}}' 'target:debug-container:'
kube::test::get_object_assert "pod/target-copy -n ${ns_name}" '{{range.spec.containers}}{{.image}}:{{end}}' "busybox:busybox:"
# Clean up
kubectl delete pod target target-copy -n "${ns_name}" "${kube_flags[@]:?}"
# Clean up restricted namespace
kubectl delete namespace "${ns_name}"
set +o nounset
set +o errexit
}
run_kubectl_debug_restricted_node_tests() {
set -o nounset
set -o errexit
create_and_use_new_namespace
kube::log::status "Testing kubectl debug profile restricted (node)"
### Debug node with restrected profile
# Pre-Condition: node exists
kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
# Restricted profile just works in not restricted namespace
# Command: create a new node debugger pod
output_message=$(kubectl debug --profile restricted node/127.0.0.1 --image=busybox --attach=false "${kube_flags[@]:?}" -- true)
kube::test::if_has_not_string "${output_message}" 'forbidden: violates PodSecurity'
# Post-Conditions
kube::test::get_object_assert pod "{{(len .items)}}" '1'
debugger=$(kubectl get pod -o go-template="{{(index .items 0)${id_field:?}}}")
kube::test::if_has_string "${output_message:?}" "${debugger:?}"
kube::test::get_object_assert "pod/${debugger:?}" "{{${image_field:?}}}" 'busybox'
kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.nodeName}}' '127.0.0.1'
kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostIPC}}' '<no value>'
kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostNetwork}}' '<no value>'
kube::test::get_object_assert "pod/${debugger:?}" '{{.spec.hostPID}}' '<no value>'
kube::test::get_object_assert "pod/${debugger:?}" '{{index .spec.containers 0 "securityContext" "allowPrivilegeEscalation"}}' 'false'
kube::test::get_object_assert "pod/${debugger:?}" '{{index .spec.containers 0 "securityContext" "capabilities" "drop"}}' '\[ALL\]'
kube::test::get_object_assert "pod/${debugger:?}" '{{if (index (index .spec.containers 0) "securityContext" "capabilities" "add") }}:{{end}}' ''
kube::test::get_object_assert "pod/${debugger:?}" '{{index .spec.containers 0 "securityContext" "runAsNonRoot"}}' 'true'
kube::test::get_object_assert "pod/${debugger:?}" '{{index .spec.containers 0 "securityContext" "seccompProfile" "type"}}' 'RuntimeDefault'
# Clean up
# pod.spec.nodeName is set by kubectl debug node which causes the delete to hang,
# presumably waiting for a kubelet that's not present. Force the delete.
kubectl delete --force pod "${debugger:?}" "${kube_flags[@]:?}"
ns_name="namespace-restricted"
# Command: create namespace and add a label
kubectl create namespace "${ns_name}"
kubectl label namespace "${ns_name}" pod-security.kubernetes.io/enforce=restricted
output_message=$(kubectl get namespaces "${ns_name}" --show-labels)
kube::test::if_has_string "${output_message}" 'pod-security.kubernetes.io/enforce=restricted'
# Pre-Condition: node exists
kube::test::get_object_assert nodes "{{range.items}}{{${id_field:?}}}:{{end}}" '127.0.0.1:'
# Restricted profile works in restricted namespace
# Command: create a new node debugger pod
output_message=$(kubectl debug --profile restricted node/127.0.0.1 --image=busybox --attach=false -n ${ns_name} "${kube_flags[@]:?}" -- true)
kube::test::if_has_not_string "${output_message}" 'forbidden: violates PodSecurity'
# Post-Conditions
kube::test::get_object_assert "pod -n ${ns_name}" "{{(len .items)}}" '1'
debugger=$(kubectl get pod -n ${ns_name} -o go-template="{{(index .items 0)${id_field:?}}}")
kube::test::if_has_string "${output_message:?}" "${debugger:?}"
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" "{{${image_field:?}}}" 'busybox'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{.spec.nodeName}}' '127.0.0.1'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{.spec.hostIPC}}' '<no value>'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{.spec.hostNetwork}}' '<no value>'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{.spec.hostPID}}' '<no value>'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{index .spec.containers 0 "securityContext" "allowPrivilegeEscalation"}}' 'false'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{index .spec.containers 0 "securityContext" "capabilities" "drop"}}' '\[ALL\]'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{if (index (index .spec.containers 0) "securityContext" "capabilities" "add") }}:{{end}}' ''
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{index .spec.containers 0 "securityContext" "runAsNonRoot"}}' 'true'
kube::test::get_object_assert "pod/${debugger:?} -n ${ns_name}" '{{index .spec.containers 0 "securityContext" "seccompProfile" "type"}}' 'RuntimeDefault'
# Clean up
# pod.spec.nodeName is set by kubectl debug node which causes the delete to hang,
# presumably waiting for a kubelet that's not present. Force the delete.
kubectl delete --force pod "${debugger:?}" -n ${ns_name} "${kube_flags[@]:?}"
# Clean up restricted namespace
kubectl delete namespace "${ns_name}"
set +o nounset
set +o errexit
}

View File

@@ -1021,11 +1021,13 @@ runTests() {
record_command run_kubectl_debug_pod_tests
record_command run_kubectl_debug_general_tests
record_command run_kubectl_debug_baseline_tests
record_command run_kubectl_debug_restricted_tests
fi
if kube::test::if_supports_resource "${nodes}" ; then
record_command run_kubectl_debug_node_tests
record_command run_kubectl_debug_general_node_tests
record_command run_kubectl_debug_baseline_node_tests
record_command run_kubectl_debug_restricted_node_tests
fi
cleanup_tests