Merge pull request #10306 from microsoft/danmihai1/more-security-contexts

genpolicy: get UID from PodSecurityContext
Fabiano Fidêncio 2024-09-18 21:33:39 +02:00 committed by GitHub
commit 593cbb8710
13 changed files with 118 additions and 32 deletions
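
In short: genpolicy now honors a pod-level securityContext.runAsUser when computing the UID it expects the container process to run with, for bare Pods and for the template-based workload kinds alike. A minimal standalone sketch of that behavior, using simplified stand-in types (the real KataProcess and PodSecurityContext definitions in genpolicy carry more fields than shown here):

// Simplified stand-ins for genpolicy's policy::KataProcess and
// pod::PodSecurityContext; the field names here are illustrative.
#[derive(Default)]
struct KataUser {
    uid: u32,
}

#[derive(Default)]
struct KataProcess {
    user: KataUser,
}

struct PodSecurityContext {
    // Kubernetes serializes runAsUser as a 64-bit integer.
    run_as_user: Option<i64>,
}

// Same shape as the shared helper introduced below: copy runAsUser into
// the expected process UID when the manifest provides one.
fn get_process_fields(process: &mut KataProcess, ctx: &Option<PodSecurityContext>) {
    if let Some(context) = ctx {
        if let Some(uid) = context.run_as_user {
            // A negative runAsUser panics at policy-generation time
            // instead of producing a silently wrong policy.
            process.user.uid = uid.try_into().unwrap();
        }
    }
}

fn main() {
    let mut process = KataProcess::default();
    let ctx = Some(PodSecurityContext { run_as_user: Some(1000) });
    get_process_fields(&mut process, &ctx);
    assert_eq!(process.user.uid, 1000);
}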

src/tools/genpolicy/src/daemon_set.rs

@@ -147,4 +147,8 @@ impl yaml::K8sResource for DaemonSet {
.clone()
.or_else(|| Some(String::new()))
}
+fn get_process_fields(&self, process: &mut policy::KataProcess) {
+yaml::get_process_fields(process, &self.spec.template.spec.securityContext);
+}
}

src/tools/genpolicy/src/deployment.rs

@@ -145,4 +145,8 @@ impl yaml::K8sResource for Deployment {
.clone()
.or_else(|| Some(String::new()))
}
+fn get_process_fields(&self, process: &mut policy::KataProcess) {
+yaml::get_process_fields(process, &self.spec.template.spec.securityContext);
+}
}

src/tools/genpolicy/src/job.rs

@@ -110,4 +110,8 @@ impl yaml::K8sResource for Job {
}
false
}
+fn get_process_fields(&self, process: &mut policy::KataProcess) {
+yaml::get_process_fields(process, &self.spec.template.spec.securityContext);
+}
}

src/tools/genpolicy/src/pod.rs

@@ -94,7 +94,7 @@ pub struct PodSpec {
topologySpreadConstraints: Option<Vec<TopologySpreadConstraint>>,
#[serde(skip_serializing_if = "Option::is_none")]
-securityContext: Option<PodSecurityContext>,
+pub securityContext: Option<PodSecurityContext>,
#[serde(skip_serializing_if = "Option::is_none")]
priorityClassName: Option<String>,
@@ -312,9 +312,9 @@ struct SeccompProfile {
/// See Reference / Kubernetes API / Workload Resources / Pod.
#[derive(Clone, Debug, Serialize, Deserialize)]
-struct PodSecurityContext {
+pub struct PodSecurityContext {
#[serde(skip_serializing_if = "Option::is_none")]
-runAsUser: Option<i64>,
+pub runAsUser: Option<i64>,
// TODO: additional fields.
}
@@ -893,11 +893,7 @@ impl yaml::K8sResource for Pod {
}
fn get_process_fields(&self, process: &mut policy::KataProcess) {
-if let Some(context) = &self.spec.securityContext {
-if let Some(uid) = context.runAsUser {
-process.User.UID = uid.try_into().unwrap();
-}
-}
+yaml::get_process_fields(process, &self.spec.securityContext);
}
}

src/tools/genpolicy/src/replica_set.rs

@@ -108,4 +108,8 @@ impl yaml::K8sResource for ReplicaSet {
}
false
}
+fn get_process_fields(&self, process: &mut policy::KataProcess) {
+yaml::get_process_fields(process, &self.spec.template.spec.securityContext);
+}
}

src/tools/genpolicy/src/replication_controller.rs

@@ -110,4 +110,8 @@ impl yaml::K8sResource for ReplicationController {
}
false
}
+fn get_process_fields(&self, process: &mut policy::KataProcess) {
+yaml::get_process_fields(process, &self.spec.template.spec.securityContext);
+}
}

src/tools/genpolicy/src/stateful_set.rs

@@ -192,6 +192,10 @@ impl yaml::K8sResource for StatefulSet {
.clone()
.or_else(|| Some(String::new()))
}
+fn get_process_fields(&self, process: &mut policy::KataProcess) {
+yaml::get_process_fields(process, &self.spec.template.spec.securityContext);
+}
}
impl StatefulSet {

src/tools/genpolicy/src/yaml.rs

@@ -97,8 +97,8 @@ pub trait K8sResource {
}
fn get_process_fields(&self, _process: &mut policy::KataProcess) {
-// Just Pods can have a PodSecurityContext field, so the other
-// resources can use this default get_process_fields implementation.
+// No need to implement support for securityContext or similar fields
+// for some of the K8s resource types.
}
}
@@ -378,3 +378,14 @@ fn handle_unused_field(path: &str, silent_unsupported_fields: bool) {
panic!("Unsupported field: {}", path);
}
}
+pub fn get_process_fields(
+process: &mut policy::KataProcess,
+security_context: &Option<pod::PodSecurityContext>,
+) {
+if let Some(context) = security_context {
+if let Some(uid) = context.runAsUser {
+process.User.UID = uid.try_into().unwrap();
+}
+}
+}
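
Pod forwards its spec.securityContext to this helper directly, while the template-based kinds (DaemonSet, Deployment, Job, ReplicaSet, ReplicationController, StatefulSet) forward spec.template.spec.securityContext; kinds without a pod template keep the no-op trait default. A sketch of that dispatch, again with deliberately reduced types rather than the exact genpolicy structs:

struct KataProcess {
    uid: u32,
}

trait K8sResource {
    // Mirrors the trait default above: nothing to copy when the resource
    // kind has no PodSecurityContext.
    fn get_process_fields(&self, _process: &mut KataProcess) {}
}

// A workload kind with a pod template overrides the default; runAsUser is
// reduced here to a bare Option<i64>.
struct Deployment {
    template_run_as_user: Option<i64>,
}

// A kind without a pod template keeps the no-op default.
struct ConfigMap;

impl K8sResource for Deployment {
    fn get_process_fields(&self, process: &mut KataProcess) {
        if let Some(uid) = self.template_run_as_user {
            process.uid = uid.try_into().unwrap();
        }
    }
}

impl K8sResource for ConfigMap {}

fn main() {
    let mut process = KataProcess { uid: 0 };
    Deployment { template_run_as_user: Some(123) }.get_process_fields(&mut process);
    assert_eq!(process.uid, 123);

    ConfigMap.get_process_fields(&mut process); // default: UID is untouched
    assert_eq!(process.uid, 123);
}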

tests/integration/kubernetes/k8s-policy-deployment.bats

@@ -14,17 +14,24 @@ setup() {
get_pod_config_dir
deployment_name="policy-redis-deployment"
-deployment_yaml="${pod_config_dir}/k8s-policy-deployment.yaml"
+correct_deployment_yaml="${pod_config_dir}/k8s-policy-deployment.yaml"
-# Add an appropriate policy to the correct YAML file.
-policy_settings_dir="$(create_tmp_policy_settings_dir "${pod_config_dir}")"
-add_requests_to_policy_settings "${policy_settings_dir}" "ReadStreamRequest"
-auto_generate_policy "${policy_settings_dir}" "${deployment_yaml}"
+# Save some time by executing genpolicy a single time.
+if [ "${BATS_TEST_NUMBER}" == "1" ]; then
+# Add an appropriate policy to the correct YAML file.
+policy_settings_dir="$(create_tmp_policy_settings_dir "${pod_config_dir}")"
+add_requests_to_policy_settings "${policy_settings_dir}" "ReadStreamRequest"
+auto_generate_policy "${policy_settings_dir}" "${correct_deployment_yaml}"
+fi
+# Start each test case with a copy of the correct yaml file.
+incorrect_deployment_yaml="${pod_config_dir}/k8s-policy-deployment-incorrect.yaml"
+cp "${correct_deployment_yaml}" "${incorrect_deployment_yaml}"
}
@test "Successful deployment with auto-generated policy and container image volumes" {
# Initiate deployment
-kubectl apply -f "${deployment_yaml}"
+kubectl apply -f "${correct_deployment_yaml}"
# Wait for the deployment to be created
cmd="kubectl rollout status --timeout=1s deployment/${deployment_name} | grep 'successfully rolled out'"
@@ -32,16 +39,41 @@ setup() {
waitForProcess "${wait_time}" "${sleep_time}" "${cmd}"
}
+test_deployment_policy_error() {
+# Initiate deployment
+kubectl apply -f "${incorrect_deployment_yaml}"
+# Wait for the deployment pod to fail
+wait_for_blocked_request "CreateContainerRequest" "${deployment_name}"
+}
@test "Policy failure: unexpected UID = 0" {
# Change the pod UID to 0 after the policy has been generated using a different
# runAsUser value. The policy would use UID = 0 by default, if there weren't
# a different runAsUser value in the YAML file.
yq -i \
'.spec.template.spec.securityContext.runAsUser = 0' \
"${incorrect_deployment_yaml}"
test_deployment_policy_error
}
teardown() {
auto_generate_policy_enabled || skip "Auto-generated policy tests are disabled."
-# Debugging information
+# Pod debugging information. Don't print the "Message:" line because it contains a truncated policy log.
+info "Pod ${deployment_name}:"
+kubectl describe pod "${deployment_name}" | grep -v "Message:"
+# Deployment debugging information. The --watch=false argument makes "kubectl rollout status"
+# return instead of waiting for a possibly failed deployment to complete.
+info "Deployment ${deployment_name}:"
+kubectl describe deployment "${deployment_name}"
-kubectl rollout status deployment/${deployment_name}
+kubectl rollout status deployment/${deployment_name} --watch=false
# Clean-up
kubectl delete deployment "${deployment_name}"
delete_tmp_policy_settings_dir "${policy_settings_dir}"
+rm -f "${incorrect_deployment_yaml}"
}
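
These failure tests work because the policy was generated from the correct YAML, which sets runAsUser: 1000 for this deployment (see the manifest diff below); once the copy of the manifest is patched to a different UID, the CreateContainerRequest no longer matches the UID recorded in the policy and is blocked. A hypothetical model of that comparison (the real check is performed by the generated policy inside the guest, not by Rust code):

// Hypothetical model of the UID check enforced by the generated policy.
fn allow_create_container(policy_uid: u32, requested_uid: u32) -> bool {
    policy_uid == requested_uid
}

fn main() {
    // Policy generated with runAsUser: 1000; manifest later patched to 0:
    assert!(!allow_create_container(1000, 0)); // request is denied
    assert!(allow_create_container(1000, 1000)); // unmodified manifest passes
}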

tests/integration/kubernetes/k8s-policy-job.bats

@@ -131,6 +131,15 @@ test_job_policy_error() {
test_job_policy_error
}
@test "Policy failure: unexpected UID = 222" {
# Changing the job spec after generating its policy will cause CreateContainer to be denied.
yq -i \
'.spec.template.spec.securityContext.runAsUser = 222' \
"${incorrect_yaml}"
test_job_policy_error
}
teardown() {
auto_generate_policy_enabled || skip "Auto-generated policy tests are disabled."

tests/integration/kubernetes/k8s-policy-rc.bats

@@ -61,17 +61,18 @@ test_rc_policy() {
--output=jsonpath='{.spec.replicas}')
[ "${number_of_replicas}" -gt 0 ]
-# The replicas pods can be in running, waiting, succeeded or failed
-# status. We need them all on running state before proceeding.
-cmd="kubectl describe rc ${replication_name}"
-cmd+=" | grep \"Pods Status\" | grep \"${number_of_replicas} Running\""
-info "Waiting for: ${cmd}"
-waitForProcess "$wait_time" "$sleep_time" "$cmd"
+# Wait for all the expected pods to be created.
+local count=0
+local launched_pods=()
+while [ $count -lt 6 ] && [ "${#launched_pods[@]}" -ne "${number_of_replicas}" ]; do
+count=$((count + 1))
+sleep 10
+launched_pods=($(kubectl get pods "--selector=app=${app_name}" \
+--output=jsonpath={.items..metadata.name}))
+done
# Check that the number of pods created for the replication controller
# is equal to the number of replicas that we defined.
-launched_pods=($(kubectl get pods "--selector=app=${app_name}" \
---output=jsonpath={.items..metadata.name}))
[ "${#launched_pods[@]}" -eq "${number_of_replicas}" ]
# Check pod creation
@@ -110,13 +111,13 @@
@test "Policy failure: unexpected host device mapping" {
# Changing the template spec after generating its policy will cause CreateContainer to be denied.
-yq -i \
-'.spec.template.spec.containers[0].volumeMounts += [{"mountPath": "/dev/ttyS0", "name": "dev-ttys0"}]' \
-"${incorrect_yaml}"
+yq -i \
+'.spec.template.spec.containers[0].volumeMounts += [{"mountPath": "/dev/ttyS0", "name": "dev-ttys0"}]' \
+"${incorrect_yaml}"
-yq -i \
-'.spec.template.spec.volumes += [{"name": "dev-ttys0", "hostPath": {"path": "/dev/ttyS0"}}]' \
-"${incorrect_yaml}"
+yq -i \
+'.spec.template.spec.volumes += [{"name": "dev-ttys0", "hostPath": {"path": "/dev/ttyS0"}}]' \
+"${incorrect_yaml}"
test_rc_policy true
}
@@ -139,6 +140,15 @@ test_rc_policy() {
test_rc_policy true
}
@test "Policy failure: unexpected UID = 1000" {
# Changing the template spec after generating its policy will cause CreateContainer to be denied.
yq -i \
'.spec.template.spec.securityContext.runAsUser = 1000' \
"${incorrect_yaml}"
test_rc_policy true
}
teardown() {
auto_generate_policy_enabled || skip "Auto-generated policy tests are disabled."

tests/integration/kubernetes/runtimeclass_workloads/k8s-policy-deployment.yaml

@@ -25,6 +25,8 @@ spec:
spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
+securityContext:
+runAsUser: 1000
containers:
- name: master
image: quay.io/opstree/redis

tests/integration/kubernetes/runtimeclass_workloads/k8s-policy-rc.yaml

@@ -17,6 +17,8 @@ spec:
labels:
app: policy-nginx-rc
spec:
+securityContext:
+runAsUser: 123
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers: