Merge pull request #10008 from microsoft/danmihai1/runAsUser

genpolicy: add support for runAsUser fields
Dan Mihai 2024-07-15 12:08:50 -07:00 committed by GitHub
commit bcaf7fc3b4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
8 changed files with 71 additions and 19 deletions
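At a high level, this change teaches genpolicy to read runAsUser from the pod-level PodSecurityContext and from the container-level SecurityContext, and to copy that value into the UID of the Kata process recorded in the generated policy. The sketch below is illustrative only; it uses simplified stand-in types rather than the actual policy::KataProcess definitions, and assumes the policy UID is a u32 (hence the try_into conversion seen in the diffs that follow).

// Illustrative stand-ins, not the genpolicy types.
#[derive(Debug, Default)]
struct KataUser {
    uid: u32,
}

#[derive(Debug, Default)]
struct KataProcess {
    user: KataUser,
}

// Kubernetes expresses runAsUser as an i64, while the policy UID is assumed
// here to be a u32, so the conversion can fail for out-of-range values.
fn set_run_as_user(process: &mut KataProcess, run_as_user: Option<i64>) {
    if let Some(uid) = run_as_user {
        process.user.uid = uid.try_into().expect("runAsUser out of u32 range");
    }
}

fn main() {
    let mut process = KataProcess::default();
    set_run_as_user(&mut process, Some(1000));
    assert_eq!(process.user.uid, 1000);
}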

View File

@@ -540,9 +540,7 @@ allow_user(p_process, i_process) {
p_user := p_process.User
i_user := i_process.User
# TODO: track down the reason for mcr.microsoft.com/oss/bitnami/redis:6.0.8 being
# executed with uid = 0 despite having "User": "1001" in its container image
# config.
# TODO: remove this workaround when fixing https://github.com/kata-containers/kata-containers/issues/9928.
#print("allow_user: input uid =", i_user.UID, "policy uid =", p_user.UID)
#p_user.UID == i_user.UID

View File

@@ -32,7 +32,7 @@ pub fn get_process(privileged_container: bool, common: &policy::CommonData) -> p
Env: Vec::new(),
Cwd: "/".to_string(),
Capabilities: capabilities,
NoNewPrivileges: true,
NoNewPrivileges: false,
}
}
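Note that the default process returned here now starts with NoNewPrivileges set to false. As shown in the Container::get_process_fields hunk further below, a container-level allowPrivilegeEscalation setting, when present, overrides that default with its negation; when it is absent, the effective result matches the previous behavior, where allow_privilege_escalation() defaulted to true. A standalone sketch of the derivation, using an assumed helper name rather than the genpolicy source:

// no_new_privileges() is a hypothetical helper used only for illustration.
fn no_new_privileges(allow_privilege_escalation: Option<bool>) -> bool {
    match allow_privilege_escalation {
        Some(allow) => !allow, // explicit container-level setting wins
        None => false,         // new default, same effective value as before
    }
}

fn main() {
    assert_eq!(no_new_privileges(None), false);
    assert_eq!(no_new_privileges(Some(true)), false);
    assert_eq!(no_new_privileges(Some(false)), true);
}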

View File

@@ -92,6 +92,9 @@ pub struct PodSpec {
#[serde(skip_serializing_if = "Option::is_none")]
topologySpreadConstraints: Option<Vec<TopologySpreadConstraint>>,
#[serde(skip_serializing_if = "Option::is_none")]
securityContext: Option<PodSecurityContext>,
}
/// See Reference / Kubernetes API / Workload Resources / Pod.
@@ -231,7 +234,7 @@ struct Probe {
#[serde(skip_serializing_if = "Option::is_none")]
tcpSocket: Option<TCPSocketAction>,
// TODO: additional fiels.
// TODO: additional fields.
}
/// See Reference / Kubernetes API / Workload Resources / Pod.
@@ -259,7 +262,7 @@ struct HTTPGetAction {
#[serde(skip_serializing_if = "Option::is_none")]
httpHeaders: Option<Vec<HTTPHeader>>,
// TODO: additional fiels.
// TODO: additional fields.
}
/// See Reference / Kubernetes API / Workload Resources / Pod.
@@ -301,6 +304,14 @@ struct SeccompProfile {
localhostProfile: Option<String>,
}
/// See Reference / Kubernetes API / Workload Resources / Pod.
#[derive(Clone, Debug, Serialize, Deserialize)]
struct PodSecurityContext {
#[serde(skip_serializing_if = "Option::is_none")]
runAsUser: Option<i64>,
// TODO: additional fields.
}
/// See Reference / Kubernetes API / Workload Resources / Pod.
#[derive(Clone, Debug, Serialize, Deserialize)]
struct Lifecycle {
@@ -316,7 +327,7 @@ struct Lifecycle {
struct LifecycleHandler {
#[serde(skip_serializing_if = "Option::is_none")]
exec: Option<ExecAction>,
// TODO: additional fiels.
// TODO: additional fields.
}
/// See Reference / Kubernetes API / Workload Resources / Pod.
@@ -571,15 +582,6 @@ impl Container {
false
}
pub fn allow_privilege_escalation(&self) -> bool {
if let Some(context) = &self.securityContext {
if let Some(allow) = context.allowPrivilegeEscalation {
return allow;
}
}
true
}
pub fn read_only_root_filesystem(&self) -> bool {
if let Some(context) = &self.securityContext {
if let Some(read_only) = context.readOnlyRootFilesystem {
@@ -811,6 +813,14 @@ impl yaml::K8sResource for Pod {
.clone()
.or_else(|| Some(String::new()))
}
fn get_process_fields(&self, process: &mut policy::KataProcess) {
if let Some(context) = &self.spec.securityContext {
if let Some(uid) = context.runAsUser {
process.User.UID = uid.try_into().unwrap();
}
}
}
}
impl Container {
@@ -858,6 +868,17 @@ impl Container {
}
compress_default_capabilities(capabilities, defaults);
}
pub fn get_process_fields(&self, process: &mut policy::KataProcess) {
if let Some(context) = &self.securityContext {
if let Some(uid) = context.runAsUser {
process.User.UID = uid.try_into().unwrap();
}
if let Some(allow) = context.allowPrivilegeEscalation {
process.NoNewPrivileges = !allow
}
}
}
}
fn compress_default_capabilities(
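The new PodSecurityContext struct follows the same serde pattern as the other Kubernetes types in this file. The standalone sketch below shows how such a struct picks up runAsUser from YAML; it assumes the serde (with the derive feature) and serde_yaml crates, and the fragment types are simplified stand-ins, not the genpolicy definitions.

use serde::Deserialize;

// Simplified stand-ins for the pod spec types; only deserialization is shown.
#[allow(non_snake_case)]
#[derive(Clone, Debug, Deserialize)]
struct PodSecurityContext {
    runAsUser: Option<i64>,
}

#[allow(non_snake_case)]
#[derive(Clone, Debug, Deserialize)]
struct PodSpecFragment {
    securityContext: Option<PodSecurityContext>,
}

fn main() {
    let yaml = "securityContext:\n  runAsUser: 1000\n";
    let spec: PodSpecFragment = serde_yaml::from_str(yaml).unwrap();
    let uid = spec.securityContext.and_then(|sc| sc.runAsUser);
    assert_eq!(uid, Some(1000));
}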

View File

@@ -657,8 +657,10 @@ impl AgentPolicy {
substitute_env_variables(&mut process.Env);
substitute_args_env_variables(&mut process.Args, &process.Env);
c_settings.get_process_fields(&mut process);
process.NoNewPrivileges = !yaml_container.allow_privilege_escalation();
resource.get_process_fields(&mut process);
yaml_container.get_process_fields(&mut process);
process
}
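The call order above determines precedence: the per-container settings are applied first, then the pod-level fields through resource.get_process_fields, and finally the container-level fields through yaml_container.get_process_fields, so a container-level runAsUser overrides a pod-level one. A minimal sketch of that last-writer-wins behavior, using stand-in values rather than the genpolicy types:

// Illustrative only: later sources overwrite earlier ones when they provide a value.
fn effective_uid(settings: Option<i64>, pod: Option<i64>, container: Option<i64>) -> Option<i64> {
    let mut uid = None;
    for source in [settings, pod, container] {
        if source.is_some() {
            uid = source;
        }
    }
    uid
}

fn main() {
    // Pod-level runAsUser 1000, container-level runAsUser 101: the container wins.
    assert_eq!(effective_uid(None, Some(1000), Some(101)), Some(101));
    // Only the pod-level value is set.
    assert_eq!(effective_uid(None, Some(1000), None), Some(1000));
}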

View File

@@ -94,6 +94,11 @@ pub trait K8sResource {
fn get_runtime_class_name(&self) -> Option<String> {
None
}
fn get_process_fields(&self, _process: &mut policy::KataProcess) {
// Just Pods can have a PodSecurityContext field, so the other
// resources can use this default get_process_fields implementation.
}
}
/// See Reference / Kubernetes API / Common Definitions / LabelSelector.
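This default method means only Pod, which carries a PodSecurityContext, needs to override get_process_fields; every other K8sResource implementor inherits the no-op. A small sketch of that trait-default pattern, with hypothetical stand-in types rather than the genpolicy resources:

// Stand-in types for illustration only.
#[derive(Debug, Default)]
struct KataProcess {
    uid: u32,
}

trait K8sResourceLike {
    fn get_process_fields(&self, _process: &mut KataProcess) {
        // Default: nothing to do for resources without a PodSecurityContext.
    }
}

struct PodLike {
    run_as_user: Option<i64>,
}

struct ConfigMapLike;

impl K8sResourceLike for PodLike {
    fn get_process_fields(&self, process: &mut KataProcess) {
        if let Some(uid) = self.run_as_user {
            process.uid = uid.try_into().expect("runAsUser out of u32 range");
        }
    }
}

// Inherits the default no-op implementation.
impl K8sResourceLike for ConfigMapLike {}

fn main() {
    let mut process = KataProcess::default();
    PodLike { run_as_user: Some(1000) }.get_process_fields(&mut process);
    ConfigMapLike.get_process_fields(&mut process);
    assert_eq!(process.uid, 1000);
}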

View File

@@ -174,6 +174,24 @@ test_pod_policy_error() {
run ! grep -q "io.katacontainers.config.agent.policy" "${testcase_pre_generate_pod_yaml}"
}
@test "Successful pod due to runAsUser workaround from rules.rego" {
# This test case should fail, but it passes due to these lines being commented out in rules.rego:
#
# allow_user(p_process, i_process) {
# #print("allow_user: input uid =", i_user.UID, "policy uid =", p_user.UID)
# #p_user.UID == i_user.UID
#
# So this test case should be converted to use test_pod_policy_error when that workaround will
# be removed.
yq -i \
'.spec.containers[0].securityContext.runAsUser = 101' \
"${incorrect_pod_yaml}"
kubectl create -f "${correct_configmap_yaml}"
kubectl create -f "${incorrect_pod_yaml}"
kubectl wait --for=condition=Ready "--timeout=${timeout}" pod "${pod_name}"
}
teardown() {
auto_generate_policy_enabled || skip "Auto-generated policy tests are disabled."

View File

@@ -12,7 +12,14 @@ setup() {
get_pod_config_dir
yaml_file="${pod_config_dir}/pod-security-context.yaml"
add_allow_all_policy_to_yaml "${yaml_file}"
policy_settings_dir="$(create_tmp_policy_settings_dir "${pod_config_dir}")"
cmd="ps --user 1000 -f"
exec_command="sh -c ${cmd}"
add_exec_to_policy_settings "${policy_settings_dir}" "${exec_command}"
add_requests_to_policy_settings "${policy_settings_dir}" "ReadStreamRequest"
auto_generate_policy "${policy_settings_dir}" "${yaml_file}"
}
@test "Security context" {
@@ -25,7 +32,6 @@ setup() {
kubectl wait --for=condition=Ready --timeout=$timeout pod "$pod_name"
# Check user
cmd="ps --user 1000 -f"
process="tail -f /dev/null"
kubectl exec $pod_name -- sh -c $cmd | grep "$process"
}
@@ -35,4 +41,5 @@ teardown() {
kubectl describe "pod/$pod_name"
kubectl delete pod "$pod_name"
delete_tmp_policy_settings_dir "${policy_settings_dir}"
}

View File

@@ -25,6 +25,7 @@ spec:
name: policy-configmap
key: data-2
securityContext:
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
topologySpreadConstraints: