Merge pull request #10291 from microsoft/danmihai1/user-name-to-uid

genpolicy: fix and re-enable create container UID verification
Dan Mihai 2024-09-12 15:47:59 -07:00 committed by GitHub
commit e937cb1ded
GPG Key ID: B5690EEEBB952194
7 changed files with 312 additions and 107 deletions
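In short: rules.rego gets its p_user.UID == i_user.UID check re-enabled, and genpolicy learns to turn a container image's User config field into the numeric UID/GID the policy should expect. When that field holds a user name rather than numbers, the pair is looked up in the /etc/passwd carried by the image layers, respecting whiteouts. A minimal sketch of that resolution step, using a hypothetical resolve_user helper rather than the functions added in the diff below:

// Sketch only; the committed logic lives in Container::get_process() and
// parse_passwd_file() in registry.rs (see the hunks further down).
fn resolve_user(user: &str, passwd: &str) -> (u32, u32) {
    if user.is_empty() {
        return (0, 0); // genpolicy's defaults
    }
    if let Some((uid, gid)) = user.split_once(':') {
        // "UID:GID" form
        return (uid.parse().unwrap_or(0), gid.parse().unwrap_or(0));
    }
    if let Ok(uid) = user.parse::<u32>() {
        return (uid, 0); // bare numeric UID, GID keeps its default
    }
    // User name: look it up in /etc/passwd (name:pw:uid:gid:gecos:home:shell).
    for line in passwd.lines() {
        let fields: Vec<&str> = line.split(':').collect();
        if fields.len() == 7 && fields[0] == user {
            return (fields[2].parse().unwrap_or(0), fields[3].parse().unwrap_or(0));
        }
    }
    (0, 0)
}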


@ -669,13 +669,12 @@ dependencies = [
[[package]]
name = "errno"
version = "0.3.1"
version = "0.3.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4bcfec3a70f97c962c307b2d2c56e358cf1d00b558d74262b5f929ee8cc7e73a"
checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba"
dependencies = [
"errno-dragonfly",
"libc",
"windows-sys 0.48.0",
"windows-sys 0.52.0",
]
[[package]]
@ -890,6 +889,7 @@ dependencies = [
"serde_json",
"serde_yaml",
"sha2",
"tar",
"tarindex",
"tempfile",
"tokio",
@ -1342,9 +1342,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.155"
version = "0.2.158"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c"
checksum = "d8adc4bb1803a324070e64a98ae98f38934d91957a99cfb3a43dcbc01bc56439"
[[package]]
name = "libz-ng-sys"
@ -1376,9 +1376,9 @@ checksum = "ece97ea872ece730aed82664c424eb4c8291e1ff2480247ccf7409044bc6479f"
[[package]]
name = "linux-raw-sys"
version = "0.4.10"
version = "0.4.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "da2479e8c062e40bf0066ffa0bc823de0a9368974af99c9f6df941d2c231e03f"
checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89"
[[package]]
name = "log"
@ -2313,7 +2313,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62b24138615de35e32031d041a09032ef3487a616d901ca4db224e7d557efae2"
dependencies = [
"bitflags 1.3.2",
"errno 0.3.1",
"errno 0.3.9",
"io-lifetimes",
"libc",
"linux-raw-sys 0.3.7",
@ -2322,15 +2322,15 @@ dependencies = [
[[package]]
name = "rustix"
version = "0.38.19"
version = "0.38.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "745ecfa778e66b2b63c88a61cb36e0eea109e803b0b86bf9879fbc77c70e86ed"
checksum = "3f55e80d50763938498dd5ebb18647174e0c76dc38c5505294bb224624f30f36"
dependencies = [
"bitflags 2.4.1",
"errno 0.3.1",
"errno 0.3.9",
"libc",
"linux-raw-sys 0.4.10",
"windows-sys 0.48.0",
"linux-raw-sys 0.4.14",
"windows-sys 0.52.0",
]
[[package]]
@ -2686,9 +2686,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
[[package]]
name = "tar"
version = "0.4.38"
version = "0.4.41"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b55807c0344e1e6c04d7c965f5289c39a8d94ae23ed5c0b57aabac549f871c6"
checksum = "cb797dad5fb5b76fcf519e702f4a589483b5ef06567f160c392832c1f5e44909"
dependencies = [
"filetime",
"libc",
@ -3229,7 +3229,7 @@ dependencies = [
"either",
"home",
"once_cell",
"rustix 0.38.19",
"rustix 0.38.36",
]
[[package]]
@ -3324,6 +3324,15 @@ dependencies = [
"windows-targets 0.48.0",
]
[[package]]
name = "windows-sys"
version = "0.52.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d"
dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "windows-targets"
version = "0.42.1"
@ -3531,11 +3540,13 @@ dependencies = [
[[package]]
name = "xattr"
version = "0.2.3"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d1526bbe5aaeb5eb06885f4d987bcdfa5e23187055de9b83fe00156a821fabc"
checksum = "8da84f1a25939b27f6820d92aed108f83ff920fdf11a7b19366c27c4cda81d4f"
dependencies = [
"libc",
"linux-raw-sys 0.4.14",
"rustix 0.38.36",
]
[[package]]


@ -69,5 +69,8 @@ tonic = "0.9.2"
tower = "0.4.13"
containerd-client = "0.4.0"
# username to UID:GID mapping support
tar = "0.4.41"
[dev-dependencies]
regorus = { version = "0.1.4", default-features = false, features = ["arc", "regex"]}


@ -540,9 +540,8 @@ allow_user(p_process, i_process) {
p_user := p_process.User
i_user := i_process.User
# TODO: remove this workaround when fixing https://github.com/kata-containers/kata-containers/issues/9928.
#print("allow_user: input uid =", i_user.UID, "policy uid =", p_user.UID)
#p_user.UID == i_user.UID
print("allow_user: input uid =", i_user.UID, "policy uid =", p_user.UID)
p_user.UID == i_user.UID
# TODO: track down the reason for registry.k8s.io/pause:3.9 being
# executed with gid = 0 despite having "65535:65535" in its container image


@ -24,7 +24,8 @@ use oci_distribution::{
use serde::{Deserialize, Serialize};
use sha2::{digest::typenum::Unsigned, digest::OutputSizeUser, Sha256};
use std::{
collections::BTreeMap, fs::OpenOptions, io, io::BufWriter, io::Seek, io::Write, path::Path,
collections::BTreeMap, fs::OpenOptions, io, io::BufWriter, io::Read, io::Seek, io::Write,
path::Path,
};
use tokio::io::AsyncWriteExt;
@ -68,6 +69,7 @@ pub struct DockerRootfs {
pub struct ImageLayer {
pub diff_id: String,
pub verity_hash: String,
pub passwd: String,
}
/// See https://docs.docker.com/reference/dockerfile/#volume.
@ -79,6 +81,34 @@ pub struct DockerVolumeHostDirectory {
// run the container."
}
/// A single record in a Unix passwd file.
#[derive(Debug)]
struct PasswdRecord {
pub user: String,
#[allow(dead_code)]
pub validate: bool,
pub uid: u32,
pub gid: u32,
#[allow(dead_code)]
pub gecos: String,
#[allow(dead_code)]
pub home: String,
#[allow(dead_code)]
pub shell: String,
}
/// Path to /etc/passwd in a container layer's tar file.
const PASSWD_FILE_TAR_PATH: &str = "etc/passwd";
/// Path to a file indicating a whiteout of the /etc/passwd file in a container
/// layer's tar file (i.e., /etc/passwd was deleted in the layer).
const PASSWD_FILE_WHITEOUT_TAR_PATH: &str = "etc/.wh.passwd";
/// A marker used to track whether a particular container layer has had its
/// /etc/passwd file deleted, and thus any such files read from previous, lower
/// layers should be discarded.
const WHITEOUT_MARKER: &str = "WHITEOUT";
impl Container {
pub async fn new(config: &Config, image: &str) -> Result<Self> {
info!("============================================");
@ -150,28 +180,83 @@ impl Container {
debug!("Getting process field from docker config layer...");
let docker_config = &self.config_layer.config;
/*
* The user field may:
*
* 1. Be empty
* 2. Contain only a UID
* 3. Contain a UID:GID pair, in that format
* 4. Contain a user name, which we need to translate into a UID/GID pair
* 5. Be erroneous, somehow
*/
if let Some(image_user) = &docker_config.User {
if !image_user.is_empty() {
debug!("Splitting Docker config user = {:?}", image_user);
let user: Vec<&str> = image_user.split(':').collect();
if !user.is_empty() {
debug!("Parsing uid from user[0] = {}", &user[0]);
match user[0].parse() {
Ok(id) => process.User.UID = id,
Err(e) => {
// "image: prom/prometheus" has user = "nobody", but
// process.User.UID is an u32 value.
warn!(
"Failed to parse {} as u32, using uid = 0 - error {e}",
&user[0]
);
process.User.UID = 0;
if image_user.contains(':') {
debug!("Splitting Docker config user = {:?}", image_user);
let user: Vec<&str> = image_user.split(':').collect();
let parts_count = user.len();
if parts_count != 2 {
warn!(
"Failed to split user, expected two parts, got {}, using uid = gid = 0",
parts_count
);
} else {
debug!("Parsing uid from user[0] = {}", &user[0]);
match user[0].parse() {
Ok(id) => process.User.UID = id,
Err(e) => {
warn!(
"Failed to parse {} as u32, using uid = 0 - error {e}",
&user[0]
);
}
}
debug!("Parsing gid from user[1] = {:?}", user[1]);
match user[1].parse() {
Ok(id) => process.User.GID = id,
Err(e) => {
warn!(
"Failed to parse {} as u32, using gid = 0 - error {e}",
&user[0]
);
}
}
}
} else {
match image_user.parse::<u32>() {
Ok(uid) => process.User.UID = uid,
Err(outer_e) => {
// Find the last layer with an /etc/passwd file,
// respecting whiteouts.
let mut passwd = "".to_string();
for layer in self.get_image_layers() {
if layer.passwd == WHITEOUT_MARKER {
passwd = "".to_string();
} else if !layer.passwd.is_empty() {
passwd = layer.passwd
}
}
if passwd.is_empty() {
warn!("Failed to parse {} as u32 - error {outer_e} - and no /etc/passwd file is available, using uid = gid = 0", image_user);
} else {
match parse_passwd_file(passwd) {
Ok(records) => {
if let Some(record) =
records.iter().find(|&r| r.user == *image_user)
{
process.User.UID = record.uid;
process.User.GID = record.gid;
}
}
Err(inner_e) => {
warn!("Failed to parse {} as u32 - error {outer_e} - and failed to parse /etc/passwd - error {inner_e}, using uid = gid = 0", image_user);
}
}
}
}
}
}
if user.len() > 1 {
debug!("Parsing gid from user[1] = {:?}", user[1]);
process.User.GID = user[1].parse().unwrap();
}
}
}
@ -261,16 +346,18 @@ async fn get_image_layers(
|| layer.media_type.eq(manifest::IMAGE_LAYER_GZIP_MEDIA_TYPE)
{
if layer_index < config_layer.rootfs.diff_ids.len() {
let (verity_hash, passwd) = get_verity_and_users(
layers_cache_file_path.clone(),
client,
reference,
&layer.digest,
&config_layer.rootfs.diff_ids[layer_index].clone(),
)
.await?;
layers.push(ImageLayer {
diff_id: config_layer.rootfs.diff_ids[layer_index].clone(),
verity_hash: get_verity_hash(
layers_cache_file_path.clone(),
client,
reference,
&layer.digest,
&config_layer.rootfs.diff_ids[layer_index].clone(),
)
.await?,
verity_hash: verity_hash.to_owned(),
passwd: passwd.to_owned(),
});
} else {
return Err(anyhow!("Too many Docker gzip layers"));
@ -283,13 +370,13 @@ async fn get_image_layers(
Ok(layers)
}
async fn get_verity_hash(
async fn get_verity_and_users(
layers_cache_file_path: Option<String>,
client: &mut Client,
reference: &Reference,
layer_digest: &str,
diff_id: &str,
) -> Result<String> {
) -> Result<(String, String)> {
let temp_dir = tempfile::tempdir_in(".")?;
let base_dir = temp_dir.path();
// Use file names supported by both Linux and Windows.
@ -301,12 +388,15 @@ async fn get_verity_hash(
compressed_path.set_extension("gz");
let mut verity_hash = "".to_string();
let mut passwd = "".to_string();
let mut error_message = "".to_string();
let mut error = false;
// get value from store and return if it exists
if let Some(path) = layers_cache_file_path.as_ref() {
verity_hash = read_verity_from_store(path, diff_id)?;
let res = read_verity_and_users_from_store(path, diff_id)?;
verity_hash = res.0;
passwd = res.1;
info!("Using cache file");
info!("dm-verity root hash: {verity_hash}");
}
@ -327,15 +417,16 @@ async fn get_verity_hash(
};
if !error {
match get_verity_hash_value(&decompressed_path) {
match get_verity_hash_and_users(&decompressed_path) {
Err(e) => {
error_message = format!("Failed to get verity hash {e}");
error = true;
}
Ok(v) => {
verity_hash = v;
Ok(res) => {
verity_hash = res.0;
passwd = res.1;
if let Some(path) = layers_cache_file_path.as_ref() {
add_verity_to_store(path, diff_id, &verity_hash)?;
add_verity_and_users_to_store(path, diff_id, &verity_hash, &passwd)?;
}
info!("dm-verity root hash: {verity_hash}");
}
@ -351,11 +442,16 @@ async fn get_verity_hash(
}
warn!("{error_message}");
}
Ok(verity_hash)
Ok((verity_hash, passwd))
}
// the store is a json file that matches layer hashes to verity hashes
pub fn add_verity_to_store(cache_file: &str, diff_id: &str, verity_hash: &str) -> Result<()> {
pub fn add_verity_and_users_to_store(
cache_file: &str,
diff_id: &str,
verity_hash: &str,
passwd: &str,
) -> Result<()> {
// open the json file in read mode, create it if it doesn't exist
let read_file = OpenOptions::new()
.read(true)
@ -375,6 +471,7 @@ pub fn add_verity_to_store(cache_file: &str, diff_id: &str, verity_hash: &str) -
data.push(ImageLayer {
diff_id: diff_id.to_string(),
verity_hash: verity_hash.to_string(),
passwd: passwd.to_string(),
});
// Serialize in pretty format
@ -399,26 +496,29 @@ pub fn add_verity_to_store(cache_file: &str, diff_id: &str, verity_hash: &str) -
// helper function to read the verity hash from the store
// returns empty string if not found or file does not exist
pub fn read_verity_from_store(cache_file: &str, diff_id: &str) -> Result<String> {
pub fn read_verity_and_users_from_store(
cache_file: &str,
diff_id: &str,
) -> Result<(String, String)> {
match OpenOptions::new().read(true).open(cache_file) {
Ok(file) => match serde_json::from_reader(file) {
Result::<Vec<ImageLayer>, _>::Ok(layers) => {
for layer in layers {
if layer.diff_id == diff_id {
return Ok(layer.verity_hash);
return Ok((layer.verity_hash, layer.passwd));
}
}
}
Err(e) => {
warn!("read_verity_from_store: failed to read cached image layers: {e}");
warn!("read_verity_and_users_from_store: failed to read cached image layers: {e}");
}
},
Err(e) => {
info!("read_verity_from_store: failed to open cache file: {e}");
info!("read_verity_and_users_from_store: failed to open cache file: {e}");
}
}
Ok(String::new())
Ok((String::new(), String::new()))
}
async fn create_decompressed_layer_file(
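With the passwd field added to ImageLayer, each entry in the layers cache file now carries the layer's /etc/passwd contents (or the whiteout marker) alongside the dm-verity hash. Assuming ImageLayer keeps plain serde field names, a freshly written entry looks roughly like the value below; entries written by older genpolicy builds lack the new field, so whether an old cache file still deserializes depends on ImageLayer's serde attributes, which are not shown in this diff.

// Sketch of one cache entry as built by add_verity_and_users_to_store();
// the diff_id and verity_hash strings are placeholders, not real digests.
let entry = ImageLayer {
    diff_id: "sha256:<layer diff_id>".to_string(),
    verity_hash: "<dm-verity root hash>".to_string(),
    passwd: "root:x:0:0:root:/root:/bin/sh\n".to_string(),
};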
@ -457,7 +557,7 @@ async fn create_decompressed_layer_file(
Ok(())
}
pub fn get_verity_hash_value(path: &Path) -> Result<String> {
pub fn get_verity_hash_and_users(path: &Path) -> Result<(String, String)> {
info!("Calculating dm-verity root hash");
let mut file = std::fs::File::open(path)?;
let size = file.seek(std::io::SeekFrom::End(0))?;
@ -470,7 +570,23 @@ pub fn get_verity_hash_value(path: &Path) -> Result<String> {
let hash = verity::traverse_file(&mut file, 0, false, v, &mut verity::no_write)?;
let result = format!("{:x}", hash);
Ok(result)
file.seek(std::io::SeekFrom::Start(0))?;
let mut passwd = String::new();
for entry_wrap in tar::Archive::new(file).entries()? {
let mut entry = entry_wrap?;
let entry_path = entry.header().path()?;
let path_str = entry_path.to_str().unwrap();
if path_str == PASSWD_FILE_TAR_PATH {
entry.read_to_string(&mut passwd)?;
break;
} else if path_str == PASSWD_FILE_WHITEOUT_TAR_PATH {
passwd = WHITEOUT_MARKER.to_owned();
break;
}
}
Ok((result, passwd))
}
pub async fn get_container(config: &Config, image: &str) -> Result<Container> {
@ -525,3 +641,31 @@ fn build_auth(reference: &Reference) -> RegistryAuth {
RegistryAuth::Anonymous
}
fn parse_passwd_file(passwd: String) -> Result<Vec<PasswdRecord>> {
let mut records = Vec::new();
for rec in passwd.lines() {
let fields: Vec<&str> = rec.split(':').collect();
let field_count = fields.len();
if field_count != 7 {
return Err(anyhow!(
"Incorrect passwd record, expected 7 fields, got {}",
field_count
));
}
records.push(PasswdRecord {
user: fields[0].to_string(),
validate: fields[1] == "x",
uid: fields[2].parse().unwrap(),
gid: fields[3].parse().unwrap(),
gecos: fields[4].to_string(),
home: fields[5].to_string(),
shell: fields[6].to_string(),
});
}
Ok(records)
}
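As a worked example of the parser above (the record is the conventional "nobody" entry found in common base images, not something taken from this diff): feeding it a single well-formed line yields one PasswdRecord whose uid and gid are 65534, the value the new bats test cases below expect for the prometheus image's user = "nobody".

// Hypothetical usage of parse_passwd_file():
let passwd = "nobody:x:65534:65534:nobody:/nonexistent:/usr/sbin/nologin".to_string();
let records = parse_passwd_file(passwd).unwrap();
assert_eq!(records[0].user, "nobody");
assert_eq!((records[0].uid, records[0].gid), (65534, 65534));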


@ -6,8 +6,8 @@
// Allow Docker image config field names.
#![allow(non_snake_case)]
use crate::registry::{
add_verity_to_store, get_verity_hash_value, read_verity_from_store, Container,
DockerConfigLayer, ImageLayer,
add_verity_and_users_to_store, get_verity_hash_and_users, read_verity_and_users_from_store,
Container, DockerConfigLayer, ImageLayer,
};
use anyhow::{anyhow, Result};
@ -265,15 +265,17 @@ pub async fn get_image_layers(
|| layer_media_type.eq("application/vnd.oci.image.layer.v1.tar+gzip")
{
if layer_index < config_layer.rootfs.diff_ids.len() {
let (verity_hash, passwd) = get_verity_and_users(
layers_cache_file_path.clone(),
layer["digest"].as_str().unwrap(),
client,
&config_layer.rootfs.diff_ids[layer_index].clone(),
)
.await?;
let imageLayer = ImageLayer {
diff_id: config_layer.rootfs.diff_ids[layer_index].clone(),
verity_hash: get_verity_hash(
layers_cache_file_path.clone(),
layer["digest"].as_str().unwrap(),
client,
&config_layer.rootfs.diff_ids[layer_index].clone(),
)
.await?,
verity_hash,
passwd,
};
layersVec.push(imageLayer);
} else {
@ -286,12 +288,12 @@ pub async fn get_image_layers(
Ok(layersVec)
}
async fn get_verity_hash(
async fn get_verity_and_users(
layers_cache_file_path: Option<String>,
layer_digest: &str,
client: &containerd_client::Client,
diff_id: &str,
) -> Result<String> {
) -> Result<(String, String)> {
let temp_dir = tempfile::tempdir_in(".")?;
let base_dir = temp_dir.path();
// Use file names supported by both Linux and Windows.
@ -303,11 +305,14 @@ async fn get_verity_hash(
compressed_path.set_extension("gz");
let mut verity_hash = "".to_string();
let mut passwd = "".to_string();
let mut error_message = "".to_string();
let mut error = false;
if let Some(path) = layers_cache_file_path.as_ref() {
verity_hash = read_verity_from_store(path, diff_id)?;
let res = read_verity_and_users_from_store(path, diff_id)?;
verity_hash = res.0;
passwd = res.1;
info!("Using cache file");
info!("dm-verity root hash: {verity_hash}");
}
@ -327,15 +332,16 @@ async fn get_verity_hash(
}
if !error {
match get_verity_hash_value(&decompressed_path) {
match get_verity_hash_and_users(&decompressed_path) {
Err(e) => {
error_message = format!("Failed to get verity hash {e}");
error = true;
}
Ok(v) => {
verity_hash = v;
Ok(res) => {
verity_hash = res.0;
passwd = res.1;
if let Some(path) = layers_cache_file_path.as_ref() {
add_verity_to_store(path, diff_id, &verity_hash)?;
add_verity_and_users_to_store(path, diff_id, &verity_hash, &passwd)?;
}
info!("dm-verity root hash: {verity_hash}");
}
@ -350,7 +356,7 @@ async fn get_verity_hash(
}
warn!("{error_message}");
}
Ok(verity_hash)
Ok((verity_hash, passwd))
}
async fn create_decompressed_layer_file(


@ -8,6 +8,8 @@
load "${BATS_TEST_DIRNAME}/../../common.bash"
load "${BATS_TEST_DIRNAME}/tests_common.sh"
issue="https://github.com/kata-containers/kata-containers/issues/10297"
setup() {
auto_generate_policy_enabled || skip "Auto-generated policy tests are disabled."
@ -37,6 +39,9 @@ setup() {
# Save some time by executing genpolicy a single time.
if [ "${BATS_TEST_NUMBER}" == "1" ]; then
# Work around #10297 if needed.
prometheus_image_supported || replace_prometheus_image
# Save pre-generated yaml files
cp "${correct_configmap_yaml}" "${pre_generate_configmap_yaml}"
cp "${correct_pod_yaml}" "${pre_generate_pod_yaml}"
@ -54,6 +59,22 @@ setup() {
cp "${pre_generate_pod_yaml}" "${testcase_pre_generate_pod_yaml}"
}
prometheus_image_supported() {
[[ "${SNAPSHOTTER:-}" == "nydus" ]] && return 1
return 0
}
replace_prometheus_image() {
info "Replacing prometheus image with busybox to work around ${issue}"
yq -i \
'.spec.containers[0].name = "busybox"' \
"${correct_pod_yaml}"
yq -i \
'.spec.containers[0].image = "quay.io/prometheus/busybox:latest"' \
"${correct_pod_yaml}"
}
# Common function for several test cases from this bats script.
wait_for_pod_ready() {
kubectl create -f "${correct_configmap_yaml}"
@ -193,30 +214,12 @@ test_pod_policy_error() {
run ! grep -q "io.katacontainers.config.agent.policy" "${testcase_pre_generate_pod_yaml}"
}
@test "Successful pod due to runAsUser workaround from rules.rego" {
# This test case should fail, but it passes due to these lines being commented out in rules.rego:
#
# allow_user(p_process, i_process) {
# #print("allow_user: input uid =", i_user.UID, "policy uid =", p_user.UID)
# #p_user.UID == i_user.UID
#
# So this test case should be converted to use test_pod_policy_error when that workaround will
# be removed.
yq -i \
'.spec.containers[0].securityContext.runAsUser = 101' \
"${incorrect_pod_yaml}"
kubectl create -f "${correct_configmap_yaml}"
kubectl create -f "${incorrect_pod_yaml}"
kubectl wait --for=condition=Ready "--timeout=${timeout}" pod "${pod_name}"
}
@test "ExecProcessRequest tests" {
wait_for_pod_ready
# Execute commands allowed by the policy.
pod_exec_allowed_command "${pod_name}" "echo" "livenessProbe" "test"
pod_exec_allowed_command "${pod_name}" "sh" "-c" "ls -l /"
pod_exec_allowed_command "${pod_name}" "echo" "-n" "readinessProbe with space characters"
pod_exec_allowed_command "${pod_name}" "echo" "startupProbe" "test"
# Try to execute commands disallowed by the policy.
@ -227,6 +230,44 @@ test_pod_policy_error() {
pod_exec_blocked_command "${pod_name}" "echo" "hello"
}
@test "Successful pod: runAsUser having the same value as the UID from the container image" {
prometheus_image_supported || skip "Test case not supported due to ${issue}"
# This container image specifies user = "nobody" that corresponds to UID = 65534. Setting
# the same value for runAsUser in the YAML file doesn't change the auto-generated Policy.
yq -i \
'.spec.containers[0].securityContext.runAsUser = 65534' \
"${incorrect_pod_yaml}"
kubectl create -f "${correct_configmap_yaml}"
kubectl create -f "${incorrect_pod_yaml}"
kubectl wait --for=condition=Ready "--timeout=${timeout}" pod "${pod_name}"
}
@test "Policy failure: unexpected UID = 0" {
prometheus_image_supported || skip "Test case not supported due to ${issue}"
# Change the container UID to 0 after the policy has been generated, and verify that the
# change gets rejected by the policy. UID = 0 is the default value from genpolicy, but
# this container image specifies user = "nobody" that corresponds to UID = 65534.
yq -i \
'.spec.containers[0].securityContext.runAsUser = 0' \
"${incorrect_pod_yaml}"
test_pod_policy_error
}
@test "Policy failure: unexpected UID = 1234" {
# Change the container UID to 1234 after the policy has been generated, and verify that the
# change gets rejected by the policy. This container image specifies user = "nobody" that
# corresponds to UID = 65534.
yq -i \
'.spec.containers[0].securityContext.runAsUser = 1234' \
"${incorrect_pod_yaml}"
test_pod_policy_error
}
teardown() {
auto_generate_policy_enabled || skip "Auto-generated policy tests are disabled."


@ -11,8 +11,10 @@ spec:
terminationGracePeriodSeconds: 0
runtimeClassName: kata
containers:
- name: busybox
image: quay.io/prometheus/busybox:latest
- name: prometheus
image: quay.io/prometheus/prometheus:latest
command:
- /bin/sh
env:
- name: KUBE_CONFIG_1
valueFrom:
@ -25,7 +27,6 @@ spec:
name: policy-configmap
key: data-2
securityContext:
runAsUser: 1000
seccompProfile:
type: RuntimeDefault
livenessProbe:
@ -40,9 +41,9 @@ spec:
readinessProbe:
exec:
command:
- "sh"
- "-c"
- "ls -l /"
- "echo"
- "-n"
- "readinessProbe with space characters"
startupProbe:
exec:
command: