genpolicy: remove tarfs snapshotter support

AKS Confidential Containers uses the tarfs snapshotter. CoCo
upstream doesn't use this snapshotter, so remove this Policy complexity
from upstream.

Signed-off-by: Dan Mihai <dmihai@microsoft.com>
Dan Mihai 2025-07-15 15:47:00 +00:00
parent 077c59dd1f
commit f6016f4f36
9 changed files with 36 additions and 567 deletions

src/tools/genpolicy/Cargo.lock

@@ -815,7 +815,6 @@ dependencies = [
"env_logger",
"flate2",
"fs2",
"generic-array",
"k8s-cri",
"kata-agent-policy",
"libz-ng-sys",
@@ -831,15 +830,12 @@ dependencies = [
"serde_ignored",
"serde_json",
"serde_yaml",
"sha2",
"slog",
"tar",
"tarindex",
"tempfile",
"tokio",
"tonic",
"tower 0.4.13",
"zerocopy",
]
[[package]]
@@ -2567,24 +2563,6 @@ dependencies = [
"xattr",
]
[[package]]
name = "tarfs-defs"
version = "0.1.0"
source = "git+https://github.com/kata-containers/tardev-snapshotter?rev=06183a5#06183a5e2a83c3261740f4f0f6ce4aa16b14e436"
dependencies = [
"zerocopy",
]
[[package]]
name = "tarindex"
version = "0.1.0"
source = "git+https://github.com/kata-containers/tardev-snapshotter?rev=06183a5#06183a5e2a83c3261740f4f0f6ce4aa16b14e436"
dependencies = [
"tar",
"tarfs-defs",
"zerocopy",
]
[[package]]
name = "tempfile"
version = "3.19.1"
@@ -3500,27 +3478,6 @@ dependencies = [
"synstructure",
]
[[package]]
name = "zerocopy"
version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "854e949ac82d619ee9a14c66a1b674ac730422372ccb759ce0c39cabcf2bf8e6"
dependencies = [
"byteorder",
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "125139de3f6b9d625c39e2efdd73d41bdac468ccd556556440e322be0e1bbd91"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.104",
]
[[package]]
name = "zerofrom"
version = "0.1.6"

src/tools/genpolicy/Cargo.toml

@@ -43,12 +43,14 @@ docker_credential = "1.3.1"
flate2 = { version = "1.0.26", features = [
"zlib-ng",
], default-features = false }
fs2 = "0.4.3"
libz-ng-sys = "1.1.15" # force newer version that compiles on ppc64le
oci-client = { version = "0.12.0" }
openssl = { version = "0.10.73", features = ["vendored"] }
serde_ignored = "0.1.7"
serde_json = "1.0.39"
serde-transcode = "1.1.1"
tempfile = "3.19.1"
tokio = { version = "1.38.0", features = ["rt-multi-thread"] }
# OCI container specs.
@@ -58,14 +60,6 @@ oci-spec = { version = "0.8.1", features = ["runtime"] }
protocols = { path = "../../libs/protocols", features = ["with-serde"] }
protobuf = "3.2.0"
# dm-verity root hash support
generic-array = "0.14.6"
sha2 = "0.10.6"
tarindex = { git = "https://github.com/kata-containers/tardev-snapshotter", rev = "06183a5" }
tempfile = "3.19.1"
zerocopy = "0.6.6"
fs2 = "0.4.3"
# containerd image pull support
k8s-cri = "0.7.0"
tonic = "0.9.2"

src/tools/genpolicy/rules.rego

@@ -1038,34 +1038,14 @@ allow_storages(p_storages, i_storages, bundle_id, sandbox_id) if {
p_count == i_count - img_pull_count
image_info := allow_container_image_storage(p_storages)
layer_ids := image_info.layer_ids
root_hashes := image_info.root_hashes
every i_storage in i_storages {
allow_storage(p_storages, i_storage, bundle_id, sandbox_id, layer_ids, root_hashes)
allow_storage(p_storages, i_storage, bundle_id, sandbox_id)
}
print("allow_storages: true")
}
# Currently, Image Layer Integrity Verification through Policy is only required for Guest VMs
# that use container image layers provided as dm-verity-protected block device images created on the Host.
allow_container_image_storage(p_storages) = { "layer_ids": [], "root_hashes": [] } if {
policy_data.common.image_layer_verification != "host-tarfs-dm-verity"
}
allow_container_image_storage(p_storages) = { "layer_ids": layer_ids, "root_hashes": root_hashes } if {
policy_data.common.image_layer_verification == "host-tarfs-dm-verity"
some overlay_storage in p_storages
overlay_storage.driver == "overlayfs"
count(overlay_storage.options) == 2
layer_ids := split(overlay_storage.options[0], ":")
root_hashes := split(overlay_storage.options[1], ":")
}
allow_storage(p_storages, i_storage, bundle_id, sandbox_id, layer_ids, root_hashes) if {
allow_storage(p_storages, i_storage, bundle_id, sandbox_id) if {
some p_storage in p_storages
print("allow_storage: p_storage =", p_storage)
@@ -1077,12 +1057,12 @@ allow_storage(p_storages, i_storage, bundle_id, sandbox_id, layer_ids, root_hash
p_storage.fstype == i_storage.fstype
allow_storage_source(p_storage, i_storage, bundle_id)
allow_storage_options(p_storage, i_storage, layer_ids, root_hashes)
allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids)
allow_storage_options(p_storage, i_storage)
allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id)
print("allow_storage: true")
}
allow_storage(p_storages, i_storage, bundle_id, sandbox_id, layer_ids, root_hashes) if {
allow_storage(p_storages, i_storage, bundle_id, sandbox_id) if {
i_storage.driver == "image_guest_pull"
print("allow_storage with image_guest_pull: start")
i_storage.fstype == "overlay"
@@ -1121,7 +1101,7 @@ allow_storage_source(p_storage, i_storage, bundle_id) if {
print("allow_storage_source 3: true")
}
allow_storage_options(p_storage, i_storage, layer_ids, root_hashes) if {
allow_storage_options(p_storage, i_storage) if {
print("allow_storage_options 1: start")
p_storage.driver != "blk"
@@ -1130,163 +1110,48 @@ allow_storage_options(p_storage, i_storage, layer_ids, root_hashes) if {
print("allow_storage_options 1: true")
}
allow_storage_options(p_storage, i_storage, layer_ids, root_hashes) if {
print("allow_storage_options 2: start")
p_storage.driver == "overlayfs"
count(p_storage.options) == 2
policy_ids := split(p_storage.options[0], ":")
print("allow_storage_options 2: policy_ids =", policy_ids)
policy_ids == layer_ids
policy_hashes := split(p_storage.options[1], ":")
print("allow_storage_options 2: policy_hashes =", policy_hashes)
p_count := count(policy_ids)
print("allow_storage_options 2: p_count =", p_count)
p_count >= 1
p_count == count(policy_hashes)
i_count := count(i_storage.options)
print("allow_storage_options 2: i_count =", i_count)
i_count == p_count + 3
print("allow_storage_options 2: i_storage.options[0] =", i_storage.options[0])
i_storage.options[0] == "io.katacontainers.fs-opt.layer-src-prefix=/var/lib/containerd/io.containerd.snapshotter.v1.tardev/layers"
print("allow_storage_options 2: i_storage.options[i_count - 2] =", i_storage.options[i_count - 2])
i_storage.options[i_count - 2] == "io.katacontainers.fs-opt.overlay-rw"
lowerdir := concat("=", ["lowerdir", p_storage.options[0]])
print("allow_storage_options 2: lowerdir =", lowerdir)
i_storage.options[i_count - 1] == lowerdir
print("allow_storage_options 2: i_storage.options[i_count - 1] =", i_storage.options[i_count - 1])
every i, policy_id in policy_ids {
allow_overlay_layer(policy_id, policy_hashes[i], i_storage.options[i + 1])
}
print("allow_storage_options 2: true")
}
allow_storage_options(p_storage, i_storage, layer_ids, root_hashes) if {
print("allow_storage_options 3: start")
p_storage.driver == "blk"
count(p_storage.options) == 1
startswith(p_storage.options[0], "$(hash")
hash_suffix := trim_left(p_storage.options[0], "$(hash")
endswith(hash_suffix, ")")
hash_index := trim_right(hash_suffix, ")")
i := to_number(hash_index)
print("allow_storage_options 3: i =", i)
hash_option := concat("=", ["io.katacontainers.fs-opt.root-hash", root_hashes[i]])
print("allow_storage_options 3: hash_option =", hash_option)
count(i_storage.options) == 4
i_storage.options[0] == "ro"
i_storage.options[1] == "io.katacontainers.fs-opt.block_device=file"
i_storage.options[2] == "io.katacontainers.fs-opt.is-layer"
i_storage.options[3] == hash_option
print("allow_storage_options 3: true")
}
allow_overlay_layer(policy_id, policy_hash, i_option) if {
print("allow_overlay_layer: policy_id =", policy_id, "policy_hash =", policy_hash)
print("allow_overlay_layer: i_option =", i_option)
startswith(i_option, "io.katacontainers.fs-opt.layer=")
i_value := replace(i_option, "io.katacontainers.fs-opt.layer=", "")
i_value_decoded := base64.decode(i_value)
print("allow_overlay_layer: i_value_decoded =", i_value_decoded)
policy_suffix := concat("=", ["tar,ro,io.katacontainers.fs-opt.block_device=file,io.katacontainers.fs-opt.is-layer,io.katacontainers.fs-opt.root-hash", policy_hash])
p_value := concat(",", [policy_id, policy_suffix])
print("allow_overlay_layer: p_value =", p_value)
p_value == i_value_decoded
print("allow_overlay_layer: true")
}
allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) if {
p_storage.fstype == "tar"
startswith(p_storage.mount_point, "$(layer")
mount_suffix := trim_left(p_storage.mount_point, "$(layer")
endswith(mount_suffix, ")")
layer_index := trim_right(mount_suffix, ")")
i := to_number(layer_index)
print("allow_mount_point 1: i =", i)
layer_id := layer_ids[i]
print("allow_mount_point 1: layer_id =", layer_id)
p_mount := concat("/", ["/run/kata-containers/sandbox/layers", layer_id])
print("allow_mount_point 1: p_mount =", p_mount)
p_mount == i_storage.mount_point
print("allow_mount_point 1: true")
}
allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) if {
p_storage.fstype == "fuse3.kata-overlay"
mount1 := replace(p_storage.mount_point, "$(cpath)", policy_data.common.cpath)
mount2 := replace(mount1, "$(bundle-id)", bundle_id)
print("allow_mount_point 2: mount2 =", mount2)
mount2 == i_storage.mount_point
print("allow_mount_point 2: true")
}
allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) if {
allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id) if {
p_storage.fstype == "local"
mount1 := p_storage.mount_point
print("allow_mount_point 3: mount1 =", mount1)
mount2 := replace(mount1, "$(cpath)", policy_data.common.mount_source_cpath)
print("allow_mount_point 3: mount2 =", mount2)
print("allow_mount_point 1: mount2 =", mount2)
mount3 := replace(mount2, "$(sandbox-id)", sandbox_id)
print("allow_mount_point 3: mount3 =", mount3)
print("allow_mount_point 1: mount3 =", mount3)
regex.match(mount3, i_storage.mount_point)
print("allow_mount_point 3: true")
print("allow_mount_point 1: true")
}
allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) if {
allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id) if {
p_storage.fstype == "bind"
mount1 := p_storage.mount_point
print("allow_mount_point 4: mount1 =", mount1)
print("allow_mount_point 2: mount1 =", mount1)
mount2 := replace(mount1, "$(cpath)", policy_data.common.cpath)
print("allow_mount_point 4: mount2 =", mount2)
print("allow_mount_point 2: mount2 =", mount2)
mount3 := replace(mount2, "$(bundle-id)", bundle_id)
print("allow_mount_point 4: mount3 =", mount3)
print("allow_mount_point 2: mount3 =", mount3)
regex.match(mount3, i_storage.mount_point)
print("allow_mount_point 4: true")
print("allow_mount_point 2: true")
}
allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id, layer_ids) if {
allow_mount_point(p_storage, i_storage, bundle_id, sandbox_id) if {
p_storage.fstype == "tmpfs"
mount1 := p_storage.mount_point
print("allow_mount_point 5: mount1 =", mount1)
print("allow_mount_point 3: mount1 =", mount1)
regex.match(mount1, i_storage.mount_point)
print("allow_mount_point 5: true")
print("allow_mount_point 3: true")
}
# ExecProcessRequest.process.Capabilities
@@ -1429,7 +1294,6 @@ allow_interactive_exec(p_container, i_process) if {
print("allow_interactive_exec: true")
}
# get p_container from state
get_state_container(container_id):= p_container if {
idx := get_state_val(container_id)
p_container := policy_data.containers[idx]

src/tools/genpolicy/src/lib.rs

@@ -26,6 +26,5 @@ pub mod secret;
pub mod settings;
pub mod stateful_set;
pub mod utils;
pub mod verity;
pub mod volume;
pub mod yaml;

src/tools/genpolicy/src/main.rs

@@ -28,7 +28,6 @@ mod secret;
mod settings;
mod stateful_set;
mod utils;
mod verity;
mod version;
mod volume;
mod yaml;

src/tools/genpolicy/src/policy.rs

@@ -12,7 +12,6 @@ use crate::mount_and_storage;
use crate::no_policy;
use crate::pod;
use crate::policy;
use crate::registry;
use crate::secret;
use crate::utils;
use crate::yaml;
@@ -24,7 +23,6 @@ use oci_spec::runtime as oci;
use protocols::agent;
use serde::{Deserialize, Serialize};
use serde_yaml::Value;
use sha2::{Digest, Sha256};
use std::boxed;
use std::collections::{BTreeMap, BTreeSet};
use std::fs::read_to_string;
@@ -418,9 +416,6 @@ pub struct CommonData {
/// Default capabilities for a privileged container.
pub privileged_caps: Vec<String>,
/// Parse Container image as a storage object
pub image_layer_verification: String,
}
/// Configuration from "kubectl config".
@@ -616,12 +611,7 @@ impl AgentPolicy {
is_pause_container,
);
let image_layers = yaml_container.registry.get_image_layers();
let mut storages = Default::default();
const HOST_TARFS_DM_VERITY: &str = "host-tarfs-dm-verity";
if self.config.settings.common.image_layer_verification == HOST_TARFS_DM_VERITY {
get_image_layer_storages(&mut storages, &image_layers, &root);
}
resource.get_container_mounts_and_storages(
&mut mounts,
&mut storages,
@@ -784,71 +774,6 @@ impl KataSpec {
}
}
fn get_image_layer_storages(
storages: &mut Vec<agent::Storage>,
image_layers: &Vec<registry::ImageLayer>,
root: &KataRoot,
) {
let mut new_storages: Vec<agent::Storage> = Vec::new();
let mut layer_names: Vec<String> = Vec::new();
let mut layer_hashes: Vec<String> = Vec::new();
let mut previous_chain_id = String::new();
let layers_count = image_layers.len();
let mut layer_index = layers_count;
for layer in image_layers {
// See https://github.com/opencontainers/image-spec/blob/main/config.md#layer-chainid
let chain_id = if previous_chain_id.is_empty() {
layer.diff_id.clone()
} else {
let mut hasher = Sha256::new();
hasher.update(format!("{previous_chain_id} {}", &layer.diff_id));
format!("sha256:{:x}", hasher.finalize())
};
debug!(
"previous_chain_id = {}, chain_id = {}",
&previous_chain_id, &chain_id
);
previous_chain_id.clone_from(&chain_id);
layer_names.push(name_to_hash(&chain_id));
layer_hashes.push(layer.verity_hash.to_string());
layer_index -= 1;
new_storages.push(agent::Storage {
driver: "blk".to_string(),
driver_options: Vec::new(),
source: String::new(), // TODO
fstype: "tar".to_string(),
options: vec![format!("$(hash{layer_index})")],
mount_point: format!("$(layer{layer_index})"),
fs_group: protobuf::MessageField::none(),
special_fields: ::protobuf::SpecialFields::new(),
});
}
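// The loop above assigned descending $(hashN)/$(layerN) indices, so
// reverse the list to push the layer storages in ascending index order.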
new_storages.reverse();
for storage in new_storages {
storages.push(storage);
}
layer_names.reverse();
layer_hashes.reverse();
let overlay_storage = agent::Storage {
driver: "overlayfs".to_string(),
driver_options: Vec::new(),
source: String::new(), // TODO
fstype: "fuse3.kata-overlay".to_string(),
options: vec![layer_names.join(":"), layer_hashes.join(":")],
mount_point: root.Path.clone(),
fs_group: protobuf::MessageField::none(),
special_fields: ::protobuf::SpecialFields::new(),
};
storages.push(overlay_storage);
}
async fn parse_config_file(
yaml_file: String,
config: &utils::Config,
@@ -880,13 +805,6 @@ async fn parse_config_file(
Ok(k8sRes)
}
/// Converts the given name to a string representation of its sha256 hash.
fn name_to_hash(name: &str) -> String {
let mut hasher = Sha256::new();
hasher.update(name);
format!("{:x}", hasher.finalize())
}
fn substitute_env_variables(env: &mut Vec<String>) {
loop {
let mut substituted = false;

src/tools/genpolicy/src/registry.rs

@@ -10,7 +10,6 @@ use crate::containerd;
use crate::layers_cache::ImageLayersCache;
use crate::policy;
use crate::utils::Config;
use crate::verity;
use anyhow::{anyhow, bail, Result};
use docker_credential::{CredentialRetrievalError, DockerCredential};
@@ -22,8 +21,7 @@ use oci_client::{
Client, Reference,
};
use serde::{Deserialize, Serialize};
use sha2::{digest::typenum::Unsigned, digest::OutputSizeUser, Sha256};
use std::{collections::BTreeMap, io, io::Read, io::Seek, io::Write, path::Path};
use std::{collections::BTreeMap, io, io::Read, io::Write, path::Path};
use tokio::io::AsyncWriteExt;
/// Container image properties obtained from an OCI repository.
@@ -32,7 +30,6 @@ pub struct Container {
#[allow(dead_code)]
pub image: String,
pub config_layer: DockerConfigLayer,
pub image_layers: Vec<ImageLayer>,
pub passwd: String,
pub group: String,
}
@@ -68,7 +65,6 @@ pub struct DockerRootfs {
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct ImageLayer {
pub diff_id: String,
pub verity_hash: String,
pub passwd: String,
pub group: String,
}
@@ -197,7 +193,6 @@ impl Container {
Ok(Container {
image: image_string,
config_layer,
image_layers,
passwd,
group,
})
@@ -429,10 +424,6 @@ impl Container {
debug!("get_process succeeded.");
}
pub fn get_image_layers(&self) -> Vec<ImageLayer> {
self.image_layers.clone()
}
}
async fn get_image_layers(
@@ -452,7 +443,7 @@ async fn get_image_layers(
|| layer.media_type.eq(manifest::IMAGE_LAYER_GZIP_MEDIA_TYPE)
{
if layer_index < config_layer.rootfs.diff_ids.len() {
let mut imageLayer = get_verity_and_users(
let mut imageLayer = get_users_from_layer(
layers_cache,
client,
reference,
@@ -473,7 +464,7 @@ async fn get_image_layers(
Ok(layers)
}
async fn get_verity_and_users(
async fn get_users_from_layer(
layers_cache: &ImageLayersCache,
client: &mut Client,
reference: &Reference,
@@ -482,7 +473,6 @@ async fn get_verity_and_users(
) -> Result<ImageLayer> {
if let Some(layer) = layers_cache.get_layer(diff_id) {
info!("Using cache file");
info!("dm-verity root hash: {}", layer.verity_hash);
return Ok(layer);
}
@@ -505,22 +495,17 @@
)
.await
{
temp_dir.close()?;
bail!(format!(
"Failed to create verity hash for {layer_digest}, error {e}"
));
bail!(format!("Failed to decompress image layer, error {e}"));
};
match get_verity_hash_and_users(&decompressed_path) {
match get_users_from_decompressed_layer(&decompressed_path) {
Err(e) => {
temp_dir.close()?;
bail!(format!("Failed to get verity hash {e}"));
bail!(format!("Failed to get users from image layer, error {e}"));
}
Ok((verity_hash, passwd, group)) => {
info!("dm-verity root hash: {verity_hash}");
Ok((passwd, group)) => {
let layer = ImageLayer {
diff_id: diff_id.to_string(),
verity_hash,
passwd,
group,
};
@@ -558,29 +543,12 @@ async fn create_decompressed_layer_file(
let mut gz_decoder = flate2::read::GzDecoder::new(compressed_file);
std::io::copy(&mut gz_decoder, &mut decompressed_file).map_err(|e| anyhow!(e))?;
info!("Adding tarfs index to layer");
decompressed_file.seek(std::io::SeekFrom::Start(0))?;
tarindex::append_index(&mut decompressed_file).map_err(|e| anyhow!(e))?;
decompressed_file.flush().map_err(|e| anyhow!(e))?;
Ok(())
}
pub fn get_verity_hash_and_users(path: &Path) -> Result<(String, String, String)> {
info!("Calculating dm-verity root hash");
let mut file = std::fs::File::open(path)?;
let size = file.seek(std::io::SeekFrom::End(0))?;
if size < 4096 {
return Err(anyhow!("Block device {:?} is too small: {size}", &path));
}
let salt = [0u8; <Sha256 as OutputSizeUser>::OutputSize::USIZE];
let v = verity::Verity::<Sha256>::new(size, 4096, 4096, &salt, 0)?;
let hash = verity::traverse_file(&mut file, 0, false, v, &mut verity::no_write)?;
let result = format!("{:x}", hash);
file.seek(std::io::SeekFrom::Start(0))?;
pub fn get_users_from_decompressed_layer(path: &Path) -> Result<(String, String)> {
let file = std::fs::File::open(path)?;
let mut passwd = String::new();
let mut group = String::new();
let (mut found_passwd, mut found_group) = (false, false);
@@ -615,7 +583,7 @@ pub fn get_verity_hash_and_users(path: &Path) -> Result<(String, String, String)
}
}
Ok((result, passwd, group))
Ok((passwd, group))
}
pub async fn get_container(config: &Config, image: &str) -> Result<Container> {

src/tools/genpolicy/src/registry_containerd.rs

@@ -7,7 +7,7 @@
#![allow(non_snake_case)]
use crate::layers_cache::ImageLayersCache;
use crate::registry::{
get_verity_hash_and_users, Container, DockerConfigLayer, ImageLayer, WHITEOUT_MARKER,
get_users_from_decompressed_layer, Container, DockerConfigLayer, ImageLayer, WHITEOUT_MARKER,
};
use crate::utils::Config;
@@ -17,7 +17,7 @@ use docker_credential::{CredentialRetrievalError, DockerCredential};
use k8s_cri::v1::{image_service_client::ImageServiceClient, AuthConfig};
use log::{debug, info, warn};
use oci_client::Reference;
use std::{collections::HashMap, convert::TryFrom, io::Seek, io::Write, path::Path};
use std::{collections::HashMap, convert::TryFrom, io::Write, path::Path};
use tokio::{
io,
io::{AsyncSeekExt, AsyncWriteExt},
@@ -89,7 +89,6 @@ impl Container {
Ok(Container {
image: image_str,
config_layer,
image_layers,
passwd,
group,
})
@@ -286,7 +285,7 @@ pub async fn get_image_layers(
|| layer_media_type.eq("application/vnd.oci.image.layer.v1.tar+gzip")
{
if layer_index < config_layer.rootfs.diff_ids.len() {
let mut imageLayer = get_verity_and_users(
let mut imageLayer = get_users_from_layer(
layers_cache,
layer["digest"].as_str().unwrap(),
client,
@@ -305,7 +304,7 @@ pub async fn get_image_layers(
Ok(layersVec)
}
async fn get_verity_and_users(
async fn get_users_from_layer(
layers_cache: &ImageLayersCache,
layer_digest: &str,
client: &containerd_client::Client,
@@ -313,7 +312,6 @@ async fn get_verity_and_users(
) -> Result<ImageLayer> {
if let Some(layer) = layers_cache.get_layer(diff_id) {
info!("Using cache file");
info!("dm-verity root hash: {}", layer.verity_hash);
return Ok(layer);
}
@@ -338,16 +336,14 @@ async fn get_verity_and_users(
));
}
match get_verity_hash_and_users(&decompressed_path) {
match get_users_from_decompressed_layer(&decompressed_path) {
Err(e) => {
temp_dir.close()?;
bail!(format!("Failed to get verity hash {e}"));
}
Ok((verity_hash, passwd, group)) => {
info!("dm-verity root hash: {verity_hash}");
Ok((passwd, group)) => {
let layer = ImageLayer {
diff_id: diff_id.to_string(),
verity_hash,
passwd,
group,
};
@@ -402,10 +398,6 @@ async fn create_decompressed_layer_file(
let mut gz_decoder = flate2::read::GzDecoder::new(compressed_file);
std::io::copy(&mut gz_decoder, &mut decompressed_file).map_err(|e| anyhow!(e))?;
info!("Adding tarfs index to layer");
decompressed_file.seek(std::io::SeekFrom::Start(0))?;
tarindex::append_index(&mut decompressed_file).map_err(|e| anyhow!(e))?;
decompressed_file.flush().map_err(|e| anyhow!(e))?;
Ok(())
}

src/tools/genpolicy/src/verity.rs

@@ -1,222 +0,0 @@
// Copyright (c) 2023 Microsoft Corporation
//
// SPDX-License-Identifier: Apache-2.0
//
use generic_array::{typenum::Unsigned, GenericArray};
use sha2::Digest;
use std::fs::File;
use std::io::{self, Read, Seek, SeekFrom};
use zerocopy::byteorder::{LE, U32, U64};
use zerocopy::AsBytes;
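/// On-disk dm-verity superblock: records the data/hash block sizes and the
/// number of data blocks covered by the hash tree.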
#[derive(Default, zerocopy::AsBytes, zerocopy::FromBytes, zerocopy::Unaligned)]
#[repr(C)]
pub struct SuperBlock {
pub data_block_size: U32<LE>,
pub hash_block_size: U32<LE>,
pub data_block_count: U64<LE>,
}
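/// One level of the hash tree under construction: the hash block currently
/// being filled, the next free hash slot in it, and the file offset at which
/// completed blocks of this level are written.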
#[derive(Clone)]
struct Level {
next_index: usize,
file_offset: u64,
data: Vec<u8>,
}
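/// Incremental dm-verity hash-tree builder: data blocks are hashed one at a
/// time, and each hash block that fills up is folded into the level above it.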
pub struct Verity<T: Digest + Clone> {
levels: Vec<Level>,
seeded: T,
data_block_size: usize,
hash_block_size: usize,
block_remaining_count: u64,
super_block: SuperBlock,
}
impl<T: Digest + Clone> Verity<T> {
const HASH_SIZE: usize = T::OutputSize::USIZE;
/// Creates a new `Verity` instance.
pub fn new(
data_size: u64,
data_block_size: usize,
hash_block_size: usize,
salt: &[u8],
mut write_file_offset: u64,
) -> io::Result<Self> {
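// Number of tree levels needed before a single hash block covers the
// entire data size.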
let level_count = {
let mut max_size = data_block_size as u64;
let mut count = 0usize;
while max_size < data_size {
count += 1;
max_size *= (hash_block_size / Self::HASH_SIZE) as u64;
}
count
};
let data = vec![0; hash_block_size];
let mut levels = Vec::new();
levels.resize(
level_count,
Level {
next_index: 0,
file_offset: 0,
data,
},
);
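// Lay the levels out consecutively in the output, reserving one hash
// block per `entry_size` bytes of data covered by each level.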
for (i, l) in levels.iter_mut().enumerate() {
let entry_size = (data_block_size as u64)
* ((hash_block_size / Self::HASH_SIZE) as u64).pow(level_count as u32 - i as u32);
let count = data_size.div_ceil(entry_size);
l.file_offset = write_file_offset;
write_file_offset += hash_block_size as u64 * count;
}
let block_count = data_size / (data_block_size as u64);
Ok(Self {
levels,
seeded: T::new_with_prefix(salt),
data_block_size,
block_remaining_count: block_count,
hash_block_size,
super_block: SuperBlock {
data_block_size: (data_block_size as u32).into(),
hash_block_size: (hash_block_size as u32).into(),
data_block_count: block_count.into(),
},
})
}
/// Determines if more blocks are expected.
///
/// This is based on file size specified when this instance was created.
fn more_blocks(&self) -> bool {
self.block_remaining_count > 0
}
/// Adds the given hash to the level.
///
/// Returns `true` if the level is now full; `false` if there is still room for more hashes.
fn add_hash(&mut self, l: usize, hash: &[u8]) -> bool {
let level = &mut self.levels[l];
level.data[level.next_index * Self::HASH_SIZE..][..Self::HASH_SIZE].copy_from_slice(hash);
level.next_index += 1;
level.next_index >= self.hash_block_size / Self::HASH_SIZE
}
/// Finalises the level despite potentially not having filled it.
///
/// It zeroes out the remaining bytes of the level so that its hash can be calculated
/// consistently.
fn finalize_level(&mut self, l: usize) {
let level = &mut self.levels[l];
for b in &mut level.data[level.next_index * Self::HASH_SIZE..] {
*b = 0;
}
level.next_index = 0;
}
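/// Writes out the hash block at level `l` (zero-padding unused slots) and
/// adds its digest to level `l - 1`. Returns `true` if the parent level is
/// now full.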
fn uplevel<F>(&mut self, l: usize, reader: &mut File, writer: &mut F) -> io::Result<bool>
where
F: FnMut(&mut File, &[u8], u64) -> io::Result<()>,
{
self.finalize_level(l);
writer(reader, &self.levels[l].data, self.levels[l].file_offset)?;
self.levels[l].file_offset += self.hash_block_size as u64;
let h = self.digest(&self.levels[l].data);
Ok(self.add_hash(l - 1, h.as_slice()))
}
fn digest(&self, block: &[u8]) -> GenericArray<u8, T::OutputSize> {
let mut hasher = self.seeded.clone();
hasher.update(block);
hasher.finalize()
}
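/// Hashes one data block into the bottom level, propagating completed hash
/// blocks up the tree as levels fill. Fails if more blocks are added than
/// were declared when this instance was created.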
fn add_block<F>(&mut self, b: &[u8], reader: &mut File, writer: &mut F) -> io::Result<()>
where
F: FnMut(&mut File, &[u8], u64) -> io::Result<()>,
{
if self.block_remaining_count == 0 {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
"unexpected block",
));
}
self.block_remaining_count -= 1;
let count = self.levels.len();
let hash = self.digest(b);
if self.add_hash(count - 1, hash.as_slice()) {
// Go up the levels as far as it can.
for l in (1..count).rev() {
if !self.uplevel(l, reader, writer)? {
break;
}
}
}
Ok(())
}
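/// Flushes any partially filled levels, writes the top-level hash block,
/// optionally appends the superblock, and returns the root hash (the digest
/// of the top-level block).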
fn finalize(
mut self,
write_superblock: bool,
reader: &mut File,
writer: &mut impl FnMut(&mut File, &[u8], u64) -> io::Result<()>,
) -> io::Result<GenericArray<u8, T::OutputSize>> {
let len = self.levels.len();
for mut l in (1..len).rev() {
if self.levels[l].next_index != 0 {
while l > 0 {
self.uplevel(l, reader, writer)?;
l -= 1;
}
break;
}
}
self.finalize_level(0);
writer(reader, &self.levels[0].data, self.levels[0].file_offset)?;
self.levels[0].file_offset += self.hash_block_size as u64;
if write_superblock {
writer(
reader,
self.super_block.as_bytes(),
self.levels[len - 1].file_offset + 4096 - 512,
)?;
// TODO: Align to the hash_block_size...
// Align to 4096 bytes.
writer(reader, &[0u8], self.levels[len - 1].file_offset + 4095)?;
}
Ok(self.digest(&self.levels[0].data))
}
}
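/// Streams `file` one data block at a time through `verity`, starting at
/// `read_offset`, and returns the computed dm-verity root hash.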
pub fn traverse_file<T: Digest + Clone>(
file: &mut File,
mut read_offset: u64,
write_superblock: bool,
mut verity: Verity<T>,
writer: &mut impl FnMut(&mut File, &[u8], u64) -> io::Result<()>,
) -> io::Result<GenericArray<u8, T::OutputSize>> {
let mut buf = vec![0; verity.data_block_size];
while verity.more_blocks() {
file.seek(SeekFrom::Start(read_offset))?;
file.read_exact(&mut buf)?;
verity.add_block(&buf, file, writer)?;
read_offset += verity.data_block_size as u64;
}
verity.finalize(write_superblock, file, writer)
}
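/// Writer callback that discards hash blocks, for callers that only need the
/// root hash.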
pub fn no_write(_: &mut File, _: &[u8], _: u64) -> io::Result<()> {
Ok(())
}