Merge pull request #7688 from ChengyuZhu6/image_sharing_dmverity

CC | support dm-verity tarfs disk image in CoCo
This commit is contained in:
Fabiano Fidêncio 2023-09-13 20:52:01 +02:00 committed by GitHub
commit ee15a389de
16 changed files with 1040 additions and 125 deletions
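For orientation, here is a minimal Go sketch of how the host runtime could encode the dm-verity volume description this change introduces: the `io.katacontainers.volume=` rootfs option carries a base64-encoded JSON `KataVirtualVolume`, which the `ParseKataVirtualVolume` helper added in virtual_volume.go (later in this diff) decodes. The struct definitions below are local mirrors of the types added in this PR, and the device path, fs type, and verity values are purely illustrative.

```go
// Sketch: encoding a dm-verity backed raw-block volume as the
// "io.katacontainers.volume=" rootfs option consumed by this PR.
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// DmVerityInfo mirrors the type added in this PR (JSON tags must match).
type DmVerityInfo struct {
	HashType  string `json:"hashtype"`
	Hash      string `json:"hash"`
	BlockNum  uint64 `json:"blocknum"`
	Blocksize uint64 `json:"blocksize"`
	Hashsize  uint64 `json:"hashsize"`
	Offset    uint64 `json:"offset"`
}

// KataVirtualVolume mirrors a subset of the type added in this PR.
type KataVirtualVolume struct {
	VolumeType string        `json:"volume_type"`
	Source     string        `json:"source,omitempty"`
	FSType     string        `json:"fs_type,omitempty"`
	Options    []string      `json:"options,omitempty"`
	DmVerity   *DmVerityInfo `json:"dm_verity,omitempty"`
}

// Same value as VirtualVolumePrefix in kata_agent.go below.
const virtualVolumePrefix = "io.katacontainers.volume="

func main() {
	vol := KataVirtualVolume{
		VolumeType: "image_raw_block", // KataVirtualVolumeImageRawBlockType
		Source:     "/dev/vdb",        // hypothetical host block device
		FSType:     "tar",             // hypothetical fs type for a tarfs layer
		Options:    []string{"ro"},
		DmVerity: &DmVerityInfo{
			HashType:  "sha256",
			Hash:      "9de18652fe74edfb9b805aaed72ae2aa48f94333f1ba5c452ac33b1c39325174",
			BlockNum:  16384,
			Blocksize: 512,
			Hashsize:  512,
			Offset:    8388608,
		},
	}

	raw, _ := json.Marshal(vol)
	// This is the rootfs option the runtime recognises and
	// ParseKataVirtualVolume() decodes (base64 of the JSON payload).
	opt := virtualVolumePrefix + base64.StdEncoding.EncodeToString(raw)
	fmt.Println(opt)
}
```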

src/agent/Cargo.lock generated
View File

@@ -1156,6 +1156,34 @@ dependencies = [
"opaque-debug",
]
[[package]]
name = "devicemapper"
version = "0.33.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75a9fd602a98d192f7662a1f4c4cf6920a1b454c3a9e724f6490cf8e30910114"
dependencies = [
"bitflags",
"devicemapper-sys",
"env_logger",
"lazy_static",
"log",
"nix 0.26.2",
"rand 0.8.5",
"retry",
"semver",
"serde",
]
[[package]]
name = "devicemapper-sys"
version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0b0f9d16560f830ae6e90b769017333c4561d2c84f39e7aa7d935d2e7bcbc4c"
dependencies = [
"bindgen",
"nix 0.26.2",
]
[[package]]
name = "diff"
version = "0.1.13"
@@ -1396,6 +1424,19 @@ dependencies = [
"syn 2.0.28",
]
[[package]]
name = "env_logger"
version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "85cdab6a89accf66733ad5a1693a4dcced6aeff64602b634530dd73c1f3ee9f0"
dependencies = [
"humantime",
"is-terminal",
"log",
"regex",
"termcolor",
]
[[package]]
name = "errno"
version = "0.2.8"
@@ -1878,6 +1919,12 @@ version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421"
[[package]]
name = "humantime"
version = "2.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4"
[[package]]
name = "hyper"
version = "0.14.26"
@@ -1968,13 +2015,14 @@ dependencies = [
[[package]]
name = "image-rs"
version = "0.1.0"
-source = "git+https://github.com/confidential-containers/guest-components?rev=53ddd632424432077e95d3901deb64727be0b4c1#53ddd632424432077e95d3901deb64727be0b4c1"
source = "git+https://github.com/confidential-containers/guest-components?rev=e9944577d1f61060c51d48890359a5467d519a29#e9944577d1f61060c51d48890359a5467d519a29"
dependencies = [
"anyhow",
"async-compression",
"async-trait",
"base64 0.21.2",
"cfg-if 1.0.0",
"devicemapper",
"flate2",
"futures",
"futures-util",
@@ -2872,7 +2920,7 @@ dependencies = [
[[package]]
name = "ocicrypt-rs"
version = "0.1.0"
-source = "git+https://github.com/confidential-containers/guest-components?rev=53ddd632424432077e95d3901deb64727be0b4c1#53ddd632424432077e95d3901deb64727be0b4c1"
source = "git+https://github.com/confidential-containers/guest-components?rev=e9944577d1f61060c51d48890359a5467d519a29#e9944577d1f61060c51d48890359a5467d519a29"
dependencies = [
"aes 0.8.3",
"anyhow",
@@ -3873,6 +3921,15 @@ dependencies = [
"winreg",
]
[[package]]
name = "retry"
version = "1.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac95c60a949a63fd2822f4964939662d8f2c16c4fa0624fd954bc6e703b9a3f6"
dependencies = [
"rand 0.8.5",
]
[[package]]
name = "rfc6979"
version = "0.3.1"
@@ -4171,6 +4228,12 @@ dependencies = [
"libc",
]
[[package]]
name = "semver"
version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0293b4b29daaf487284529cc2f5675b8e57c61f70167ba415a463651fd6a918"
[[package]]
name = "sequoia-openpgp"
version = "1.14.0"

View File

@@ -74,8 +74,8 @@ clap = { version = "3.0.1", features = ["derive"] }
openssl = { version = "0.10.38", features = ["vendored"] }
# Image pull/decrypt
-image-rs = { git = "https://github.com/confidential-containers/guest-components", rev = "53ddd632424432077e95d3901deb64727be0b4c1", default-features = false, features = [
-"kata-cc-native-tls",
image-rs = { git = "https://github.com/confidential-containers/guest-components", rev = "e9944577d1f61060c51d48890359a5467d519a29", default-features = false, features = [
"kata-cc-native-tls", "verity",
] }
[patch.crates-io]

View File

@@ -27,19 +27,29 @@ use crate::{ccw, device::get_virtio_blk_ccw_device_name};
#[derive(Debug)]
pub struct VirtioBlkMmioHandler {}
-#[async_trait::async_trait]
-impl StorageHandler for VirtioBlkMmioHandler {
-#[instrument]
-async fn create_device(
-&self,
-storage: Storage,
-ctx: &mut StorageContext,
-) -> Result<Arc<dyn StorageDevice>> {
impl VirtioBlkMmioHandler {
pub async fn update_device_path(
storage: &mut Storage,
ctx: &mut StorageContext<'_>,
) -> Result<()> {
if !Path::new(&storage.source).exists() {
get_virtio_mmio_device_name(ctx.sandbox, &storage.source)
.await
.context("failed to get mmio device name")?;
}
Ok(())
}
}
#[async_trait::async_trait]
impl StorageHandler for VirtioBlkMmioHandler {
#[instrument]
async fn create_device(
&self,
mut storage: Storage,
ctx: &mut StorageContext,
) -> Result<Arc<dyn StorageDevice>> {
Self::update_device_path(&mut storage, ctx).await?;
let path = common_storage_handler(ctx.logger, &storage)?;
new_device(path)
}
@@ -48,14 +58,11 @@ impl StorageHandler for VirtioBlkMmioHandler {
#[derive(Debug)]
pub struct VirtioBlkPciHandler {}
-#[async_trait::async_trait]
-impl StorageHandler for VirtioBlkPciHandler {
-#[instrument]
-async fn create_device(
-&self,
-mut storage: Storage,
-ctx: &mut StorageContext,
-) -> Result<Arc<dyn StorageDevice>> {
impl VirtioBlkPciHandler {
pub async fn update_device_path(
storage: &mut Storage,
ctx: &mut StorageContext<'_>,
) -> Result<()> {
// If hot-plugged, get the device node path based on the PCI path
// otherwise use the virt path provided in Storage Source
if storage.source.starts_with("/dev") {
@@ -71,6 +78,19 @@ impl StorageHandler for VirtioBlkPciHandler {
storage.source = dev_path;
}
Ok(())
}
}
#[async_trait::async_trait]
impl StorageHandler for VirtioBlkPciHandler {
#[instrument]
async fn create_device(
&self,
mut storage: Storage,
ctx: &mut StorageContext,
) -> Result<Arc<dyn StorageDevice>> {
Self::update_device_path(&mut storage, ctx).await?;
let path = common_storage_handler(ctx.logger, &storage)?;
new_device(path)
}
@@ -79,6 +99,21 @@ impl StorageHandler for VirtioBlkPciHandler {
#[derive(Debug)]
pub struct VirtioBlkCcwHandler {}
impl VirtioBlkCcwHandler {
pub async fn update_device_path(
_storage: &mut Storage,
_ctx: &mut StorageContext<'_>,
) -> Result<()> {
#[cfg(target_arch = "s390x")]
{
let ccw_device = ccw::Device::from_str(&_storage.source)?;
let dev_path = get_virtio_blk_ccw_device_name(_ctx.sandbox, &ccw_device).await?;
_storage.source = dev_path;
}
Ok(())
}
}
#[async_trait::async_trait]
impl StorageHandler for VirtioBlkCcwHandler {
#[cfg(target_arch = "s390x")]
@@ -88,9 +123,7 @@ impl StorageHandler for VirtioBlkCcwHandler {
mut storage: Storage,
ctx: &mut StorageContext,
) -> Result<Arc<dyn StorageDevice>> {
-let ccw_device = ccw::Device::from_str(&storage.source)?;
-let dev_path = get_virtio_blk_ccw_device_name(ctx.sandbox, &ccw_device).await?;
-storage.source = dev_path;
Self::update_device_path(&mut storage, ctx).await?;
let path = common_storage_handler(ctx.logger, &storage)?;
new_device(path)
}
@@ -109,6 +142,18 @@ impl StorageHandler for VirtioBlkCcwHandler {
#[derive(Debug)]
pub struct ScsiHandler {}
impl ScsiHandler {
pub async fn update_device_path(
storage: &mut Storage,
ctx: &mut StorageContext<'_>,
) -> Result<()> {
// Retrieve the device path from SCSI address.
let dev_path = get_scsi_device_name(ctx.sandbox, &storage.source).await?;
storage.source = dev_path;
Ok(())
}
}
#[async_trait::async_trait]
impl StorageHandler for ScsiHandler {
#[instrument]
@@ -117,10 +162,7 @@ impl StorageHandler for ScsiHandler {
mut storage: Storage,
ctx: &mut StorageContext,
) -> Result<Arc<dyn StorageDevice>> {
-// Retrieve the device path from SCSI address.
-let dev_path = get_scsi_device_name(ctx.sandbox, &storage.source).await?;
-storage.source = dev_path;
Self::update_device_path(&mut storage, ctx).await?;
let path = common_storage_handler(ctx.logger, &storage)?;
new_device(path)
}
@@ -129,17 +171,26 @@ impl StorageHandler for ScsiHandler {
#[derive(Debug)]
pub struct PmemHandler {}
impl PmemHandler {
pub async fn update_device_path(
storage: &mut Storage,
ctx: &mut StorageContext<'_>,
) -> Result<()> {
// Retrieve the device for pmem storage
wait_for_pmem_device(ctx.sandbox, &storage.source).await?;
Ok(())
}
}
#[async_trait::async_trait]
impl StorageHandler for PmemHandler {
#[instrument]
async fn create_device(
&self,
-storage: Storage,
mut storage: Storage,
ctx: &mut StorageContext,
) -> Result<Arc<dyn StorageDevice>> {
-// Retrieve the device for pmem storage
-wait_for_pmem_device(ctx.sandbox, &storage.source).await?;
Self::update_device_path(&mut storage, ctx).await?;
let path = common_storage_handler(ctx.logger, &storage)?;
new_device(path)
}

View File

@@ -0,0 +1,165 @@
// Copyright (c) 2023 Alibaba Cloud
//
// SPDX-License-Identifier: Apache-2.0
//
use std::path::Path;
use std::sync::Arc;
use anyhow::{anyhow, Context, Result};
use image_rs::verity::{create_dmverity_device, destroy_dmverity_device};
use kata_sys_util::mount::create_mount_destination;
use kata_types::mount::{DmVerityInfo, StorageDevice};
use kata_types::volume::{
KATA_VOLUME_DMVERITY_OPTION_SOURCE_TYPE, KATA_VOLUME_DMVERITY_OPTION_VERITY_INFO,
KATA_VOLUME_DMVERITY_SOURCE_TYPE_PMEM, KATA_VOLUME_DMVERITY_SOURCE_TYPE_SCSI,
KATA_VOLUME_DMVERITY_SOURCE_TYPE_VIRTIO_CCW, KATA_VOLUME_DMVERITY_SOURCE_TYPE_VIRTIO_MMIO,
KATA_VOLUME_DMVERITY_SOURCE_TYPE_VIRTIO_PCI,
};
use protocols::agent::Storage;
use slog::Logger;
use tracing::instrument;
use crate::storage::block_handler::{
PmemHandler, ScsiHandler, VirtioBlkCcwHandler, VirtioBlkMmioHandler, VirtioBlkPciHandler,
};
use crate::storage::{common_storage_handler, StorageContext, StorageHandler};
use super::StorageDeviceGeneric;
#[derive(Debug)]
pub struct DmVerityHandler {}
impl DmVerityHandler {
fn get_dm_verity_info(storage: &Storage) -> Result<DmVerityInfo> {
for option in storage.driver_options.iter() {
if let Some((key, value)) = option.split_once('=') {
if key == KATA_VOLUME_DMVERITY_OPTION_VERITY_INFO {
let verity_info: DmVerityInfo = serde_json::from_str(value)?;
return Ok(verity_info);
}
}
}
Err(anyhow!("missing DmVerity information for DmVerity volume"))
}
async fn update_source_device(
storage: &mut Storage,
ctx: &mut StorageContext<'_>,
) -> Result<()> {
for option in storage.driver_options.clone() {
if let Some((key, value)) = option.split_once('=') {
if key == KATA_VOLUME_DMVERITY_OPTION_SOURCE_TYPE {
match value {
KATA_VOLUME_DMVERITY_SOURCE_TYPE_VIRTIO_PCI => {
VirtioBlkPciHandler::update_device_path(storage, ctx).await?;
}
KATA_VOLUME_DMVERITY_SOURCE_TYPE_VIRTIO_MMIO => {
VirtioBlkMmioHandler::update_device_path(storage, ctx).await?;
}
KATA_VOLUME_DMVERITY_SOURCE_TYPE_VIRTIO_CCW => {
VirtioBlkCcwHandler::update_device_path(storage, ctx).await?;
}
KATA_VOLUME_DMVERITY_SOURCE_TYPE_SCSI => {
ScsiHandler::update_device_path(storage, ctx).await?;
}
KATA_VOLUME_DMVERITY_SOURCE_TYPE_PMEM => {
PmemHandler::update_device_path(storage, ctx).await?;
}
_ => {}
}
}
}
}
Ok(())
}
}
#[async_trait::async_trait]
impl StorageHandler for DmVerityHandler {
#[instrument]
async fn create_device(
&self,
mut storage: Storage,
ctx: &mut StorageContext,
) -> Result<Arc<dyn StorageDevice>> {
Self::update_source_device(&mut storage, ctx).await?;
create_mount_destination(&storage.source, &storage.mount_point, "", &storage.fstype)
.context("Could not create mountpoint")?;
let verity_info = Self::get_dm_verity_info(&storage)?;
let verity_info = serde_json::to_string(&verity_info)
.map_err(|e| anyhow!("failed to serialize dm_verity info, {}", e))?;
let verity_device_path = create_dmverity_device(&verity_info, Path::new(storage.source()))
.context("create device with dm-verity enabled")?;
storage.source = verity_device_path;
common_storage_handler(ctx.logger, &storage)?;
Ok(Arc::new(DmVerityDevice {
common: StorageDeviceGeneric::new(storage.mount_point),
verity_device_path: storage.source,
logger: ctx.logger.clone(),
}))
}
}
struct DmVerityDevice {
common: StorageDeviceGeneric,
verity_device_path: String,
logger: Logger,
}
impl StorageDevice for DmVerityDevice {
fn path(&self) -> Option<&str> {
self.common.path()
}
fn cleanup(&self) -> Result<()> {
self.common.cleanup().context("clean up dm-verity volume")?;
let device_path = &self.verity_device_path;
debug!(
self.logger,
"destroy verity device path = {:?}", device_path
);
destroy_dmverity_device(device_path)?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use kata_types::{mount::DmVerityInfo, volume::KATA_VOLUME_DMVERITY_OPTION_VERITY_INFO};
use protocols::agent::Storage;
use crate::storage::dm_verity::DmVerityHandler;
#[test]
fn test_get_dm_verity_info() {
let verity_info = DmVerityInfo {
hashtype: "sha256".to_string(),
hash: "d86104eee715a1b59b62148641f4ca73edf1be3006c4d481f03f55ac05640570".to_string(),
blocknum: 2361,
blocksize: 512,
hashsize: 4096,
offset: 1212416,
};
let verity_info_str = serde_json::to_string(&verity_info);
assert!(verity_info_str.is_ok());
let storage = Storage {
driver: KATA_VOLUME_DMVERITY_OPTION_VERITY_INFO.to_string(),
driver_options: vec![format!("verity_info={}", verity_info_str.ok().unwrap())],
..Default::default()
};
match DmVerityHandler::get_dm_verity_info(&storage) {
Ok(result) => {
assert_eq!(verity_info, result);
}
Err(e) => panic!("err = {}", e),
}
}
}

View File

@@ -10,6 +10,7 @@ use std::sync::Arc;
use anyhow::{anyhow, Context, Result};
use kata_types::mount::StorageDevice;
use kata_types::volume::KATA_VOLUME_OVERLAYFS_CREATE_DIR;
use protocols::agent::Storage;
use tracing::instrument;
@@ -50,6 +51,15 @@ impl StorageHandler for OverlayfsHandler {
.options
.push(format!("workdir={}", work.to_string_lossy()));
}
let overlay_create_dir_prefix = &(KATA_VOLUME_OVERLAYFS_CREATE_DIR.to_string() + "=");
for driver_option in &storage.driver_options {
if let Some(dir) = driver_option
.as_str()
.strip_prefix(overlay_create_dir_prefix)
{
fs::create_dir_all(dir).context("Failed to create directory")?;
}
}
let path = common_storage_handler(ctx.logger, &storage)?;
new_device(path)

View File

@@ -13,6 +13,7 @@ use std::sync::Arc;
use anyhow::{anyhow, Context, Result};
use kata_sys_util::mount::{create_mount_destination, parse_mount_options};
use kata_types::mount::{StorageDevice, StorageHandlerManager, KATA_SHAREDFS_GUEST_PREMOUNT_TAG};
use kata_types::volume::KATA_VOLUME_TYPE_DMVERITY;
use nix::unistd::{Gid, Uid};
use protocols::agent::Storage;
use protocols::types::FSGroupChangePolicy;
@@ -22,6 +23,7 @@ use tracing::instrument;
use self::bind_watcher_handler::BindWatcherHandler;
use self::block_handler::{PmemHandler, ScsiHandler, VirtioBlkMmioHandler, VirtioBlkPciHandler};
use self::dm_verity::DmVerityHandler;
use self::ephemeral_handler::EphemeralHandler;
use self::fs_handler::{OverlayfsHandler, Virtio9pHandler, VirtioFsHandler};
use self::local_handler::LocalHandler;
@@ -37,6 +39,7 @@ pub use self::ephemeral_handler::update_ephemeral_mounts;
mod bind_watcher_handler;
mod block_handler;
mod dm_verity;
mod ephemeral_handler;
mod fs_handler;
mod local_handler;
@@ -145,6 +148,7 @@ lazy_static! {
manager.add_handler(DRIVER_SCSI_TYPE, Arc::new(ScsiHandler{})).unwrap();
manager.add_handler(DRIVER_VIRTIOFS_TYPE, Arc::new(VirtioFsHandler{})).unwrap();
manager.add_handler(DRIVER_WATCHABLE_BIND_TYPE, Arc::new(BindWatcherHandler{})).unwrap();
manager.add_handler(KATA_VOLUME_TYPE_DMVERITY, Arc::new(DmVerityHandler{})).unwrap();
manager
};
}

View File

@@ -399,12 +399,15 @@ pub fn parse_mount_options<T: AsRef<str>>(options: &[T]) -> Result<(MsFlags, Str
let mut data: Vec<String> = Vec::new();
for opt in options.iter() {
-if opt.as_ref() == "loop" {
let opt_str = opt.as_ref();
if matches!(opt_str, "loop") {
return Err(Error::InvalidMountOption("loop".to_string()));
-} else if let Some(v) = parse_mount_flags(flags, opt.as_ref()) {
} else if let Some(v) = parse_mount_flags(flags, opt_str) {
flags = v;
} else if opt_str.starts_with("io.katacontainers.") {
continue;
} else {
-data.push(opt.as_ref().to_string());
data.push(opt_str.to_string());
}
}

View File

@@ -29,6 +29,9 @@ pub mod k8s;
/// Constants and data types related to mount point.
pub mod mount;
/// Constants and data types related to data volumes.
pub mod volume;
pub(crate) mod utils;
/// hypervisor capabilities

View File

@@ -0,0 +1,26 @@
// Copyright (c) 2023 Alibaba Cloud
//
// SPDX-License-Identifier: Apache-2.0
//
/// Volume to support dm-verity over block devices.
pub const KATA_VOLUME_TYPE_DMVERITY: &str = "dmverity";
/// Key to identify dmverity information in `Storage.driver_options`
pub const KATA_VOLUME_DMVERITY_OPTION_VERITY_INFO: &str = "verity_info";
/// Key to identify type of source device in `Storage.driver_options`
pub const KATA_VOLUME_DMVERITY_OPTION_SOURCE_TYPE: &str = "source_type";
/// Source device of dmverity volume is a Virtio PCI device
pub const KATA_VOLUME_DMVERITY_SOURCE_TYPE_VIRTIO_PCI: &str = "virtio_pci";
/// Source device of dmverity volume is a Virtio MMIO device
pub const KATA_VOLUME_DMVERITY_SOURCE_TYPE_VIRTIO_MMIO: &str = "virtio_mmio";
/// Source device of dmverity volume is a Virtio CCW device
pub const KATA_VOLUME_DMVERITY_SOURCE_TYPE_VIRTIO_CCW: &str = "virtio_ccw";
/// Source device of dmverity volume is a SCSI disk.
pub const KATA_VOLUME_DMVERITY_SOURCE_TYPE_SCSI: &str = "scsi";
/// Source device of dmverity volume is a pmem disk.
pub const KATA_VOLUME_DMVERITY_SOURCE_TYPE_PMEM: &str = "pmem";
/// Key to identify directory creation in `Storage.driver_options`.
pub const KATA_VOLUME_OVERLAYFS_CREATE_DIR: &str =
"io.katacontainers.volume.overlayfs.create_directory";

View File

@@ -129,7 +129,7 @@ func CreateSandbox(ctx context.Context, vci vc.VC, ociSpec specs.Spec, runtimeCo
}
if !rootFs.Mounted && len(sandboxConfig.Containers) == 1 {
-if rootFs.Source != "" {
if rootFs.Source != "" && !vc.HasOptionPrefix(rootFs.Options, vc.VirtualVolumePrefix) {
realPath, err := ResolvePath(rootFs.Source)
if err != nil {
return nil, vc.Process{}, err

View File

@@ -668,41 +668,9 @@ func (c *Container) createBlockDevices(ctx context.Context) error {
}
}
-var stat unix.Stat_t
-if err := unix.Stat(c.mounts[i].Source, &stat); err != nil {
-return fmt.Errorf("stat %q failed: %v", c.mounts[i].Source, err)
-}
-var di *config.DeviceInfo
-var err error
// Check if mount is a block device file. If it is, the block device will be attached to the host
// instead of passing this as a shared mount.
-if stat.Mode&unix.S_IFMT == unix.S_IFBLK {
-di = &config.DeviceInfo{
-HostPath: c.mounts[i].Source,
-ContainerPath: c.mounts[i].Destination,
-DevType: "b",
-Major: int64(unix.Major(uint64(stat.Rdev))),
-Minor: int64(unix.Minor(uint64(stat.Rdev))),
-ReadOnly: c.mounts[i].ReadOnly,
-}
-} else if isBlockFile && stat.Mode&unix.S_IFMT == unix.S_IFREG {
-di = &config.DeviceInfo{
-HostPath: c.mounts[i].Source,
-ContainerPath: c.mounts[i].Destination,
-DevType: "b",
-Major: -1,
-Minor: 0,
-ReadOnly: c.mounts[i].ReadOnly,
-}
-// Check whether source can be used as a pmem device
-} else if di, err = config.PmemDeviceInfo(c.mounts[i].Source, c.mounts[i].Destination); err != nil {
-c.Logger().WithError(err).
-WithField("mount-source", c.mounts[i].Source).
-Debug("no loop device")
-}
di, err := c.createDeviceInfo(c.mounts[i].Source, c.mounts[i].Destination, c.mounts[i].ReadOnly, isBlockFile)
if err == nil && di != nil {
b, err := c.sandbox.devManager.NewDevice(*di)
if err != nil {
@@ -801,6 +769,67 @@ func newContainer(ctx context.Context, sandbox *Sandbox, contConfig *ContainerCo
return c, nil
}
// Create Device Information about the block device
func (c *Container) createDeviceInfo(source, destination string, readonly, isBlockFile bool) (*config.DeviceInfo, error) {
var stat unix.Stat_t
if err := unix.Stat(source, &stat); err != nil {
return nil, fmt.Errorf("stat %q failed: %v", source, err)
}
var di *config.DeviceInfo
var err error
if stat.Mode&unix.S_IFMT == unix.S_IFBLK {
di = &config.DeviceInfo{
HostPath: source,
ContainerPath: destination,
DevType: "b",
Major: int64(unix.Major(uint64(stat.Rdev))),
Minor: int64(unix.Minor(uint64(stat.Rdev))),
ReadOnly: readonly,
}
} else if isBlockFile && stat.Mode&unix.S_IFMT == unix.S_IFREG {
di = &config.DeviceInfo{
HostPath: source,
ContainerPath: destination,
DevType: "b",
Major: -1,
Minor: 0,
ReadOnly: readonly,
}
// Check whether source can be used as a pmem device
} else if di, err = config.PmemDeviceInfo(source, destination); err != nil {
c.Logger().WithError(err).
WithField("mount-source", source).
Debug("no loop device")
}
return di, err
}
// call hypervisor to create device about KataVirtualVolume.
func (c *Container) createVirtualVolumeDevices() ([]config.DeviceInfo, error) {
var deviceInfos []config.DeviceInfo
for _, o := range c.rootFs.Options {
if strings.HasPrefix(o, VirtualVolumePrefix) {
virtVolume, err := types.ParseKataVirtualVolume(strings.TrimPrefix(o, VirtualVolumePrefix))
if err != nil {
return nil, err
}
if virtVolume.VolumeType == types.KataVirtualVolumeImageRawBlockType || virtVolume.VolumeType == types.KataVirtualVolumeLayerRawBlockType {
di, err := c.createDeviceInfo(virtVolume.Source, virtVolume.Source, true, true)
if err != nil {
return nil, err
}
deviceInfos = append(deviceInfos, *di)
} else if virtVolume.VolumeType == types.KataVirtualVolumeImageGuestPullType {
///TODO implement the logic with pulling image in the guest.
continue
}
}
}
return deviceInfos, nil
}
func (c *Container) createMounts(ctx context.Context) error { func (c *Container) createMounts(ctx context.Context) error {
// Create block devices for newly created container // Create block devices for newly created container
return c.createBlockDevices(ctx) return c.createBlockDevices(ctx)
@@ -810,7 +839,13 @@ func (c *Container) createDevices(contConfig *ContainerConfig) error {
// If devices were not found in storage, create Device implementations
// from the configuration. This should happen at create.
var storedDevices []ContainerDevice
-for _, info := range contConfig.DeviceInfos {
virtualVolumesDeviceInfos, err := c.createVirtualVolumeDevices()
if err != nil {
return err
}
deviceInfos := append(virtualVolumesDeviceInfos, contConfig.DeviceInfos...)
for _, info := range deviceInfos {
dev, err := c.sandbox.devManager.NewDevice(info)
if err != nil {
return err

View File

@@ -21,8 +21,9 @@ var fsShareTracingTags = map[string]string{
// SharedFile represents the outcome of a host filesystem sharing
// operation.
type SharedFile struct {
-storage *grpc.Storage
-guestPath string
containerStorages []*grpc.Storage
volumeStorages []*grpc.Storage
guestPath string
}
type FilesystemSharer interface {

View File

@@ -27,6 +27,7 @@ import (
"github.com/kata-containers/kata-containers/src/runtime/pkg/katautils/katatrace"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/agent/protocols/grpc"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/pkg/annotations"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/types"
"github.com/kata-containers/kata-containers/src/runtime/virtcontainers/utils"
)
@@ -438,8 +439,79 @@ func (f *FilesystemShare) shareRootFilesystemWithNydus(ctx context.Context, c *C
f.Logger().Infof("Nydus rootfs info: %#v\n", rootfs)
return &SharedFile{
-storage: rootfs,
containerStorages: []*grpc.Storage{rootfs},
guestPath: rootfsGuestPath,
}, nil
}
// handleVirtualVolume processes each `extraoption` in rootFs.Options,
// creating storage, and then aggregates all storages into an array.
func handleVirtualVolume(c *Container) ([]*grpc.Storage, string, error) {
var volumes []*grpc.Storage
var volumeType string
for _, o := range c.rootFs.Options {
if strings.HasPrefix(o, VirtualVolumePrefix) {
virtVolume, err := types.ParseKataVirtualVolume(strings.TrimPrefix(o, VirtualVolumePrefix))
if err != nil {
return nil, "", err
}
volumeType = virtVolume.VolumeType
var vol *grpc.Storage
if virtVolume.VolumeType == types.KataVirtualVolumeImageRawBlockType || virtVolume.VolumeType == types.KataVirtualVolumeLayerRawBlockType {
for i, d := range c.devices {
if d.ContainerPath == virtVolume.Source {
vol, err = handleVirtualVolumeStorageObject(c, d.ID, virtVolume)
if err != nil {
return nil, "", err
}
c.devices[i].ContainerPath = vol.MountPoint
vol.Fstype = virtVolume.FSType
vol.Options = append(vol.Options, virtVolume.Options...)
break
}
}
}
if vol != nil {
volumes = append(volumes, vol)
}
}
}
return volumes, volumeType, nil
}
func (f *FilesystemShare) shareRootFilesystemWithVirtualVolume(ctx context.Context, c *Container) (*SharedFile, error) {
kataGuestDir := filepath.Join(defaultKataGuestVirtualVolumedir, "containers")
guestPath := filepath.Join("/run/kata-containers/", c.id, c.rootfsSuffix)
rootFsStorages, volumeType, err := handleVirtualVolume(c)
if err != nil {
return nil, err
}
if volumeType == types.KataVirtualVolumeImageRawBlockType || volumeType == types.KataVirtualVolumeLayerRawBlockType {
rootfs := &grpc.Storage{}
rootfs.MountPoint = guestPath
overlayDirDriverOption := "io.katacontainers.volume.overlayfs.create_directory"
rootfs.Source = typeOverlayFS
rootfs.Fstype = typeOverlayFS
rootfs.Driver = kataOverlayDevType
for _, v := range rootFsStorages {
rootfs.Options = append(rootfs.Options, fmt.Sprintf("%s=%s", lowerDir, v.MountPoint))
}
rootfsUpperDir := filepath.Join(kataGuestDir, c.id, "fs")
rootfsWorkDir := filepath.Join(kataGuestDir, c.id, "work")
rootfs.DriverOptions = append(rootfs.DriverOptions, fmt.Sprintf("%s=%s", overlayDirDriverOption, rootfsUpperDir))
rootfs.DriverOptions = append(rootfs.DriverOptions, fmt.Sprintf("%s=%s", overlayDirDriverOption, rootfsWorkDir))
rootfs.Options = append(rootfs.Options, fmt.Sprintf("%s=%s", upperDir, rootfsUpperDir))
rootfs.Options = append(rootfs.Options, fmt.Sprintf("%s=%s", workDir, rootfsWorkDir))
rootFsStorages = append(rootFsStorages, rootfs)
f.Logger().Infof("verity rootfs info: %#v\n", rootfs)
}
return &SharedFile{
containerStorages: rootFsStorages,
guestPath: guestPath,
}, nil
} }
@@ -451,11 +523,15 @@ func (f *FilesystemShare) ShareRootFilesystem(ctx context.Context, c *Container)
// so there is no Rootfs.Target.
if f.sandbox.config.ServiceOffload && c.rootFs.Target == "" {
return &SharedFile{
-storage: nil,
containerStorages: nil,
guestPath: rootfsGuestPath,
}, nil
}
if HasOptionPrefix(c.rootFs.Options, VirtualVolumePrefix) {
return f.shareRootFilesystemWithVirtualVolume(ctx, c)
}
if c.rootFs.Type == NydusRootFSType {
return f.shareRootFilesystemWithNydus(ctx, c)
}
@@ -463,13 +539,13 @@ func (f *FilesystemShare) ShareRootFilesystem(ctx context.Context, c *Container)
if HasOptionPrefix(c.rootFs.Options, annotations.FileSystemLayer) {
path := filepath.Join("/run/kata-containers", c.id, "rootfs")
return &SharedFile{
-storage: &grpc.Storage{
containerStorages: []*grpc.Storage{{
MountPoint: path,
Source: "none",
Fstype: c.rootFs.Type,
Driver: kataOverlayDevType,
Options: c.rootFs.Options,
-},
}},
guestPath: path,
}, nil
}
@@ -534,8 +610,8 @@ func (f *FilesystemShare) ShareRootFilesystem(ctx context.Context, c *Container)
}
return &SharedFile{
-storage: rootfsStorage,
containerStorages: []*grpc.Storage{rootfsStorage},
guestPath: rootfsGuestPath,
}, nil
}
@@ -549,8 +625,8 @@ func (f *FilesystemShare) ShareRootFilesystem(ctx context.Context, c *Container)
}
return &SharedFile{
-storage: nil,
containerStorages: nil,
guestPath: rootfsGuestPath,
}, nil
}
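To make the overlay assembly above concrete, here is a hedged Go sketch of the rootfs `Storage` that `shareRootFilesystemWithVirtualVolume` ends up sending for dm-verity backed layers. The `Storage` struct is a local stand-in for the agent protocol type, the container ID and layer hash are hypothetical, and the `lowerdir`/`upperdir`/`workdir` option names are assumed to match the usual overlay mount options; the `io.katacontainers.volume.overlayfs.create_directory` driver option is the one handled by the agent's OverlayfsHandler earlier in this diff.

```go
// Sketch: overlay rootfs Storage built on top of verified layer mounts.
package main

import "fmt"

// Local stand-in for the agent's grpc.Storage type.
type Storage struct {
	Driver        string
	DriverOptions []string
	Source        string
	Fstype        string
	Options       []string
	MountPoint    string
}

func main() {
	const (
		cid          = "abc123" // hypothetical container ID
		guestVolDir  = "/run/kata-containers/virtual-volumes/containers/" + cid
		createDirKey = "io.katacontainers.volume.overlayfs.create_directory"
	)

	rootfs := Storage{
		Driver: "overlayfs",
		Source: "overlay",
		Fstype: "overlay",
		// Each verified layer mounted by the agent becomes a lowerdir;
		// upperdir/workdir live under the guest virtual-volumes dir and
		// are created by the agent thanks to the driver options below.
		Options: []string{
			"lowerdir=/run/kata-containers/virtual-volumes/verity/<layer-hash>",
			"upperdir=" + guestVolDir + "/fs",
			"workdir=" + guestVolDir + "/work",
		},
		DriverOptions: []string{
			createDirKey + "=" + guestVolDir + "/fs",
			createDirKey + "=" + guestVolDir + "/work",
		},
		MountPoint: "/run/kata-containers/" + cid + "/rootfs",
	}
	fmt.Printf("%+v\n", rootfs)
}
```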

View File

@@ -69,6 +69,8 @@ const (
NydusRootFSType = "fuse.nydus-overlayfs"
VirtualVolumePrefix = "io.katacontainers.volume="
// enable debug console
kernelParamDebugConsole = "agent.debug_console"
kernelParamDebugConsoleVPort = "agent.debug_console_vport"
@@ -81,42 +83,44 @@ const (
type customRequestTimeoutKeyType struct{}
var (
checkRequestTimeout = 30 * time.Second
defaultRequestTimeout = 60 * time.Second
imageRequestTimeout = 60 * time.Second
remoteRequestTimeout = 300 * time.Second
customRequestTimeoutKey = customRequestTimeoutKeyType(struct{}{})
errorMissingOCISpec = errors.New("Missing OCI specification")
defaultKataGuestVirtualVolumedir = "/run/kata-containers/virtual-volumes/"
defaultKataHostSharedDir = "/run/kata-containers/shared/sandboxes/"
defaultKataGuestSharedDir = "/run/kata-containers/shared/containers/"
defaultKataGuestNydusRootDir = "/run/kata-containers/shared/"
mountGuestTag = "kataShared"
defaultKataGuestSandboxDir = "/run/kata-containers/sandbox/"
type9pFs = "9p"
typeVirtioFS = "virtiofs"
typeOverlayFS = "overlay"
kata9pDevType = "9p"
kataMmioBlkDevType = "mmioblk"
kataBlkDevType = "blk"
kataBlkCCWDevType = "blk-ccw"
kataSCSIDevType = "scsi"
kataNvdimmDevType = "nvdimm"
kataVirtioFSDevType = "virtio-fs"
kataOverlayDevType = "overlayfs"
kataWatchableBindDevType = "watchable-bind"
kataVfioPciDevType = "vfio-pci" // VFIO device to used as VFIO in the container
kataVfioPciGuestKernelDevType = "vfio-pci-gk" // VFIO device for consumption by the guest kernel
kataVfioApDevType = "vfio-ap"
kataDmVerityBlkDevType = "dmverity"
sharedDir9pOptions = []string{"trans=virtio,version=9p2000.L,cache=mmap", "nodev"}
sharedDirVirtioFSOptions = []string{}
sharedDirVirtioFSDaxOptions = "dax"
shmDir = "shm"
kataEphemeralDevType = "ephemeral"
defaultEphemeralPath = filepath.Join(defaultKataGuestSandboxDir, kataEphemeralDevType)
grpcMaxDataSize = int64(1024 * 1024)
localDirOptions = []string{"mode=0777"}
maxHostnameLen = 64
GuestDNSFile = "/etc/resolv.conf"
)
const (
@@ -1198,6 +1202,10 @@ func (k *kataAgent) appendDevices(deviceList []*grpc.Device, c *Container) []*gr
return nil
}
if strings.HasPrefix(dev.ContainerPath, defaultKataGuestVirtualVolumedir) {
continue
}
switch device.DeviceType() {
case config.DeviceBlock:
kataDevice = k.appendBlockDevice(dev, device, c)
@@ -1256,12 +1264,17 @@ func (k *kataAgent) createContainer(ctx context.Context, sandbox *Sandbox, c *Co
return nil, err
}
-if sharedRootfs.storage != nil {
if sharedRootfs.containerStorages != nil {
// Add rootfs to the list of container storage.
-// We only need to do this for block based rootfs, as we
ctrStorages = append(ctrStorages, sharedRootfs.containerStorages...)
}
if sharedRootfs.volumeStorages != nil {
// Add volumeStorages to the list of container storage.
// We only need to do this for KataVirtualVolume based rootfs, as we
// want the agent to mount it into the right location
// (kataGuestSharedDir/ctrID/
-ctrStorages = append(ctrStorages, sharedRootfs.storage)
ctrStorages = append(ctrStorages, sharedRootfs.volumeStorages...)
}
ociSpec := c.GetPatchedOCISpec()
@@ -1537,14 +1550,36 @@ func (k *kataAgent) handleLocalStorage(mounts []specs.Mount, sandboxID string, r
return localStorages, nil
}
-// handleDeviceBlockVolume handles volume that is block device file
-// and DeviceBlock type.
-func (k *kataAgent) handleDeviceBlockVolume(c *Container, m Mount, device api.Device) (*grpc.Storage, error) {
// Add the source block type to DriverOptions in the volume with dm-verity
func handleDmVerityBlockVolume(driverType, source string, verityInfo *types.DmVerityInfo, vol *grpc.Storage) (*grpc.Storage, error) {
no, err := json.Marshal(verityInfo)
if err != nil {
return nil, err
}
vol.Driver = kataDmVerityBlkDevType
vol.DriverOptions = append(vol.DriverOptions, "verity_info="+string(no))
switch driverType {
case kataNvdimmDevType:
vol.DriverOptions = append(vol.DriverOptions, "source_type=pmem")
case kataBlkCCWDevType:
vol.DriverOptions = append(vol.DriverOptions, "source_type=virtio_ccw")
case kataBlkDevType:
vol.DriverOptions = append(vol.DriverOptions, "source_type=virtio_pci")
case kataMmioBlkDevType:
vol.DriverOptions = append(vol.DriverOptions, "source_type=virtio_mmio")
case kataSCSIDevType:
vol.DriverOptions = append(vol.DriverOptions, "source_type=scsi")
}
vol.Options = []string{"ro"}
vol.MountPoint = filepath.Join(defaultKataGuestVirtualVolumedir, "verity", verityInfo.Hash)
return vol, nil
}
func handleBlockVolume(c *Container, device api.Device) (*grpc.Storage, error) {
vol := &grpc.Storage{}
blockDrive, ok := device.GetDeviceInfo().(*config.BlockDrive)
if !ok || blockDrive == nil {
-k.Logger().Error("malformed block drive")
return nil, fmt.Errorf("malformed block drive")
}
switch {
@@ -1569,6 +1604,47 @@ func (k *kataAgent) handleDeviceBlockVolume(c *Container, m Mount, device api.De
default:
return nil, fmt.Errorf("Unknown block device driver: %s", c.sandbox.config.HypervisorConfig.BlockDeviceDriver)
}
return vol, nil
}
// handleVirtualVolumeStorageObject handles KataVirtualVolume that is block device file.
func handleVirtualVolumeStorageObject(c *Container, blockDeviceId string, virtVolume *types.KataVirtualVolume) (*grpc.Storage, error) {
var vol *grpc.Storage
if virtVolume.VolumeType == types.KataVirtualVolumeImageRawBlockType || virtVolume.VolumeType == types.KataVirtualVolumeLayerRawBlockType {
device := c.sandbox.devManager.GetDeviceByID(blockDeviceId)
if device == nil {
return nil, fmt.Errorf("Failed to find device by id (id=%s) in handleVirtualVolumeStorageObject", blockDeviceId)
}
var err error
vol, err = handleBlockVolume(c, device)
if err != nil {
return nil, err
}
filename := b64.URLEncoding.EncodeToString([]byte(vol.Source))
vol.MountPoint = filepath.Join(defaultKataGuestVirtualVolumedir, filename)
//convert block storage to dmverity storage if dm-verity info is available
if virtVolume.DmVerity != nil {
vol, err = handleDmVerityBlockVolume(vol.Driver, virtVolume.Source, virtVolume.DmVerity, vol)
if err != nil {
return nil, err
}
}
} else if virtVolume.VolumeType == types.KataVirtualVolumeImageGuestPullType {
///TODO implement the logic with pulling image in the guest.
return nil, nil
}
return vol, nil
}
// handleDeviceBlockVolume handles volume that is block device file
// and DeviceBlock type.
func (k *kataAgent) handleDeviceBlockVolume(c *Container, m Mount, device api.Device) (*grpc.Storage, error) {
vol, err := handleBlockVolume(c, device)
if err != nil {
return nil, err
}
vol.MountPoint = m.Destination

View File

@@ -0,0 +1,156 @@
package types
import (
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"strings"
"github.com/pkg/errors"
)
const (
minBlockSize = 1 << 9
maxBlockSize = 1 << 19
)
const (
KataVirtualVolumeDirectBlockType = "direct_block"
KataVirtualVolumeImageRawBlockType = "image_raw_block"
KataVirtualVolumeLayerRawBlockType = "layer_raw_block"
KataVirtualVolumeImageNydusBlockType = "image_nydus_block"
KataVirtualVolumeLayerNydusBlockType = "layer_nydus_block"
KataVirtualVolumeImageNydusFsType = "image_nydus_fs"
KataVirtualVolumeLayerNydusFsType = "layer_nydus_fs"
KataVirtualVolumeImageGuestPullType = "image_guest_pull"
)
// DmVerityInfo contains configuration information for DmVerity device.
type DmVerityInfo struct {
HashType string `json:"hashtype"`
Hash string `json:"hash"`
BlockNum uint64 `json:"blocknum"`
Blocksize uint64 `json:"blocksize"`
Hashsize uint64 `json:"hashsize"`
Offset uint64 `json:"offset"`
}
// DirectAssignedVolume contains meta information for a directly assigned volume.
type DirectAssignedVolume struct {
Metadata map[string]string `json:"metadata"`
}
// ImagePullVolume contains meta information for pulling an image inside the guest.
type ImagePullVolume struct {
Metadata map[string]string `json:"metadata"`
}
// NydusImageVolume contains Nydus image volume information.
type NydusImageVolume struct {
Config string `json:"config"`
SnapshotDir string `json:"snapshot_dir"`
}
// KataVirtualVolume encapsulates information for extra mount options and direct volumes.
type KataVirtualVolume struct {
VolumeType string `json:"volume_type"`
Source string `json:"source,omitempty"`
FSType string `json:"fs_type,omitempty"`
Options []string `json:"options,omitempty"`
DirectVolume *DirectAssignedVolume `json:"direct_volume,omitempty"`
ImagePull *ImagePullVolume `json:"image_pull,omitempty"`
NydusImage *NydusImageVolume `json:"nydus_image,omitempty"`
DmVerity *DmVerityInfo `json:"dm_verity,omitempty"`
}
func (d *DmVerityInfo) IsValid() error {
err := d.validateHashType()
if err != nil {
return err
}
if d.BlockNum == 0 || d.BlockNum > uint64(^uint32(0)) {
return fmt.Errorf("Zero block count for DmVerity device %s", d.Hash)
}
if !isValidBlockSize(d.Blocksize) || !isValidBlockSize(d.Hashsize) {
return fmt.Errorf("Unsupported verity block size: data_block_size = %d, hash_block_size = %d", d.Blocksize, d.Hashsize)
}
if d.Offset%d.Hashsize != 0 || d.Offset < d.Blocksize*d.BlockNum {
return fmt.Errorf("Invalid hashvalue offset %d for DmVerity device %s", d.Offset, d.Hash)
}
return nil
}
func (d *DirectAssignedVolume) IsValid() bool {
return d.Metadata != nil
}
func (i *ImagePullVolume) IsValid() bool {
return i.Metadata != nil
}
func (n *NydusImageVolume) IsValid() bool {
return len(n.Config) > 0 || len(n.SnapshotDir) > 0
}
func (k *KataVirtualVolume) IsValid() bool {
return len(k.VolumeType) > 0 &&
(k.DirectVolume == nil || k.DirectVolume.IsValid()) &&
(k.ImagePull == nil || k.ImagePull.IsValid()) &&
(k.NydusImage == nil || k.NydusImage.IsValid()) &&
(k.DmVerity == nil || k.DmVerity.IsValid() == nil)
}
func (d *DmVerityInfo) validateHashType() error {
switch strings.ToLower(d.HashType) {
case "sha256":
return d.isValidHash(64, "sha256")
case "sha1":
return d.isValidHash(40, "sha1")
default:
return fmt.Errorf("Unsupported hash algorithm %s for DmVerity device %s", d.HashType, d.Hash)
}
}
func isValidBlockSize(blockSize uint64) bool {
return minBlockSize <= blockSize && blockSize <= maxBlockSize
}
func (d *DmVerityInfo) isValidHash(expectedLen int, hashType string) error {
_, err := hex.DecodeString(d.Hash)
if len(d.Hash) != expectedLen || err != nil {
return fmt.Errorf("Invalid hash value %s:%s for DmVerity device with %s", hashType, d.Hash, hashType)
}
return nil
}
func ParseDmVerityInfo(option string) (*DmVerityInfo, error) {
no := &DmVerityInfo{}
if err := json.Unmarshal([]byte(option), no); err != nil {
return nil, errors.Wrapf(err, "DmVerityInfo json unmarshal err")
}
if err := no.IsValid(); err != nil {
return nil, fmt.Errorf("DmVerityInfo is not correct, %+v; error = %+v", no, err)
}
return no, nil
}
func ParseKataVirtualVolume(option string) (*KataVirtualVolume, error) {
opt, err := base64.StdEncoding.DecodeString(option)
if err != nil {
return nil, errors.Wrap(err, "KataVirtualVolume base64 decoding err")
}
no := &KataVirtualVolume{}
if err := json.Unmarshal(opt, no); err != nil {
return nil, errors.Wrapf(err, "KataVirtualVolume json unmarshal err")
}
if !no.IsValid() {
return nil, fmt.Errorf("KataVirtualVolume is not correct, %+v", no)
}
return no, nil
}

View File

@@ -0,0 +1,246 @@
package types
import (
"encoding/base64"
"encoding/json"
"testing"
"github.com/stretchr/testify/assert"
)
func TestDmVerityInfoValidation(t *testing.T) {
TestData := []DmVerityInfo{
{
HashType: "md5", // "md5" is not a supported hash algorithm
Blocksize: 512,
Hashsize: 512,
BlockNum: 16384,
Offset: 8388608,
Hash: "9de18652fe74edfb9b805aaed72ae2aa48f94333f1ba5c452ac33b1c39325174",
},
{
HashType: "sha256",
Blocksize: 3000, // Invalid block size, not a power of 2.
Hashsize: 512,
BlockNum: 16384,
Offset: 8388608,
Hash: "9de18652fe74edfb9b805aaed72ae2aa48f94333f1ba5c452ac33b1c39325174",
},
{
HashType: "sha256",
Blocksize: 0, // Invalid block size, less than 512.
Hashsize: 512,
BlockNum: 16384,
Offset: 8388608,
Hash: "9de18652fe74edfb9b805aaed72ae2aa48f94333f1ba5c452ac33b1c39325174",
},
{
HashType: "sha256",
Blocksize: 524800, // Invalid block size, greater than 524288.
Hashsize: 512,
BlockNum: 16384,
Offset: 8388608,
Hash: "9de18652fe74edfb9b805aaed72ae2aa48f94333f1ba5c452ac33b1c39325174",
},
{
HashType: "sha256",
Blocksize: 512,
Hashsize: 3000, // Invalid hash block size, not a power of 2.
BlockNum: 16384,
Offset: 8388608,
Hash: "9de18652fe74edfb9b805aaed72ae2aa48f94333f1ba5c452ac33b1c39325174",
},
{
HashType: "sha256",
Blocksize: 512,
Hashsize: 0, // Invalid hash block size, less than 512.
BlockNum: 16384,
Offset: 8388608,
Hash: "9de18652fe74edfb9b805aaed72ae2aa48f94333f1ba5c452ac33b1c39325174",
},
{
HashType: "sha256",
Blocksize: 512,
Hashsize: 524800, // Invalid hash block size, greater than 524288.
BlockNum: 16384,
Offset: 8388608,
Hash: "9de18652fe74edfb9b805aaed72ae2aa48f94333f1ba5c452ac33b1c39325174",
},
{
HashType: "sha256",
Blocksize: 512,
Hashsize: 512,
BlockNum: 0, // Invalid BlockNum, it must be greater than 0.
Offset: 8388608,
Hash: "9de18652fe74edfb9b805aaed72ae2aa48f94333f1ba5c452ac33b1c39325174",
},
{
HashType: "sha256",
Blocksize: 512,
Hashsize: 512,
BlockNum: 16384,
Offset: 0, // Invalid offset, it must be greater than 0.
Hash: "9de18652fe74edfb9b805aaed72ae2aa48f94333f1ba5c452ac33b1c39325174",
},
{
HashType: "sha256",
Blocksize: 512,
Hashsize: 512,
BlockNum: 16384,
Offset: 8193, // Invalid offset, it must be aligned to 512.
Hash: "9de18652fe74edfb9b805aaed72ae2aa48f94333f1ba5c452ac33b1c39325174",
},
{
HashType: "sha256",
Blocksize: 512,
Hashsize: 512,
BlockNum: 16384,
Offset: 8388608 - 4096, // Invalid offset, it must be equal to blocksize * BlockNum.
Hash: "9de18652fe74edfb9b805aaed72ae2aa48f94333f1ba5c452ac33b1c39325174",
},
}
for _, d := range TestData {
assert.Error(t, d.IsValid())
}
TestCorrectData := DmVerityInfo{
HashType: "sha256",
Blocksize: 512,
Hashsize: 512,
BlockNum: 16384,
Offset: 8388608,
Hash: "9de18652fe74edfb9b805aaed72ae2aa48f94333f1ba5c452ac33b1c39325174",
}
assert.NoError(t, TestCorrectData.IsValid())
}
func TestDirectAssignedVolumeValidation(t *testing.T) {
validDirectVolume := DirectAssignedVolume{
Metadata: map[string]string{"key": "value"},
}
assert.True(t, validDirectVolume.IsValid())
invalidDirectVolume := DirectAssignedVolume{
Metadata: nil,
}
assert.False(t, invalidDirectVolume.IsValid())
}
func TestImagePullVolumeValidation(t *testing.T) {
validImagePull := ImagePullVolume{
Metadata: map[string]string{"key": "value"},
}
assert.True(t, validImagePull.IsValid())
invalidImagePull := ImagePullVolume{
Metadata: nil,
}
assert.False(t, invalidImagePull.IsValid())
}
func TestNydusImageVolumeValidation(t *testing.T) {
validNydusImage := NydusImageVolume{
Config: "config_value",
SnapshotDir: "",
}
assert.True(t, validNydusImage.IsValid())
invalidNydusImage := NydusImageVolume{
Config: "",
SnapshotDir: "",
}
assert.False(t, invalidNydusImage.IsValid())
}
func TestKataVirtualVolumeValidation(t *testing.T) {
validKataVirtualVolume := KataVirtualVolume{
VolumeType: "direct_block",
Source: "/dev/sdb",
FSType: "ext4",
Options: []string{"rw"},
DirectVolume: &DirectAssignedVolume{
Metadata: map[string]string{"key": "value"},
},
// Initialize other fields
}
assert.True(t, validKataVirtualVolume.IsValid())
invalidKataVirtualVolume := KataVirtualVolume{
VolumeType: "direct_block",
Source: "/dev/sdb",
FSType: "",
Options: nil,
DirectVolume: &DirectAssignedVolume{
Metadata: nil,
},
// Initialize other fields
}
assert.False(t, invalidKataVirtualVolume.IsValid())
}
func TestParseDmVerityInfo(t *testing.T) {
// Create a mock valid KataVirtualVolume
validDmVerityInfo := DmVerityInfo{
HashType: "sha256",
Blocksize: 512,
Hashsize: 512,
BlockNum: 16384,
Offset: 8388608,
Hash: "9de18652fe74edfb9b805aaed72ae2aa48f94333f1ba5c452ac33b1c39325174",
}
validKataVirtualVolumeJSON, _ := json.Marshal(validDmVerityInfo)
t.Run("Valid Option", func(t *testing.T) {
volume, err := ParseDmVerityInfo(string(validKataVirtualVolumeJSON))
assert.NoError(t, err)
assert.NotNil(t, volume)
assert.NoError(t, volume.IsValid())
})
t.Run("Invalid JSON Option", func(t *testing.T) {
volume, err := ParseDmVerityInfo("invalid_json")
assert.Error(t, err)
assert.Nil(t, volume)
})
}
func TestParseKataVirtualVolume(t *testing.T) {
// Create a mock valid KataVirtualVolume
validKataVirtualVolume := KataVirtualVolume{
VolumeType: "direct_block",
Source: "/dev/sdb",
FSType: "ext4",
Options: []string{"rw"},
DirectVolume: &DirectAssignedVolume{
Metadata: map[string]string{"key": "value"},
},
// Initialize other fields
}
validKataVirtualVolumeJSON, _ := json.Marshal(validKataVirtualVolume)
validOption := base64.StdEncoding.EncodeToString(validKataVirtualVolumeJSON)
t.Run("Valid Option", func(t *testing.T) {
volume, err := ParseKataVirtualVolume(validOption)
assert.NoError(t, err)
assert.NotNil(t, volume)
assert.True(t, volume.IsValid())
})
t.Run("Invalid JSON Option", func(t *testing.T) {
invalidJSONOption := base64.StdEncoding.EncodeToString([]byte("invalid_json"))
volume, err := ParseKataVirtualVolume(invalidJSONOption)
assert.Error(t, err)
assert.Nil(t, volume)
})
invalidBase64Option := "invalid_base64"
t.Run("Invalid Base64 Option", func(t *testing.T) {
volume, err := ParseKataVirtualVolume(invalidBase64Option)
assert.Error(t, err)
assert.Nil(t, volume)
})
}