mirror of https://github.com/kata-containers/kata-containers.git (synced 2025-04-29 04:04:45 +00:00)
agent: refine storage related code a bit
Refine storage related code by:
- remove the STORAGE_HANDLER_LIST
- define type alias
- move code near to its caller

Signed-off-by: Jiang Liu <gerry@linux.alibaba.com>
parent 60ca12ccb0
commit 8f49ee33b2
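For context on the hunks below: the hard-coded STORAGE_HANDLER_LIST constant is dropped and the list of supported storage drivers is instead derived from the handler registry itself (the STORAGE_HANDLERS / StorageHandlerManager seen further down). The following is a minimal, self-contained sketch of that pattern, for illustration only; it is not the kata-agent code, and the register() helper and the toy handler values are assumptions made for the example.

// Illustration only: a registry keyed by driver name, so the set of supported
// storage handlers comes from the registrations themselves instead of a
// separately maintained constant. The register() helper is hypothetical.
use std::collections::HashMap;

struct StorageHandlerManager<H> {
    handlers: HashMap<String, H>,
}

impl<H> StorageHandlerManager<H> {
    fn new() -> Self {
        Self {
            handlers: HashMap::new(),
        }
    }

    fn register(&mut self, id: &str, handler: H) {
        self.handlers.insert(id.to_string(), handler);
    }

    fn handler(&self, id: &str) -> Option<&H> {
        self.handlers.get(id)
    }

    // Names of registered handlers, usable for reporting agent capabilities.
    fn get_handlers(&self) -> Vec<String> {
        self.handlers.keys().map(|v| v.to_string()).collect()
    }
}

fn main() {
    let mut mgr: StorageHandlerManager<&str> = StorageHandlerManager::new();
    mgr.register("ephemeral", "EphemeralHandler");
    mgr.register("virtio-9p", "Virtio9pHandler");
    assert!(mgr.handler("ephemeral").is_some());
    println!("storage handlers: {:?}", mgr.get_handlers());
}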
@@ -3,8 +3,6 @@
 // SPDX-License-Identifier: Apache-2.0
 //
 
-#![allow(dead_code)]
-
 use std::collections::HashMap;
 use std::fmt::Debug;
 use std::fs::{self, File, OpenOptions};
@@ -133,19 +131,6 @@ lazy_static! {
     };
 }
 
-pub const STORAGE_HANDLER_LIST: &[&str] = &[
-    //DRIVER_BLK_TYPE,
-    //DRIVER_9P_TYPE,
-    //DRIVER_VIRTIOFS_TYPE,
-    //DRIVER_EPHEMERAL_TYPE,
-    //DRIVER_OVERLAYFS_TYPE,
-    //DRIVER_MMIO_BLK_TYPE,
-    //DRIVER_LOCAL_TYPE,
-    //DRIVER_SCSI_TYPE,
-    //DRIVER_NVDIMM_TYPE,
-    //DRIVER_WATCHABLE_BIND_TYPE,
-];
-
 #[instrument]
 pub fn baremount(
     source: &Path,
@@ -222,7 +207,7 @@ impl StorageHandler for EphemeralHandler {
             // /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
             // /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
             // options eg "pagesize=2097152,size=524288000"(2M, 500M)
-            allocate_hugepages(ctx.logger, &storage.options.to_vec())
+            Self::allocate_hugepages(ctx.logger, &storage.options.to_vec())
                 .context("allocate hugepages")?;
             common_storage_handler(ctx.logger, &storage)?;
         } else if !storage.options.is_empty() {
@@ -254,6 +239,96 @@ impl StorageHandler for EphemeralHandler {
     }
 }
 
+impl EphemeralHandler {
+    // Allocate hugepages by writing to sysfs
+    fn allocate_hugepages(logger: &Logger, options: &[String]) -> Result<()> {
+        info!(logger, "mounting hugePages storage options: {:?}", options);
+
+        let (pagesize, size) = Self::get_pagesize_and_size_from_option(options)
+            .context(format!("parse mount options: {:?}", &options))?;
+
+        info!(
+            logger,
+            "allocate hugepages. pageSize: {}, size: {}", pagesize, size
+        );
+
+        // sysfs entry is always of the form hugepages-${pagesize}kB
+        // Ref: https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
+        let path = Path::new(SYS_FS_HUGEPAGES_PREFIX)
+            .join(format!("hugepages-{}kB", pagesize / 1024))
+            .join("nr_hugepages");
+
+        // write numpages to nr_hugepages file.
+        let numpages = format!("{}", size / pagesize);
+        info!(logger, "write {} pages to {:?}", &numpages, &path);
+
+        let mut file = OpenOptions::new()
+            .write(true)
+            .open(&path)
+            .context(format!("open nr_hugepages directory {:?}", &path))?;
+
+        file.write_all(numpages.as_bytes())
+            .context(format!("write nr_hugepages failed: {:?}", &path))?;
+
+        // Even if the write succeeds, the kernel isn't guaranteed to be
+        // able to allocate all the pages we requested. Verify that it
+        // did.
+        let verify = fs::read_to_string(&path).context(format!("reading {:?}", &path))?;
+        let allocated = verify
+            .trim_end()
+            .parse::<u64>()
+            .map_err(|_| anyhow!("Unexpected text {:?} in {:?}", &verify, &path))?;
+        if allocated != size / pagesize {
+            return Err(anyhow!(
+                "Only allocated {} of {} hugepages of size {}",
+                allocated,
+                numpages,
+                pagesize
+            ));
+        }
+
+        Ok(())
+    }
+
+    // Parse filesystem options string to retrieve hugepage details
+    // options eg "pagesize=2048,size=107374182"
+    fn get_pagesize_and_size_from_option(options: &[String]) -> Result<(u64, u64)> {
+        let mut pagesize_str: Option<&str> = None;
+        let mut size_str: Option<&str> = None;
+
+        for option in options {
+            let vars: Vec<&str> = option.trim().split(',').collect();
+
+            for var in vars {
+                if let Some(stripped) = var.strip_prefix("pagesize=") {
+                    pagesize_str = Some(stripped);
+                } else if let Some(stripped) = var.strip_prefix("size=") {
+                    size_str = Some(stripped);
+                }
+
+                if pagesize_str.is_some() && size_str.is_some() {
+                    break;
+                }
+            }
+        }
+
+        if pagesize_str.is_none() || size_str.is_none() {
+            return Err(anyhow!("no pagesize/size options found"));
+        }
+
+        let pagesize = pagesize_str
+            .unwrap()
+            .parse::<u64>()
+            .context(format!("parse pagesize: {:?}", &pagesize_str))?;
+        let size = size_str
+            .unwrap()
+            .parse::<u64>()
+            .context(format!("parse size: {:?}", &pagesize_str))?;
+
+        Ok((pagesize, size))
+    }
+}
+
 // update_ephemeral_mounts takes a list of ephemeral mounts and remounts them
 // with mount options passed by the caller
 #[instrument]
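To make the sysfs arithmetic in the moved allocate_hugepages concrete, here is a standalone sketch (illustration only, not the agent code) that reproduces the option parsing and the path/page-count computation for the example quoted in the comments above, "pagesize=2097152,size=524288000"; the /sys/kernel/mm/hugepages prefix is assumed from those comments and stands in for the SYS_FS_HUGEPAGES_PREFIX constant.

// Illustration only: what the moved helpers compute for the example options
// "pagesize=2097152,size=524288000" (2 MiB pages, 500 MiB total).
fn parse_hugepage_option(option: &str) -> Option<(u64, u64)> {
    let mut pagesize = None;
    let mut size = None;
    for var in option.trim().split(',') {
        if let Some(v) = var.strip_prefix("pagesize=") {
            pagesize = v.parse::<u64>().ok();
        } else if let Some(v) = var.strip_prefix("size=") {
            size = v.parse::<u64>().ok();
        }
    }
    Some((pagesize?, size?))
}

fn main() {
    let (pagesize, size) = parse_hugepage_option("pagesize=2097152,size=524288000").unwrap();
    // pagesize is in bytes; the sysfs directory name is in kB: 2097152 B = 2048 kB.
    // Prefix assumed from the comments above (SYS_FS_HUGEPAGES_PREFIX in the agent).
    let path = format!(
        "/sys/kernel/mm/hugepages/hugepages-{}kB/nr_hugepages",
        pagesize / 1024
    );
    // Number of pages written to nr_hugepages: 524288000 / 2097152 = 250.
    let numpages = size / pagesize;
    println!("write {} to {}", numpages, path);
}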
@@ -416,94 +491,6 @@ impl StorageHandler for Virtio9pHandler {
     }
 }
 
-// Allocate hugepages by writing to sysfs
-fn allocate_hugepages(logger: &Logger, options: &[String]) -> Result<()> {
-    info!(logger, "mounting hugePages storage options: {:?}", options);
-
-    let (pagesize, size) = get_pagesize_and_size_from_option(options)
-        .context(format!("parse mount options: {:?}", &options))?;
-
-    info!(
-        logger,
-        "allocate hugepages. pageSize: {}, size: {}", pagesize, size
-    );
-
-    // sysfs entry is always of the form hugepages-${pagesize}kB
-    // Ref: https://www.kernel.org/doc/Documentation/vm/hugetlbpage.txt
-    let path = Path::new(SYS_FS_HUGEPAGES_PREFIX)
-        .join(format!("hugepages-{}kB", pagesize / 1024))
-        .join("nr_hugepages");
-
-    // write numpages to nr_hugepages file.
-    let numpages = format!("{}", size / pagesize);
-    info!(logger, "write {} pages to {:?}", &numpages, &path);
-
-    let mut file = OpenOptions::new()
-        .write(true)
-        .open(&path)
-        .context(format!("open nr_hugepages directory {:?}", &path))?;
-
-    file.write_all(numpages.as_bytes())
-        .context(format!("write nr_hugepages failed: {:?}", &path))?;
-
-    // Even if the write succeeds, the kernel isn't guaranteed to be
-    // able to allocate all the pages we requested. Verify that it
-    // did.
-    let verify = fs::read_to_string(&path).context(format!("reading {:?}", &path))?;
-    let allocated = verify
-        .trim_end()
-        .parse::<u64>()
-        .map_err(|_| anyhow!("Unexpected text {:?} in {:?}", &verify, &path))?;
-    if allocated != size / pagesize {
-        return Err(anyhow!(
-            "Only allocated {} of {} hugepages of size {}",
-            allocated,
-            numpages,
-            pagesize
-        ));
-    }
-
-    Ok(())
-}
-
-// Parse filesystem options string to retrieve hugepage details
-// options eg "pagesize=2048,size=107374182"
-fn get_pagesize_and_size_from_option(options: &[String]) -> Result<(u64, u64)> {
-    let mut pagesize_str: Option<&str> = None;
-    let mut size_str: Option<&str> = None;
-
-    for option in options {
-        let vars: Vec<&str> = option.trim().split(',').collect();
-
-        for var in vars {
-            if let Some(stripped) = var.strip_prefix("pagesize=") {
-                pagesize_str = Some(stripped);
-            } else if let Some(stripped) = var.strip_prefix("size=") {
-                size_str = Some(stripped);
-            }
-
-            if pagesize_str.is_some() && size_str.is_some() {
-                break;
-            }
-        }
-    }
-
-    if pagesize_str.is_none() || size_str.is_none() {
-        return Err(anyhow!("no pagesize/size options found"));
-    }
-
-    let pagesize = pagesize_str
-        .unwrap()
-        .parse::<u64>()
-        .context(format!("parse pagesize: {:?}", &pagesize_str))?;
-    let size = size_str
-        .unwrap()
-        .parse::<u64>()
-        .context(format!("parse size: {:?}", &pagesize_str))?;
-
-    Ok((pagesize, size))
-}
-
 #[derive(Debug)]
 struct VirtioFsHandler {}
 
@@ -1918,7 +1905,7 @@ mod tests {
 
         for case in data {
             let input = case.0;
-            let r = get_pagesize_and_size_from_option(&[input.to_string()]);
+            let r = EphemeralHandler::get_pagesize_and_size_from_option(&[input.to_string()]);
 
             let is_ok = case.2;
             if is_ok {
@@ -57,7 +57,7 @@ use crate::device::{
 };
 use crate::linux_abi::*;
 use crate::metrics::get_metrics;
-use crate::mount::{add_storages, baremount, update_ephemeral_mounts, STORAGE_HANDLER_LIST};
+use crate::mount::{add_storages, baremount, update_ephemeral_mounts, STORAGE_HANDLERS};
 use crate::namespace::{NSTYPEIPC, NSTYPEPID, NSTYPEUTS};
 use crate::network::setup_guest_dns;
 use crate::pci;
@@ -1579,7 +1579,7 @@ fn get_agent_details() -> AgentDetails {
     detail.init_daemon = unistd::getpid() == Pid::from_raw(1);
 
     detail.device_handlers = Vec::new();
-    detail.storage_handlers = STORAGE_HANDLER_LIST.iter().map(|x| x.to_string()).collect();
+    detail.storage_handlers = STORAGE_HANDLERS.get_handlers();
 
     detail
 }
@@ -63,7 +63,6 @@ impl StorageState {
         }
     }
 
-    #[allow(dead_code)]
     pub fn from_device(device: StorageDeviceObject) -> Self {
        Self { inner: device }
    }
@@ -160,7 +159,6 @@ impl Sandbox {
     }
 
     /// Update the storage device associated with a path.
-    #[allow(dead_code)]
     pub fn update_sandbox_storage(
         &mut self,
         path: &str,
@@ -58,7 +58,8 @@ use crate::fs::is_symlink;
 use crate::sl;
 
 /// Default permission for directories created for mountpoint.
-const MOUNT_PERM: u32 = 0o755;
+const MOUNT_DIR_PERM: u32 = 0o755;
+const MOUNT_FILE_PERM: u32 = 0o644;
 
 pub const PROC_MOUNTS_FILE: &str = "/proc/mounts";
 const PROC_FIELDS_PER_LINE: usize = 6;
@@ -187,13 +188,16 @@ pub fn create_mount_destination<S: AsRef<Path>, D: AsRef<Path>, R: AsRef<Path>>(
         .parent()
         .ok_or_else(|| Error::InvalidPath(dst.to_path_buf()))?;
     let mut builder = fs::DirBuilder::new();
-    builder.mode(MOUNT_PERM).recursive(true).create(parent)?;
+    builder
+        .mode(MOUNT_DIR_PERM)
+        .recursive(true)
+        .create(parent)?;
 
     if fs_type == "bind" {
         // The source and destination for bind mounting must be the same type: file or directory.
         if !src.as_ref().is_dir() {
             fs::OpenOptions::new()
-                .mode(MOUNT_PERM)
+                .mode(MOUNT_FILE_PERM)
                 .write(true)
                 .create(true)
                 .open(dst)?;
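The split of MOUNT_PERM into MOUNT_DIR_PERM and MOUNT_FILE_PERM reflects that a bind-mount destination must match the source type: directory mountpoints get 0o755 while a regular-file mountpoint gets 0o644. A rough, self-contained sketch of that logic follows, for illustration only; prepare_mountpoint and its exact control flow are assumptions, not the create_mount_destination implementation.

// Illustration only: directory mountpoints are created with MOUNT_DIR_PERM,
// while a bind mount of a regular file gets a file mountpoint with
// MOUNT_FILE_PERM. The function name and flow here are hypothetical.
use std::fs;
use std::io;
use std::os::unix::fs::{DirBuilderExt, OpenOptionsExt};
use std::path::Path;

const MOUNT_DIR_PERM: u32 = 0o755;
const MOUNT_FILE_PERM: u32 = 0o644;

fn prepare_mountpoint(src: &Path, dst: &Path, fs_type: &str) -> io::Result<()> {
    // Parent directories of the destination are always directories.
    if let Some(parent) = dst.parent() {
        fs::DirBuilder::new()
            .mode(MOUNT_DIR_PERM)
            .recursive(true)
            .create(parent)?;
    }

    if fs_type == "bind" && !src.is_dir() {
        // Bind-mounting a file: the mountpoint must be a file as well.
        fs::OpenOptions::new()
            .mode(MOUNT_FILE_PERM)
            .write(true)
            .create(true)
            .open(dst)?;
    } else {
        // Everything else mounts onto a directory.
        fs::DirBuilder::new()
            .mode(MOUNT_DIR_PERM)
            .recursive(true)
            .create(dst)?;
    }
    Ok(())
}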
@@ -524,6 +524,11 @@ impl<H> StorageHandlerManager<H> {
     pub fn handler(&self, id: &str) -> Option<&H> {
         self.handlers.get(id)
     }
+
+    /// Get names of registered handlers.
+    pub fn get_handlers(&self) -> Vec<String> {
+        self.handlers.keys().map(|v| v.to_string()).collect()
+    }
 }
 
 /// Join user provided volume path with kata direct-volume root path.