agent: avoid creating new Vec instances when easily avoidable

There are many places where the code currently creates new `Vec`
instances when it isn't needed. The result is a performance hit: the
code allocates memory, copies all the elements, and then frees the
memory; in some cases, copying the elements also involves extra
allocations (e.g., when the elements are strings, or structs that
contain strings).

This patch addresses a number of these cases.
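The general shape of the changes is sketched below. This is a minimal,
hypothetical example (the function names are illustrative, not code
from this patch): read-only parameters take a borrowed slice or an
iterator of string-like items instead of an owned `Vec`, so callers can
pass what they already have without cloning.

    // Illustrative sketch only -- not code from this patch.

    // Before: taking an owned Vec forces callers to allocate and copy,
    // even though the function only reads the data.
    fn options_string_owned(options: Vec<String>) -> String {
        options.join(",")
    }

    // After: borrowing a slice lets callers pass existing data as-is.
    fn options_string(options: &[String]) -> String {
        options.join(",")
    }

    // Or accept any iterator of string-like items, so a filtered or
    // mapped view can be passed without collecting into a temporary Vec.
    fn options_string_iter(options: impl Iterator<Item = impl AsRef<str>>) -> String {
        let mut out = String::new();
        for opt in options {
            if !out.is_empty() {
                out.push(',');
            }
            out.push_str(opt.as_ref());
        }
        out
    }

    fn main() {
        let opts = vec!["ro".to_string(), "nosuid".to_string(), "fsgid=100".to_string()];
        // No clone of the Vec or its Strings is needed for either call.
        assert_eq!(options_string(&opts), "ro,nosuid,fsgid=100");
        // A filtered view is passed directly, with no intermediate Vec.
        let filtered = options_string_iter(opts.iter().filter(|o| !o.starts_with("fsgid=")));
        assert_eq!(filtered, "ro,nosuid");
    }

The iterator form mirrors the new `parse_mount_flags_and_options`
signature introduced by this patch.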

Fixes: #7203

Signed-off-by: Wedson Almeida Filho <walmeida@microsoft.com>
Author: Wedson Almeida Filho 2023-06-28 01:34:57 -03:00
parent 311671abb5
commit 729b2dd611
3 changed files with 38 additions and 48 deletions


@@ -41,6 +41,7 @@ pub const MOUNT_GUEST_TAG: &str = "kataShared";
// Allocating an FSGroup that owns the pod's volumes
const FS_GID: &str = "fsgid";
const FS_GID_EQ: &str = "fsgid=";
const SYS_FS_HUGEPAGES_PREFIX: &str = "/sys/kernel/mm/hugepages";
const RW_MASK: u32 = 0o660;
@@ -272,10 +273,10 @@ async fn ephemeral_storage_handler(
#[instrument]
pub async fn update_ephemeral_mounts(
logger: Logger,
storages: Vec<Storage>,
storages: &[Storage],
_sandbox: &Arc<Mutex<Sandbox>>,
) -> Result<()> {
for (_, storage) in storages.iter().enumerate() {
for storage in storages {
let handler_name = &storage.driver;
let logger = logger.new(o!(
"msg" => "updating tmpfs storage",
@@ -290,18 +291,13 @@ pub async fn update_ephemeral_mounts(
continue;
} else {
// assume that fsGid has already been set
let mut opts = Vec::new();
for (_, opt) in storage.options.iter().enumerate() {
let fields: Vec<&str> = opt.split('=').collect();
if fields.len() == 2 && fields[0] == FS_GID {
continue;
}
opts.push(opt.as_str())
}
let (flags, options) = parse_mount_flags_and_options(&opts);
let mount_path = Path::new(&storage.mount_point);
let src_path = Path::new(&storage.source);
let opts = storage
.options
.iter()
.filter(|&opt| !opt.starts_with(FS_GID_EQ));
let (flags, options) = parse_mount_flags_and_options(opts);
info!(logger, "mounting storage";
"mount-source" => src_path.display(),
@@ -420,7 +416,7 @@ async fn handle_hugetlbfs_storage(logger: &Logger, storage: &Storage) -> Result<
// /sys/kernel/mm/hugepages/hugepages-1048576kB/nr_hugepages
// /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
// options eg "pagesize=2097152,size=524288000"(2M, 500M)
allocate_hugepages(logger, &storage.options.to_vec()).context("allocate hugepages")?;
allocate_hugepages(logger, &storage.options).context("allocate hugepages")?;
common_storage_handler(logger, storage)?;
@@ -675,7 +671,7 @@ fn mount_storage(logger: &Logger, storage: &Storage) -> Result<()> {
.map_err(anyhow::Error::from)
.context("Could not create mountpoint")?;
}
let (flags, options) = parse_mount_flags_and_options(&storage.options);
let (flags, options) = parse_mount_flags_and_options(storage.options.iter());
info!(logger, "mounting storage";
"mount-source" => src_path.display(),
@@ -802,11 +798,13 @@ pub fn is_mounted(mount_point: &str) -> Result<bool> {
}
#[instrument]
fn parse_mount_flags_and_options<S: AsRef<str> + Debug>(options_vec: &[S]) -> (MsFlags, String) {
fn parse_mount_flags_and_options(
opts_iter: impl Iterator<Item = impl AsRef<str>> + Debug,
) -> (MsFlags, String) {
let mut flags = MsFlags::empty();
let mut options: String = "".to_string();
for opt in options_vec {
for opt in opts_iter {
let opt = opt.as_ref();
if !opt.is_empty() {
match FLAGS.get(opt) {
@@ -912,7 +910,7 @@ pub async fn add_storages(
#[instrument]
fn mount_to_rootfs(logger: &Logger, m: &InitMount) -> Result<()> {
let (flags, options) = parse_mount_flags_and_options(&m.options);
let (flags, options) = parse_mount_flags_and_options(m.options.iter());
fs::create_dir_all(m.dest).context("could not create directory")?;
@@ -1121,7 +1119,7 @@ fn ensure_destination_file_exists(path: &Path) -> Result<()> {
#[instrument]
fn parse_options(option_list: &[String]) -> HashMap<String, String> {
let mut options = HashMap::new();
for opt in option_list.iter() {
for opt in option_list {
let fields: Vec<&str> = opt.split('=').collect();
if fields.len() == 2 {
options.insert(fields[0].to_string(), fields[1].to_string());
@@ -2043,7 +2041,7 @@ mod tests {
for (i, d) in tests.iter().enumerate() {
let msg = format!("test[{}]: {:?}", i, d);
let result = parse_mount_flags_and_options(&d.options_vec);
let result = parse_mount_flags_and_options(d.options_vec.iter());
let msg = format!("{}: result: {:?}", msg, result);


@@ -29,7 +29,7 @@ impl Network {
}
}
pub fn setup_guest_dns(logger: Logger, dns_list: Vec<String>) -> Result<()> {
pub fn setup_guest_dns(logger: Logger, dns_list: &[String]) -> Result<()> {
do_setup_guest_dns(
logger,
dns_list,
@@ -38,7 +38,7 @@ pub fn setup_guest_dns(logger: Logger, dns_list: Vec<String>) -> Result<()> {
)
}
fn do_setup_guest_dns(logger: Logger, dns_list: Vec<String>, src: &str, dst: &str) -> Result<()> {
fn do_setup_guest_dns(logger: Logger, dns_list: &[String], src: &str, dst: &str) -> Result<()> {
let logger = logger.new(o!( "subsystem" => "network"));
if dns_list.is_empty() {
@@ -124,7 +124,7 @@ mod tests {
.expect("failed to write file contents");
// call do_setup_guest_dns
let result = do_setup_guest_dns(logger, dns.clone(), src_filename, dst_filename);
let result = do_setup_guest_dns(logger, &dns, src_filename, dst_filename);
assert!(result.is_ok(), "result should be ok, but {:?}", result);


@@ -305,21 +305,19 @@ impl AgentService {
let handle = tokio::spawn(async move {
let mut sandbox = s.lock().await;
sandbox.bind_watcher.remove_container(&cid2).await;
match sandbox.get_container(&cid2) {
Some(ctr) => ctr.destroy().await,
None => Err(anyhow!("Invalid container id")),
}
sandbox
.get_container(&cid2)
.ok_or_else(|| anyhow!("Invalid container id"))?
.destroy()
.await
});
let to = Duration::from_secs(req.timeout.into());
match tokio::time::timeout(to, handle).await {
Ok(res) => {
res??;
let mut sandbox = self.sandbox.lock().await;
remove_container_resources(&mut sandbox, &cid)
}
Err(_e) => Err(anyhow!(nix::Error::ETIME)),
}
tokio::time::timeout(to, handle)
.await
.map_err(|_| anyhow!(nix::Error::ETIME))???;
remove_container_resources(&mut *self.sandbox.lock().await, &cid)
}
#[instrument]
@@ -964,7 +962,7 @@ impl agent_ttrpc::AgentService for AgentService {
trace_rpc_call!(ctx, "update_mounts", req);
is_allowed(&req)?;
match update_ephemeral_mounts(sl(), req.storages.to_vec(), &self.sandbox).await {
match update_ephemeral_mounts(sl(), &req.storages, &self.sandbox).await {
Ok(_) => Ok(Empty::new()),
Err(e) => Err(ttrpc_error(
ttrpc::Code::INTERNAL,
@@ -1224,14 +1222,12 @@ impl agent_ttrpc::AgentService for AgentService {
Err(e) => return Err(ttrpc_error(ttrpc::Code::INTERNAL, e)),
};
match setup_guest_dns(sl(), req.dns.to_vec()) {
match setup_guest_dns(sl(), &req.dns) {
Ok(_) => {
let mut s = self.sandbox.lock().await;
let _dns = req
.dns
.to_vec()
.iter()
.map(|dns| s.network.set_dns(dns.to_string()));
for dns in req.dns {
s.network.set_dns(dns);
}
}
Err(e) => return Err(ttrpc_error(ttrpc::Code::INTERNAL, e)),
};
@@ -1626,11 +1622,7 @@ fn get_agent_details() -> AgentDetails {
detail.init_daemon = unistd::getpid() == Pid::from_raw(1);
detail.device_handlers = Vec::new();
detail.storage_handlers = STORAGE_HANDLER_LIST
.to_vec()
.iter()
.map(|x| x.to_string())
.collect();
detail.storage_handlers = STORAGE_HANDLER_LIST.iter().map(|x| x.to_string()).collect();
detail
}
@@ -1978,10 +1970,10 @@ fn load_kernel_module(module: &protocols::agent::KernelModule) -> Result<()> {
"load_kernel_module {}: {:?}", module.name, module.parameters
);
let mut args = vec!["-v".to_string(), module.name.clone()];
let mut args = vec!["-v", &module.name];
if !module.parameters.is_empty() {
args.extend(module.parameters.to_vec())
args.extend(module.parameters.iter().map(String::as_str));
}
let output = Command::new(MODPROBE_PATH)