Alex Lyn 2025-08-12 01:49:54 +08:00 committed by GitHub
commit 02da60bb71
18 changed files with 424 additions and 376 deletions

View File

@ -42,6 +42,10 @@ jobs:
path: src/runtime-rs
needs:
- rust
- name: libs
path: src/libs
needs:
- rust
- name: agent-ctl
path: src/tools/agent-ctl
needs:

View File

@ -34,8 +34,7 @@ pub fn is_ephemeral_volume(mount: &Mount) -> bool {
mount.destination(),
),
(Some("bind"), Some(source), dest) if get_linux_mount_info(source)
.map_or(false, |info| info.fs_type == "tmpfs") &&
(Some("bind"), Some(source), dest) if get_linux_mount_info(source).is_ok_and(|info| info.fs_type == "tmpfs") &&
(is_empty_dir(source) || dest.as_path() == Path::new("/dev/shm"))
)
}
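
The change above replaces `map_or(false, …)` on the `Result` returned by `get_linux_mount_info` with `is_ok_and`, which reads as "is Ok and the predicate holds". A minimal, self-contained sketch of the equivalence (the `MountInfo` type and lookup function here are made-up stand-ins):

struct MountInfo { fs_type: String }

fn get_info(found: bool) -> Result<MountInfo, ()> {
    if found { Ok(MountInfo { fs_type: "tmpfs".into() }) } else { Err(()) }
}

fn main() {
    // Old form: Err maps to the supplied default `false`.
    let old = get_info(true).map_or(false, |info| info.fs_type == "tmpfs");
    // New form: states the intent directly and drops the dummy default.
    let new = get_info(true).is_ok_and(|info| info.fs_type == "tmpfs");
    assert_eq!(old, new);
}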

View File

@ -823,11 +823,11 @@ mod tests {
#[test]
fn test_get_linux_mount_info() {
let info = get_linux_mount_info("/sys/fs/cgroup").unwrap();
let info = get_linux_mount_info("/dev/shm").unwrap();
assert_eq!(&info.device, "tmpfs");
assert_eq!(&info.fs_type, "tmpfs");
assert_eq!(&info.path, "/sys/fs/cgroup");
assert_eq!(&info.path, "/dev/shm");
assert!(matches!(
get_linux_mount_info(""),

View File

@ -168,7 +168,7 @@ pub fn is_valid_numa_cpu(cpus: &[u32]) -> Result<bool> {
let numa_nodes = get_numa_nodes()?;
for cpu in cpus {
if numa_nodes.get(cpu).is_none() {
if !numa_nodes.contains_key(cpu) {
return Ok(false);
}
}
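
`get(cpu).is_none()` builds an `Option` only to discard it; `!contains_key(cpu)` asks the presence question directly. A small sketch with a made-up NUMA mapping:

use std::collections::HashMap;

fn main() {
    let numa_nodes: HashMap<u32, u32> = HashMap::from([(0, 0), (1, 0)]);
    for cpu in &[0u32, 5] {
        // Same result, but contains_key states the intent without an intermediate Option.
        assert_eq!(numa_nodes.get(cpu).is_none(), !numa_nodes.contains_key(cpu));
    }
}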

View File

@ -66,7 +66,7 @@ impl PCIDevices for NvidiaPCIDevice {
}
}
return nvidia_devices;
nvidia_devices
}
}
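
Dropping the trailing `return` relies on the fact that the last expression of a Rust function body is its value; Clippy flags the explicit keyword as needless_return. A tiny illustration, unrelated to the NVIDIA-specific types in the hunk:

fn visible_devices(all: &[u32]) -> Vec<u32> {
    let mut devices = Vec::new();
    for id in all {
        if *id != 0 {
            devices.push(*id);
        }
    }
    devices // tail expression; `return devices;` would be flagged as needless
}

fn main() {
    assert_eq!(visible_devices(&[0, 3, 7]), vec![3, 7]);
}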

View File

@ -7,7 +7,7 @@
use std::collections::HashMap;
use std::fs;
use std::io;
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use mockall::automock;
use pci_ids::{Classes, Vendors};
@ -61,24 +61,22 @@ pub(crate) trait MemoryResourceTrait {
impl MemoryResourceTrait for MemoryResources {
fn get_total_addressable_memory(&self, round_up: bool) -> (u64, u64) {
let mut num_bar = 0;
let mut mem_size_32bit = 0u64;
let mut mem_size_64bit = 0u64;
let mut keys: Vec<_> = self.keys().cloned().collect();
keys.sort();
for key in keys {
if key as usize >= PCI_IOV_NUM_BAR || num_bar == PCI_IOV_NUM_BAR {
for (num_bar, key) in keys.into_iter().enumerate() {
if key >= PCI_IOV_NUM_BAR || num_bar == PCI_IOV_NUM_BAR {
break;
}
num_bar += 1;
if let Some(region) = self.get(&key) {
let flags = region.flags & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
let mem_type_32bit = flags == PCI_BASE_ADDRESS_MEM_TYPE32;
let mem_type_64bit = flags == PCI_BASE_ADDRESS_MEM_TYPE64;
let mem_size = (region.end - region.start + 1) as u64;
let mem_size = region.end - region.start + 1;
if mem_type_32bit {
mem_size_32bit += mem_size;
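
Replacing the hand-rolled `num_bar` counter with `enumerate()` keeps the bar index and the sorted key in lock-step and removes a `mut` local. A reduced sketch of the loop shape (the constant value and the key list are placeholders):

const PCI_IOV_NUM_BAR: usize = 6;

fn main() {
    let keys: Vec<usize> = vec![0, 1, 2, 9];
    let mut visited = Vec::new();
    // enumerate() yields the loop count alongside each key.
    for (num_bar, key) in keys.into_iter().enumerate() {
        if key >= PCI_IOV_NUM_BAR || num_bar == PCI_IOV_NUM_BAR {
            break;
        }
        visited.push(key);
    }
    assert_eq!(visited, vec![0, 1, 2]);
}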
@ -138,10 +136,10 @@ impl PCIDeviceManager {
for entry in device_dirs {
let device_dir = entry?;
let device_address = device_dir.file_name().to_string_lossy().to_string();
if let Ok(device) = self.get_device_by_pci_bus_id(&device_address, vendor, &mut cache) {
if let Some(dev) = device {
pci_devices.push(dev);
}
if let Ok(Some(dev)) =
self.get_device_by_pci_bus_id(&device_address, vendor, &mut cache)
{
pci_devices.push(dev);
}
}
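
Because `get_device_by_pci_bus_id` returns a `Result<Option<_>, _>`, the nested `if let Ok(device)` / `if let Some(dev)` pair collapses into a single `if let Ok(Some(dev))` pattern. A standalone sketch with a made-up lookup:

fn find_device(addr: &str) -> Result<Option<String>, String> {
    if addr == "0000:ff:1f.0" {
        Ok(Some("nvidia".to_string()))
    } else {
        Ok(None)
    }
}

fn main() {
    let mut pci_devices = Vec::new();
    // One pattern matches through both the Result and the Option layer.
    if let Ok(Some(dev)) = find_device("0000:ff:1f.0") {
        pci_devices.push(dev);
    }
    assert_eq!(pci_devices.len(), 1);
}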
@ -238,7 +236,7 @@ impl PCIDeviceManager {
Ok(Some(pci_device))
}
fn parse_resources(&self, device_path: &PathBuf) -> io::Result<MemoryResources> {
fn parse_resources(&self, device_path: &Path) -> io::Result<MemoryResources> {
let content = fs::read_to_string(device_path.join("resource"))?;
let mut resources: MemoryResources = MemoryResources::new();
for (i, line) in content.lines().enumerate() {
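
Changing the parameter from `&PathBuf` to `&Path` (Clippy's ptr_arg) costs callers nothing, since `&PathBuf` deref-coerces to `&Path`, and it lets borrowed paths be passed without allocating. A minimal sketch:

use std::path::{Path, PathBuf};

// Borrowed view of a path; both PathBuf owners and Path borrows can call it.
fn resource_file(device_path: &Path) -> PathBuf {
    device_path.join("resource")
}

fn main() {
    let owned = PathBuf::from("/tmp/0000:ff:1f.0");
    let borrowed = Path::new("/tmp/0000:ff:1f.0");
    assert_eq!(resource_file(&owned), resource_file(borrowed));
}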
@ -405,6 +403,8 @@ mod tests {
#[test]
fn test_parse_resources() {
setup_mock_device_files();
let manager = PCIDeviceManager::new(MOCK_PCI_DEVICES_ROOT);
let device_path = PathBuf::from(MOCK_PCI_DEVICES_ROOT).join("0000:ff:1f.0");
@ -418,6 +418,8 @@ mod tests {
assert_eq!(resource.start, 0x00000000);
assert_eq!(resource.end, 0x0000ffff);
assert_eq!(resource.flags, 0x00000404);
cleanup_mock_device_files();
}
#[test]
@ -435,10 +437,7 @@ mod tests {
file.write_all(&vec![0; 512]).unwrap();
// It should be true
assert!(is_pcie_device(
&format!("ff:00.0"),
MOCK_SYS_BUS_PCI_DEVICES
));
assert!(is_pcie_device("ff:00.0", MOCK_SYS_BUS_PCI_DEVICES));
// Clean up
let _ = fs::remove_file(config_path);
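
The same hunk also drops a `format!` with nothing to interpolate: `format!("ff:00.0")` only allocates a `String` copy of the literal, and a plain `&str` works where the call site expected one. For illustration:

fn main() {
    // format! without placeholders just copies the literal into a String.
    let allocated = format!("ff:00.0");
    let literal = "ff:00.0";
    assert_eq!(allocated, literal);
}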

View File

@ -142,13 +142,13 @@ pub fn arch_guest_protection(
#[allow(dead_code)]
pub fn available_guest_protection() -> Result<GuestProtection, ProtectionError> {
if !Uid::effective().is_root() {
return Err(ProtectionError::NoPerms)?;
Err(ProtectionError::NoPerms)?;
}
let facilities = crate::cpu::retrieve_cpu_facilities().map_err(|err| {
ProtectionError::CheckFailed(format!(
"Error retrieving cpu facilities file : {}",
err.to_string()
err
))
})?;
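
Two small idioms land in this hunk: `format!` already routes arguments through `Display`, so the `.to_string()` on `err` was a needless allocation, and an `Err(..)?` propagates the error by itself, making the leading `return` redundant. A sketch of the first point, with a made-up error type:

use std::fmt;

struct FacilityError(String);

impl fmt::Display for FacilityError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

fn main() {
    let err = FacilityError("read failed".to_string());
    // Both lines produce the same text; the second skips an intermediate String.
    let with_to_string = format!("Error retrieving cpu facilities file : {}", err.to_string());
    let direct = format!("Error retrieving cpu facilities file : {}", err);
    assert_eq!(with_to_string, direct);
}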

View File

@ -8,7 +8,6 @@ use std::collections::HashMap;
use std::fs::File;
use std::io::{self, BufReader, Result};
use std::result::{self};
use std::u32;
use serde::Deserialize;
@ -462,12 +461,12 @@ impl Annotation {
/// update config info by annotation
pub fn update_config_by_annotation(&self, config: &mut TomlConfig) -> Result<()> {
if let Some(hv) = self.annotations.get(KATA_ANNO_CFG_RUNTIME_HYPERVISOR) {
if config.hypervisor.get(hv).is_some() {
if config.hypervisor.contains_key(hv) {
config.runtime.hypervisor_name = hv.to_string();
}
}
if let Some(ag) = self.annotations.get(KATA_ANNO_CFG_RUNTIME_AGENT) {
if config.agent.get(ag).is_some() {
if config.agent.contains_key(ag) {
config.runtime.agent_name = ag.to_string();
}
}
@ -943,8 +942,7 @@ impl Annotation {
}
}
KATA_ANNO_CFG_HYPERVISOR_VIRTIO_FS_EXTRA_ARGS => {
let args: Vec<String> =
value.to_string().split(',').map(str::to_string).collect();
let args: Vec<String> = value.split(',').map(str::to_string).collect();
for arg in args {
hv.shared_fs.virtio_fs_extra_args.push(arg.to_string());
}
@ -970,7 +968,7 @@ impl Annotation {
// update agent config
KATA_ANNO_CFG_KERNEL_MODULES => {
let kernel_mod: Vec<String> =
value.to_string().split(';').map(str::to_string).collect();
value.split(';').map(str::to_string).collect();
for modules in kernel_mod {
ag.kernel_modules.push(modules.to_string());
}
@ -991,14 +989,16 @@ impl Annotation {
return Err(u32_err);
}
},
KATA_ANNO_CFG_RUNTIME_CREATE_CONTAINTER_TIMEOUT => match self.get_value::<u32>(key) {
Ok(v) => {
ag.request_timeout_ms = v.unwrap_or_default() * 1000;
KATA_ANNO_CFG_RUNTIME_CREATE_CONTAINTER_TIMEOUT => {
match self.get_value::<u32>(key) {
Ok(v) => {
ag.request_timeout_ms = v.unwrap_or_default() * 1000;
}
Err(_e) => {
return Err(u32_err);
}
}
Err(_e) => {
return Err(u32_err);
}
},
}
// update runtime config
KATA_ANNO_CFG_RUNTIME_NAME => {
let runtime = vec!["virt-container", "linux-container", "wasm-container"];
@ -1031,8 +1031,7 @@ impl Annotation {
}
},
KATA_ANNO_CFG_EXPERIMENTAL => {
let args: Vec<String> =
value.to_string().split(',').map(str::to_string).collect();
let args: Vec<String> = value.split(',').map(str::to_string).collect();
for arg in args {
config.runtime.experimental.push(arg.to_string());
}
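
Several arms in this file stop calling `value.to_string()` before `split`: splitting works on the borrowed `&str` directly, and owned `String`s are only produced where they are actually stored. A reduced sketch:

fn main() {
    let value = "init=/bin/sh,quiet";
    // Split the &str in place; allocate only when collecting the owned pieces.
    let args: Vec<String> = value.split(',').map(str::to_string).collect();
    assert_eq!(args, vec!["init=/bin/sh", "quiet"]);
}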

View File

@ -115,7 +115,10 @@ pub struct Agent {
/// This timeout value is used to set the maximum duration for the agent to process a CreateContainerRequest.
/// It's also used to ensure that workloads, especially those involving large image pulls within the guest,
/// have sufficient time to complete.
#[serde(default = "default_request_timeout", rename = "create_container_timeout")]
#[serde(
default = "default_request_timeout",
rename = "create_container_timeout"
)]
pub request_timeout_ms: u32,
/// Agent health check request timeout value in millisecond
@ -127,12 +130,12 @@ pub struct Agent {
/// These modules will be loaded in the guest kernel using modprobe(8).
/// The following example can be used to load two kernel modules with parameters:
/// - kernel_modules=["e1000e InterruptThrottleRate=3000,3000,3000 EEE=1", "i915 enable_ppgtt=0"]
/// The first word is considered as the module name and the rest as its parameters.
/// Container will not be started when:
/// The first word is considered as the module name and the rest as its parameters.
/// Container will not be started when:
/// - A kernel module is specified and the modprobe command is not installed in the guest
/// or it fails loading the module.
/// - The module is not available in the guest or it doesn't met the guest kernel
/// requirements, like architecture and version.
/// requirements, like architecture and version.
#[serde(default)]
pub kernel_modules: Vec<String>,

View File

@ -6,7 +6,6 @@
use std::io::Result;
use std::path::Path;
use std::sync::Arc;
use std::u32;
use super::{default, register_hypervisor_plugin};
use crate::config::default::MAX_DRAGONBALL_VCPUS;

File diff suppressed because it is too large

View File

@ -9,7 +9,6 @@ use std::fs;
use std::io::{self, Result};
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex};
use std::u32;
use lazy_static::lazy_static;
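
The `use std::u32;` lines removed in this and the two files above import the legacy numeric module; since Rust 1.43 limits such as `MAX` are associated constants on the primitive itself, so the module import has nothing left to provide here. For reference:

fn main() {
    // u32::MAX as an associated constant; no `use std::u32;` needed.
    assert_eq!(u32::MAX, 4_294_967_295);
}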

View File

@ -129,20 +129,20 @@ fn calculate_digest(algorithm: &str, data: &str) -> Result<Vec<u8>> {
let digest = match algorithm {
"sha256" => {
let mut hasher = Sha256::new();
hasher.update(&data);
hasher.update(data);
hasher.finalize().to_vec()
}
"sha384" => {
let mut hasher = Sha384::new();
hasher.update(&data);
hasher.update(data);
hasher.finalize().to_vec()
}
"sha512" => {
let mut hasher = Sha512::new();
hasher.update(&data);
hasher.update(data);
hasher.finalize().to_vec()
}
_ => return Err(anyhow!("unsupported Hash algorithm: {}", algorithm).into()),
_ => return Err(anyhow!("unsupported Hash algorithm: {}", algorithm)),
};
Ok(digest)
@ -172,7 +172,7 @@ fn adjust_digest(digest: &[u8], platform: ProtectedPlatform) -> Vec<u8> {
/// Parse initdata
fn parse_initdata(initdata_str: &str) -> Result<InitData> {
let initdata: InitData = toml::from_str(&initdata_str)?;
let initdata: InitData = toml::from_str(initdata_str)?;
initdata.validate()?;
Ok(initdata)
@ -192,7 +192,7 @@ pub fn calculate_initdata_digest(
let algorithm: &str = &initdata.algorithm;
// 2. Calculate Digest
let digest = calculate_digest(algorithm, &initdata_toml).context("calculate digest")?;
let digest = calculate_digest(algorithm, initdata_toml).context("calculate digest")?;
// 3. Adjust Digest with Platform
let digest_platform = adjust_digest(&digest, platform);
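
The dropped `&` in `hasher.update(&data)` and `toml::from_str(&initdata_str)` removes a borrow of something that is already a reference; both forms compile (auto-deref and the blanket `AsRef` impl for references take care of it), so Clippy's needless_borrow flags the extra layer. A self-contained sketch, with a hypothetical `digest_len` standing in for the hashers' `update`:

// Generic over AsRef<[u8]>, like the update() methods of the SHA hashers above.
fn digest_len(data: impl AsRef<[u8]>) -> usize {
    data.as_ref().len()
}

fn main() {
    let data: &str = "algorithm = \"sha384\"";
    // &&str also satisfies AsRef<[u8]>, but the second borrow buys nothing.
    assert_eq!(digest_len(data), digest_len(&data));
}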

View File

@ -205,47 +205,48 @@ pub struct NydusImageVolume {
pub snapshot_dir: String,
}
/// Kata virtual volume to encapsulate information for extra mount options and direct volumes.
/// Represents a Kata virtual volume, encapsulating information for extra mount options and direct volumes.
///
/// It's very expensive to build direct communication channels to pass information:
/// - between snapshotters and kata-runtime/kata-agent/image-rs
/// - between CSI drivers and kata-runtime/kata-agent
/// Direct communication channels between components like snapshotters, `kata-runtime`, `kata-agent`,
/// `image-rs`, and CSI drivers are often expensive to build and maintain.
///
/// So `KataVirtualVolume` is introduced to encapsulate extra mount options and direct volume
/// information, so we can build a common infrastructure to handle them.
/// `KataVirtualVolume` is a superset of `NydusExtraOptions` and `DirectVolumeMountInfo`.
/// Therefore, `KataVirtualVolume` is introduced as a common infrastructure to encapsulate
/// additional mount options and direct volume information. It serves as a superset of
/// `NydusExtraOptions` and `DirectVolumeMountInfo`.
///
/// Value of `volume_type` determines how to interpret other fields in the structure.
/// The interpretation of other fields within this structure is determined by the `volume_type` field.
///
/// - `KATA_VIRTUAL_VOLUME_IGNORE`
/// -- all other fields should be ignored/unused.
/// # Volume Types:
///
/// - `KATA_VIRTUAL_VOLUME_DIRECT_BLOCK`
/// -- `source`: the directly assigned block device
/// -- `fs_type`: filesystem type
/// -- `options`: mount options
/// -- `direct_volume`: additional metadata to pass to the agent regarding this volume.
/// - `KATA_VIRTUAL_VOLUME_IGNORE`:
/// All other fields should be ignored/unused.
///
/// - `KATA_VIRTUAL_VOLUME_IMAGE_RAW_BLOCK` or `KATA_VIRTUAL_VOLUME_LAYER_RAW_BLOCK`
/// -- `source`: path to the raw block image for the container image or layer.
/// -- `fs_type`: filesystem type
/// -- `options`: mount options
/// -- `dm_verity`: disk dm-verity information
/// - `KATA_VIRTUAL_VOLUME_DIRECT_BLOCK`:
/// - `source`: The directly assigned block device path.
/// - `fs_type`: Filesystem type.
/// - `options`: Mount options.
/// - `direct_volume`: Additional metadata to pass to the agent regarding this volume.
///
/// - `KATA_VIRTUAL_VOLUME_IMAGE_NYDUS_BLOCK` or `KATA_VIRTUAL_VOLUME_LAYER_NYDUS_BLOCK`
/// -- `source`: path to nydus meta blob
/// -- `fs_type`: filesystem type
/// -- `nydus_image`: configuration information for nydus image.
/// -- `dm_verity`: disk dm-verity information
/// - `KATA_VIRTUAL_VOLUME_IMAGE_RAW_BLOCK` or `KATA_VIRTUAL_VOLUME_LAYER_RAW_BLOCK`:
/// - `source`: Path to the raw block image for the container image or layer.
/// - `fs_type`: Filesystem type.
/// - `options`: Mount options.
/// - `dm_verity`: Disk `dm-verity` information.
///
/// - `KATA_VIRTUAL_VOLUME_IMAGE_NYDUS_FS` or `KATA_VIRTUAL_VOLUME_LAYER_NYDUS_FS`
/// -- `source`: path to nydus meta blob
/// -- `fs_type`: filesystem type
/// -- `nydus_image`: configuration information for nydus image.
/// - `KATA_VIRTUAL_VOLUME_IMAGE_NYDUS_BLOCK` or `KATA_VIRTUAL_VOLUME_LAYER_NYDUS_BLOCK`:
/// - `source`: Path to nydus meta blob.
/// - `fs_type`: Filesystem type.
/// - `nydus_image`: Configuration information for nydus image.
/// - `dm_verity`: Disk `dm-verity` information.
///
/// - `KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL`
/// -- `source`: image reference
/// -- `image_pull`: metadata for image pulling
/// - `KATA_VIRTUAL_VOLUME_IMAGE_NYDUS_FS` or `KATA_VIRTUAL_VOLUME_LAYER_NYDUS_FS`:
/// - `source`: Path to nydus meta blob.
/// - `fs_type`: Filesystem type.
/// - `nydus_image`: Configuration information for nydus image.
///
/// - `KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL`:
/// - `source`: Image reference.
/// - `image_pull`: Metadata for image pulling.
#[derive(Debug, Clone, Eq, PartialEq, Default, Serialize, Deserialize)]
pub struct KataVirtualVolume {
/// Type of virtual volume.
@ -275,7 +276,7 @@ pub struct KataVirtualVolume {
}
impl KataVirtualVolume {
/// Create a new instance of `KataVirtualVolume` with specified type.
/// Creates a new instance of `KataVirtualVolume` with the specified type.
pub fn new(volume_type: String) -> Self {
Self {
volume_type,
@ -283,7 +284,7 @@ impl KataVirtualVolume {
}
}
/// Validate virtual volume object.
/// Validates the virtual volume object.
pub fn validate(&self) -> Result<()> {
match self.volume_type.as_str() {
KATA_VIRTUAL_VOLUME_DIRECT_BLOCK => {
@ -365,25 +366,25 @@ impl KataVirtualVolume {
Ok(())
}
/// Serialize the virtual volume object to json.
/// Serializes the virtual volume object to a JSON string.
pub fn to_json(&self) -> Result<String> {
Ok(serde_json::to_string(self)?)
}
/// Deserialize a virtual volume object from json string.
/// Deserializes a virtual volume object from a JSON string.
pub fn from_json(value: &str) -> Result<Self> {
let volume: KataVirtualVolume = serde_json::from_str(value)?;
volume.validate()?;
Ok(volume)
}
/// Serialize the virtual volume object to json and encode the string with base64.
/// Serializes the virtual volume object to a JSON string and encodes the string with base64.
pub fn to_base64(&self) -> Result<String> {
let json = self.to_json()?;
Ok(base64::encode(json))
}
/// Decode and deserialize a virtual volume object from base64 encoded json string.
/// Decodes and deserializes a virtual volume object from a base64 encoded JSON string.
pub fn from_base64(value: &str) -> Result<Self> {
let json = base64::decode(value)?;
let volume: KataVirtualVolume = serde_json::from_slice(&json)?;
@ -453,18 +454,18 @@ impl TryFrom<&NydusExtraOptions> for KataVirtualVolume {
}
}
/// Trait object for storage device.
/// Trait object for a storage device.
pub trait StorageDevice: Send + Sync {
/// Path
/// Returns the path of the storage device, if available.
fn path(&self) -> Option<&str>;
/// Clean up resources related to the storage device.
/// Cleans up resources related to the storage device.
fn cleanup(&self) -> Result<()>;
}
/// Join user provided volume path with kata direct-volume root path.
/// Joins a user-provided volume path with the Kata direct-volume root path.
///
/// The `volume_path` is base64-url-encoded and then safely joined to the `prefix`
/// The `volume_path` is base64-url-encoded and then safely joined to the `prefix`.
pub fn join_path(prefix: &str, volume_path: &str) -> Result<PathBuf> {
if volume_path.is_empty() {
return Err(anyhow!(std::io::ErrorKind::NotFound));
@ -474,7 +475,7 @@ pub fn join_path(prefix: &str, volume_path: &str) -> Result<PathBuf> {
Ok(safe_path::scoped_join(prefix, b64_url_encoded_path)?)
}
/// get DirectVolume mountInfo from mountinfo.json.
/// Gets `DirectVolumeMountInfo` from `mountinfo.json`.
pub fn get_volume_mount_info(volume_path: &str) -> Result<DirectVolumeMountInfo> {
let volume_path = join_path(KATA_DIRECT_VOLUME_ROOT_PATH, volume_path)?;
let mount_info_file_path = volume_path.join(KATA_MOUNT_INFO_FILE_NAME);
@ -484,28 +485,30 @@ pub fn get_volume_mount_info(volume_path: &str) -> Result<DirectVolumeMountInfo>
Ok(mount_info)
}
/// Check whether a mount type is a marker for Kata specific volume.
/// Checks whether a mount type is a marker for a Kata specific volume.
pub fn is_kata_special_volume(ty: &str) -> bool {
ty.len() > KATA_VOLUME_TYPE_PREFIX.len() && ty.starts_with(KATA_VOLUME_TYPE_PREFIX)
}
/// Check whether a mount type is a marker for Kata guest mount volume.
/// Checks whether a mount type is a marker for a Kata guest mount volume.
pub fn is_kata_guest_mount_volume(ty: &str) -> bool {
ty.len() > KATA_GUEST_MOUNT_PREFIX.len() && ty.starts_with(KATA_GUEST_MOUNT_PREFIX)
}
/// Check whether a mount type is a marker for Kata ephemeral volume.
/// Checks whether a mount type is a marker for a Kata ephemeral volume.
pub fn is_kata_ephemeral_volume(ty: &str) -> bool {
ty == KATA_EPHEMERAL_VOLUME_TYPE
}
/// Check whether a mount type is a marker for Kata hostdir volume.
/// Checks whether a mount type is a marker for a Kata hostdir volume.
pub fn is_kata_host_dir_volume(ty: &str) -> bool {
ty == KATA_HOST_DIR_VOLUME_TYPE
}
/// sandbox bindmount format: /path/to/dir, or /path/to/dir:ro[:rw]
/// the real path is without suffix ":ro" or ":rw".
/// Splits a sandbox bindmount string into its real path and mode.
///
/// The `bindmount` format is typically `/path/to/dir` or `/path/to/dir:ro[:rw]`.
/// This function extracts the real path (without the suffix ":ro" or ":rw") and the mode.
pub fn split_bind_mounts(bindmount: &str) -> (&str, &str) {
let (real_path, mode) = if bindmount.ends_with(SANDBOX_BIND_MOUNTS_RO) {
(
@ -525,12 +528,14 @@ pub fn split_bind_mounts(bindmount: &str) -> (&str, &str) {
(real_path, mode)
}
/// This function, adjust_rootfs_mounts, manages the root filesystem mounts based on guest-pull mechanism.
/// - the function disregards any provided rootfs_mounts.
/// Instead, it forcefully creates a single, default KataVirtualVolume specifically for guest-pull operations.
/// This volume's representation is then base64-encoded and added as the only option to a new, singular Mount entry,
/// which becomes the sole item in the returned Vec<Mount>.
/// This ensures that when guest pull is active, the root filesystem is exclusively configured via this virtual volume.
/// Adjusts the root filesystem mounts based on the guest-pull mechanism.
///
/// This function disregards any provided `rootfs_mounts`. Instead, it forcefully creates
/// a single, default `KataVirtualVolume` specifically for guest-pull operations.
/// This volume's representation is then base64-encoded and added as the only option
/// to a new, singular `Mount` entry, which becomes the sole item in the returned `Vec<Mount>`.
/// This ensures that when guest pull is active, the root filesystem is exclusively
/// configured via this virtual volume.
pub fn adjust_rootfs_mounts() -> Result<Vec<Mount>> {
// We enforce a single, default KataVirtualVolume as the exclusive rootfs mount.
let volume = KataVirtualVolume::new(KATA_VIRTUAL_VOLUME_IMAGE_GUEST_PULL.to_string());
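
Taken together, the constructor and serialization helpers documented above are meant to round-trip a volume description through the base64-encoded JSON form carried in mount options. A rough sketch under stated assumptions: the import path, the suitability of `KATA_VIRTUAL_VOLUME_IGNORE` for a trivial round-trip, and the use of `anyhow::Result` are guesses from context, not confirmed by this diff:

// Hypothetical import path; the type, constant and methods are the ones documented above.
use kata_types::mount::{KataVirtualVolume, KATA_VIRTUAL_VOLUME_IGNORE};

fn main() -> anyhow::Result<()> {
    // An "ignore" volume carries no extra fields, so it should validate as-is.
    let volume = KataVirtualVolume::new(KATA_VIRTUAL_VOLUME_IGNORE.to_string());

    // to_base64() -> base64(JSON); from_base64() decodes and re-validates.
    let encoded = volume.to_base64()?;
    let decoded = KataVirtualVolume::from_base64(&encoded)?;
    assert_eq!(volume, decoded);
    Ok(())
}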

View File

@ -115,7 +115,7 @@ impl From<oci::PosixRlimit> for grpc::POSIXRlimit {
impl From<oci::Process> for grpc::Process {
fn from(from: oci::Process) -> Self {
grpc::Process {
Terminal: from.terminal().map_or(false, |t| t),
Terminal: from.terminal().is_some_and(|t| t),
ConsoleSize: from_option(from.console_size()),
User: from_option(Some(from.user().clone())),
Args: option_vec_to_vec(from.args()),
@ -161,7 +161,7 @@ impl From<oci::LinuxMemory> for grpc::LinuxMemory {
Kernel: from.kernel().map_or(0, |t| t),
KernelTCP: from.kernel_tcp().map_or(0, |t| t),
Swappiness: from.swappiness().map_or(0, |t| t),
DisableOOMKiller: from.disable_oom_killer().map_or(false, |t| t),
DisableOOMKiller: from.disable_oom_killer().is_some_and(|t| t),
..Default::default()
}
}
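
`map_or(false, |t| t)` on an `Option<bool>` is the textbook case for `is_some_and(|t| t)` (or the equivalent `unwrap_or(false)`), which is what the conversion code now uses for `Terminal` and `DisableOOMKiller`. A standalone check of the equivalence:

fn main() {
    for flag in [Some(true), Some(false), None] {
        // All three spellings agree for every Option<bool> value.
        assert_eq!(flag.map_or(false, |t| t), flag.is_some_and(|t| t));
        assert_eq!(flag.is_some_and(|t| t), flag.unwrap_or(false));
    }
}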

View File

@ -355,6 +355,7 @@ mod tests {
.read(false)
.write(true)
.create(true)
.truncate(true)
.mode(0o200)
.open(&path)
.unwrap();
@ -376,6 +377,7 @@ mod tests {
.read(false)
.write(true)
.create(true)
.truncate(true)
.mode(0o200)
.open(&path)
.unwrap();
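
Adding `.truncate(true)` next to `.create(true)` makes the tests explicit about what happens when the file already exists; Clippy warns when `create(true)` is combined with write access and neither `truncate` nor `append` is chosen. A minimal sketch of why the flag matters:

use std::fs::OpenOptions;
use std::io::Write;

fn main() -> std::io::Result<()> {
    let path = std::env::temp_dir().join("truncate-demo.txt");
    std::fs::write(&path, "old longer contents")?;

    // Without truncate(true), writing the shorter string would leave
    // stale bytes from the previous contents at the end of the file.
    let mut f = OpenOptions::new()
        .write(true)
        .create(true)
        .truncate(true)
        .open(&path)?;
    f.write_all(b"new")?;
    drop(f);

    assert_eq!(std::fs::read_to_string(&path)?, "new");
    std::fs::remove_file(&path)
}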

View File

@ -90,7 +90,7 @@ pub fn mgmt_socket_addr(sid: &str) -> Result<String> {
));
}
get_uds_with_sid(sid, &sb_storage_path()?)
get_uds_with_sid(sid, sb_storage_path()?)
}
#[cfg(test)]

View File

@ -135,6 +135,7 @@ mapping:
- Static checks / build-checks / check (make check, dragonball, src/dragonball, rust)
- Static checks / build-checks / check (make check, genpolicy, src/tools/genpolicy, rust, protobuf-compiler)
- Static checks / build-checks / check (make check, kata-ctl, src/tools/kata-ctl, rust)
- Static checks / build-checks / check (make check, libs, src/libs, rust)
- Static checks / build-checks / check (make check, runtime-rs, src/runtime-rs, rust)
- Static checks / build-checks / check (make check, runtime, src/runtime, golang, XDG_RUNTIME_DIR)
- Static checks / build-checks / check (make check, trace-forwarder, src/tools/trace-forwarder, rust)
@ -143,6 +144,7 @@ mapping:
- Static checks / build-checks / check (make test, dragonball, src/dragonball, rust)
- Static checks / build-checks / check (make test, genpolicy, src/tools/genpolicy, rust, protobuf-compiler)
- Static checks / build-checks / check (make test, kata-ctl, src/tools/kata-ctl, rust)
- Static checks / build-checks / check (make test, libs, src/libs, rust)
- Static checks / build-checks / check (make test, runtime-rs, src/runtime-rs, rust)
- Static checks / build-checks / check (make test, runtime, src/runtime, golang, XDG_RUNTIME_DIR)
- Static checks / build-checks / check (make test, trace-forwarder, src/tools/trace-forwarder, rust)
@ -159,6 +161,7 @@ mapping:
- Static checks / build-checks / check (sudo -E PATH="$PATH" make test, dragonball, src/dragonball, rust)
- Static checks / build-checks / check (sudo -E PATH="$PATH" make test, genpolicy, src/tools/genpolicy, rust, protobuf-compiler)
- Static checks / build-checks / check (sudo -E PATH="$PATH" make test, kata-ctl, src/tools/kata-ctl, rust)
- Static checks / build-checks / check (sudo -E PATH="$PATH" make test, libs, src/libs, rust)
- Static checks / build-checks / check (sudo -E PATH="$PATH" make test, runtime-rs, src/runtime-rs, rust)
- Static checks / build-checks / check (sudo -E PATH="$PATH" make test, runtime, src/runtime, golang, XDG_RUNTIME_DIR)
- Static checks / build-checks / check (sudo -E PATH="$PATH" make test, trace-forwarder, src/tools/trace-forwarder, rust)