Mirror of https://github.com/kata-containers/kata-containers.git, synced 2025-05-01 21:24:36 +00:00
runtime: mount direct-assigned block device fs only once
Mount the direct-assigned block device filesystem only once and keep a refcount in the guest. Also use the "ro" flag inside the options field to determine whether the block device and filesystem should be mounted read-only.

Fixes: #3454

Signed-off-by: Feng Wang <feng.wang@databricks.com>
This commit is contained in:
parent 27fb490228
commit f905161bbb
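The "refcount in the guest" mentioned above is tracked by the agent's Sandbox via set_sandbox_storage, which the hunks below move from the individual storage handlers into add_storages. The diff does not show the bookkeeping itself; the following is a minimal Rust sketch of the described semantics, assuming a simple mount-point-to-count map rather than the agent's actual data structures:

use std::collections::HashMap;

/// Minimal sketch of the guest-side refcounting described in the commit
/// message. The field name and map type are assumptions, not the agent's
/// real implementation.
struct Sandbox {
    storages: HashMap<String, u32>,
}

impl Sandbox {
    /// Register one more user of `path`. Returns true only for the first
    /// user, i.e. only when the caller should actually perform the mount.
    fn set_sandbox_storage(&mut self, path: &str) -> bool {
        let count = self.storages.entry(path.to_string()).or_insert(0);
        *count += 1;
        *count == 1
    }
}

fn main() {
    let mut sb = Sandbox { storages: HashMap::new() };
    // Example guest mount point for a direct-assigned block device.
    let mp = "/run/kata-containers/sandbox/storage/L2Rldi9zZGE=";
    assert!(sb.set_sandbox_storage(mp));  // first container: mount it
    assert!(!sb.set_sandbox_storage(mp)); // second container: reuse, just bump the count
}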
@@ -193,13 +193,6 @@ async fn ephemeral_storage_handler(
     storage: &Storage,
     sandbox: Arc<Mutex<Sandbox>>,
 ) -> Result<String> {
-    let mut sb = sandbox.lock().await;
-    let new_storage = sb.set_sandbox_storage(&storage.mount_point);
-
-    if !new_storage {
-        return Ok("".to_string());
-    }
-
     // hugetlbfs
     if storage.fstype == FS_TYPE_HUGETLB {
         return handle_hugetlbfs_storage(logger, storage).await;
@@ -255,13 +248,6 @@ async fn local_storage_handler(
     storage: &Storage,
     sandbox: Arc<Mutex<Sandbox>>,
 ) -> Result<String> {
-    let mut sb = sandbox.lock().await;
-    let new_storage = sb.set_sandbox_storage(&storage.mount_point);
-
-    if !new_storage {
-        return Ok("".to_string());
-    }
-
     fs::create_dir_all(&storage.mount_point).context(format!(
         "failed to create dir all {:?}",
         &storage.mount_point
@@ -401,7 +387,7 @@ fn get_pagesize_and_size_from_option(options: &[String]) -> Result<(u64, u64)> {
 async fn virtiommio_blk_storage_handler(
     logger: &Logger,
     storage: &Storage,
-    _sandbox: Arc<Mutex<Sandbox>>,
+    sandbox: Arc<Mutex<Sandbox>>,
 ) -> Result<String> {
     //The source path is VmPath
     common_storage_handler(logger, storage)
@@ -641,6 +627,14 @@ pub async fn add_storages(
             "subsystem" => "storage",
             "storage-type" => handler_name.to_owned()));
 
+        {
+            let mut sb = sandbox.lock().await;
+            let new_storage = sb.set_sandbox_storage(&storage.mount_point);
+            if !new_storage {
+                continue;
+            }
+        }
+
         let res = match handler_name.as_str() {
             DRIVER_BLK_TYPE => virtio_blk_storage_handler(&logger, &storage, sandbox.clone()).await,
             DRIVER_BLK_CCW_TYPE => {
@@ -51,7 +51,7 @@ use crate::device::{
 };
 use crate::linux_abi::*;
 use crate::metrics::get_metrics;
-use crate::mount::{add_storages, baremount, remove_mounts, STORAGE_HANDLER_LIST};
+use crate::mount::{add_storages, baremount, STORAGE_HANDLER_LIST};
 use crate::namespace::{NSTYPEIPC, NSTYPEPID, NSTYPEUTS};
 use crate::network::setup_guest_dns;
 use crate::pci;
@@ -287,8 +287,6 @@ impl AgentService {
         // Find the sandbox storage used by this container
         let mounts = sandbox.container_mounts.get(&cid);
         if let Some(mounts) = mounts {
-            remove_mounts(mounts)?;
-
             for m in mounts.iter() {
                 if sandbox.storages.get(m).is_some() {
                     cmounts.push(m.to_string());
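Since remove_mounts is no longer called when a container goes away, unmounting a shared mount point has to wait until the last container referencing it is removed. That cleanup path is not shown in this diff; below is a hedged Rust sketch of what the decrement side could look like (the helper name unset_sandbox_storage and the unmount step are assumptions):

use std::collections::HashMap;

struct Sandbox {
    storages: HashMap<String, u32>,
}

impl Sandbox {
    /// Hypothetical counterpart to set_sandbox_storage: drop one reference
    /// to `path` and report whether the count reached zero, i.e. whether
    /// the caller is the last user and should unmount the filesystem.
    fn unset_sandbox_storage(&mut self, path: &str) -> bool {
        if let Some(count) = self.storages.get_mut(path) {
            *count -= 1;
            if *count == 0 {
                self.storages.remove(path);
                return true; // last reference gone: safe to unmount `path`
            }
        }
        false
    }
}

fn main() {
    // Two containers currently share this example mount point.
    let mut sb = Sandbox {
        storages: HashMap::from([("/example/mount/point".to_string(), 2)]),
    };
    assert!(!sb.unset_sandbox_storage("/example/mount/point")); // one user left, keep it mounted
    assert!(sb.unset_sandbox_storage("/example/mount/point"));  // last user gone, unmount now
}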
@@ -626,12 +626,22 @@ func (c *Container) createBlockDevices(ctx context.Context) error {
 			c.Logger().WithError(err).Error("error writing sandbox info")
 		}
 
+		readonly := false
+		for _, flag := range mntInfo.Options {
+			if flag == "ro" {
+				readonly = true
+				break
+			}
+		}
+
 		c.mounts[i].Source = mntInfo.Device
 		c.mounts[i].Type = mntInfo.FsType
 		c.mounts[i].Options = mntInfo.Options
+		c.mounts[i].ReadOnly = readonly
 		m.Source = mntInfo.Device
 		m.Type = mntInfo.FsType
 		m.Options = mntInfo.Options
+		m.ReadOnly = readonly
 	}
 
 	var stat unix.Stat_t
@@ -6,6 +6,7 @@
 package virtcontainers
 
 import (
+	b64 "encoding/base64"
 	"encoding/json"
 	"errors"
 	"fmt"
@@ -1545,11 +1546,14 @@ func (k *kataAgent) handleBlkOCIMounts(c *Container, spec *specs.Spec) ([]*grpc.
 		return nil, err
 	}
 
-	// The device will be mounted at a unique location within the VM. Mounting
-	// to the container specific location is handled within the OCI spec. Let's ensure that
-	// the storage mount point is unique, and that this is utilized as the source in the OCI
-	// spec.
-	filename := fmt.Sprintf("%s-%s", uuid.Generate().String(), filepath.Base(vol.MountPoint))
+	// Each device will be mounted at a unique location within the VM only once. Mounting
+	// to the container specific location is handled within the OCI spec. Let's ensure that
+	// the storage mount point is unique for each device. This is then utilized as the source
+	// in the OCI spec. If multiple containers mount the same block device, it's refcounted inside
+	// the guest by Kata agent.
+	filename := b64.StdEncoding.EncodeToString([]byte(vol.Source))
+	// Make the base64 encoding path safe.
+	filename = strings.ReplaceAll(filename, "/", "_")
 	path := filepath.Join(kataGuestSandboxStorageDir(), filename)
 
 	// Update applicable OCI mount source
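Replacing the random UUID with a base64 encoding of vol.Source is what makes the guest mount point deterministic: every container that attaches the same block device resolves to the same directory under the guest sandbox storage dir, so the agent-side refcount can recognize the duplicate and mount the filesystem only once. A rough Rust rendering of that derivation, shown only to illustrate the determinism (the base64 crate's pre-0.21 encode helper and the example guest directory are assumptions, not part of this change):

use std::path::PathBuf;

/// Rough Rust equivalent of the Go derivation above: the guest path is a
/// pure function of the device source. The directory below is an example,
/// not necessarily the real kataGuestSandboxStorageDir() value.
fn guest_storage_path(source: &str) -> PathBuf {
    // `base64::encode` is the pre-0.21 helper of the `base64` crate.
    let filename = base64::encode(source).replace('/', "_");
    PathBuf::from("/run/kata-containers/sandbox/storage").join(filename)
}

fn main() {
    let a = guest_storage_path("/dev/sda");
    let b = guest_storage_path("/dev/sda");
    // Same device source, same guest mount point: the agent sees one
    // storage entry and mounts the filesystem only once.
    assert_eq!(a, b);
    println!("{}", a.display()); // .../L2Rldi9zZGE=
}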