Mirror of https://github.com/AmbiML/sparrow-kata-full.git (synced 2025-07-15 15:01:45 +00:00)
MemoryManager: integrate with the slot allocator and many api updates
- track CAmkES well-known name changes (e.g. RECV_CNODE -> MEMORY_RECV_CNODE)
- initialize the slot allocator for the component
- fix size_bytes to use the object count in its calculation
- add can_combine method to help optimize ObjDescBundle construction
- enhance ObjDescBundle:
  - len, count, size_bytes, count_log2, maybe_combine_last
  - move_objects_to_toplevel, move_objects_from_toplevel
  - fmt (show SELF_CNODE & MEMORY_RECV_CNODE symbolically)
- add MmeCapAllocFailed
- add kata_object_alloc_in_toplevel & kata_object_free_toplevel
- add kata_object_alloc_in_cnode & kata_object_free_in_cnode
- add kata_frame_alloc_in_cnode
- avoid underflow in stats bookkeeping to not crash until we fix the root cause

Change-Id: I826b55f1f0b07bef58a420e32efbc02cd1d6363f
GitOrigin-RevId: 43bd8ae5cf41fd510fae502c7cd8e234c04edb85
This commit is contained in: parent 2251f84c1b, commit 4f17bb33b3
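
The new client-side wrappers bundle the common pattern of allocating objects into a freshly allocated CNode and later freeing both the objects and their container. A minimal usage sketch (hypothetical caller code, not part of this commit), based on the kata_memory_interface API shown in the diff below:

    use kata_memory_interface::{kata_frame_alloc_in_cnode, kata_object_free_in_cnode};
    use kata_memory_interface::MemoryManagerError;

    fn demo() -> Result<(), MemoryManagerError> {
        // Request 16KB of 4K pages; the frames come back parked in a
        // dynamically allocated CNode recorded as the bundle's container.
        let frames = kata_frame_alloc_in_cnode(16 * 1024)?;
        // ... map / use the frames ...
        // Release the frames and then the CNode that held them.
        kata_object_free_in_cnode(&frames)?;
        Ok(())
    }
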
@@ -7,6 +7,7 @@ use core::ops::Range;
|
||||
use core::slice;
|
||||
use kata_os_common::allocator;
|
||||
use kata_os_common::logger::KataLogger;
|
||||
use kata_os_common::slot_allocator;
|
||||
use kata_memory_interface::MemoryManagerError;
|
||||
use kata_memory_interface::MemoryManagerInterface;
|
||||
use kata_memory_interface::ObjDescBundle;
|
||||
@@ -15,6 +16,8 @@ use kata_memory_manager::KataMemoryManager;
|
||||
use kata_os_common::sel4_sys;
|
||||
use log::{info, trace};
|
||||
|
||||
use slot_allocator::KATA_CSPACE_SLOTS;
|
||||
|
||||
use sel4_sys::seL4_BootInfo;
|
||||
use sel4_sys::seL4_CNode_Delete;
|
||||
use sel4_sys::seL4_CPtr;
|
||||
@@ -34,7 +37,7 @@ extern "C" {
|
||||
|
||||
// Each CAmkES-component has a CNode setup at a well-known slot in SELF_CNODE.
|
||||
// We re-use that slot to receive CNode caps passed with alloc & free requests.
|
||||
static RECV_CNODE: seL4_CPtr;
|
||||
static MEMORY_RECV_CNODE: seL4_CPtr;
|
||||
}
|
||||
|
||||
#[no_mangle]
|
||||
@@ -75,11 +78,20 @@ pub extern "C" fn pre_init() {
|
||||
stats.free_bytes,
|
||||
);
|
||||
}
|
||||
|
||||
KATA_CSPACE_SLOTS.init(
|
||||
/*first_slot=*/ bootinfo.empty.start,
|
||||
/*size=*/ bootinfo.empty.end - bootinfo.empty.start
|
||||
);
|
||||
trace!("setup cspace slots: first slot {} free {}",
|
||||
KATA_CSPACE_SLOTS.base_slot(),
|
||||
KATA_CSPACE_SLOTS.free_slots());
|
||||
}
|
||||
|
||||
unsafe {
|
||||
// Delete the CAmkES-setup CNode; we're going to reuse the
|
||||
// well-known slot once it is empty (see below).
|
||||
seL4_CNode_Delete(SELF_CNODE, RECV_CNODE, seL4_WordBits as u8)
|
||||
seL4_CNode_Delete(SELF_CNODE, MEMORY_RECV_CNODE, seL4_WordBits as u8)
|
||||
.expect("recv_node");
|
||||
}
|
||||
}
|
||||
@@ -94,8 +106,8 @@ pub extern "C" fn memory__init() {
|
||||
// NB: this must be done here (rather than someplace like pre_init)
|
||||
// so it's in the context of the MemoryInterface thread (so we write
|
||||
// the correct ipc buffer).
|
||||
seL4_SetCapReceivePath(SELF_CNODE, RECV_CNODE, seL4_WordBits);
|
||||
trace!("Cap receive path {}:{}:{}", SELF_CNODE, RECV_CNODE, seL4_WordBits);
|
||||
seL4_SetCapReceivePath(SELF_CNODE, MEMORY_RECV_CNODE, seL4_WordBits);
|
||||
trace!("Cap receive path {}:{}:{}", SELF_CNODE, MEMORY_RECV_CNODE, seL4_WordBits);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -119,12 +131,12 @@ pub extern "C" fn memory_alloc(
|
||||
unsafe {
|
||||
let recv_path = seL4_GetCapReceivePath();
|
||||
// NB: make sure no one clobbers the setup done in memory__init
|
||||
assert_eq!(recv_path, (SELF_CNODE, RECV_CNODE, seL4_WordBits));
|
||||
assert_eq!(recv_path, (SELF_CNODE, MEMORY_RECV_CNODE, seL4_WordBits));
|
||||
|
||||
let raw_slice = slice::from_raw_parts(c_raw_data, c_raw_data_len as usize);
|
||||
let ret_status = match postcard::from_bytes::<ObjDescBundle>(raw_slice) {
|
||||
Ok(mut bundle) => {
|
||||
// TODO(sleffler): verify we received a CNode in RECV_CNODE.
|
||||
// TODO(sleffler): verify we received a CNode in MEMORY_RECV_CNODE.
|
||||
bundle.cnode = recv_path.1;
|
||||
// NB: bundle.depth should reflect the received cnode
|
||||
KATA_MEMORY.alloc(&bundle).into()
|
||||
@@ -145,12 +157,12 @@ pub extern "C" fn memory_free(
|
||||
unsafe {
|
||||
let recv_path = seL4_GetCapReceivePath();
|
||||
// NB: make sure no one clobbers the setup done in memory__init
|
||||
assert_eq!(recv_path, (SELF_CNODE, RECV_CNODE, seL4_WordBits));
|
||||
assert_eq!(recv_path, (SELF_CNODE, MEMORY_RECV_CNODE, seL4_WordBits));
|
||||
|
||||
let raw_slice = slice::from_raw_parts(c_raw_data, c_raw_data_len as usize);
|
||||
let ret_status = match postcard::from_bytes::<ObjDescBundle>(raw_slice) {
|
||||
Ok(mut bundle) => {
|
||||
// TODO(sleffler): verify we received a CNode in RECV_CNODE.
|
||||
// TODO(sleffler): verify we received a CNode in MEMORY_RECV_CNODE.
|
||||
bundle.cnode = recv_path.1;
|
||||
// NB: bundle.depth should reflect the received cnode
|
||||
KATA_MEMORY.free(&bundle).into()
|
||||
|
@@ -1,21 +1,29 @@
|
||||
//! Kata OS memory management support
|
||||
|
||||
#![cfg_attr(not(test), no_std)]
|
||||
#![allow(dead_code)]
|
||||
|
||||
extern crate alloc;
|
||||
use alloc::vec;
|
||||
use alloc::vec::Vec;
|
||||
use core::fmt;
|
||||
use kata_os_common::slot_allocator;
|
||||
use kata_os_common::sel4_sys;
|
||||
use log::trace;
|
||||
use postcard;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use sel4_sys::seL4_CNode_Move;
|
||||
use sel4_sys::seL4_CPtr;
|
||||
use sel4_sys::seL4_Error;
|
||||
use sel4_sys::seL4_ObjectType::*;
|
||||
use sel4_sys::seL4_ObjectType;
|
||||
use sel4_sys::seL4_PageBits;
|
||||
use sel4_sys::seL4_Result;
|
||||
use sel4_sys::seL4_SetCap;
|
||||
use sel4_sys::seL4_WordBits;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use slot_allocator::KATA_CSPACE_SLOTS;
|
||||
|
||||
// NB: @14b per desc this supports ~150 descriptors (depending
|
||||
// on serde overhead), the rpc buffer is actually 4K so we could
|
||||
@@ -23,6 +31,17 @@ use serde::{Deserialize, Serialize};
|
||||
pub const RAW_OBJ_DESC_DATA_SIZE: usize = 2048;
|
||||
pub type RawObjDescData = [u8; RAW_OBJ_DESC_DATA_SIZE];
|
||||
|
||||
extern "C" {
|
||||
// Each CAmkES-generated CNode has a writable self-reference to itself in
|
||||
// the slot SELF_CNODE to enable dynamic management of capabilities.
|
||||
static SELF_CNODE: seL4_CPtr;
|
||||
|
||||
// Each CAmkES-component has a CNode setup at a well-known slot. In lieu
|
||||
// of any supplied CNode we can use that container to pass capabilities.
|
||||
static MEMORY_RECV_CNODE: seL4_CPtr;
|
||||
static MEMORY_RECV_CNODE_DEPTH: u8;
|
||||
}
|
||||
|
||||
// The MemoryManager takes collections of Object Descriptors.
|
||||
//
|
||||
// For an alloc request an object descriptor provides everything needed
|
||||
@@ -34,7 +53,7 @@ pub type RawObjDescData = [u8; RAW_OBJ_DESC_DATA_SIZE];
|
||||
// to irq's, configure TCB slots, etc.
|
||||
//
|
||||
// TODO(sleffler): support setting fixed physical address for drivers
|
||||
// TODO(sleffler): allocate associated resources like endpoint #'s?
|
||||
// TODO(sleffler): maybe allocate associated resources like endpoint #'s?
|
||||
#[derive(Copy, Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct ObjDesc {
|
||||
// Requested object type or type of object being released.
|
||||
@@ -66,12 +85,19 @@ impl ObjDesc {
|
||||
) -> Self {
|
||||
ObjDesc { type_, count, cptr }
|
||||
}
|
||||
|
||||
// Returns a new ObjDesc with count of 1 and the cptr offset by |index|.
|
||||
pub fn new_at(&self, index: usize) -> ObjDesc {
|
||||
assert!(index < self.retype_count());
|
||||
ObjDesc::new(self.type_, 1, self.cptr + index)
|
||||
}
|
||||
|
||||
// Parameters for seL4_Untyped_Retype call.
|
||||
pub fn retype_size_bits(&self) -> Option<usize> {
|
||||
match self.type_ {
|
||||
seL4_UntypedObject => Some(self.count), // Log2 memory size
|
||||
seL4_CapTableObject => Some(self.count), // Log2 number of slots
|
||||
seL4_SchedContextObject => Some(self.count), // Log2 context size
|
||||
seL4_UntypedObject // Log2 memory size
|
||||
| seL4_CapTableObject // Log2 number of slots
|
||||
| seL4_SchedContextObject => Some(self.count), // Log2 context size
|
||||
_ => self.type_.size_bits(),
|
||||
}
|
||||
}
|
||||
@@ -86,18 +112,28 @@ impl ObjDesc {
|
||||
}
|
||||
}
|
||||
|
||||
// Memory occupied by objects. Used for bookkeeping and statistics.
|
||||
// Memory occupied by objects. Used mainly for bookkeeping and statistics.
|
||||
pub fn size_bytes(&self) -> Option<usize> {
|
||||
match self.type_ {
|
||||
seL4_UntypedObject => Some(1 << self.count),
|
||||
seL4_UntypedObject
|
||||
| seL4_SchedContextObject => Some(1 << self.count),
|
||||
seL4_CapTableObject =>
|
||||
self.type_.size_bits().map(|x| self.count * (1 << x)),
|
||||
seL4_SchedContextObject => Some(1 << self.count),
|
||||
_ => self.type_.size_bits().map(|x| 1 << x),
|
||||
self.type_.size_bits().map(|x| (1 << (x + self.count))),
|
||||
_ => self.type_.size_bits().map(|x| self.count * (1 << x)),
|
||||
}
|
||||
}
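// Worked examples for the updated accounting (illustration only, not part of
// this change):
//  - ObjDesc::new(seL4_UntypedObject, 20, 0).size_bytes() == Some(1 << 20):
//    for untypeds |count| holds the log2 memory size.
//  - ObjDesc::new(seL4_RISCV_4K_Page, 4, 0).size_bytes() is 4 * 4KB = 16KB:
//    for fixed-size objects |count| is the number of objects.
//  - For seL4_CapTableObject the result is slot_size << count, since |count|
//    is the log2 number of slots.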
|
||||
|
||||
// Checks if two descriptors can be combined. This is used to optimize
|
||||
// dynamically constructed ObjDescBundle's (e.g. rz::Upload)
|
||||
pub fn can_combine(&self, other: &ObjDesc) -> bool {
|
||||
self.type_ == other.type_ && self.cptr + self.count == other.cptr
|
||||
}
|
||||
}
|
||||
|
||||
// ObjDescBundle holds a collection of ObjDesc's and their associated
|
||||
// container (i.e. CNode). This enables full "path addressing" of the
|
||||
// objects. Helper methods do move/copy operations between a component's
|
||||
// top-level CNode and dynamically allocated CNodes.
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct ObjDescBundle {
|
||||
pub cnode: seL4_CPtr,
|
||||
@@ -110,8 +146,119 @@ impl ObjDescBundle {
|
||||
depth: u8,
|
||||
objs: Vec<ObjDesc>,
|
||||
) -> Self {
|
||||
// TODO(sleffler): assert the largest cptr fits in the container
|
||||
ObjDescBundle { cnode, depth, objs }
|
||||
}
|
||||
|
||||
// Returns the number of object descriptors.
|
||||
pub fn len(&self) -> usize { self.objs.len() }
|
||||
|
||||
// Returns the count of objects specified by the object descriptors.
|
||||
pub fn count(&self) -> usize {
|
||||
self.objs.as_slice().iter().map(|od| od.retype_count()).sum()
|
||||
}
|
||||
|
||||
// Returns the total bytes specified by the object descriptors.
|
||||
pub fn size_bytes(&self) -> usize {
|
||||
self.objs.as_slice().iter().map(|od| od.size_bytes().unwrap()).sum()
|
||||
}
|
||||
|
||||
// Returns the log2 size that holds all the objects. This is typically
|
||||
// used to size CNode's based on their intended contents. NB: we return
|
||||
// values > 0 since the kernel rejects a CapTable object with size_bits=0.
|
||||
pub fn count_log2(&self) -> usize {
|
||||
// NB: BITS & leading_zeros return u32
|
||||
(1 + usize::BITS - usize::leading_zeros(self.count())) as usize
|
||||
}
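// Example (illustration only): a bundle whose count() is 5 has bit length 3,
// so count_log2() returns 4 and a holding CNode gets 2^4 = 16 slots; even a
// single-object bundle yields 2 (4 slots), never the size_bits=0 the kernel
// rejects.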
|
||||
|
||||
pub fn maybe_combine_last(&mut self) -> bool {
|
||||
let len = self.len();
|
||||
if len > 1 && self.objs[len - 2].can_combine(&self.objs[len - 1]) {
|
||||
self.objs[len - 2].count += self.objs[len - 1].count;
|
||||
self.objs.pop();
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
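// Illustrative sketch (not part of this change): two descriptors naming
// adjacent frame cptr's collapse into a single entry.
//
//   let mut b = ObjDescBundle::new(
//       unsafe { SELF_CNODE }, seL4_WordBits as u8,
//       vec![
//           ObjDesc::new(seL4_RISCV_4K_Page, 2, /*cptr=*/ 0),
//           ObjDesc::new(seL4_RISCV_4K_Page, 1, /*cptr=*/ 2),
//       ],
//   );
//   assert!(b.maybe_combine_last()); // merged: one ObjDesc with count=3, cptr=0
//   assert_eq!(b.len(), 1);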
|
||||
|
||||
// Move objects to dynamically-allocated slots in the top-level
|
||||
// CNode and mutate the Object Descriptor with the new cptr's.
|
||||
// TODO(sleffler) make generic (requires supplying slot allocator)?
|
||||
pub fn move_objects_to_toplevel(
|
||||
&mut self,
|
||||
) -> seL4_Result {
|
||||
let dest_cnode = unsafe { SELF_CNODE };
|
||||
let dest_depth = seL4_WordBits as u8;
|
||||
for od in &mut self.objs {
|
||||
let dest_slot = unsafe { KATA_CSPACE_SLOTS.alloc(od.retype_count()) }
|
||||
.ok_or(seL4_Error::seL4_NotEnoughMemory)?; // XXX seL4_Result not a good fit
|
||||
for offset in 0..od.retype_count() {
|
||||
unsafe {
|
||||
// TODO(sleffler): cleanup on error?
|
||||
seL4_CNode_Move(
|
||||
/*dest_root=*/ dest_cnode,
|
||||
/*dest_index=*/ dest_slot + offset,
|
||||
/*dest_depth=*/ dest_depth,
|
||||
/*src_root=*/ self.cnode,
|
||||
/*src_index=*/ od.cptr + offset,
|
||||
/*src_depth=*/ self.depth,
|
||||
)?;
|
||||
}
|
||||
}
|
||||
od.cptr = dest_slot;
|
||||
}
|
||||
self.cnode = dest_cnode;
|
||||
self.depth = dest_depth;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Move objects from the top-level CSpace to |dest_cnode| and
|
||||
// release the top-level slots. The Object Descriptors are mutated
|
||||
// with adjusted cptr's.
|
||||
pub fn move_objects_from_toplevel(
|
||||
&mut self,
|
||||
dest_cnode: seL4_CPtr,
|
||||
dest_depth: u8,
|
||||
) -> seL4_Result {
|
||||
let mut dest_slot = 0; // NB: assume empty container
|
||||
for od in &mut self.objs {
|
||||
let count = od.retype_count();
|
||||
for offset in 0..count {
|
||||
// XXX cleanup on error?
|
||||
unsafe {
|
||||
seL4_CNode_Move(
|
||||
/*dest_root=*/ dest_cnode,
|
||||
/*dest_index=*/ dest_slot + offset,
|
||||
/*dest_depth=*/ dest_depth,
|
||||
/*src_root=*/ self.cnode,
|
||||
/*src_index=*/ od.cptr + offset,
|
||||
/*src_depth=*/ self.depth,
|
||||
)
|
||||
}?;
|
||||
}
|
||||
unsafe { KATA_CSPACE_SLOTS.free(od.cptr, count) };
|
||||
od.cptr = dest_slot;
|
||||
dest_slot = dest_slot + count;
|
||||
}
|
||||
self.cnode = dest_cnode;
|
||||
self.depth = dest_depth;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
impl fmt::Display for ObjDescBundle {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
if self.cnode == unsafe { SELF_CNODE } {
|
||||
assert_eq!(self.depth as usize, seL4_WordBits);
|
||||
write!(f, "{{ SELF, {:?} }}", &self.objs)
|
||||
} else if self.cnode == unsafe { MEMORY_RECV_CNODE } {
|
||||
assert_eq!(self.depth, unsafe { MEMORY_RECV_CNODE_DEPTH });
|
||||
write!(f, "{{ MEMORY_RECV, {:?} }}", &self.objs)
|
||||
} else {
|
||||
write!(f, "{{ cnode: {}, depth: {}, {:?} }}",
|
||||
self.cnode, self.depth, &self.objs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Eq, PartialEq)]
|
||||
@@ -119,6 +266,7 @@ pub enum MemoryError {
|
||||
ObjCountInvalid = 0, // Too many objects requested
|
||||
ObjTypeInvalid, // Request with invalid object type
|
||||
ObjCapInvalid, // Request with invalid cptr XXX
|
||||
CapAllocFailed,
|
||||
UnknownMemoryError,
|
||||
// Generic errors.
|
||||
AllocFailed,
|
||||
@@ -175,6 +323,7 @@ pub enum MemoryManagerError {
|
||||
MmeObjCountInvalid,
|
||||
MmeObjTypeInvalid,
|
||||
MmeObjCapInvalid,
|
||||
MmeCapAllocFailed,
|
||||
MmeSerializeFailed,
|
||||
MmeDeserializeFailed,
|
||||
MmeUnknownError,
|
||||
@@ -188,6 +337,7 @@ impl From<MemoryError> for MemoryManagerError {
|
||||
MemoryError::ObjCountInvalid => MemoryManagerError::MmeObjCountInvalid,
|
||||
MemoryError::ObjTypeInvalid => MemoryManagerError::MmeObjTypeInvalid,
|
||||
MemoryError::ObjCapInvalid => MemoryManagerError::MmeObjCapInvalid,
|
||||
MemoryError::CapAllocFailed => MemoryManagerError::MmeCapAllocFailed,
|
||||
MemoryError::AllocFailed => MemoryManagerError::MmeAllocFailed,
|
||||
MemoryError::FreeFailed => MemoryManagerError::MmeFreeFailed,
|
||||
_ => MemoryManagerError::MmeUnknownError,
|
||||
@@ -214,18 +364,9 @@ impl From<MemoryManagerError> for Result<(), MemoryManagerError> {
|
||||
|
||||
// Client wrappers.
|
||||
|
||||
extern "C" {
|
||||
// Each CAmkES-generated CNode has a writable self-reference to itself in
|
||||
// the slot SELF_CNODE. This is used to pass CNode containers of dynamically
|
||||
// allocated objects between clients & the MemoryManager.
|
||||
static SELF_CNODE: seL4_CPtr;
|
||||
|
||||
// Each CAmkES-component has a CNode setup at a well-known slot. We use that
|
||||
// CNode to pass capabilities with alloc & free requests.
|
||||
static RECV_CNODE: seL4_CPtr;
|
||||
static RECV_CNODE_DEPTH: u8;
|
||||
}
|
||||
|
||||
// Allocates the objects specified in |request|. The capabilities are stored
|
||||
// in |request|.cnode which is assumed to be a CNode with sufficient capacity
|
||||
#[inline]
|
||||
pub fn kata_object_alloc(
|
||||
request: &ObjDescBundle,
|
||||
) -> Result<(), MemoryManagerError> {
|
||||
@@ -234,7 +375,7 @@ pub fn kata_object_alloc(
|
||||
fn memory_alloc(c_request_len: u32, c_request_data: *const u8)
|
||||
-> MemoryManagerError;
|
||||
}
|
||||
trace!("kata_object_alloc {:?}", request); // XXX
|
||||
trace!("kata_object_alloc {}", request);
|
||||
let raw_data = &mut [0u8; RAW_OBJ_DESC_DATA_SIZE];
|
||||
postcard::to_slice(&request, &mut raw_data[..])
|
||||
.map_err(|_| MemoryManagerError::MmeSerializeFailed)?;
|
||||
@@ -242,164 +383,187 @@ pub fn kata_object_alloc(
|
||||
// Attach our CNode for returning objects; the CAmkES template
|
||||
// forces extraCaps=1 when constructing the MessageInfo struct
|
||||
// used by the seL4_Call inside memory_alloc.
|
||||
sel4_sys::debug_assert_slot_cnode!(request.cnode);
|
||||
seL4_SetCap(0, request.cnode);
|
||||
|
||||
memory_alloc(raw_data.len() as u32, raw_data.as_ptr()).into()
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(sleffler): is anyone going to use these convience wrappers given
|
||||
// the objects are returned inside a CNode (so it's way more convenient
|
||||
// to allocate 'em all togerher and then move the CNode to a TCB)?
|
||||
|
||||
// XXX need real allocator
|
||||
static mut EMPTY_SLOT: seL4_CPtr = 30; // Next empty slot in debug_console_cnode XXX
|
||||
|
||||
impl ObjDescBundle {
|
||||
// Move objects from |src_cnode| to our top-levbel CSpace and mutate
|
||||
// the Object Descriptor with adjusted cptr's.
|
||||
// TODO(sleffler) make generic?
|
||||
fn move_objects_to_toplevel(
|
||||
&mut self,
|
||||
dest_cnode: seL4_CPtr,
|
||||
dest_depth: u8,
|
||||
) -> Result<(), MemoryManagerError> {
|
||||
unsafe {
|
||||
let mut dest_slot = EMPTY_SLOT;
|
||||
for od in &mut self.objs {
|
||||
for offset in 0..od.retype_count() {
|
||||
// XXX cleanup on error?
|
||||
seL4_CNode_Move(
|
||||
/*root=*/ dest_cnode,
|
||||
/*dest_index=*/ dest_slot,
|
||||
/*dest_depth=*/ dest_depth,
|
||||
/*src_root=*/ self.cnode,
|
||||
/*src_index=*/ od.cptr + offset,
|
||||
/*src_depth=*/ self.depth,
|
||||
).map_err(|_| MemoryManagerError::MmeObjCapInvalid)?;
|
||||
dest_slot += 1;
|
||||
}
|
||||
od.cptr = dest_slot - od.retype_count();
|
||||
}
|
||||
EMPTY_SLOT = dest_slot;
|
||||
// Allocates the objects specified in |objs|. The capabilities are moved
|
||||
// to SELF_CNODE which must have sufficient space.
|
||||
#[inline]
|
||||
pub fn kata_object_alloc_in_toplevel(
|
||||
objs: Vec<ObjDesc>,
|
||||
) -> Result<ObjDescBundle, MemoryManagerError> {
|
||||
// Request the objects using the dedicated MemoryManager container.
|
||||
let mut request = ObjDescBundle::new(
|
||||
unsafe { MEMORY_RECV_CNODE },
|
||||
unsafe { MEMORY_RECV_CNODE_DEPTH },
|
||||
objs.clone(),
|
||||
);
|
||||
kata_object_alloc(&request)?;
|
||||
match request.move_objects_to_toplevel() {
|
||||
Err(_) => {
|
||||
kata_object_free(&request).expect("kata_object_alloc_in_toplevel");
|
||||
Err(MemoryManagerError::MmeObjCapInvalid) // TODO(sleffler): e.into
|
||||
}
|
||||
self.cnode = dest_cnode;
|
||||
self.depth = dest_depth;
|
||||
Ok(())
|
||||
Ok(_) => Ok(request),
|
||||
}
|
||||
}
|
||||
|
||||
// Allocates the objects specified in |objs|. The capabilities are stored
|
||||
// in a new CNode allocated with sufficient capacity.
|
||||
// Note the objects' cptr's are assumed to be consecutive and start at zero.
|
||||
// Note the returned |ObjDescBundle| has the new CNode marked as the container.
|
||||
// TODO(sleffler): not used any more, remove?
|
||||
#[inline]
|
||||
pub fn kata_object_alloc_in_cnode(
|
||||
objs: Vec<ObjDesc>,
|
||||
) -> Result<ObjDescBundle, MemoryManagerError> {
|
||||
fn next_log2(value: usize) -> usize {
|
||||
let mut size_bits = 0;
|
||||
while value > (1 << size_bits) {
|
||||
size_bits = size_bits + 1;
|
||||
}
|
||||
size_bits
|
||||
}
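// e.g. (illustration only) next_log2(5) == 3, so 5 caps get a CNode with
// 2^3 = 8 slots; next_log2(4) == 2 is an exact fit.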
|
||||
// NB: CNode size depends on how many objects are requested.
|
||||
let cnode_depth = next_log2(objs.iter().map(|od| od.count).sum());
|
||||
|
||||
// Request a top-level CNode.
|
||||
let cnode = kata_cnode_alloc(cnode_depth)
|
||||
.map_err(|_| MemoryManagerError::MmeAllocFailed)?;
|
||||
|
||||
// Now construct the request for |objs| with |cnode| as the container.
|
||||
let request = ObjDescBundle::new(
|
||||
cnode.objs[0].cptr, cnode_depth as u8, objs.clone(),
|
||||
);
|
||||
match kata_object_alloc(&request) {
|
||||
Err(e) => {
|
||||
kata_object_free_toplevel(&cnode).expect("cnode free");
|
||||
Err(e)
|
||||
}
|
||||
Ok(_) => Ok(request),
|
||||
}
|
||||
}
|
||||
|
||||
// TODO(sleffler): remove unused convenience wrappers?
|
||||
|
||||
#[inline]
|
||||
#[allow(dead_code)]
|
||||
pub fn kata_untyped_alloc(space_bytes: usize)
|
||||
-> Result<ObjDescBundle, MemoryManagerError>
|
||||
{
|
||||
let mut objs = ObjDescBundle::new(
|
||||
unsafe { RECV_CNODE },
|
||||
unsafe { RECV_CNODE_DEPTH },
|
||||
unsafe { MEMORY_RECV_CNODE },
|
||||
unsafe { MEMORY_RECV_CNODE_DEPTH },
|
||||
vec![ ObjDesc::new(seL4_UntypedObject, space_bytes, /*cptr=*/ 0) ],
|
||||
);
|
||||
kata_object_alloc(&objs)?;
|
||||
objs.move_objects_to_toplevel(unsafe { SELF_CNODE }, seL4_WordBits as u8)?;
|
||||
objs.move_objects_to_toplevel()
|
||||
.map_err(|_| MemoryManagerError::MmeObjCapInvalid)?;
|
||||
Ok(objs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
#[allow(dead_code)]
|
||||
pub fn kata_tcb_alloc() -> Result<ObjDescBundle, MemoryManagerError> {
|
||||
let mut objs = ObjDescBundle::new(
|
||||
unsafe { RECV_CNODE },
|
||||
unsafe { RECV_CNODE_DEPTH },
|
||||
unsafe { MEMORY_RECV_CNODE },
|
||||
unsafe { MEMORY_RECV_CNODE_DEPTH },
|
||||
vec![ ObjDesc::new(seL4_TCBObject, 1, /*cptr=*/ 0) ],
|
||||
);
|
||||
kata_object_alloc(&objs)?;
|
||||
objs.move_objects_to_toplevel(unsafe { SELF_CNODE }, seL4_WordBits as u8)?;
|
||||
objs.move_objects_to_toplevel()
|
||||
.map_err(|_| MemoryManagerError::MmeObjCapInvalid)?;
|
||||
Ok(objs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
#[allow(dead_code)]
|
||||
pub fn kata_endpoint_alloc() -> Result<ObjDescBundle, MemoryManagerError> {
|
||||
let mut objs = ObjDescBundle::new(
|
||||
unsafe { RECV_CNODE },
|
||||
unsafe { RECV_CNODE_DEPTH },
|
||||
unsafe { MEMORY_RECV_CNODE },
|
||||
unsafe { MEMORY_RECV_CNODE_DEPTH },
|
||||
vec![ ObjDesc::new(seL4_EndpointObject, 1, /*cptr=*/ 0) ],
|
||||
);
|
||||
kata_object_alloc(&objs)?;
|
||||
objs.move_objects_to_toplevel(unsafe { SELF_CNODE }, seL4_WordBits as u8)?;
|
||||
objs.move_objects_to_toplevel()
|
||||
.map_err(|_| MemoryManagerError::MmeObjCapInvalid)?;
|
||||
Ok(objs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
#[allow(dead_code)]
|
||||
pub fn kata_notification_alloc() -> Result<ObjDescBundle, MemoryManagerError> {
|
||||
let mut objs = ObjDescBundle::new(
|
||||
unsafe { RECV_CNODE },
|
||||
unsafe { RECV_CNODE_DEPTH },
|
||||
unsafe { MEMORY_RECV_CNODE },
|
||||
unsafe { MEMORY_RECV_CNODE_DEPTH },
|
||||
vec![ ObjDesc::new(seL4_NotificationObject, 1, /*cptr=*/ 0) ],
|
||||
);
|
||||
kata_object_alloc(&objs)?;
|
||||
objs.move_objects_to_toplevel(unsafe { SELF_CNODE }, seL4_WordBits as u8)?;
|
||||
objs.move_objects_to_toplevel()
|
||||
.map_err(|_| MemoryManagerError::MmeObjCapInvalid)?;
|
||||
Ok(objs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
#[allow(dead_code)]
|
||||
// |size_bits| is the log2 of the #slots to allocate.
|
||||
pub fn kata_cnode_alloc(size_bits: usize) -> Result<ObjDescBundle, MemoryManagerError>
|
||||
pub fn kata_cnode_alloc(size_bits: usize)
|
||||
-> Result<ObjDescBundle, MemoryManagerError>
|
||||
{
|
||||
let mut objs = ObjDescBundle::new(
|
||||
unsafe { RECV_CNODE },
|
||||
unsafe { RECV_CNODE_DEPTH },
|
||||
unsafe { MEMORY_RECV_CNODE },
|
||||
unsafe { MEMORY_RECV_CNODE_DEPTH },
|
||||
vec![ ObjDesc::new(seL4_CapTableObject, size_bits, /*cptr=*/ 0 )],
|
||||
);
|
||||
kata_object_alloc(&objs)?;
|
||||
objs.move_objects_to_toplevel(unsafe { SELF_CNODE }, seL4_WordBits as u8)?;
|
||||
objs.move_objects_to_toplevel()
|
||||
.map_err(|_| MemoryManagerError::MmeObjCapInvalid)?;
|
||||
Ok(objs)
|
||||
}
|
||||
|
||||
#[cfg(feature = "CONFIG_KERNEL_MCS")]
|
||||
#[inline]
|
||||
#[allow(dead_code)]
|
||||
pub fn kata_sched_context_alloc(size_bits: usize) -> Result<ObjDescBundle, MemoryManagerError>
|
||||
pub fn kata_sched_context_alloc(size_bits: usize)
|
||||
-> Result<ObjDescBundle, MemoryManagerError>
|
||||
{
|
||||
let mut objs = ObjDescBundle::new(
|
||||
unsafe { RECV_CNODE },
|
||||
unsafe { RECV_CNODE_DEPTH },
|
||||
unsafe { MEMORY_RECV_CNODE },
|
||||
unsafe { MEMORY_RECV_CNODE_DEPTH },
|
||||
vec![ ObjDesc::new(seL4_SchedContextObject, size_bits, /*cptr=*/ 0) ],
|
||||
);
|
||||
kata_object_alloc(&objs)?;
|
||||
objs.move_objects_to_toplevel(unsafe { SELF_CNODE }, seL4_WordBits as u8)?;
|
||||
objs.move_objects_to_toplevel()
|
||||
.map_err(|_| MemoryManagerError::MmeObjCapInvalid)?;
|
||||
Ok(objs)
|
||||
}
|
||||
|
||||
#[cfg(feature = "CONFIG_KERNEL_MCS")]
|
||||
#[inline]
|
||||
#[allow(dead_code)]
|
||||
pub fn kata_reply_alloc() -> Result<ObjDescBundle, MemoryManagerError>
|
||||
{
|
||||
let mut objs = ObjDescBundle::new(
|
||||
unsafe { RECV_CNODE },
|
||||
unsafe { RECV_CNODE_DEPTH },
|
||||
unsafe { MEMORY_RECV_CNODE },
|
||||
unsafe { MEMORY_RECV_CNODE_DEPTH },
|
||||
vec![ ObjDesc::new(seL4_ReplyObject, 1, /*cptr=*/ 0) ],
|
||||
);
|
||||
kata_object_alloc(&objs)?;
|
||||
objs.move_objects_to_toplevel(unsafe { SELF_CNODE }, seL4_WordBits as u8)?;
|
||||
objs.move_objects_to_toplevel()
|
||||
.map_err(|_| MemoryManagerError::MmeObjCapInvalid)?;
|
||||
Ok(objs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
#[allow(dead_code)]
|
||||
// Wrapper for allocating 4K pages.
|
||||
pub fn kata_frame_alloc(space_bytes: usize) -> Result<ObjDescBundle, MemoryManagerError>
|
||||
#[inline]
|
||||
pub fn kata_frame_alloc(space_bytes: usize)
|
||||
-> Result<ObjDescBundle, MemoryManagerError>
|
||||
{
|
||||
fn howmany(value: usize, unit: usize) -> usize {
|
||||
(value + (unit - 1)) / unit
|
||||
}
|
||||
// NB: always allocate 4K pages
|
||||
let mut objs = ObjDescBundle::new(
|
||||
unsafe { RECV_CNODE },
|
||||
unsafe { RECV_CNODE_DEPTH },
|
||||
unsafe { MEMORY_RECV_CNODE },
|
||||
unsafe { MEMORY_RECV_CNODE_DEPTH },
|
||||
// NB: always allocate 4K pages
|
||||
vec![ ObjDesc::new(
|
||||
seL4_RISCV_4K_Page,
|
||||
howmany(space_bytes, 1 << seL4_PageBits),
|
||||
@@ -407,25 +571,40 @@ pub fn kata_frame_alloc(space_bytes: usize) -> Result<ObjDescBundle, MemoryManag
|
||||
)],
|
||||
);
|
||||
kata_object_alloc(&objs)?;
|
||||
objs.move_objects_to_toplevel(unsafe { SELF_CNODE }, seL4_WordBits as u8)?;
|
||||
objs.move_objects_to_toplevel()
|
||||
.map_err(|_| MemoryManagerError::MmeObjCapInvalid)?;
|
||||
Ok(objs)
|
||||
}
|
||||
|
||||
// Like kata_frame_alloc but also create a CNode to hold the frames.
|
||||
#[inline]
|
||||
pub fn kata_frame_alloc_in_cnode(space_bytes: usize)
|
||||
-> Result<ObjDescBundle, MemoryManagerError>
|
||||
{
|
||||
fn howmany(value: usize, unit: usize) -> usize {
|
||||
(value + (unit - 1)) / unit
|
||||
}
|
||||
// NB: always allocate 4K pages
|
||||
let npages = howmany(space_bytes, 1 << seL4_PageBits);
|
||||
kata_object_alloc_in_cnode(vec![
|
||||
ObjDesc::new( seL4_RISCV_4K_Page, npages, /*cptr=*/ 0)
|
||||
])
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn kata_page_table_alloc() -> Result<ObjDescBundle, MemoryManagerError> {
|
||||
let mut objs = ObjDescBundle::new(
|
||||
unsafe { MEMORY_RECV_CNODE },
|
||||
unsafe { MEMORY_RECV_CNODE_DEPTH },
|
||||
vec![ ObjDesc::new(seL4_RISCV_PageTableObject, 1, /*cptr=*/ 0) ],
|
||||
);
|
||||
kata_object_alloc(&objs)?;
|
||||
objs.move_objects_to_toplevel()
|
||||
.map_err(|_| MemoryManagerError::MmeObjCapInvalid)?;
|
||||
Ok(objs)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
#[allow(dead_code)]
|
||||
pub fn kata_page_table_alloc() -> Result<ObjDescBundle, MemoryManagerError> {
|
||||
let mut objs = ObjDescBundle::new(
|
||||
unsafe { RECV_CNODE },
|
||||
unsafe { RECV_CNODE_DEPTH },
|
||||
vec![ ObjDesc::new(seL4_RISCV_PageTableObject, 1, /*cptr=*/ 0) ],
|
||||
);
|
||||
kata_object_alloc(&objs)?;
|
||||
objs.move_objects_to_toplevel(unsafe { SELF_CNODE }, seL4_WordBits as u8)?;
|
||||
Ok(objs)
|
||||
}
|
||||
|
||||
// TODO(sleffler): other objects, esp. vm stuff
|
||||
|
||||
pub fn kata_object_free(
|
||||
request: &ObjDescBundle,
|
||||
) -> Result<(), MemoryManagerError> {
|
||||
@@ -433,7 +612,7 @@ pub fn kata_object_free(
|
||||
// NB: this assumes the MemoryManager component is named "memory".
|
||||
fn memory_free(c_data_len: u32, c_data: *const u8) -> MemoryManagerError;
|
||||
}
|
||||
trace!("kata_object_free {:?}", request); // XXX
|
||||
trace!("kata_object_free {}", request);
|
||||
let raw_data = &mut [0u8; RAW_OBJ_DESC_DATA_SIZE];
|
||||
postcard::to_slice(request, &mut raw_data[..])
|
||||
.map_err(|_| MemoryManagerError::MmeSerializeFailed)?;
|
||||
@@ -441,57 +620,46 @@ pub fn kata_object_free(
|
||||
// Attach our CNode for returning objects; the CAmkES template
|
||||
// forces extraCaps=1 when constructing the MessageInfo struct
|
||||
// used in the seL4_Call.
|
||||
sel4_sys::debug_assert_slot_cnode!(request.cnode);
|
||||
seL4_SetCap(0, request.cnode);
|
||||
|
||||
memory_free(raw_data.len() as u32, raw_data.as_ptr()).into()
|
||||
}
|
||||
}
|
||||
|
||||
impl ObjDescBundle {
|
||||
// Move objects from our top-level CSpace to |dest_cnode| and return
|
||||
// mutate Object Descriptor with adjusted cptr's.
|
||||
fn move_objects_from_toplevel(
|
||||
&mut self,
|
||||
dest_cnode: seL4_CPtr,
|
||||
dest_depth: u8,
|
||||
) -> Result<(), MemoryManagerError> {
|
||||
unsafe {
|
||||
let mut dest_slot = 0;
|
||||
for od in &mut self.objs {
|
||||
for offset in 0..od.retype_count() {
|
||||
// XXX cleanup on error?
|
||||
seL4_CNode_Move(
|
||||
/*root=*/ dest_cnode,
|
||||
/*dest_index=*/ dest_slot,
|
||||
/*dest_depth=*/ dest_depth,
|
||||
/*src_root=*/ self.cnode,
|
||||
/*src_index=*/ od.cptr + offset,
|
||||
/*src_depth=*/ self.depth,
|
||||
).map_err(|_| MemoryManagerError::MmeObjCapInvalid)?;
|
||||
dest_slot += 1;
|
||||
}
|
||||
// XXX assumes od.cptr's are sequential
|
||||
od.cptr = dest_slot - od.retype_count();
|
||||
EMPTY_SLOT -= od.retype_count(); // XXX assumes no intervening alloc
|
||||
}
|
||||
}
|
||||
self.cnode = dest_cnode;
|
||||
self.depth = dest_depth;
|
||||
Ok(())
|
||||
}
|
||||
// Free |request| and then the container that holds them. The container
|
||||
// is expected to be in the top-level CNode (as returned by
|
||||
// kata_object_alloc_in_cnode).
|
||||
#[inline]
|
||||
pub fn kata_object_free_in_cnode(
|
||||
request: &ObjDescBundle,
|
||||
) -> Result<(), MemoryManagerError> {
|
||||
let cnode_obj = ObjDescBundle::new(
|
||||
unsafe { SELF_CNODE }, seL4_WordBits as u8,
|
||||
vec![ObjDesc::new(
|
||||
/*type=*/ seL4_CapTableObject,
|
||||
/*count=*/ request.depth as usize,
|
||||
/*cptr=*/ request.cnode
|
||||
)],
|
||||
);
|
||||
kata_object_free(&request)?;
|
||||
// No way to recover if this fails..
|
||||
kata_object_free_toplevel(&cnode_obj)
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn kata_object_free_toplevel(objs: &ObjDescBundle) -> Result<(), MemoryManagerError> {
|
||||
#[inline]
|
||||
pub fn kata_object_free_toplevel(objs: &ObjDescBundle)
|
||||
-> Result<(), MemoryManagerError>
|
||||
{
|
||||
let mut objs_mut = objs.clone();
|
||||
objs_mut.move_objects_from_toplevel(
|
||||
unsafe { RECV_CNODE },
|
||||
unsafe { RECV_CNODE_DEPTH }
|
||||
)?;
|
||||
unsafe { MEMORY_RECV_CNODE },
|
||||
unsafe { MEMORY_RECV_CNODE_DEPTH }
|
||||
).map_err(|_| MemoryManagerError::MmeObjCapInvalid)?;
|
||||
kata_object_free(&objs_mut)
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[inline]
|
||||
pub fn kata_memory_stats() -> Result<MemoryManagerStats, MemoryManagerError> {
|
||||
extern "C" {
|
||||
// NB: this assumes the MemoryManager component is named "memory".
|
||||
|
@@ -8,7 +8,7 @@ use kata_memory_interface::MemoryError;
|
||||
use kata_memory_interface::MemoryManagerInterface;
|
||||
use kata_memory_interface::MemoryManagerStats;
|
||||
use kata_os_common::sel4_sys;
|
||||
use log::{debug, error};
|
||||
use log::{debug, error, trace};
|
||||
use sel4_sys::seL4_CPtr;
|
||||
use sel4_sys::seL4_CNode_Delete;
|
||||
use sel4_sys::seL4_Error;
|
||||
@@ -35,7 +35,7 @@ const UNTYPED_SLAB_CAPACITY: usize = 64; // # slabs kept inline
|
||||
// TODO(sleffler): support device-backed memory objects
|
||||
#[derive(Debug)]
|
||||
struct UntypedSlab {
|
||||
pub size_bits: usize, // XXX only used to sort
|
||||
pub size_bits: usize, // NB: only used to sort
|
||||
pub _base_paddr: seL4_Word, // Physical address of slab start
|
||||
pub _last_paddr: seL4_Word, // Physical address of slab end
|
||||
pub cptr: seL4_CPtr, // seL4 untyped object
|
||||
@@ -174,7 +174,7 @@ impl MemoryManager {
|
||||
|
||||
fn delete_caps(root: seL4_CPtr, depth: u8, od: &ObjDesc) -> seL4_Result {
|
||||
for offset in 0..od.retype_count() {
|
||||
// XXX warn about errors?
|
||||
// TODO(sleffler) warn about errors?
|
||||
unsafe { seL4_CNode_Delete(root, od.cptr + offset, depth) }?;
|
||||
}
|
||||
Ok(())
|
||||
@@ -183,6 +183,8 @@ impl MemoryManager {
|
||||
|
||||
impl MemoryManagerInterface for MemoryManager {
|
||||
fn alloc(&mut self, bundle: &ObjDescBundle) -> Result<(), MemoryError> {
|
||||
trace!("alloc {:?}", bundle);
|
||||
|
||||
// TODO(sleffler): split by device vs no-device (or allow mixing)
|
||||
let first_ut = self.cur_untyped;
|
||||
let mut ut_index = first_ut;
|
||||
@@ -192,9 +194,11 @@ impl MemoryManagerInterface for MemoryManager {
|
||||
|
||||
for od in &bundle.objs {
|
||||
// NB: we don't check slots are available (the kernel will tell us).
|
||||
// XXX check size_bytes() against untyped slab? (depend on kernel for now)
|
||||
// TODO(sleffler): maybe check size_bytes() against untyped slab?
|
||||
// (we depend on the kernel for now)
|
||||
while let Err(e) =
|
||||
// XXX ASIDPool maps to UntypedObject?
|
||||
// NB: we don't allocate ASIDPool objects but if we did it
|
||||
// would fail because it needs to map to an UntypedObject
|
||||
MemoryManager::retype_untyped(self.untypeds[ut_index].cptr, bundle.cnode, od)
|
||||
{
|
||||
if e != seL4_Error::seL4_NotEnoughMemory {
|
||||
@@ -232,14 +236,22 @@ impl MemoryManagerInterface for MemoryManager {
|
||||
Ok(())
|
||||
}
|
||||
fn free(&mut self, bundle: &ObjDescBundle) -> Result<(), MemoryError> {
|
||||
trace!("free {:?}", bundle);
|
||||
|
||||
for od in &bundle.objs {
|
||||
// XXX support leaving objects so client can do bulk reclaim on exit
|
||||
// (maybe require cptr != 0)
|
||||
// TODO(sleffler): support leaving objects so client can do bulk
|
||||
// reclaim on exit (maybe require cptr != 0)
|
||||
if MemoryManager::delete_caps(bundle.cnode, bundle.depth, od).is_ok() {
|
||||
// NB: atm we do not do per-untyped bookkeeping so just track
|
||||
// global stats.
|
||||
self.allocated_bytes -= od.size_bytes().ok_or(MemoryError::ObjTypeInvalid)?;
|
||||
self.allocated_objs -= od.retype_count();
|
||||
// TODO(sleffler): temp workaround for bad bookkeeping / client mis-handling
|
||||
let size_bytes = od.size_bytes().ok_or(MemoryError::ObjTypeInvalid)?;
|
||||
if size_bytes <= self.allocated_bytes {
|
||||
self.allocated_bytes -= size_bytes;
|
||||
self.allocated_objs -= od.retype_count();
|
||||
} else {
|
||||
debug!("Undeflow on free of {:?}", od);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||