Merge changes from topic "kata-os-rootserver"

* changes:
  capdl: calculate space used by a specification
  kata-os-model: fix MCS support
  kata-os-model: mark RISC-V non-executable pages with the NX bit
  kata-os-model: simplify & shrink page frame fills
  kata-os-model: merge vspace root collection into object creation
  kata-os-model: rust version of capdl-loader-app internals

GitOrigin-RevId: 44208d281ea021a671c90dc650389029baf9243e
This commit is contained in:
Sam Leffler
2022-02-01 23:08:37 +00:00
parent 3f121414ea
commit 8961c75d25
27 changed files with 4433 additions and 0 deletions

View File

@@ -4,4 +4,6 @@ version = "0.1.0"
edition = "2018"
[dependencies]
capdl = { path = "src/capdl" }
kata-os-model = { path = "src/kata-os-model" }
sel4-sys = { path = "src/sel4-sys" }

View File

@@ -0,0 +1,24 @@
[package]
name = "capdl"
version = "0.1.0"
edition = "2018"
build = "build.rs"
[build-dependencies]
sel4-config = { path = "../sel4-config" }
[build-env]
SEL4_OUT_DIR = "${ROOTDIR}out/kata/kernel"
[features]
default = []
CONFIG_DEBUG_BUILD = []
CONFIG_KERNEL_MCS = []
[lib]
path = "mod.rs"
[dependencies]
cstr_core = { version = "0.2.3", default-features = false }
sel4-sys = { path = "../sel4-sys" }
log = "0.4"

View File

@@ -0,0 +1,21 @@
extern crate sel4_config;
use std::env;
fn main() {
    // Re-run this build script when the inputs that drive feature
    // selection change; otherwise cargo caches a stale feature set.
    println!("cargo:rerun-if-env-changed=SEL4_OUT_DIR");
    println!("cargo:rerun-if-env-changed=ROOTDIR");

    // If SEL4_OUT_DIR is not set we expect the kernel build at a fixed
    // location relative to the ROOTDIR env variable.
    println!("SEL4_OUT_DIR {:?}", env::var("SEL4_OUT_DIR"));
    let sel4_out_dir = env::var("SEL4_OUT_DIR").unwrap_or_else(|_| {
        format!(
            "{}/out/kata/kernel",
            env::var("ROOTDIR").expect("neither SEL4_OUT_DIR nor ROOTDIR is set")
        )
    });
    println!("sel4_out_dir {}", sel4_out_dir);

    // Dredge seL4 kernel config for settings we need as features to generate
    // correct code: e.g. CONFIG_KERNEL_MCS enables MCS support which changes
    // the system call numbering.
    let features = sel4_config::get_sel4_features(&sel4_out_dir);
    println!("features={:?}", features);
    for feature in features {
        println!("cargo:rustc-cfg=feature=\"{}\"", feature);
    }
}

View File

@@ -0,0 +1,911 @@
// capDL specification support.
//
// This code started from a bindgen conversion of capdl.h; some vestiges
// of that are still present and could be improved.
//
// TODO(sleffler): test on non-riscv arch's
// TODO(sleffler): support for constructing specifications
#![no_std]
#![allow(non_camel_case_types)]
#![allow(non_upper_case_globals)]
#![allow(dead_code)]
use core::mem::size_of;
use cstr_core;
use sel4_sys::SEL4_BOOTINFO_HEADER_FDT;
use sel4_sys::SEL4_BOOTINFO_HEADER_PADDING;
use sel4_sys::SEL4_BOOTINFO_HEADER_X86_ACPI_RSDP;
use sel4_sys::SEL4_BOOTINFO_HEADER_X86_FRAMEBUFFER;
use sel4_sys::SEL4_BOOTINFO_HEADER_X86_MBMMAP;
use sel4_sys::SEL4_BOOTINFO_HEADER_X86_TSC_FREQ;
use sel4_sys::SEL4_BOOTINFO_HEADER_X86_VBE;
use sel4_sys::seL4_CapRights;
use sel4_sys::seL4_CNode_CapData;
use sel4_sys::seL4_CPtr;
use sel4_sys::seL4_ObjectType;
use sel4_sys::seL4_ObjectTypeCount;
use sel4_sys::seL4_Word;
use self::CDL_CapDataType::*;
use self::CDL_ObjectType::*;
// Generic bitfield accessor over fixed-size byte storage, in the shape
// bindgen emits for C bitfields. Bit numbering within a byte follows the
// target's endianness so the in-memory layout matches the C compiler's.
#[repr(C)]
#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
pub struct __BindgenBitfieldUnit<Storage> {
    storage: Storage,
}
impl<Storage> __BindgenBitfieldUnit<Storage> {
    /// Wraps the raw backing storage.
    #[inline]
    pub const fn new(storage: Storage) -> Self {
        Self { storage }
    }
}
impl<Storage> __BindgenBitfieldUnit<Storage>
where
    Storage: AsRef<[u8]> + AsMut<[u8]>,
{
    // Bit position of |index| within its byte, honoring target endianness.
    #[inline]
    fn bit_shift(index: usize) -> usize {
        if cfg!(target_endian = "big") {
            7 - (index % 8)
        } else {
            index % 8
        }
    }
    /// Returns the single bit at |index|.
    #[inline]
    pub fn get_bit(&self, index: usize) -> bool {
        debug_assert!(index / 8 < self.storage.as_ref().len());
        let cell = self.storage.as_ref()[index / 8];
        cell & (1u8 << Self::bit_shift(index)) != 0
    }
    /// Sets or clears the single bit at |index|.
    #[inline]
    pub fn set_bit(&mut self, index: usize, val: bool) {
        debug_assert!(index / 8 < self.storage.as_ref().len());
        let mask = 1u8 << Self::bit_shift(index);
        let cell = &mut self.storage.as_mut()[index / 8];
        if val {
            *cell |= mask;
        } else {
            *cell &= !mask;
        }
    }
    /// Extracts |bit_width| bits starting at |bit_offset| as a u64.
    #[inline]
    pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
        debug_assert!(bit_width <= 64);
        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
        debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
        let mut acc: u64 = 0;
        for i in 0..(bit_width as usize) {
            if self.get_bit(bit_offset + i) {
                let shift = if cfg!(target_endian = "big") {
                    bit_width as usize - 1 - i
                } else {
                    i
                };
                acc |= 1u64 << shift;
            }
        }
        acc
    }
    /// Stores the low |bit_width| bits of |val| starting at |bit_offset|.
    #[inline]
    pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
        debug_assert!(bit_width <= 64);
        debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
        debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
        for i in 0..(bit_width as usize) {
            let on = (val >> i) & 1 != 0;
            let pos = if cfg!(target_endian = "big") {
                bit_width as usize - 1 - i
            } else {
                i
            };
            self.set_bit(bit_offset + pos, on);
        }
    }
}
// Discriminator for the payload of a CDL_CapData (see get_cap_data()).
#[repr(u32)]
#[derive(Debug, PartialEq, Eq)]
pub enum CDL_CapDataType {
CDL_CapData_Badge,
CDL_CapData_Guard,
CDL_CapData_Raw,
}
// Well-known TCB capability slot numbers used by the capDL spec.
// NB: we avoid an enum to simplify use
pub const CDL_TCB_CTable_Slot: seL4_Word = 0;
pub const CDL_TCB_VTable_Slot: seL4_Word = CDL_TCB_CTable_Slot + 1;
pub const CDL_TCB_Reply_Slot: seL4_Word = CDL_TCB_VTable_Slot + 1;
pub const CDL_TCB_Caller_Slot: seL4_Word = CDL_TCB_Reply_Slot + 1;
pub const CDL_TCB_IPCBuffer_Slot: seL4_Word = CDL_TCB_Caller_Slot + 1;
pub const CDL_TCB_FaultEP_Slot: seL4_Word = CDL_TCB_IPCBuffer_Slot + 1;
// NOTE(review): SC/TemporalFaultEP slots presumably apply only with
// CONFIG_KERNEL_MCS -- confirm against the loader's TCB setup.
pub const CDL_TCB_SC_Slot: seL4_Word = CDL_TCB_FaultEP_Slot + 1;
pub const CDL_TCB_TemporalFaultEP_Slot: seL4_Word = CDL_TCB_SC_Slot + 1;
// CONFIG_ARM_HYPERVISOR_SUPPORT || CONFIG_VTX
pub const CDL_TCB_VCPU_Slot: seL4_Word = CDL_TCB_TemporalFaultEP_Slot + 1;
// Kernel object classes used by the arch code to size/create objects
// (see kobject_get_size/kobject_get_type in the arch modules).
#[derive(Debug, PartialEq, Eq)]
pub enum kobject_t {
KOBJECT_PAGE_DIRECTORY,
KOBJECT_PAGE_TABLE,
KOBJECT_FRAME,
KOBJECT_ARCH_NUM_TYPES,
KOBJECT_TCB,
KOBJECT_CNODE,
KOBJECT_CSLOT,
KOBJECT_UNTYPED,
KOBJECT_ENDPOINT,
KOBJECT_NOTIFICATION,
KOBJECT_REPLY,
KOBJECT_SCHED_CONTEXT,
NUM_KOBJECT_TYPES,
}
// Index of an object in the spec's object array.
pub type CDL_ObjID = seL4_Word;
// NB: some object id's are written in the spec as -1
// Returns false for the all-ones sentinel id (-1 in the spec).
pub fn is_objid_valid(val: CDL_ObjID) -> bool {
val != CDL_ObjID::MAX
}
pub type CDL_IRQ = seL4_Word;
pub type CDL_Core = seL4_Word;
// Capability rights bitmask as written in the spec; converted to
// seL4_CapRights via the From impl below.
#[repr(usize)]
pub enum CDL_CapRights {
CDL_CanWrite = 1, // BIT(0)
CDL_CanRead = 2, // BIT(1)
CDL_CanGrant = 4, // BIT(2)
CDL_CanGrantReply = 8, // BIT(3)
CDL_AllRights = 15,
}
impl From<CDL_CapRights> for seL4_CapRights {
    /// Converts the spec's rights bitmask into the seL4 representation.
    // TODO(sleffler): simplify/cleanup ::new
    fn from(rights: CDL_CapRights) -> seL4_CapRights {
        // CDL_CapRights is a fieldless repr(usize) enum whose discriminant
        // is the bitmask itself, so a plain `as` cast replaces the old
        // unsafe transmute.
        let val = rights as usize;
        seL4_CapRights::new(
            ((val & 8) != 0) as usize, // CDL_CanGrantReply
            ((val & 4) != 0) as usize, // CDL_CanGrant
            ((val & 2) != 0) as usize, // CDL_CanRead
            ((val & 1) != 0) as usize, // CDL_CanWrite
        )
    }
}
// NB: u8 required for use below
// Capability types appearing in a capDL spec. Order must match the
// values emitted by the spec generator.
// NB: u8 required for use below
#[repr(u8)]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum CDL_CapType {
CDL_NullCap = 0,
CDL_UntypedCap,
CDL_EPCap,
CDL_NotificationCap,
CDL_ReplyCap,
CDL_MasterReplyCap,
CDL_CNodeCap,
CDL_TCBCap,
CDL_IRQControlCap,
CDL_IRQHandlerCap,
CDL_FrameCap,
CDL_PTCap,
CDL_PDCap,
CDL_PML4Cap,
CDL_PDPTCap,
CDL_PUDCap,
CDL_PGDCap,
CDL_ASIDControlCap,
CDL_ASIDPoolCap,
// TODO(sleffler): missing arch-specific caps
CDL_SCCap,
CDL_SchedControlCap,
CDL_RTReplyCap,
CDL_DomainCap,
}
// Badge/guard/raw data attached to a cap. The union holds the payload;
// the trailing 1-byte bitfield holds the CDL_CapDataType tag (2 bits).
#[repr(C, packed)]
#[derive(Copy, Clone)]
pub struct CDL_CapData {
pub __bindgen_anon: CDL_CapData__bindgen_ty_1,
pub _bitfield_align: [u8; 0],
pub _bitfield: __BindgenBitfieldUnit<[u8; 1]>,
}
// Payload variants: guard bits+size (bitfield), badge, or raw word.
#[repr(C)]
#[derive(Copy, Clone)]
pub union CDL_CapData__bindgen_ty_1 {
pub __bindgen_anon: CDL_CapData__bindgen_ty_1__bindgen_ty_1,
pub badge: seL4_Word,
pub data: seL4_Word,
}
// Packed guard word: guard_bits in bits [0,18), guard_size in [18,32).
#[repr(C)]
#[derive(Copy, Clone)]
pub struct CDL_CapData__bindgen_ty_1__bindgen_ty_1 {
pub _bitfield_align: [u32; 0],
pub _bitfield: __BindgenBitfieldUnit<[u8; 4]>,
}
// Accessors for the packed guard word: guard_bits occupies bits [0,18),
// guard_size bits [18,32).
impl CDL_CapData__bindgen_ty_1__bindgen_ty_1 {
#[inline]
pub fn new (guard_bits: seL4_Word, guard_size: seL4_Word) -> Self {
CDL_CapData__bindgen_ty_1__bindgen_ty_1 {
_bitfield_align: [],
_bitfield: Self::new_bitfield(guard_bits, guard_size),
}
}
#[inline]
pub fn guard_bits(&self) -> seL4_Word {
self._bitfield.get(0, 18) as seL4_Word
}
#[inline]
pub fn set_guard_bits(&mut self, val: seL4_Word) {
self._bitfield.set(0, 18, val as u64)
}
#[inline]
pub fn guard_size(&self) -> seL4_Word {
self._bitfield.get(18, 14) as seL4_Word
}
#[inline]
pub fn set_guard_size(&mut self, val: seL4_Word) {
self._bitfield.set(18, 14, val as u64)
}
// Packs both fields into a fresh 4-byte bitfield unit.
fn new_bitfield(
guard_bits: seL4_Word,
guard_size: seL4_Word,
) -> __BindgenBitfieldUnit<[u8; 4]> {
let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 4]> = Default::default();
__bindgen_bitfield_unit.set(0, 18, guard_bits as u64);
__bindgen_bitfield_unit.set(18, 14, guard_size as u64);
__bindgen_bitfield_unit
}
}
impl CDL_CapData {
// Returns the payload in the form seL4 expects, selected by the tag:
// badge word, packed guard (via seL4_CNode_CapData), or raw data.
pub fn get_cap_data(&self) -> seL4_Word {
match self.tag() {
CDL_CapData_Badge => self.badge(),
CDL_CapData_Guard => {
seL4_CNode_CapData::new(self.guard_bits(), self.guard_size()).words[0]
}
CDL_CapData_Raw => self.data(),
}
}
// Tag stored in the low 2 bits of the trailing bitfield.
// NOTE(review): transmute assumes the spec only writes values 0..=2;
// an out-of-range tag would be UB.
#[inline]
pub fn tag(&self) -> CDL_CapDataType {
unsafe {
::core::mem::transmute::<u32, CDL_CapDataType>(self._bitfield.get(0, 2) as u32)
}
}
#[inline]
pub fn set_tag(&mut self, val: u32) {
self._bitfield.set(0, 2, val as u64)
}
// Guard accessors delegate to the packed guard word in the union;
// only meaningful when tag() == CDL_CapData_Guard.
#[inline]
pub fn guard_bits(&self) -> seL4_Word {
unsafe { self.__bindgen_anon.__bindgen_anon.guard_bits() }
}
#[inline]
pub fn set_guard_bits(&mut self, val: seL4_Word) {
unsafe { self.__bindgen_anon.__bindgen_anon.set_guard_bits(val) }
}
#[inline]
pub fn guard_size(&self) -> seL4_Word {
unsafe { self.__bindgen_anon.__bindgen_anon.guard_size() }
}
#[inline]
pub fn set_guard_size(&mut self, val: seL4_Word) {
unsafe { self.__bindgen_anon.__bindgen_anon.set_guard_size(val) }
}
// Badge/raw accessors; only meaningful for the matching tag.
#[inline]
pub fn badge(&self) -> seL4_Word {
unsafe { self.__bindgen_anon.badge }
}
#[inline]
pub fn set_badge(&mut self, val: seL4_Word) {
self.__bindgen_anon.badge = val
}
#[inline]
pub fn data(&self) -> seL4_Word {
unsafe { self.__bindgen_anon.data }
}
#[inline]
pub fn set_data(&mut self, val: seL4_Word) {
self.__bindgen_anon.data = val
}
}
// A capability as it appears in a spec slot. The trailing 1-byte
// bitfield packs vm_attribs (bits 0-2), is_orig (bit 3) and rights
// (bits 4-7); see the accessors below.
#[repr(C, packed)]
#[derive(Copy, Clone)]
pub struct CDL_Cap {
pub obj_id: CDL_ObjID,
pub data: CDL_CapData,
pub irq: CDL_IRQ,
// NOTE(review): container/slot presumably identify where a frame cap
// is mapped -- confirm against the loader's mapping code.
pub mapping_container_id: CDL_ObjID,
pub mapping_slot: seL4_Word,
pub mapped_frame_cap: seL4_CPtr,
pub type_: CDL_CapType, // NB: ok to use enum 'cuz declared repr(C, u8)
pub _bitfield_align: [u8; 0],
pub _bitfield: __BindgenBitfieldUnit<[u8; 1]>,
}
impl CDL_Cap {
    // data in an seL4-friendly format
    pub fn cap_data(&self) -> seL4_Word {
        self.data.get_cap_data()
    }
    // Returns the sel4utils representation of a CDL_Cap's rights
    pub fn cap_rights(&self) -> seL4_CapRights {
        self.rights().into()
    }
    #[inline]
    pub fn r#type(&self) -> CDL_CapType {
        self.type_
    }
    // VM attributes occupy bits [0,3) of the trailing bitfield.
    #[inline]
    pub fn vm_attribs(&self) -> u32 {
        self._bitfield.get(0, 3) as u32
    }
    #[inline]
    pub fn set_vm_attribs(&mut self, val: u32) {
        self._bitfield.set(0, 3, val as u64)
    }
    // is_orig (bit 3) marks the original, underived capability.
    #[inline]
    pub fn is_orig(&self) -> bool {
        self._bitfield.get(3, 1) != 0
    }
    #[inline]
    pub fn set_is_orig(&mut self, val: bool) {
        self._bitfield.set(3, 1, val as u64)
    }
    // Rights occupy bits [4,8).
    #[inline]
    pub fn rights(&self) -> CDL_CapRights {
        // NB: CDL_CapRights is repr(usize); the previous
        // transmute::<u32, _> only compiled on 32-bit targets. Widen to
        // usize first so 64-bit targets (e.g. aarch64) build too.
        unsafe {
            ::core::mem::transmute::<usize, CDL_CapRights>(self._bitfield.get(4, 4) as usize)
        }
    }
    #[inline]
    pub fn set_rights(&mut self, val: CDL_CapRights) {
        // Fieldless repr(usize) enum: an `as` cast of the discriminant
        // replaces the (size-mismatched) transmute; no unsafe needed.
        self._bitfield.set(4, 4, val as u64)
    }
    // Packs the three fields into a fresh 1-byte bitfield unit.
    #[inline]
    fn new_bitfield(
        vm_attribs: u32,
        is_orig: u32,
        rights: u32,
    ) -> __BindgenBitfieldUnit<[u8; 1]> {
        let mut __bindgen_bitfield_unit: __BindgenBitfieldUnit<[u8; 1]> = Default::default();
        __bindgen_bitfield_unit.set(0, 3, vm_attribs as u64);
        __bindgen_bitfield_unit.set(3, 1, is_orig as u64);
        __bindgen_bitfield_unit.set(4, 4, rights as u64);
        __bindgen_bitfield_unit
    }
}
// A (slot number, capability) pair within an object's slot table.
#[repr(C, packed)]
#[derive(Copy, Clone)]
pub struct CDL_CapSlot {
pub slot: seL4_Word,
pub cap: CDL_Cap,
}
// Sparse table of an object's cap slots: |num| entries at |slot|.
#[repr(C)]
#[derive(Debug)]
pub struct CDL_CapMap {
pub num: seL4_Word,
pub slot: *const CDL_CapSlot,
}
impl<'a> CDL_CapMap {
pub fn as_slice(&'a self) -> &'a [CDL_CapSlot] {
unsafe { core::slice::from_raw_parts(self.slot, self.num) }
}
// NB: panics if |index| >= num.
pub fn get_slot(&self, index: usize) -> CDL_CapSlot {
self.as_slice()[index]
}
// Linear search by slot number (the table is sparse).
pub fn get_cap_at(&self, slot: seL4_Word) -> Option<&CDL_Cap> {
self.as_slice().iter().find(|s| s.slot == slot).map(|s| &s.cap)
}
}
// TODO(sleffler): values depend on config & arch, this works for riscv
// might be better to ditch the enum or move to arch
// Object types appearing in a spec. Discriminant values must line up
// with seL4_ObjectType because the From impl below transmutes directly.
// TODO(sleffler): values depend on config & arch, this works for riscv
// might be better to ditch the enum or move to arch
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum CDL_ObjectType {
CDL_Untyped = 0,
CDL_TCB,
CDL_Endpoint,
CDL_Notification,
CDL_CNode,
#[cfg(feature = "CONFIG_KERNEL_MCS")]
CDL_SchedContext,
#[cfg(feature = "CONFIG_KERNEL_MCS")]
CDL_RTReply,
// XXX these work for CONFIG_ARCH_RISCV
CDL_Frame,
CDL_Mega_Frame,
CDL_PT,
// NB: remainder are numbered relative to seL4_ObjectTypeCount
CDL_ASIDPool = seL4_ObjectTypeCount + 1,
CDL_Interrupt,
CDL_IOPorts, // CONFIG_ARCH_X86
CDL_IODevice, // CONFIG_ARCH_X86
// NB: when MCS is not enabled these are still defined (sigh)
#[cfg(not(feature = "CONFIG_KERNEL_MCS"))]
CDL_SchedContext,
#[cfg(not(feature = "CONFIG_KERNEL_MCS"))]
CDL_RTReply,
CDL_IOAPICInterrupt, // CONFIG_ARCH_X86
CDL_MSIInterrupt, // CONFIG_ARCH_X86
CDL_ARMIODevice, // CONFIG_ARCH_ARM
CDL_PT_ROOT_ALIAS, // NB: not used, placeholder
CDL_ARMInterrupt, // CONFIG_ARCH_ARM
CDL_SID, // CONFIG_ARCH_ARM
CDL_CB, // CONFIG_ARCH_ARM
}
impl From<CDL_ObjectType> for seL4_ObjectType {
// NOTE(review): sound only for discriminants that are valid
// seL4_ObjectType values (the kernel-native range); the CDL-only
// types past seL4_ObjectTypeCount must not be converted.
fn from(type_: CDL_ObjectType) -> seL4_ObjectType {
unsafe { ::core::mem::transmute(type_) }
}
}
// Per-object "extra" payloads stored in the CDL_ObjectExtra union;
// which member is valid depends on CDL_Object::type_.
// TCB configuration: scheduling params, entry point/stack, startup
// arguments (init/init_sz), fault endpoint and IPC buffer address.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct CDL_TCBExtraInfo {
pub priority: u8,
pub max_priority: u8,
pub affinity: u8,
pub domain: u8,
pub pc: seL4_Word,
pub sp: seL4_Word,
pub elf_name: *const cstr_core::c_char,
pub init: *const seL4_Word,
pub init_sz: seL4_Word,
pub fault_ep: seL4_CPtr,
pub ipcbuffer_addr: seL4_Word,
pub resume: bool,
}
// Scheduling-context parameters (MCS).
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct CDL_SCExtraInfo {
pub period: u64,
pub budget: u64,
pub data: seL4_Word,
}
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct CDL_CBExtraInfo {
pub bank: u8,
}
// x86 IOAPIC interrupt routing info.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct CDL_IOAPICIRQExtraInfo {
pub ioapic: u32,
pub ioapic_pin: u32,
pub level: u32,
pub polarity: u32,
}
// x86 MSI interrupt routing info.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct CDL_MSIIRQExtraInfo {
pub handle: u32,
pub pci_bus: u32,
pub pci_dev: u32,
pub pci_fun: u32,
}
// ARM interrupt trigger/target info.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct CDL_ARMIRQExtraInfo {
pub trigger: u32,
pub target: u32,
}
// How a frame's contents are initialized at load time.
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum CDL_FrameFillType_t {
CDL_FrameFill_None = 0,
CDL_FrameFill_BootInfo,
CDL_FrameFill_FileData,
}
// Which bootinfo record to copy into a frame; values mirror the
// seL4 bootinfo header ids.
#[repr(C)]
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum CDL_FrameFill_BootInfoEnum_t {
// TODO(sleffler): C code defines Padding as FDT
CDL_FrameFill_BootInfo_Padding = SEL4_BOOTINFO_HEADER_PADDING as isize,
CDL_FrameFill_BootInfo_X86_VBE = SEL4_BOOTINFO_HEADER_X86_VBE as isize,
CDL_FrameFill_BootInfo_X86_MBMMAP = SEL4_BOOTINFO_HEADER_X86_MBMMAP as isize,
CDL_FrameFill_BootInfo_X86_ACPI_RSDP = SEL4_BOOTINFO_HEADER_X86_ACPI_RSDP as isize,
CDL_FrameFill_BootInfo_X86_Framebuffer = SEL4_BOOTINFO_HEADER_X86_FRAMEBUFFER as isize,
CDL_FrameFill_BootInfo_X86_TSC_Freq = SEL4_BOOTINFO_HEADER_X86_TSC_FREQ as isize,
CDL_FrameFill_BootInfo_FDT = SEL4_BOOTINFO_HEADER_FDT as isize,
}
impl From<CDL_FrameFill_BootInfoEnum_t> for usize {
fn from(bi_type: CDL_FrameFill_BootInfoEnum_t) -> usize {
bi_type as usize
}
}
// Bootinfo fill descriptor: record type + offset within the record.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct CDL_FrameFill_BootInfoType_t {
pub type_: CDL_FrameFill_BootInfoEnum_t,
pub src_offset: usize,
}
// File-data fill descriptor: source file name + offset within it.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct CDL_FrameFill_FileDataType_t {
pub filename: *const cstr_core::c_char,
pub file_offset: usize,
}
impl<'a> CDL_FrameFill_FileDataType_t {
// NB: panics if the name is not valid UTF-8 or the pointer is null.
pub fn filename(&'a self) -> &'a str {
unsafe { cstr_core::CStr::from_ptr(self.filename) }.to_str().unwrap()
}
}
// One frame-fill directive: tagged by |type_|, with the payload in the
// union selected accordingly.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct CDL_FrameFill_Element_t {
pub type_: CDL_FrameFillType_t,
pub dest_offset: usize,
pub dest_len: usize,
__bindgen_anon: CDL_FrameFill_Element_t__bindgen_ty_1,
}
impl<'a> CDL_FrameFill_Element_t {
// Tag-checked (debug-only) accessors for the union payload.
#[inline]
pub fn get_bootinfo(&'a self) -> &'a CDL_FrameFill_BootInfoType_t {
debug_assert!(self.type_ == CDL_FrameFillType_t::CDL_FrameFill_BootInfo);
unsafe { &self.__bindgen_anon.bi_type }
}
#[inline]
pub fn get_file_data(&'a self) -> &'a CDL_FrameFill_FileDataType_t {
debug_assert!(self.type_ == CDL_FrameFillType_t::CDL_FrameFill_FileData);
unsafe { &self.__bindgen_anon.file_data_type }
}
}
#[repr(C)]
#[derive(Copy, Clone)]
pub union CDL_FrameFill_Element_t__bindgen_ty_1 {
pub bi_type: CDL_FrameFill_BootInfoType_t,
pub file_data_type: CDL_FrameFill_FileDataType_t,
}
// Frame payload: fill directive(s) plus optional fixed physical address.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct CDL_FrameExtraInfo {
pub fill: [CDL_FrameFill_Element_t; 1],
pub paddr: seL4_Word,
}
// One object in the spec: its cap slots, type-specific extra data,
// type tag, and size in bits. |name| is only present in debug builds.
#[repr(C, packed)]
pub struct CDL_Object {
#[cfg(feature = "CONFIG_DEBUG_BUILD")]
pub name: *const cstr_core::c_char,
pub slots: CDL_CapMap,
pub extra: CDL_ObjectExtra,
pub type_: CDL_ObjectType,
pub size_bits: u32,
}
impl<'a> CDL_Object {
// Object name for diagnostics; only stored in debug builds.
#[cfg(feature = "CONFIG_DEBUG_BUILD")]
pub fn name(&self) -> &str {
if self.name.is_null() {
"<null>"
} else {
unsafe { cstr_core::CStr::from_ptr(self.name) }
.to_str()
.unwrap()
}
}
#[cfg(not(feature = "CONFIG_DEBUG_BUILD"))]
#[inline]
pub fn name(&self) -> &str { "<n/a>" }
// Slot-table accessors; delegate to CDL_CapMap.
pub fn slots_slice(&'a self) -> &'a [CDL_CapSlot] {
#[allow(unaligned_references)]
self.slots.as_slice()
}
#[inline]
pub fn num_slots(&self) -> seL4_Word {
self.slots.num
}
// NB: panics if |index| is out of range (see CDL_CapMap::get_slot).
#[inline]
pub fn slot(&self, index: seL4_Word) -> CDL_CapSlot {
#[allow(unaligned_references)]
self.slots.get_slot(index)
}
#[inline]
pub fn get_cap_at(&'a self, slot: seL4_Word) -> Option<&CDL_Cap> {
#[allow(unaligned_references)]
self.slots.get_cap_at(slot)
}
#[inline]
pub fn r#type(&self) -> CDL_ObjectType {
self.type_
}
#[inline]
pub fn size_bits(&self) -> seL4_Word {
self.size_bits as seL4_Word
}
// Physical address for frames and untypeds; None for other types.
pub fn paddr(&self) -> Option<seL4_Word> {
match self.type_ {
CDL_Frame => Some(unsafe { self.extra.frame_extra.paddr }),
CDL_Untyped => Some(unsafe { self.extra.paddr }),
_ => None,
}
}
// Fill directive |index| for a frame object; None for other types.
// NB: panics if |index| is out of range of the (1-entry) fill array.
pub fn frame_fill(&'a self, index: usize) -> Option<&'a CDL_FrameFill_Element_t> {
match self.type_ {
CDL_Frame => Some(unsafe { &self.extra.frame_extra.fill[index] }),
_ => None,
}
}
pub fn is_device(&self) -> bool {
// NB: must have a non-zero physical address
self.paddr().map_or(false, |v| v != 0)
}
// TODO(sleffler): maybe assert type_ before referencing union members
// NB: return everything as seL4_Word to minimize conversions
// TCB accessors: valid only when type_ == CDL_TCB.
#[inline]
pub fn tcb_ipcbuffer_addr(&self) -> seL4_Word {
unsafe { self.extra.tcb_extra.ipcbuffer_addr }
}
#[inline]
pub fn tcb_priority(&self) -> seL4_Word {
(unsafe { self.extra.tcb_extra.priority }) as seL4_Word
}
#[inline]
pub fn tcb_max_priority(&self) -> seL4_Word {
(unsafe { self.extra.tcb_extra.max_priority }) as seL4_Word
}
#[inline]
pub fn tcb_affinity(&self) -> seL4_Word {
(unsafe { self.extra.tcb_extra.affinity }) as seL4_Word
}
#[inline]
pub fn tcb_domain(&self) -> seL4_Word {
(unsafe { self.extra.tcb_extra.domain }) as seL4_Word
}
#[inline]
pub fn tcb_init(&self) -> *const seL4_Word {
unsafe { self.extra.tcb_extra.init }
}
#[inline]
pub fn tcb_init_sz(&self) -> seL4_Word {
(unsafe { self.extra.tcb_extra.init_sz }) as seL4_Word
}
#[inline]
pub fn tcb_pc(&self) -> seL4_Word {
unsafe { self.extra.tcb_extra.pc }
}
#[inline]
pub fn tcb_sp(&self) -> seL4_Word {
unsafe { self.extra.tcb_extra.sp }
}
// NB: unlike filename(), invalid UTF-8 yields None rather than panic.
pub fn tcb_elf_name(&'a self) -> Option<&'a str> {
unsafe {
if self.extra.tcb_extra.elf_name.is_null() {
None
} else {
cstr_core::CStr::from_ptr(self.extra.tcb_extra.elf_name)
.to_str().ok()
}
}
}
#[inline]
pub fn tcb_resume(&self) -> bool {
unsafe { self.extra.tcb_extra.resume }
}
#[inline]
pub fn tcb_fault_ep(&self) -> seL4_CPtr {
unsafe { self.extra.tcb_extra.fault_ep }
}
// CB accessor: valid only for CDL_CB objects.
#[inline]
pub fn cb_bank(&self) -> seL4_Word {
(unsafe { self.extra.cb_extra.bank }) as seL4_Word
}
// Scheduling-context accessors: valid only for CDL_SchedContext.
#[inline]
pub fn sc_period(&self) -> u64 {
unsafe { self.extra.sc_extra.period }
}
#[inline]
pub fn sc_budget(&self) -> u64 {
unsafe { self.extra.sc_extra.budget }
}
#[inline]
pub fn sc_data(&self) -> seL4_Word {
(unsafe { self.extra.sc_extra.data }) as seL4_Word
}
// Range accessors for "other" objects (e.g. IO ports).
#[inline]
pub fn other_start(&self) -> seL4_Word {
(unsafe { self.extra.other.start }) as seL4_Word
}
#[inline]
pub fn other_end(&self) -> seL4_Word {
(unsafe { self.extra.other.end }) as seL4_Word
}
// MSI interrupt accessors: valid only for CDL_MSIInterrupt.
#[inline]
pub fn msi_pci_bus(&self) -> seL4_Word {
(unsafe { self.extra.msiirq_extra.pci_bus }) as seL4_Word
}
#[inline]
pub fn msi_pci_dev(&self) -> seL4_Word {
(unsafe { self.extra.msiirq_extra.pci_dev }) as seL4_Word
}
#[inline]
pub fn msi_pci_fun(&self) -> seL4_Word {
(unsafe { self.extra.msiirq_extra.pci_fun }) as seL4_Word
}
#[inline]
pub fn msi_handle(&self) -> seL4_Word {
(unsafe { self.extra.msiirq_extra.handle }) as seL4_Word
}
// IOAPIC interrupt accessors: valid only for CDL_IOAPICInterrupt.
#[inline]
pub fn ioapic_ioapic(&self) -> seL4_Word {
(unsafe { self.extra.ioapicirq_extra.ioapic }) as seL4_Word
}
#[inline]
pub fn ioapic_pin(&self) -> seL4_Word {
(unsafe { self.extra.ioapicirq_extra.ioapic_pin }) as seL4_Word
}
#[inline]
pub fn ioapic_level(&self) -> seL4_Word {
(unsafe { self.extra.ioapicirq_extra.level }) as seL4_Word
}
#[inline]
pub fn ioapic_polarity(&self) -> seL4_Word {
(unsafe { self.extra.ioapicirq_extra.polarity }) as seL4_Word
}
// ARM interrupt accessors: valid only for CDL_ARMInterrupt.
#[inline]
pub fn armirq_trigger(&self) -> seL4_Word {
(unsafe { self.extra.armirq_extra.trigger }) as seL4_Word
}
#[inline]
pub fn armirq_target(&self) -> seL4_Word {
(unsafe { self.extra.armirq_extra.target }) as seL4_Word
}
}
// Type-specific payload of a CDL_Object; the valid member is implied by
// CDL_Object::type_ (accessors on CDL_Object select the right one).
#[repr(C)]
#[derive(Copy, Clone)]
pub union CDL_ObjectExtra {
pub tcb_extra: CDL_TCBExtraInfo,
pub sc_extra: CDL_SCExtraInfo,
pub cb_extra: CDL_CBExtraInfo,
pub ioapicirq_extra: CDL_IOAPICIRQExtraInfo,
pub msiirq_extra: CDL_MSIIRQExtraInfo,
pub armirq_extra: CDL_ARMIRQExtraInfo,
pub frame_extra: CDL_FrameExtraInfo,
// Physical address; only defined for untyped objects.
pub paddr: seL4_Word,
// ASID pool assignment.
pub _asid_high: seL4_Word,
pub other: CDL_ObjectExtraOther,
}
// Generic [start, end) range payload (e.g. IO port ranges).
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct CDL_ObjectExtraOther {
pub start: seL4_Word,
pub end: seL4_Word,
}
// Describes the objects a spec derives from one untyped: |num| child
// object ids at |children|.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct CDL_UntypedDerivation {
    pub untyped: CDL_ObjID,
    pub num: seL4_Word,
    pub children: *const CDL_ObjID,
}
impl CDL_UntypedDerivation {
    /// Returns the child object id at |index|, or None when |index| is
    /// out of range.
    pub fn get_child(&self, index: usize) -> Option<CDL_ObjID> {
        if index < self.num {
            let children = unsafe { ::core::slice::from_raw_parts(self.children, self.num) };
            Some(children[index])
        } else {
            None
        }
    }
}
// Top-level capDL specification: object, IRQ, untyped-derivation and
// ASID-slot tables, each a (count, pointer) pair.
#[repr(C)]
#[derive(Debug, Copy, Clone)]
pub struct CDL_Model {
// Object list.
pub num: seL4_Word,
pub objects: *const CDL_Object,
// IRQ routing/assignments.
pub num_irqs: seL4_Word,
pub irqs: *const CDL_ObjID,
// Untyped memory descriptors.
pub num_untyped: seL4_Word,
pub untyped: *const CDL_UntypedDerivation,
// ASID slot number -> ASID pool object mapping.
// NB: asid_slots[0] is unused because it is the rootserver's
// slot which is assigned by the kernel at boot.
pub num_asid_slots: seL4_Word,
pub asid_slots: *const CDL_ObjID,
}
impl<'a> CDL_Model {
// Slice views over the raw (count, pointer) tables.
pub fn obj_slice(&'a self) -> &'a [CDL_Object] {
unsafe { core::slice::from_raw_parts(self.objects, self.num) }
}
pub fn irq_slice(&'a self) -> &'a [CDL_ObjID] {
unsafe { core::slice::from_raw_parts(self.irqs, self.num_irqs) }
}
pub fn untyped_slice(&'a self) -> &'a [CDL_UntypedDerivation] {
unsafe { core::slice::from_raw_parts(self.untyped, self.num_untyped) }
}
pub fn asid_slot_slice(&'a self) -> &'a [CDL_ObjID] {
unsafe { core::slice::from_raw_parts(self.asid_slots, self.num_asid_slots) }
}
// Calculate the space occupied by the capDL specification.
// Sums the fixed tables plus per-object variable data: slot tables,
// name/ELF-name strings (incl. NUL terminators), fill file names, TCB
// init arg arrays and untyped child-id arrays.
pub fn calc_space(&self) -> usize {
let mut total_space =
size_of::<CDL_Model>() +
self.num * size_of::<CDL_Object>() +
self.num_irqs * size_of::<CDL_ObjID>() +
self.num_untyped * size_of::<CDL_UntypedDerivation>() +
self.num_asid_slots * size_of::<CDL_ObjID>();
for obj in self.obj_slice() {
// Object names only exist in debug builds.
#[cfg(feature = "CONFIG_DEBUG_BUILD")]
if !obj.name.is_null() {
total_space += obj.name().len() + 1;
}
total_space += obj.slots.num * size_of::<CDL_CapSlot>();
match obj.r#type() {
CDL_TCB => {
total_space += obj.tcb_init_sz() * size_of::<seL4_Word>();
if let Some(str) = obj.tcb_elf_name() {
total_space += str.len() + 1;
}
}
CDL_Frame => {
// TODO(sleffler): iter over array instead of assuming 1
let frame_fill = obj.frame_fill(0).unwrap();
if frame_fill.type_ == CDL_FrameFillType_t::CDL_FrameFill_FileData {
total_space += frame_fill.get_file_data().filename().len() + 1;
}
}
_ => {}
}
}
for &ut in self.untyped_slice() {
total_space += ut.num * size_of::<CDL_ObjID>();
}
total_space
}
}

View File

@@ -0,0 +1,39 @@
[package]
name = "kata-os-model"
version = "0.1.0"
edition = "2018"
build = "build.rs"
[build-dependencies]
sel4-config = { path = "../sel4-config" }
[build-env]
SEL4_OUT_DIR = "${ROOTDIR}out/kata/kernel"
[features]
default = [
"CONFIG_CAPDL_SHARED_FRAMES", # NB: required for CAmkES
]
CONFIG_ARM_HYPERVISOR_SUPPORT = []
CONFIG_ARM_SMMU = []
CONFIG_CAPDL_LOADER_CC_REGISTERS = []
CONFIG_CAPDL_LOADER_STATIC_ALLOC = []
CONFIG_CAPDL_LOADER_WRITEABLE_PAGES = []
CONFIG_CAPDL_SHARED_FRAMES = []
CONFIG_DEBUG_BUILD = []
CONFIG_KERNEL_MCS = []
CONFIG_PRINTING = []
CONFIG_SMP_SUPPORT = []
CONFIG_VTX = []
[lib]
path = "mod.rs"
[dependencies]
capdl = { path = "../capdl" }
cpio = { git = "https://github.com/rcore-os/cpio" }
cstr_core = "0.2.3"
log = "0.4"
sel4-sys = { path = "../sel4-sys" }
smallvec = "1.2"
static_assertions = "1.1"

View File

@@ -0,0 +1,310 @@
// ARM aarch64 target support.
#![allow(non_camel_case_types)]
use static_assertions::assert_cfg;
assert_cfg!(target_arch = "aarch64");
mod arm;
pub use arm::*;
use crate::KataOsModel;
use capdl::kobject_t::*;
use capdl::CDL_CapType::*;
use capdl::CDL_ObjectType::*;
use capdl::*;
use log::{error, trace};
use sel4_sys::seL4_CapInitThreadCNode;
use sel4_sys::seL4_CapIRQControl;
use sel4_sys::seL4_CPtr;
use sel4_sys::seL4_Error;
use sel4_sys::seL4_IRQControl_Get;
use sel4_sys::seL4_ObjectType::*;
use sel4_sys::seL4_PageBits;
use sel4_sys::seL4_PageDirIndexBits;
use sel4_sys::seL4_PageTableIndexBits;
use sel4_sys::seL4_PUDIndexBits;
use sel4_sys::seL4_Result;
use sel4_sys::seL4_UserContext;
use sel4_sys::seL4_Word;
pub use arm::PAGE_SIZE;
pub const STACK_ALIGNMENT_BYTES: usize = 16;
pub const REG_ARGS: seL4_Word = 4; // Number of regs for passing thread args
// Index-bit widths for the three translation levels below the top-level
// table (aarch64: PUD / PD / PT).
pub const CDL_PT_LEVEL_1_IndexBits: usize = seL4_PUDIndexBits;
pub const CDL_PT_LEVEL_2_IndexBits: usize = seL4_PageDirIndexBits;
pub const CDL_PT_LEVEL_3_IndexBits: usize = seL4_PageTableIndexBits;
// Architecture-independent aliases to enable arch-independent rootserver code
pub use sel4_sys::seL4_ARM_ASIDControl_MakePool as seL4_ASIDControl_MakePool;
pub use sel4_sys::seL4_ARM_ASIDPool_Assign as seL4_ASIDPool_Assign;
pub use sel4_sys::seL4_ARM_PageTable_Map as seL4_PageTable_Map;
pub use sel4_sys::seL4_ARM_Page_GetAddress as seL4_Page_GetAddress;
pub use sel4_sys::seL4_ARM_Page_Map as seL4_Page_Map;
pub use sel4_sys::seL4_ARM_Page_Unmap as seL4_Page_Unmap;
pub use sel4_sys::seL4_ARM_VMAttributes as seL4_VMAttributes;
pub use sel4_sys::seL4_ARM_VMAttributes::Default_VMAttributes as seL4_Default_VMAttributes;
// Low |pow2_bits| bits set.
fn MASK(pow2_bits: usize) -> usize { (1 << pow2_bits) - 1 }
// Page-directory slot covering |vaddr|.
pub fn PD_SLOT(vaddr: usize) -> usize {
(vaddr >> (seL4_PageTableIndexBits + seL4_PageBits)) & MASK(seL4_PageDirIndexBits)
}
// NB: used for tcb_args::maybe_spill_tcb_args
pub fn PT_SLOT(vaddr: usize) -> usize { (vaddr >> seL4_PageBits) & MASK(seL4_PageTableIndexBits) }
// Identifies IRQ objects that potentially need special processing.
pub fn is_irq(type_: CDL_ObjectType) -> bool { type_ == CDL_ARMInterrupt || type_ == CDL_Interrupt }
// Identifies objects that need to be instantiated.
pub fn requires_creation(type_: CDL_ObjectType) -> bool { !is_irq(type_) }
// Mints an IRQ handler cap for |irq| into |free_slot| of the root CNode,
// using the trigger mode recorded in the spec object.
// NOTE(review): seL4_IRQControl_GetTrigger and seL4_WordBits are not in
// the visible import list (only seL4_IRQControl_Get is) -- presumably
// re-exported via `arm::*`; confirm.
pub fn create_irq_cap(irq: CDL_IRQ, obj: &CDL_Object, free_slot: seL4_CPtr) -> seL4_Result {
assert_eq!(obj.r#type(), CDL_ARMInterrupt);
// XXX seL4_IRQControl_GetTriggerCore for NUM_NODES > 1
unsafe {
seL4_IRQControl_GetTrigger(
seL4_CapIRQControl,
irq,
obj.armirq_trigger(),
/*root=*/ seL4_CapInitThreadCNode as usize,
/*index=*/ free_slot,
/*depth=*/ seL4_WordBits as u8,
)
}
}
// Builds the initial register state for a TCB: entry pc, (possibly
// adjusted) stack pointer, and up to REG_ARGS arguments in x0-x3.
// NB: returns a pointer into a static mut scratch buffer, so this is
// not reentrant and the result must be consumed before the next call.
pub fn get_user_context(cdl_tcb: &CDL_Object, sp: seL4_Word) -> *const seL4_UserContext {
#[rustfmt::skip]
static mut regs: seL4_UserContext = seL4_UserContext {
pc: 0, sp: 0, spsr: 0,
x0: 0, x1: 0, x2: 0, x3: 0, x4: 0, x5: 0, x6: 0, x7: 0,
x8: 0, x9: 0, x10: 0, x11: 0, x12: 0, x13: 0, x14: 0, x15: 0,
x16: 0, x17: 0, x18: 0, x19: 0, x20: 0, x21: 0, x22: 0, x23: 0,
x24: 0, x25: 0, x26: 0, x27: 0, x28: 0, x29: 0, x30: 0,
tpidr_el0: 0, tpidrro_el0: 0,
};
assert_eq!(cdl_tcb.r#type(), CDL_TCB);
unsafe {
regs.pc = cdl_tcb.tcb_pc();
regs.sp = sp; // NB: may be adjusted from cdl_tcb.tcb_sp()
let argv = core::slice::from_raw_parts(cdl_tcb.tcb_init(), cdl_tcb.tcb_init_sz());
regs.x0 = if argv.len() > 0 { argv[0] } else { 0 };
regs.x1 = if argv.len() > 1 { argv[1] } else { 0 };
regs.x2 = if argv.len() > 2 { argv[2] } else { 0 };
regs.x3 = if argv.len() > 3 { argv[3] } else { 0 };
// trace!("Start {} with pc {:#x} sp {:#x} argv {:?}", cdl_tcb.name(),
// regs.pc, regs.sp, argv);
&regs as *const seL4_UserContext
}
}
// Returns the size (in bits) for arch-specific object types; logs and
// returns 0 for combinations this arch does not handle.
// NOTE(review): KOBJECT_PAGE_UPPER_DIRECTORY, seL4_HugePageBits and
// seL4_PUDBits are not visible in this file's imports -- presumably
// provided by the `arm` module re-export; confirm.
pub fn kobject_get_size(t: kobject_t, object_size: seL4_Word) -> seL4_Word {
if t == KOBJECT_FRAME && object_size == seL4_HugePageBits {
return object_size;
}
if t == KOBJECT_PAGE_UPPER_DIRECTORY {
return seL4_PUDBits;
}
#[cfg(feature = "CONFIG_KERNEL_MCS")]
if t == KOBJECT_SCHED_CONTEXT {
return core::cmp::max(object_size, sel4_sys::seL4_MinSchedContextBits);
}
error!("Unexpected object: type {:?} size {}", t, object_size);
0
}
// Maps an arch-specific kobject class (+ size, for frames) to the seL4
// object type; logs and returns seL4_InvalidObjectType when unhandled.
pub fn kobject_get_type(t: kobject_t, object_size: seL4_Word) -> seL4_ObjectType {
match t {
KOBJECT_PAGE_GLOBAL_DIRECTORY => {
return seL4_ARM_PageGlobalDirectoryObject;
}
KOBJECT_PAGE_UPPER_DIRECTORY => {
return seL4_ARM_PageUpperDirectoryObject;
}
KOBJECT_FRAME => {
// Only huge pages are arch-specific here; other frame sizes are
// presumably handled by arch-independent code -- confirm.
if object_size == seL4_HugePageBits {
return seL4_ARM_HugePageObject;
}
error!("Unexpected frame size {}", object_size);
}
_ => {}
}
error!("Unexpected object: type {:?} size {}", t, object_size);
seL4_InvalidObjectType
}
impl<'a> KataOsModel<'a> {
// Hook for creating arch-specific objects during object creation;
// aarch64 currently has none, so always returns None (no error).
pub fn create_arch_object(
&mut self,
_obj: &CDL_Object,
_id: CDL_ObjID,
_free_slot: usize,
) -> Option<seL4_Error> {
// CDL_SID objects with CONFIG_ARM_SMU?
None
}
// Populates a thread's vspace starting from its root page table.
// With hyp support + 40-bit PA there are only 3 translation levels,
// so the walk starts one level down.
pub fn init_vspace(&mut self, obj_id: CDL_ObjID) -> seL4_Result {
if cfg!(all(
feature = "CONFIG_ARM_HYPERVISOR_SUPPORT",
feature = "CONFIG_ARM_PA_SIZE_BITS_40"
)) {
self.init_level_1(obj_id, 0, obj_id)
} else {
self.init_level_0(obj_id, 0, obj_id)
}
}
// Maps every frame cap in a level-3 (leaf page table) object; each
// slot covers one page starting at |level_3_base|.
fn init_level_3(
&mut self,
level_3_obj: CDL_ObjID,
level_0_obj: CDL_ObjID,
level_3_base: usize,
) -> seL4_Result {
for slot in self.get_object(level_3_obj).slots_slice() {
let frame_cap = &slot.cap;
self.map_page_frame(
frame_cap,
level_0_obj,
frame_cap.cap_rights().into(),
level_3_base + (slot.slot << seL4_PageBits),
)?;
}
Ok(())
}
// Walks a level-2 (page directory) object: each slot holds either a
// frame cap (large page mapped at this level) or a level-3 page table
// to map and recurse into.
fn init_level_2(
&mut self,
level_0_obj: CDL_ObjID,
level_2_base: usize,
level_2_obj: CDL_ObjID,
) -> seL4_Result {
for slot in self.get_object(level_2_obj).slots_slice() {
let base = level_2_base + (slot.slot << (CDL_PT_LEVEL_3_IndexBits + seL4_PageBits));
let level_3_cap = &slot.cap;
if level_3_cap.r#type() == CDL_FrameCap {
self.map_page_frame(
level_3_cap,
level_0_obj,
level_3_cap.cap_rights().into(),
base,
)?;
} else {
let level_3_obj = level_3_cap.obj_id;
self.map_page_table(level_3_cap, level_0_obj, base)?;
self.init_level_3(level_3_obj, level_0_obj, base)?;
}
}
Ok(())
}
// Walks a level-1 (page upper directory) object: each slot holds either
// a frame cap (huge page mapped at this level) or a level-2 page
// directory to map and recurse into.
fn init_level_1(
    &mut self,
    level_0_obj: CDL_ObjID,
    level_1_base: usize,
    level_1_obj: CDL_ObjID,
) -> seL4_Result {
    for slot in self.get_object(level_1_obj).slots_slice() {
        let base = level_1_base
            + (slot.slot
                << (CDL_PT_LEVEL_2_IndexBits + CDL_PT_LEVEL_3_IndexBits + seL4_PageBits));
        let level_2_cap = &slot.cap;
        if level_2_cap.r#type() == CDL_FrameCap {
            self.map_page_frame(
                level_2_cap,
                level_0_obj,
                level_2_cap.cap_rights().into(),
                base,
            )?;
        } else {
            // NB: the original referenced undefined level_3_cap /
            // level_2_obj here; the slot holds a level-2 table cap.
            let level_2_obj = level_2_cap.obj_id;
            self.map_page_dir(level_2_cap, level_0_obj, base)?;
            self.init_level_2(level_0_obj, base, level_2_obj)?;
        }
    }
    // NB: the original was missing this return value and could not
    // have type-checked.
    Ok(())
}
fn map_page_dir(&self, page_cap: &CDL_Cap, pd_id: CDL_ObjID, vaddr: seL4_Word) -> seL4_Result {
assert_eq!(page_cap.r#type(), CDL_PDCap);
let sel4_page = self.get_orig_cap(page_cap.obj_id);
let sel4_pd = self.get_orig_cap(pd_id);
// trace!(" Map PD {} into {} @{:#x}, vm_attribs={:#x}",
// self.get_object(page_cap.obj_id).name(),
// self.get_object(pd_id).name(),
// vaddr, page_cap.vm_attribs());
let vm_attribs: seL4_VMAttributes = page_cap.vm_attribs().into();
unsafe { seL4_ARM_PageDirectory_Map(sel4_page, sel4_pd, vaddr, vm_attribs) }
}
fn init_level_0(
&mut self,
level_0_obj: CDL_ObjID,
level_0_base: usize,
level_0_obj: CDL_ObjID,
) -> Result<(), seL4_Error> {
for slot in self.get_object(level_0_obj).slots_slice() {
let base = level_0_base
+ (slot.slot
<< (CDL_PT_LEVEL_1_IndexBits
+ CDL_PT_LEVEL_2_IndexBits
+ CDL_PT_LEVEL_3_IndexBits
+ seL4_PageBits));
let level_1_cap = &slot.cap;
self.map_page_upper_dir(level_1_cap, level_0_obj, base)?;
self.init_level_1(level_0_obj, base, level_1_cap.obj_id)?;
}
}
fn map_page_upper_dir(
&self,
page_cap: &CDL_Cap,
pud_id: CDL_ObjID,
vaddr: seL4_Word,
) -> seL4_Result {
assert_eq!(page_cap.r#type(), CDL_PUDCap);
let sel4_page = self.get_orig_cap(page_cap.obj_id);
let sel4_pud = self.get_orig_cap(pud_id);
// trace!(" Map PUD {} into {} @{:#x}, vm_attribs={:#x}",
// self.get_object(page_cap.obj_id).name(),
// self.get_object(pud_id).name(),
// vaddr, page_cap.vm_attribs());
let vm_attribs: seL4_VMAttributes = page_cap.vm_attribs().into();
unsafe { seL4_ARM_PageUpperDirectory_Map(sel4_page, sel4_pud, vaddr, vm_attribs) }
}
pub fn get_cdl_frame_pt(&self, pd: CDL_ObjID, vaddr: usize) -> Option<&'a CDL_Cap> {
let pd_cap = self.get_cdl_frame_pd(pd, vaddr)?;
self.get_spec_object(pd_cap.obj_id)
.get_cap_at(PD_SLOT(vaddr))
}
fn get_cdl_frame_pd(&self, root: CDL_ObjID, vaddr: usize) -> Option<&'a CDL_Cap> {
if cfg!(all(
feature = "CONFIG_ARM_HYPERVISOR_SUPPORT",
feature = "CONFIG_ARM_PA_SIZE_BITS_40"
)) {
self.get_spec_object(root).get_cap_at(PUD_SLOT(vaddr))
} else {
let pud_cap = self.get_cdl_frame_pud(root, vaddr)?;
self.get_spec_object(pud_cap.obj_id)
.get_cap_at(PUD_SLOT(vaddr))
}
}
fn get_cdl_frame_pud(&self, root: CDL_ObjID, vaddr: usize) -> Option<&'a CDL_Cap> {
self.get_spec_object(root).get_cap_at(PGD_SLOT(vaddr))
}
}

View File

@@ -0,0 +1,296 @@
// ARM 32-bit target support.
#![allow(non_camel_case_types)]
use static_assertions::assert_cfg;
assert_cfg!(all(target_arch = "arm", target_pointer_width = "32"));
use crate::KataOsModel;
use capdl::kobject_t::*;
use capdl::CDL_CapType::*;
use capdl::CDL_ObjectType::*;
use capdl::*;
use log::{error, trace};
use sel4_sys::seL4_ARM_Page_CleanInvalidate_Data;
use sel4_sys::seL4_ARM_Page_Map;
use sel4_sys::seL4_ARM_Page_Unify_Instruction;
use sel4_sys::seL4_CapInitThreadCNode;
use sel4_sys::seL4_CapIRQControl;
use sel4_sys::seL4_CPtr;
use sel4_sys::seL4_Error;
use sel4_sys::seL4_IRQControl_Get;
use sel4_sys::seL4_ObjectType::*;
use sel4_sys::seL4_PageBits;
use sel4_sys::seL4_PageDirIndexBits;
use sel4_sys::seL4_PageTableIndexBits;
use sel4_sys::seL4_PUDIndexBits;
use sel4_sys::seL4_Result;
use sel4_sys::seL4_UserContext;
use sel4_sys::seL4_Word;
pub const PAGE_SIZE: usize = 4096;
pub const STACK_ALIGNMENT_BYTES: usize = 16;
pub const REG_ARGS: seL4_Word = 4; // Number of regs for passing thread args
pub const CDL_PT_LEVEL_3_IndexBits: usize = seL4_PageTableIndexBits;
// Architecture-independent aliases to enable arch-independent rootserver code
pub use sel4_sys::seL4_ARM_ASIDControl_MakePool as seL4_ASIDControl_MakePool;
pub use sel4_sys::seL4_ARM_ASIDPool_Assign as seL4_ASIDPool_Assign;
pub use sel4_sys::seL4_ARM_PageTable_Map as seL4_PageTable_Map;
pub use sel4_sys::seL4_ARM_Page_GetAddress as seL4_Page_GetAddress;
// NB: seL4_Page_Map has a wrapper (see below)
pub use sel4_sys::seL4_ARM_Page_Unmap as seL4_Page_Unmap;
pub use sel4_sys::seL4_ARM_VMAttributes as seL4_VMAttributes;
pub use sel4_sys::seL4_ARM_VMAttributes::Default_VMAttributes as seL4_Default_VMAttributes;
/// Arch wrapper for seL4_ARM_Page_Map: pages that cannot be granted execute
/// rights are mapped with the ExecuteNever (NX) attribute, and cache
/// coherence is maintained for uncached mappings (see comment below).
// NB: `vm_attribs` must be a `mut` binding since it is updated below; the
// original declaration omitted `mut` and did not compile.
pub fn seL4_Page_Map(
    sel4_page: seL4_ARM_Page,
    sel4_pd: seL4_ARM_PageTable,
    vaddr: seL4_Word,
    rights: seL4_CapRights,
    mut vm_attribs: seL4_ARM_VMAttributes,
) -> seL4_Result {
    if !rights.get_capAllowGrant() {
        // No Grant right => mark the mapping non-executable.
        vm_attribs |= seL4_ARM_ExecuteNever;
    }
    seL4_ARM_Page_Map(sel4_page, sel4_pd, vaddr, rights, vm_attribs)?;
    // XXX lookup frame_size_bits & page
    // TODO(review): frame_size_bits, memory_region, spec and page are not
    // defined in this scope; this function does not compile as written.
    /* When seL4 creates a new frame object it zeroes the associated memory
     * through a cached kernel mapping. If we are configuring a cached
     * mapping for the target, standard coherence protocols ensure
     * everything works as expected. However, if we are configuring an
     * uncached mapping for the target, the dirty zero data cached from the
     * kernel's mapping is likely flushed to memory at some time in the
     * future causing an unpleasant surprise for the target whose own
     * uncached writes are mysteriously overwritten. To prevent this, we
     * unify the mapping here, flushing the cached data from the kernel's
     * mapping.
     */
    const CHAR_BIT: usize = 8; // XXX bits-per-byte; not in core
    assert!(frame_size_bits <= size_of::<usize>() * CHAR_BIT - 1, "illegal object size");
    let addr = seL4_ARM_Page_GetAddress(sel4_page);
    if addr.paddr >= memory_region[0].start && addr.paddr <= memory_region[0].end {
        if !(vm_attribs & seL4_ARM_PageCacheable) && spec.objects[page].paddr() == 0 {
            // Flush dirty zero-fill data from the kernel's cached mapping.
            seL4_ARM_Page_CleanInvalidate_Data(sel4_page, 0, BIT(frame_size_bits))?;
        }
        if rights.get_capAllowGrant() {
            // Executable mapping: unify I/D caches.
            seL4_ARM_Page_Unify_Instruction(sel4_page, 0, BIT(frame_size_bits))?;
        }
    }
    Ok(())
}
// Bit mask with the low `pow2_bits` bits set.
fn MASK(pow2_bits: usize) -> usize { !(usize::MAX << pow2_bits) }
/// Returns the page-directory slot index covering `vaddr`.
pub fn PD_SLOT(vaddr: usize) -> usize {
    (vaddr >> (seL4_PageTableIndexBits + seL4_PageBits)) & MASK(seL4_PageDirIndexBits)
}
// NB: used for tcb_args::maybe_spill_tcb_args
/// Returns the page-table slot index covering `vaddr`.
pub fn PT_SLOT(vaddr: usize) -> usize { (vaddr >> seL4_PageBits) & MASK(seL4_PageTableIndexBits) }
// Identifies IRQ objects that potentially need special processing.
pub fn is_irq(type_: CDL_ObjectType) -> bool { type_ == CDL_ARMInterrupt || type_ == CDL_Interrupt }
// Identifies objects that need to be instantiated. IRQ "objects" are
// handles on kernel interrupt state, not untyped-backed objects.
pub fn requires_creation(type_: CDL_ObjectType) -> bool { !is_irq(type_) }
/// Obtains an IRQ handler cap for `irq` in cslot `free_slot` of the root
/// CNode. ARM interrupts carry trigger (and, with SMP, target-core)
/// configuration; plain CDL_Interrupt objects use the generic path.
pub fn create_irq_cap(irq: CDL_IRQ, obj: &CDL_Object, free_slot: seL4_CPtr) -> seL4_Error {
    let root = seL4_CapInitThreadCNode;
    let index = free_slot;
    let depth = seL4_WordBits as u8;
    match obj.r#type() {
        // XXX seL4_IRQControl_GetTriggerCore for NUM_NODES > 1
        #[cfg(feature = "CONFIG_SMP_SUPPORT")]
        CDL_ARMInterrupt => {
            // SMP: route the IRQ to the core named in the spec.
            seL4_IRQControl_GetTriggerCore(
                seL4_CapIRQControl,
                irq,
                obj.armirq_trigger(),
                root,
                index,
                depth,
                obj.armirq_target(),
            )
        }
        #[cfg(not(feature = "CONFIG_SMP_SUPPORT"))]
        CDL_ARMInterrupt => {
            // Uniprocessor: only the trigger mode is configurable.
            seL4_IRQControl_GetTrigger(
                seL4_CapIRQControl,
                irq,
                obj.armirq_trigger(),
                root,
                index,
                depth,
            )
        }
        _ => seL4_IRQControl_Get(seL4_CapIRQControl, irq, root, index, depth),
    }
}
/// Builds the initial register set for a TCB: entry point, stack pointer,
/// and up to REG_ARGS thread arguments passed in registers.
///
/// NOTE(review): this file's header says "ARM 32-bit" but these are
/// AArch64 registers (x0..x30, tpidr_el0) — confirm which target is meant.
pub fn get_user_context(cdl_tcb: &CDL_Object, sp: seL4_Word) -> *const seL4_UserContext {
    // Single static buffer: callers must consume the returned pointer
    // before the next call (the rootserver starts threads one at a time).
    #[rustfmt::skip]
    static mut regs: seL4_UserContext = seL4_UserContext {
        pc: 0, sp: 0, spsr: 0,
        x0: 0, x1: 0, x2: 0, x3: 0, x4: 0, x5: 0, x6: 0, x7: 0,
        x8: 0, x9: 0, x10: 0, x11: 0, x12: 0, x13: 0, x14: 0, x15: 0,
        x16: 0, x17: 0, x18: 0, x19: 0, x20: 0, x21: 0, x22: 0, x23: 0,
        x24: 0, x25: 0, x26: 0, x27: 0, x28: 0, x29: 0, x30: 0,
        tpidr_el0: 0, tpidrro_el0: 0,
    };
    assert_eq!(cdl_tcb.r#type(), CDL_TCB);
    unsafe {
        regs.pc = cdl_tcb.tcb_pc();
        regs.sp = sp; // NB: may be adjusted from cdl_tcb.tcb_sp()
        // Thread arguments from the spec go in x0..x3; missing args are 0.
        let argv = core::slice::from_raw_parts(cdl_tcb.tcb_init(), cdl_tcb.tcb_init_sz());
        regs.x0 = if argv.len() > 0 { argv[0] } else { 0 };
        regs.x1 = if argv.len() > 1 { argv[1] } else { 0 };
        regs.x2 = if argv.len() > 2 { argv[2] } else { 0 };
        regs.x3 = if argv.len() > 3 { argv[3] } else { 0 };
        // trace!("Start {} with pc {:#x} sp {:#x} argv {:?}", cdl_tcb.name(),
        //        regs.pc, regs.sp, argv);
        &regs as *const seL4_UserContext
    }
}
/// Maps a (kobject type, spec size) pair to the size-bits value passed to
/// seL4_Untyped_Retype.
pub fn kobject_get_size(t: kobject_t, object_size: seL4_Word) -> seL4_Word {
    // Huge-page frames keep their spec-provided size; other frame sizes
    // fall through to the error below.
    if t == KOBJECT_FRAME && object_size == seL4_HugePageBits {
        return object_size;
    }
    if t == KOBJECT_PAGE_UPPER_DIRECTORY {
        return seL4_PUDBits;
    }
    // With MCS, scheduling contexts have a kernel-imposed minimum size.
    #[cfg(feature = "CONFIG_KERNEL_MCS")]
    if t == KOBJECT_SCHED_CONTEXT {
        return core::cmp::max(object_size, sel4_sys::seL4_MinSchedContextBits);
    }
    error!("Unexpected object: type {:?} size {}", t, object_size);
    // NOTE(review): the RISC-V version returns `object_size` on this path;
    // here 0 is returned — confirm which fallback is intended.
    0
}
/// Maps a (kobject type, size) pair to the seL4 object type used when
/// retyping untyped memory; returns seL4_InvalidObjectType (after logging)
/// for combinations this arch does not support.
pub fn kobject_get_type(t: kobject_t, object_size: seL4_Word) -> seL4_ObjectType {
    match t {
        KOBJECT_PAGE_GLOBAL_DIRECTORY => {
            return seL4_ARM_PageGlobalDirectoryObject;
        }
        KOBJECT_PAGE_UPPER_DIRECTORY => {
            return seL4_ARM_PageUpperDirectoryObject;
        }
        KOBJECT_FRAME => {
            // Only huge-page frames are dispatched here; other sizes log
            // both the frame-specific and the generic error below.
            if object_size == seL4_HugePageBits {
                return seL4_ARM_HugePageObject;
            }
            error!("Unexpected frame size {}", object_size);
        }
        _ => {}
    }
    error!("Unexpected object: type {:?} size {}", t, object_size);
    seL4_InvalidObjectType
}
impl<'a> KataOsModel<'a> {
pub fn create_arch_object(
&mut self,
obj: &CDL_Object,
_id: CDL_ObjID,
free_slot: usize,
) -> Option<seL4_Error> {
match obj.r#type() {
#[cfg(feature = "CONFIG_ARM_SMMU")]
CDL_SID => {
/* There can be multiple sids per context bank, currently only 1
* sid per cb is implemented for the vms. When this gets extended
* we need to decide to add sid number -> cb number map into the
* haskell / python tool or generate the capdl spec so that the
* order remains correct here e.g a list a stream ids followed by
* the cb they are mapped to, the cb condition here (1***) will the
* reset the stream id number back to 0 for the next context bank.
*/
assert!(sid_number <= MAX_STREAM_IDS);
// XXX handle error
seL4_ARM_SIDControl_GetSID(
seL4_CapSMMUSIDControl,
self.sid_number,
seL4_CapInitThreadCNode,
free_slot,
seL4_WordBits as u8,
);
self.sid_number += 1;
Some(seL4_NoError)
}
#[cfg(feature = "CONFIG_ARM_SMMU")]
CDL_CB => {
self.sid_number = 0; //(1***)
Some(seL4_ARM_CBControl_GetCB(
seL4_CapSMMUCBControl,
obj.cb_bank(),
seL4_CapInitThreadCNode,
free_slot,
seL4_WordBits as u8,
))
}
_ => None,
}
}
pub fn init_vspace(&mut self, obj_id: CDL_ObjID) -> seL4_Result {
assert_eq!(self.get_object(obj_id).r#type(), CDL_PD);
// XXX C code does all PD's before PT's, not sure if this works
self.map_page_directory(obj_id)?;
self.map_page_directory_page_tables(obj_id)?;
Ok(())
}
fn map_page_directory(&self, pd_id: CDL_ObjID) {
fn map_page_directory_slot(pd_id: CDL_ObjID, pd_slot: &CDL_CapSlot) {
let page_cap = &pd_slot.cap;
let page_vaddr = pd_slot.slot << (seL4_PageTableIndexBits + seL4_PageBits);
self.map_page(page_cap, pd_id, page_cap.cap_rights(), page_vaddr);
}
for slot in self.spec.get_object(pd_id).slots_slice() {
map_page_directory_slot(pd_id, &slot);
}
}
fn map_page_directory_page_tables(&self, pd: CDL_ObjID) {
fn map_page_table_slots(pd: CDL_ObjID, pd_slot: &CDL_CapSlot) {
fn map_page_table_slot(
pd: CDL_ObjID,
pt: CDL_ObjID,
pt_vaddr: seL4_Word,
pt_slot: &CDL_CapSlot,
) {
let page_cap = &pt_slot.cap;
let page_vaddr = pt_vaddr + (pt_slot.slot << seL4_PageBits);
self.map_page(page_cap, pd, page_cap.cap_rights(), page_vaddr);
}
let page_cap = &pd_slot.cap;
if (page_cap.r#type() == CDL_PTCap) {
let page = page_cap.obj_id;
let page_vaddr = pd_slot.slot << (seL4_PageTableIndexBits + seL4_PageBits);
for slot in self.spec.get_object(page).slots_slice() {
self.map_page_table_slot(pd, page, page_vaddr, &slot);
}
}
}
for slot in self.spec.get_object(pd).slots_slice() {
self.map_page_table_slots(pd, &slot);
}
}
pub fn get_cdl_frame_pt(&mut self, pd: CDL_ObjID, vaddr: usize) -> Option<&'a mut CDL_Cap> {
self.get_spec_object(pd).get_cap_at(PD_SLOT(vaddr))
}
}

View File

@@ -0,0 +1,67 @@
// RISC-V common target support.
#![allow(non_camel_case_types)]
#![allow(dead_code)]
use static_assertions::assert_cfg;
assert_cfg!(any(target_arch = "riscv32", target_arch = "riscv64"));
use capdl::CDL_ObjectType::*;
use capdl::*;
use sel4_sys::seL4_CapRights;
use sel4_sys::seL4_CPtr;
use sel4_sys::seL4_PageBits;
use sel4_sys::seL4_PageTableIndexBits;
use sel4_sys::seL4_Result;
use sel4_sys::seL4_RISCV_Page_Map;
use sel4_sys::seL4_RISCV_VMAttributes;
use sel4_sys::seL4_Word;
pub const PAGE_SIZE: usize = 4096; // Base page size
pub const STACK_ALIGNMENT_BYTES: usize = 16;
pub const REG_ARGS: seL4_Word = 4; // Number of regs for passing thread args
// Architecture-independent aliases to enable arch-independent rootserver code
// TODO(sleffler): maybe move to sel4_sys?
pub use sel4_sys::seL4_PageTableIndexBits as seL4_PageDirIndexBits;
pub use sel4_sys::seL4_RISCV_ASIDControl_MakePool as seL4_ASIDControl_MakePool;
pub use sel4_sys::seL4_RISCV_ASIDPool_Assign as seL4_ASIDPool_Assign;
pub use sel4_sys::seL4_RISCV_PageTable_Map as seL4_PageTable_Map;
pub use sel4_sys::seL4_RISCV_Page_GetAddress as seL4_Page_GetAddress;
pub use sel4_sys::seL4_RISCV_Page_Unmap as seL4_Page_Unmap;
pub use sel4_sys::seL4_RISCV_VMAttributes as seL4_VMAttributes;
pub use sel4_sys::seL4_RISCV_VMAttributes::Default_VMAttributes as seL4_Default_VMAttributes;
/// Arch wrapper for seL4_RISCV_Page_Map: mappings whose rights do not
/// include Grant are forced non-executable via the ExecuteNever attribute.
pub unsafe fn seL4_Page_Map(
    sel4_page: seL4_CPtr,
    sel4_pd: seL4_CPtr,
    vaddr: seL4_Word,
    rights: seL4_CapRights,
    vm_attribs: seL4_VMAttributes,
) -> seL4_Result {
    // Grant right present => executable page, keep the caller's attributes;
    // otherwise substitute ExecuteNever (the NX bit).
    let attribs = if rights.get_capAllowGrant() != 0 {
        vm_attribs
    } else {
        seL4_RISCV_VMAttributes::ExecuteNever
    };
    seL4_RISCV_Page_Map(sel4_page, sel4_pd, vaddr, rights, attribs)
}
// Bit mask with the low `pow2_bits` bits set.
fn MASK(pow2_bits: usize) -> usize { !(usize::MAX << pow2_bits) }
// NB: used to setup copy_addr_pt
/// Returns the page-directory slot index covering `vaddr`.
pub fn PD_SLOT(vaddr: usize) -> usize {
    (vaddr >> (seL4_PageTableIndexBits + seL4_PageBits)) & MASK(seL4_PageDirIndexBits)
}
// NB: used for tcb_args::maybe_spill_tcb_args
/// Returns the page-table slot index covering `vaddr`.
pub fn PT_SLOT(vaddr: usize) -> usize { (vaddr >> seL4_PageBits) & MASK(seL4_PageTableIndexBits) }
// Identifies IRQ objects that potentially need special processing.
pub fn is_irq(type_: CDL_ObjectType) -> bool { type_ == CDL_Interrupt }
// Identifies objects that need to be instantiated. This is overridden
// by architectures that have device objects that are not backed by
// untyped memory (i.e. that need creation).
pub fn requires_creation(type_: CDL_ObjectType) -> bool { !is_irq(type_) }

View File

@@ -0,0 +1,210 @@
// RISC-V 32-bit target support.
#![allow(non_camel_case_types)]
use static_assertions::assert_cfg;
assert_cfg!(target_arch = "riscv32");
mod riscv;
pub use riscv::*;
use crate::KataOsModel;
use capdl::kobject_t::*;
use capdl::CDL_CapType::*;
use capdl::CDL_ObjectType::*;
use capdl::*;
use log::error;
use sel4_sys::seL4_CapInitThreadCNode;
use sel4_sys::seL4_CapIRQControl;
use sel4_sys::seL4_CPtr;
use sel4_sys::seL4_Error;
use sel4_sys::seL4_IRQControl_Get;
use sel4_sys::seL4_LargePageBits;
use sel4_sys::seL4_MinSchedContextBits;
use sel4_sys::seL4_ObjectType;
use sel4_sys::seL4_ObjectType::*;
use sel4_sys::seL4_PageBits;
use sel4_sys::seL4_PageTableBits;
use sel4_sys::seL4_PageTableIndexBits;
use sel4_sys::seL4_Result;
use sel4_sys::seL4_UserContext;
use sel4_sys::seL4_Word;
use sel4_sys::seL4_WordBits;
const CDL_PT_NUM_LEVELS: usize = 2;
// TOOD(sleffler): levels really should be 0 & 1, the names are vestiges of 64-bit support
const CDL_PT_LEVEL_3_IndexBits: usize = seL4_PageTableIndexBits;
// Bit mask with the low `pow2_bits` bits set.
fn MASK(pow2_bits: usize) -> usize { !(usize::MAX << pow2_bits) }
/// Obtains an IRQ handler cap for `irq` in cslot `free_slot` of the root
/// CNode. RISC-V has no per-IRQ trigger configuration, so every interrupt
/// goes through the generic seL4_IRQControl_Get path.
pub fn create_irq_cap(irq: CDL_IRQ, _obj: &CDL_Object, free_slot: seL4_CPtr) -> seL4_Result {
    let root = seL4_CapInitThreadCNode as usize;
    let depth = seL4_WordBits as u8;
    unsafe { seL4_IRQControl_Get(seL4_CapIRQControl, irq, root, free_slot, depth) }
}
/// Builds the initial register set for a TCB: entry point, stack pointer,
/// and up to REG_ARGS thread arguments in a0..a3 (RISC-V calling convention).
pub fn get_user_context(cdl_tcb: &CDL_Object, sp: seL4_Word) -> *const seL4_UserContext {
    // Single static buffer: callers must consume the returned pointer
    // before the next call (the rootserver starts threads one at a time).
    #[rustfmt::skip]
    static mut regs: seL4_UserContext = seL4_UserContext {
        pc: 0, ra: 0, sp: 0, gp: 0,
        s0: 0, s1: 0, s2: 0, s3: 0, s4: 0, s5: 0,
        s6: 0, s7: 0, s8: 0, s9: 0, s10: 0, s11: 0,
        a0: 0, a1: 0, a2: 0, a3: 0, a4: 0, a5: 0, a6: 0, a7: 0,
        t0: 0, t1: 0, t2: 0, t3: 0, t4: 0, t5: 0, t6: 0, tp: 0,
    };
    assert_eq!(cdl_tcb.r#type(), CDL_TCB);
    unsafe {
        regs.pc = cdl_tcb.tcb_pc();
        regs.sp = sp; // NB: may be adjusted from cdl_tcb.tcb_sp()
        // Thread arguments from the spec; missing args default to 0.
        let argv = core::slice::from_raw_parts(cdl_tcb.tcb_init(), cdl_tcb.tcb_init_sz());
        regs.a0 = if argv.len() > 0 { argv[0] } else { 0 };
        regs.a1 = if argv.len() > 1 { argv[1] } else { 0 };
        regs.a2 = if argv.len() > 2 { argv[2] } else { 0 };
        regs.a3 = if argv.len() > 3 { argv[3] } else { 0 };
        // trace!("Start {} with pc {:#x} sp {:#x} argv {:?}", cdl_tcb.name(),
        //        regs.pc, regs.sp, argv);
        &regs as *const seL4_UserContext
    }
}
/// Maps a (kobject type, spec size) pair to the size-bits value passed to
/// seL4_Untyped_Retype. Unknown combinations are logged and the spec size
/// is passed through unchanged.
pub fn kobject_get_size(t: kobject_t, object_size: seL4_Word) -> seL4_Word {
    match t {
        KOBJECT_FRAME => {
            // 4K and mega-page frames keep their spec-provided size.
            if object_size == seL4_PageBits || object_size == seL4_LargePageBits {
                return object_size;
            }
        }
        KOBJECT_PAGE_TABLE => {
            return seL4_PageTableBits;
        }
        KOBJECT_SCHED_CONTEXT => {
            // MCS scheduling contexts have a kernel-imposed minimum size.
            return core::cmp::max(object_size, seL4_MinSchedContextBits);
        }
        _ => {}
    }
    error!("Unexpected object: type {:?} size {}", t, object_size);
    object_size
}
/// Maps a (kobject type, size) pair to the seL4 object type used when
/// retyping untyped memory. RISC-V uses a single PageTable object type for
/// every level of the paging structure.
pub fn kobject_get_type(t: kobject_t, object_size: seL4_Word) -> seL4_ObjectType {
    match t {
        KOBJECT_PAGE_DIRECTORY => {
            return seL4_RISCV_PageTableObject;
        }
        KOBJECT_PAGE_TABLE => {
            return seL4_RISCV_PageTableObject;
        }
        KOBJECT_FRAME => {
            if object_size == seL4_PageBits {
                return seL4_RISCV_4K_Page;
            }
            if object_size == seL4_LargePageBits {
                return seL4_RISCV_Mega_Page;
            }
        }
        _ => {}
    }
    error!("Unexpected object: type {:?} size {}", t, object_size);
    seL4_LastObjectType // XXX not right
}
impl<'a> KataOsModel<'a> {
    /// Arch-specific object creation hook; RISC-V has no objects that need
    /// special handling.
    pub fn create_arch_object(
        &mut self,
        _obj: &CDL_Object,
        _id: CDL_ObjID,
        _free_slot: usize,
    ) -> Option<seL4_Error> {
        // No architecture-specific overrides.
        None
    }
    /// Populates the two-level Sv32 VSpace rooted at |obj_id| (the root
    /// object doubles as the level-2 table in this naming scheme).
    pub fn init_vspace(&mut self, obj_id: CDL_ObjID) -> seL4_Result {
        self.init_level_2(obj_id, 0, obj_id)
    }
    /// Returns the spec cap for the page table covering |vaddr| in |pd|.
    pub fn get_cdl_frame_pt(&self, pd: CDL_ObjID, vaddr: usize) -> Option<&'a CDL_Cap> {
        self.get_cdl_frame_pt_recurse(pd, vaddr, 2)
    }
    /**
     * Do a recursive traversal from the top to bottom of a page table structure to
     * get the cap for a particular page table object for a certain vaddr at a certain
     * level. The level variable treats level==CDL_PT_NUM_LEVELS as the root page table
     * object, and level 0 as the bottom level 4k frames.
     */
    fn get_cdl_frame_pt_recurse(
        &self,
        root: CDL_ObjID,
        vaddr: usize,
        level: usize,
    ) -> Option<&'a CDL_Cap> {
        // Slot index covering |vaddr| within a table at |level|.
        fn PT_LEVEL_SLOT(vaddr: usize, level: usize) -> usize {
            (vaddr >> ((seL4_PageTableIndexBits * (level - 1)) + seL4_PageBits))
                & MASK(seL4_PageTableIndexBits)
        }
        // Walk down from the root; at the top level the object is |root|
        // itself, otherwise resolve the parent table one level up first.
        let obj_id = if level < CDL_PT_NUM_LEVELS {
            self.get_cdl_frame_pt_recurse(root, vaddr, level + 1)?
                .obj_id
        } else {
            root
        };
        self.get_object(obj_id)
            .get_cap_at(PT_LEVEL_SLOT(vaddr, level))
    }
    /// Maps the 4K frames found in a bottom-level (level 3) table.
    fn init_level_3(
        &mut self,
        level_3_obj: CDL_ObjID,
        level_0_obj: CDL_ObjID,
        level_3_base: usize,
    ) -> seL4_Result {
        for slot in self.get_object(level_3_obj).slots_slice() {
            let frame_cap = &slot.cap;
            self.map_page_frame(
                frame_cap,
                level_0_obj,
                frame_cap.cap_rights().into(),
                level_3_base + (slot.slot << seL4_PageBits),
            )?;
        }
        Ok(())
    }
    /// Maps level-3 tables — or mega-page frames mapped at this level —
    /// found in the (root) level-2 table.
    fn init_level_2(
        &mut self,
        level_0_obj: CDL_ObjID,
        level_2_base: usize,
        level_2_obj: CDL_ObjID,
    ) -> seL4_Result {
        for slot in self.get_object(level_2_obj).slots_slice() {
            let base = level_2_base + (slot.slot << (CDL_PT_LEVEL_3_IndexBits + seL4_PageBits));
            let level_3_cap = &slot.cap;
            if level_3_cap.r#type() == CDL_FrameCap {
                // Mega page mapped directly at this level.
                self.map_page_frame(
                    level_3_cap,
                    level_0_obj,
                    level_3_cap.cap_rights().into(),
                    base,
                )?;
            } else {
                let level_3_obj = level_3_cap.obj_id;
                self.map_page_table(level_3_cap, level_0_obj, base)?;
                self.init_level_3(level_3_obj, level_0_obj, base)?;
            }
        }
        Ok(())
    }
}

View File

@@ -0,0 +1,161 @@
// Intel x86 32-bit target support.
use static_assertions::assert_cfg;
assert_cfg!(target_arch = "x86");
use log::{debug, error, info, trace, warn};
// Base page size and thread stack alignment for x86-32.
pub const PAGE_SIZE: usize = 4096;
pub const STACK_ALIGNMENT_BYTES: usize = 16;
// XXX really?
// Number of thread arguments passed in registers: 4 with the
// register-parameter calling convention, otherwise all on the stack.
cfg_if! {
    if #[cfg(feature = "CONFIG_CAPDL_LOADER_CC_REGISTERS")] {
        pub const REG_ARGS: seL4_Word = 4;
    } else {
        pub const REG_ARGS: seL4_Word = 0;
    }
}
// Identifies IRQ objects that potentially need special processing.
// x86 has IOAPIC- and MSI-sourced interrupts in addition to the generic kind.
pub fn is_irq(type_: CDL_ObjectType) -> bool {
    type_ == CDL_IOAPICInterrupt || type_ == CDL_MSIInterrupt || type_ == CDL_Interrupt
}
// Identifies objects that need to be instantiated. IRQ "objects" are
// handles on kernel interrupt state, not untyped-backed objects.
pub fn requires_creation(type_: CDL_ObjectType) -> bool { !is_irq(type_) }
/// Obtains an IRQ handler cap for `irq` in cslot `free_slot` of the root
/// CNode, using the IOAPIC or MSI configuration carried by the spec object
/// when present.
pub fn create_irq_cap(irq: CDL_IRQ, obj: &CDL_Object, free_slot: seL4_CPtr) -> seL4_Error {
    let root = seL4_CapInitThreadCNode;
    let index = free_slot;
    let depth = seL4_WordBits as u8;
    match obj.r#type() {
        CDL_IOAPICInterrupt => {
            // Route via the named IOAPIC pin with level/polarity settings.
            seL4_IRQControl_GetIOAPIC(
                seL4_CapIRQControl,
                root,
                index,
                depth,
                obj.ioapicirq_ioapic(),
                obj.ioapicirq_pin(),
                obj.ioapicirq_level(),
                obj.ioapicirq_polarity(),
                irq,
            )
        }
        CDL_MSIInterrupt => {
            // Message-signaled interrupt bound to a PCI function.
            seL4_IRQControl_GetMSI(
                seL4_CapIRQControl,
                root,
                index,
                depth,
                obj.msiirq_pci_bus(),
                obj.msiirq_pci_dev(),
                obj.msiirq_pci_fun(),
                obj.msiirq_handle(),
                irq,
            )
        }
        _ => seL4_IRQControl_Get(seL4_CapIRQControl, irq, root, index, depth),
    }
}
/// Builds the initial register set for a TCB: entry point, stack pointer,
/// and thread arguments (only meaningful when REG_ARGS > 0).
pub fn get_user_context(cdl_tcb: &CDL_Object, sp: seL4_Word) -> *const seL4_UserContext {
    // Single static buffer: callers must consume the returned pointer
    // before the next call (the rootserver starts threads one at a time).
    #[rustfmt::skip]
    static mut regs: seL4_UserContext = seL4_UserContext {
        eip: 0, esp: 0, eflags: 0,
        eax: 0, ebx: 0, ecx: 0, edx: 0,
        esi: 0, edi: 0,
        ebp: 0, tls_base: 0, fs: 0, gs: 0,
    };
    assert_eq!(cdl_tcb.r#type(), CDL_TCB);
    unsafe {
        regs.eip = cdl_tcb.tcb_pc();
        regs.esp = sp; // NB: may be adjusted from cdl_tcb.tcb_sp()
        // XXX REG_ARGS == 0
        // Register args are set anyway; with the default (stack) calling
        // convention they are ignored by the thread's entry code.
        let argv = core::slice::from_raw_parts(cdl_tcb.tcb_init(), cdl_tcb.tcb_init_sz());
        regs.eax = if argv.len() > 0 { argv[0] } else { 0 };
        regs.ebx = if argv.len() > 1 { argv[1] } else { 0 };
        regs.ecx = if argv.len() > 2 { argv[2] } else { 0 };
        regs.edx = if argv.len() > 3 { argv[3] } else { 0 };
        // trace!("Start {} with eip {:#x} esp {:#x} argv {:?}", cdl_tcb.name(),
        //        regs.eip, regs.esp, argv);
        &regs as *const seL4_UserContext
    }
}
impl<'a> KataOsModel<'a> {
    /// Arch-specific object creation hook: IOPort caps are issued from the
    /// IOPortControl capability rather than retyped from untyped memory.
    pub fn create_arch_object(
        &mut self,
        obj: &CDL_Object,
        _id: CDL_ObjID,
        free_slot: usize,
    ) -> Option<seL4_Error> {
        if obj.r#type() == CDL_IOPorts {
            // XXX handle error
            seL4_X86_IOPortControl_Issue(
                seL4_CapIOPortControl,
                obj.other_start(),
                obj.other_end(),
                seL4_CapInitThreadCNode,
                free_slot,
                seL4_WordBits as u8,
            );
            Some(seL4_NoError)
        } else {
            None
        }
    }

    /// Populates the two-level x86-32 VSpace: first the slots hanging
    /// directly off the page directory, then the contents of page tables.
    pub fn init_vspace(&mut self, obj_id: CDL_ObjID) -> seL4_Result {
        assert_eq!(self.get_object(obj_id).r#type(), CDL_PD);
        // XXX C code does all PD's before PT's, not sure if this works
        self.map_page_directory(obj_id)?;
        self.map_page_directory_page_tables(obj_id)?;
        Ok(())
    }

    /// Maps each cap found in the page directory (4M pages or page tables).
    // NB: fixed: the original returned () but was used with `?`, and used
    // nested fns that captured `self` (not allowed in Rust).
    fn map_page_directory(&self, pd_id: CDL_ObjID) -> seL4_Result {
        for pd_slot in self.spec.get_object(pd_id).slots_slice() {
            let page_cap = &pd_slot.cap;
            let page_vaddr = pd_slot.slot << (seL4_PageTableIndexBits + seL4_PageBits);
            self.map_page(page_cap, pd_id, page_cap.cap_rights(), page_vaddr)?;
        }
        Ok(())
    }

    /// Maps the frames referenced by each page table in the page directory.
    fn map_page_directory_page_tables(&self, pd: CDL_ObjID) -> seL4_Result {
        for pd_slot in self.spec.get_object(pd).slots_slice() {
            let page_cap = &pd_slot.cap;
            if page_cap.r#type() == CDL_PTCap {
                let pt = page_cap.obj_id;
                let pt_vaddr = pd_slot.slot << (seL4_PageTableIndexBits + seL4_PageBits);
                for pt_slot in self.spec.get_object(pt).slots_slice() {
                    let frame_cap = &pt_slot.cap;
                    let frame_vaddr = pt_vaddr + (pt_slot.slot << seL4_PageBits);
                    self.map_page(frame_cap, pd, frame_cap.cap_rights(), frame_vaddr)?;
                }
            }
        }
        Ok(())
    }
}

View File

@@ -0,0 +1,214 @@
// Intel x86 64-bit target support.
use static_assertions::assert_cfg;
essert_cfg!(target_arch = "x86_64");
mod x86;
pub use x86::*;
use log::{debug, error, info, trace, warn};
pub use x86::create_irq_cap;
pub use x86::is_irq;
pub use x86::requires_creation;
pub use x86::PAGE_SIZE;
pub use x86::REG_ARGS;
pub use x86::STACK_ALIGNMENT_BYTES;
/// Builds the initial register set for a TCB: entry point, stack pointer,
/// and up to four thread arguments in rdi/rsi/rdx/rcx (SysV ordering).
pub fn get_user_context(cdl_tcb: &CDL_Object, sp: seL4_Word) -> *const seL4_UserContext {
    // Single static buffer: callers must consume the returned pointer
    // before the next call (the rootserver starts threads one at a time).
    #[rustfmt::skip]
    static mut regs: seL4_UserContext = seL4_UserContext {
        rip: 0, rsp: 0, rflags: 0,
        rax: 0, rbx: 0, rcx: 0, rdx: 0,
        rsi: 0, rdi: 0, rbp: 0,
        r8: 0, r9: 0, r10: 0, r11: 0, r12: 0, r13: 0, r14: 0, r15: 0,
        tls_base: 0,
    };
    assert_eq!(cdl_tcb.r#type(), CDL_TCB);
    unsafe {
        regs.rip = cdl_tcb.tcb_pc();
        regs.rsp = sp; // NB: may be adjusted from cdl_tcb.tcb_sp()
        let argv = core::slice::from_raw_parts(cdl_tcb.tcb_init(), cdl_tcb.tcb_init_sz());
        // NB: fixed: was `argc` (undefined); use argv.len() as on other arches.
        regs.rdi = if argv.len() > 0 { argv[0] } else { 0 };
        regs.rsi = if argv.len() > 1 { argv[1] } else { 0 };
        regs.rdx = if argv.len() > 2 { argv[2] } else { 0 };
        regs.rcx = if argv.len() > 3 { argv[3] } else { 0 };
        // trace!("Start {} with rip {:#x} rsp {:#x} argv {:?}", cdl_tcb.name(),
        //        regs.rip, regs.rsp, argv);
        &regs as *const seL4_UserContext
    }
}
impl<'a> KataOsModel<'a> {
pub fn create_arch_object(
&mut self,
obj: &CDL_Object,
_id: CDL_ObjID,
free_slot: usize,
) -> Option<seL4_Error> {
if obj.r#type() == CDL_IOPorts {
// XXX handle error
seL4_X86_IOPortControl_Issue(
seL4_CapIOPortControl,
obj.other_start(),
obj.other_end(),
seL4_CapInitThreadCNode,
free_slot,
seL4_WordBits as u8,
);
Some(seL4_NoError)
} else {
None
}
}
pub fn init_vspace(&mut self, obj_id: CDL_ObjID) -> seL4_Result {
assert_eq!(self.get_object(obj_id).r#type(), CDL_PD);
self.init_level_0(obj_id, 0, obj_id)?;
Ok(())
}
fn init_level_3(
&mut self,
level_3_obj: CDL_ObjID,
level_0_obj: CDL_ObjID,
level_3_base: usize,
) -> seL4_Result {
for slot in self.get_object(level_3_obj).slots_slice() {
let frame_cap = &slot.cap;
self.map_page_frame(
frame_cap,
level_0_obj,
frame_cap.cap_rights().into(),
level_3_base + (slot.slot << seL4_PageBits),
)?;
}
Ok(())
}
fn init_level_2(
&mut self,
level_0_obj: CDL_ObjID,
level_2_base: usize,
level_2_obj: CDL_ObjID,
) -> seL4_Result {
for slot in self.get_object(level_2_obj).slots_slice() {
let base = level_2_base + (slot.slot << (CDL_PT_LEVEL_3_IndexBits + seL4_PageBits));
let level_3_cap = &slot.cap;
if level_3_cap.r#type() == CDL_FrameCap {
self.map_page_frame(
level_3_cap,
level_0_obj,
level_3_cap.cap_rights().into(),
base,
)?;
} else {
let level_3_obj = level_3_cap.obj_id;
self.map_page_table(level_3_cap, level_0_obj, base)?;
self.init_level_3(level_3_obj, level_0_obj, base)?;
}
}
Ok(())
}
fn init_level_1(
&mut self,
level_0_obj: CDL_ObjID,
level_1_base: usize,
level_1_obj: CDL_ObjID,
) -> seL4_Result {
for slot in self.get_object(level_1_obj).slots_slice() {
let base = level_1_base
+ (slot.slot
<< (CDL_PT_LEVEL_2_IndexBits + CDL_PT_LEVEL_3_IndexBits + seL4_PageBits));
let level_2_cap = &slot.cap;
if level_2_cap.r#type() == CDL_FrameCap {
self.map_page_frame(
level_2_cap,
level_0_obj,
level_2_cap.cap_rights().into(),
base,
)?;
} else {
let level_3_obj = level_3_cap.obj_id;
self.map_page_dir(level_2_cap, level_0_obj, base)?;
self.init_level_2(level_0_obj, base, level_2_obj.obj_id)?;
}
}
}
fn map_page_dir(&self, page_cap: &CDL_Cap, pd_id: CDL_ObjID, vaddr: seL4_Word) -> seL4_Result {
assert_eq!(page_cap.r#type(), CDL_PDCap);
let sel4_page = self.get_orig_cap(page_cap.obj_id);
let sel4_pd = self.get_orig_cap(pd_id);
// trace!(" Map PD {} into {} @{:#x}, vm_attribs={:#x}",
// self.get_object(page_cap.obj_id).name(),
// self.get_object(pd_id).name(),
// vaddr, page_cap.vm_attribs());
let vm_attribs: seL4_VMAttributes = page_cap.vm_attribs().into();
unsafe { seL4_X86_PageDirectory_Map(sel4_page, sel4_pd, vaddr, vm_attribs) }
}
fn init_level_0(
&mut self,
level_0_obj: CDL_ObjID,
level_0_base: usize,
level_0_obj: CDL_ObjID,
) -> Result<(), seL4_Error> {
for slot in self.get_object(level_0_obj).slots_slice() {
let base = level_0_base
+ (slot.slot
<< (CDL_PT_LEVEL_1_IndexBits
+ CDL_PT_LEVEL_2_IndexBits
+ CDL_PT_LEVEL_3_IndexBits
+ seL4_PageBits));
let level_1_cap = &slot.cap;
self.map_page_dir_pt(level_1_cap, level_0_obj, base)?;
self.init_level_1(level_0_obj, base, level_1_cap.obj_id)?;
}
}
fn map_page_dir_pt(
&self,
page_cap: &CDL_Cap,
pud_id: CDL_ObjID,
vaddr: seL4_Word,
) -> seL4_Result {
assert_eq!(page_cap.r#type(), CDL_PDPTCap);
let sel4_page = self.get_orig_cap(page_cap.obj_id);
let sel4_pud = self.get_orig_cap(pud_id);
// trace!(" Map PDPT {} into {} @{:#x}, vm_attribs={:#x}",
// self.get_object(page_cap.obj_id).name(),
// self.get_object(pud_id).name(),
// vaddr, page_cap.vm_attribs());
let vm_attribs: seL4_VMAttributes = page_cap.vm_attribs().into();
unsafe { seL4_X86_PDPT_Map(sel4_page, sel4_pud, vaddr, vm_attribs) }
}
fn get_cdl_frame_pt(&self, pd: CDL_ObjID, vaddr: usize) -> Option<&'a CDL_Cap> {
let pd_cap = self.get_cdl_frame_pd_mut(pd, vaddr)?;
self.get_spec_object(pd_cap.obj_id)
.get_cap_at(PD_SLOT(vaddr))
}
fn get_cdl_frame_pd(&self, root: CDL_ObjID, vaddr: usize) -> Option<&'a CDL_Cap> {
fn get_cdl_frame_pdpt(&self, root: CDL_ObjID, vaddr: usize) -> Option<&'a CDL_Cap> {
self.get_spec_object(root).get_cap_at_mut(PML4_SLOT(vaddr))
}
let pdpt_cap = self.get_cdl_frame_pdpt_mut(root, vaddr)?;
self.get_spec_object(pdpt_cap.obj_id)
.get_cap_at(PDPT_SLOT(vaddr))
}
}

View File

@@ -0,0 +1,21 @@
extern crate sel4_config;
use std::env;
/// Build script: locates the seL4 kernel build and forwards its config
/// settings as cargo features.
fn main() {
    // If SEL4_OUT_DIR is not set we expect the kernel build at a fixed
    // location relative to the ROOTDIR env variable.
    println!("SEL4_OUT_DIR {:?}", env::var("SEL4_OUT_DIR"));
    let sel4_out_dir = env::var("SEL4_OUT_DIR").unwrap_or_else(|_| {
        format!(
            "{}/out/kata/kernel",
            // NB: fail with an actionable message instead of a bare unwrap.
            env::var("ROOTDIR").expect("neither SEL4_OUT_DIR nor ROOTDIR is set")
        )
    });
    println!("sel4_out_dir {}", sel4_out_dir);
    // Dredge seL4 kernel config for settings we need as features to generate
    // correct code: e.g. CONFIG_KERNEL_MCS enables MCS support which changes
    // the system call numbering.
    let features = sel4_config::get_sel4_features(&sel4_out_dir);
    println!("features={:?}", features);
    for feature in features {
        // Expose each kernel config option as a cfg feature.
        println!("cargo:rustc-cfg=feature=\"{}\"", feature);
    }
}

View File

@@ -0,0 +1,347 @@
// Dynamic Object Allocation.
use crate::KataOsModel;
use capdl::kobject_t::KOBJECT_FRAME;
use capdl::CDL_ObjectType::*;
use capdl::*;
use log::{debug, trace};
use smallvec::SmallVec;
use sel4_sys::seL4_BootInfo;
use sel4_sys::seL4_CapASIDControl;
use sel4_sys::seL4_CapInitThreadCNode;
use sel4_sys::seL4_CapRights;
use sel4_sys::seL4_CNode_Copy;
use sel4_sys::seL4_CNode_Delete;
use sel4_sys::seL4_CNode_Move;
use sel4_sys::seL4_CPtr;
use sel4_sys::seL4_Error;
use sel4_sys::seL4_ObjectType::*;
use sel4_sys::seL4_ObjectType;
use sel4_sys::seL4_PageBits;
use sel4_sys::seL4_Result;
use sel4_sys::seL4_UntypedDesc;
use sel4_sys::seL4_Untyped_Retype;
use sel4_sys::seL4_Word;
use sel4_sys::seL4_WordBits;
use crate::arch::kobject_get_type;
use crate::arch::requires_creation;
use crate::arch::seL4_ASIDControl_MakePool;
use crate::arch::seL4_Page_GetAddress;
use static_assertions::assert_cfg;
assert_cfg!(not(feature = "CONFIG_CAPDL_LOADER_STATIC_ALLOC"));
// Returns the word with only bit `bit_num` set, i.e. 2^bit_num.
fn BIT(bit_num: usize) -> usize { 1usize << bit_num }
impl<'a> KataOsModel<'a> {
    // Verify the untypeds in the model correspond to the contents of bootinfo.
    /// With dynamic object allocation the capDL spec must not carry a
    /// static untyped list; a non-zero count means the spec was generated
    /// for the static-allocation loader and is incompatible.
    pub fn check_untypeds(&self) -> seL4_Result {
        assert_eq!(
            self.spec.num_untyped, 0,
            "capdl has static obj alloc but rootserver setup for dynamic"
        );
        Ok(())
    }
// Create objects using a simple allocator. This relies on capDL-tool
// grouping device objects by address and sorting non-device objects
// by descending size to minimize fragmentation. See CapDL/PrintC.hs.
pub fn create_objects(&mut self) -> seL4_Result {
// Sort untypeds from largest to smallest.
let num_normal_untypes = self.sort_untypeds(self.bootinfo);
let mut ut_index = 0; // index into untypeds
// Collect the roots in a local SmallVec so we can dedup entries
// before we stash them in self.vpsace_roots. This minimizes the
// possibility of vpsace_roots spilling to the heap.
let mut roots = SmallVec::new();
// First, allocate most objects and record the cslot locations.
// The exception is ASIDPools, where create_object only allocates
// the backing untypeds.
let mut free_slot_index = 0;
for (obj_id, obj) in self.spec.obj_slice().iter()
.enumerate()
.filter(|(_, obj)| requires_creation(obj.r#type()))
{
let free_slot = self.free_slot_start + free_slot_index;
// trace!(
// "Creating object {} in slot {} from untyped {:#x}...",
// obj.name(),
// free_slot,
// self.state.get_untyped_cptr(ut_index)
// );
// NB: create_object may use free_slot + 1 and free_slot + 2
while let Err(e) =
self.create_object(obj, obj_id, self.state.get_untyped_cptr(ut_index), free_slot)
{
debug!("create error {:?}", e);
if e != seL4_Error::seL4_NotEnoughMemory {
panic!("Untyped retype failed, error {:?}", e);
}
// This untyped is exhausted, go to the next entry.
ut_index += 1;
if ut_index >= num_normal_untypes {
panic!("Out of untyped memory.");
}
}
// Capture VSpace roots for later use.
if obj.r#type() == CDL_TCB {
if let Some(root_cap) = obj.get_cap_at(CDL_TCB_VTable_Slot) {
roots.push(root_cap.obj_id);
}
}
// Record the cslot assigned to the object.
self.set_orig_cap(obj_id, free_slot);
free_slot_index += 1;
}
// Now setup the backing untypeds for the ASID pools. This is
// done in the order given by the ASID slot allocation policy.
// This fixes the layout inside the kernel's ASID table, which
// ensures consistency with verification models.
for asid_high in 1..self.spec.num_asid_slots {
let obj_id = self.get_asid(asid_high).unwrap();
let asid_ut = self.get_orig_cap(obj_id);
let asid_slot = self.free_slot_start + free_slot_index;
trace!(
"Create ASID pool {} asid_slot={} asid_ut={:#x}",
self.get_object(obj_id).name(),
asid_slot,
asid_ut
);
unsafe {
seL4_ASIDControl_MakePool(
seL4_CapASIDControl,
asid_ut,
seL4_CapInitThreadCNode,
asid_slot,
seL4_WordBits as u8,
)
}?;
// update to point to our new ASID pool
self.set_orig_cap(obj_id, asid_slot);
free_slot_index += 1;
}
// Update the free slot to go past all the objects we just made.
self.free_slot_start += free_slot_index;
// Stash the VSpace roots.
roots.sort();
roots.dedup();
self.vspace_roots = roots;
Ok(())
}
// Sort untyped objects from largest to smallest. This ensures that
// fragmentation is minimized if the objects are also sorted, largest
// to smallest, during creation.
fn sort_untypeds(&mut self, bootinfo: &seL4_BootInfo) -> usize {
let untyped_start = bootinfo.untyped.start;
let untyped_end = bootinfo.untyped.end;
let untypedList = unsafe { self.bootinfo.untyped_descs() };
// Count how many non-device untypeds there are of each size.
let mut count: [usize; seL4_WordBits] = [0; seL4_WordBits];
for ut in untypedList {
if !ut.is_device() {
count[ut.size_bits()] += 1;
}
}
// Calculate the starting index for each untyped.
let mut total: seL4_Word = 0;
for size in (1..seL4_WordBits).rev() {
let oldCount = count[size];
count[size] = total;
total += oldCount;
}
// Store untypeds in untyped_cptrs array.
let mut num_normal_untypes = 0usize;
for untyped_index in 0..(untyped_end - untyped_start) {
let ut = &untypedList[untyped_index];
if !ut.is_device() {
let index = ut.size_bits();
// trace!("Untyped {:3} (cptr={:#x}) (addr={:#x}) is of size {:2}. Placing in slot {}...",
// untyped_index, untyped_start + untyped_index, ut.paddr, index, count[index]);
self.state
.set_untyped_cptr(count[index], untyped_start + untyped_index);
count[index] += 1;
num_normal_untypes += 1;
} else {
// trace!("Untyped {:3} (cptr={:#x}) (addr={:#x}) is of size {:2}. Skipping as it is device",
// untyped_index, untyped_start + untyped_index, ut.paddr, ut.size_bits());
}
}
num_normal_untypes
}
pub fn find_device_object(
&self,
free_slot: seL4_CPtr,
_untyped_index: usize,
sel4_type: seL4_ObjectType,
obj_size_bits: usize,
paddr: seL4_Word,
obj_id: CDL_ObjID,
) -> seL4_Result {
// See if an overlapping object was already created, can only do this
// for frames. Any overlapping object will be the immediately preceding
// one since objects are created in order of physical address.
if sel4_type != seL4_UntypedObject && obj_id > 0 {
// NB: if obj_id is -1 (invalid) then prev will also be
// out-of-range in get_object and an assert will trip.
let prev = obj_id - 1;
let prev_obj = self.get_object(prev);
if prev_obj.r#type() == CDL_Frame
&& prev_obj.paddr() == Some(paddr)
&& prev_obj.size_bits() == obj_size_bits
{
debug!(
"Copy overlapping object {}'s cap {:#x}",
prev_obj.name(),
self.get_orig_cap(prev)
);
let seL4_AllRights = seL4_CapRights::new(
/*grant_reply=*/ 1, /*grant=*/ 1, /*read=*/ 1, /*write=*/ 1,
);
// Copy the overlapping object's capability.
return unsafe {
seL4_CNode_Copy(
seL4_CapInitThreadCNode,
free_slot,
seL4_WordBits as u8,
seL4_CapInitThreadCNode,
self.get_orig_cap(prev),
seL4_WordBits as u8,
seL4_AllRights,
)
};
}
}
// Assume we are allocating from a device untyped; search the
// untyped list for the entry where our object is located. Within
// each region it gets harder. We have no visibility into what's
// available so we issue repeated Retype requests to create temporary
// Frame objects and fetch the frame's address. But to get the kernel
// to Retype successive Frame's it's necessary to trick it by holding
// a capability slot so the kernel doesn't just return the same slot
// for successive Retype's. Note for all this to work the slots at
// free_slot + 1 and free_slot + 2 must be available (true here since
// create_objects asigns slots sequentially).
let untypedList = unsafe { self.bootinfo.untyped_descs() };
for i in 0..(self.bootinfo.untyped.end - self.bootinfo.untyped.start) {
fn is_obj_inside_untyped(
obj_addr: seL4_Word,
obj_size_bits: usize,
ut: &seL4_UntypedDesc,
) -> bool {
ut.paddr <= obj_addr && obj_addr + obj_size_bits <= ut.paddr + BIT(ut.size_bits())
}
fn get_address(ut_slot: seL4_CPtr) -> Result<seL4_Page_GetAddress, seL4_Error> {
// Create a temporary frame to get the address. We load this at slot + 2
// to avoid the free_slot (where we want to write out object) and + 1 where
// our "hold" cap is located (see above).
let temp_slot = ut_slot + 2;
unsafe {
seL4_Untyped_Retype(
ut_slot,
kobject_get_type(KOBJECT_FRAME, seL4_PageBits).into(),
seL4_PageBits,
seL4_CapInitThreadCNode,
0,
0,
temp_slot,
1,
)?;
let temp_addr = seL4_Page_GetAddress(temp_slot);
seL4_CNode_Delete(seL4_CapInitThreadCNode, temp_slot, seL4_WordBits as u8)?;
Ok(temp_addr)
}
}
if is_obj_inside_untyped(paddr, BIT(obj_size_bits), &untypedList[i]) {
// See above, loop looking for a Frame in the untyped object
// that matches our object's address. If we run out of space
// in the untyped the kernel will return seL4_NotEnoughMemory
// that we pass back to the caller who then advances to the
// next untyped region.
let mut hold_slot: Option<seL4_CPtr> = None;
loop {
unsafe {
seL4_Untyped_Retype(
self.bootinfo.untyped.start + i,
sel4_type.into(),
obj_size_bits,
seL4_CapInitThreadCNode,
0,
0,
free_slot,
1,
)
}?;
let addr: seL4_Page_GetAddress = if sel4_type == seL4_UntypedObject {
get_address(free_slot)?
} else {
unsafe { seL4_Page_GetAddress(free_slot) }
};
if addr.paddr == paddr {
// Found our object, delete any holding cap.
if let Some(hold_slot_cap) = hold_slot {
unsafe {
seL4_CNode_Delete(
seL4_CapInitThreadCNode,
hold_slot_cap,
seL4_WordBits as u8,
)
}?;
}
return Ok(());
}
// Prepare to advance to the next Frame in the untyped
// region. If this is the first time doing this we create
// a holding slot (as described above); otherwise we
// delete the Retype'd Frame so the next trip around the
// loop will advance the location of the next Retype.
if hold_slot.is_none() {
hold_slot = Some(free_slot + 1);
unsafe {
seL4_CNode_Move(
seL4_CapInitThreadCNode,
hold_slot.unwrap(),
seL4_WordBits as u8,
seL4_CapInitThreadCNode,
free_slot,
seL4_WordBits as u8,
)
}?;
} else {
unsafe {
seL4_CNode_Delete(
seL4_CapInitThreadCNode,
free_slot,
seL4_WordBits as u8,
)
}?;
}
}
}
}
Ok(())
}
}

View File

@@ -0,0 +1,149 @@
// MCS Kernel Support.
use crate::KataOsModel;
use capdl::CDL_ObjectType::*;
use capdl::*;
use log::debug;
use sel4_sys::seL4_CNode;
use sel4_sys::seL4_CPtr;
use sel4_sys::seL4_CapInitThreadTCB;
use sel4_sys::seL4_Error;
use sel4_sys::seL4_Result;
use sel4_sys::seL4_SchedContext;
use sel4_sys::seL4_SchedContext_NoFlag;
use sel4_sys::seL4_SchedControl;
use sel4_sys::seL4_SchedControl_ConfigureFlags;
use sel4_sys::seL4_TCB_Configure;
use sel4_sys::seL4_TCB_SetSchedParams;
use sel4_sys::seL4_TCB_SetTimeoutEndpoint;
use sel4_sys::seL4_Time;
use sel4_sys::seL4_Word;
use static_assertions::assert_cfg;
assert_cfg!(feature = "CONFIG_KERNEL_MCS");
impl<'a> KataOsModel<'a> {
    /// Records the per-core SchedControl capabilities handed to us in
    /// bootinfo so they can be looked up later with get_sched_ctrl_cap.
    pub fn init_sched_ctrl(&mut self) -> seL4_Result {
        for index in 0..(self.bootinfo.schedcontrol.end - self.bootinfo.schedcontrol.start) {
            self.set_sched_ctrl_cap(index, self.bootinfo.schedcontrol.start + index);
        }
        Ok(())
    }
    /// Configures every SchedContext object in the spec with its budget,
    /// period and badge data.
    pub fn init_scs(&self) -> seL4_Result {
        let affinity: CDL_Core = 0;
        // Scan all spec objects for SchedContexts.
        for obj_id in 0..self.spec.num {
            let cdl_sc = self.get_object(obj_id);
            if cdl_sc.r#type() == CDL_SchedContext {
                /* all scs get configured on core 0, any scs that should be bound to a tcb will
                be reconfigured for the correct core in init_tcbs */
                SchedControl_Configure(
                    /*sched_ctrl=*/ self.get_sched_ctrl_cap(affinity),
                    /*sel4_sc=*/ self.get_orig_cap(obj_id),
                    /*affinity=*/ 0,
                    cdl_sc.sc_budget(),
                    cdl_sc.sc_period(),
                    cdl_sc.sc_data(),
                )?;
            }
        }
        Ok(())
    }
    /// Resolves |cdl_tcb|'s fault and temporal-fault endpoints to cptrs,
    /// minting a badged copy of the fault endpoint when the spec supplies
    /// a badge. Returns (fault_ep, tempfault_ep); 0 means "none".
    pub fn init_fault_ep(
        &mut self,
        cdl_tcb: &CDL_Object,
    ) -> Result<(seL4_CPtr, seL4_CPtr), seL4_Error> {
        // NB: fault ep cptrs are in the caller's cspace.
        let sel4_tempfault_ep: seL4_CPtr =
            if let Some(cap_tempfault_ep) = cdl_tcb.get_cap_at(CDL_TCB_TemporalFaultEP_Slot) {
                self.get_orig_cap(cap_tempfault_ep.obj_id)
            } else {
                debug!("TCB {} has no temporal fault endpoint", cdl_tcb.name());
                0
            };
        let sel4_fault_ep: seL4_CPtr =
            if let Some(cap_fault_ep) = cdl_tcb.get_cap_at(CDL_TCB_FaultEP_Slot) {
                let fault_ep_obj = cap_fault_ep.obj_id;
                let fault_ep_badge = cap_fault_ep.cap_data();
                // A non-zero badge requires a minted (badged) copy of the cap.
                if fault_ep_badge != 0 {
                    self.mint_cap(fault_ep_obj, fault_ep_badge)?
                } else {
                    self.get_orig_cap(fault_ep_obj)
                }
            } else {
                debug!("TCB {} has no fault endpoint", cdl_tcb.name());
                0
            };
        Ok((sel4_fault_ep, sel4_tempfault_ep))
    }
}
// TODO(sleffler): match syscall types
// TODO(sleffler): match syscall types
/// Thin wrapper over seL4_SchedControl_ConfigureFlags: binds scheduling
/// parameters (budget, period, badge |sc_data|) to SchedContext |sel4_sc|.
/// |_affinity| is unused here — the core is implied by which per-core
/// |sched_ctrl| capability the caller passes in.
pub fn SchedControl_Configure(
    sched_ctrl: seL4_SchedControl,
    sel4_sc: seL4_SchedContext,
    _affinity: seL4_Word,
    sc_budget: seL4_Time,
    sc_period: seL4_Time,
    sc_data: seL4_Word,
) -> seL4_Result {
    assert!(sel4_sc != 0); // a null SchedContext cptr is a caller bug
    unsafe {
        seL4_SchedControl_ConfigureFlags(sched_ctrl, sel4_sc,
            sc_budget, sc_period,
            /*extra_refills=*/ 0,
            /*badge=*/ sc_data,
            /*flags=*/ seL4_SchedContext_NoFlag)
    }
}
/// MCS variant of TCB configuration. |_sel4_fault_ep| is deliberately
/// ignored: under MCS the fault endpoint is installed via
/// seL4_TCB_SetSchedParams (see TCB_SchedParams below), not via Configure.
pub fn TCB_Configure(
    sel4_tcb: seL4_Word,
    _sel4_fault_ep: seL4_CPtr,
    sel4_cspace_root: seL4_CNode,
    sel4_cspace_root_data: seL4_Word,
    sel4_vspace_root: seL4_CPtr,
    sel4_vspace_root_data: seL4_Word,
    ipcbuffer_addr: seL4_Word,
    sel4_ipcbuffer: seL4_CPtr,
) -> seL4_Result {
    unsafe {
        seL4_TCB_Configure(
            sel4_tcb,
            sel4_cspace_root,
            sel4_cspace_root_data,
            sel4_vspace_root,
            sel4_vspace_root_data,
            ipcbuffer_addr,
            sel4_ipcbuffer,
        )
    }
}
/// MCS variant of setting a TCB's scheduling parameters. The initial
/// thread's TCB is used as the authority; the SchedContext |sel4_sc| and
/// fault endpoint |sel4_fault_ep| are bound here (MCS-only arguments).
pub fn TCB_SchedParams(
    sel4_tcb: seL4_Word,
    max_priority: seL4_Word,
    priority: seL4_Word,
    sel4_sc: seL4_Word,
    sel4_fault_ep: seL4_CPtr,
) -> seL4_Result {
    unsafe {
        seL4_TCB_SetSchedParams(
            sel4_tcb,
            seL4_CapInitThreadTCB,
            max_priority,
            priority,
            sel4_sc,
            sel4_fault_ep,
        )
    }
}
/// Installs |sel4_tempfault_ep| as the TCB's timeout (temporal fault)
/// endpoint; MCS-only kernel invocation.
pub fn TCB_SetTimeoutEndpoint(sel4_tcb: seL4_Word, sel4_tempfault_ep: seL4_CPtr) -> seL4_Result {
    unsafe { seL4_TCB_SetTimeoutEndpoint(sel4_tcb, sel4_tempfault_ep) }
}

View File

@@ -0,0 +1,89 @@
// No MCS Kernel Support.
use crate::KataOsModel;
use capdl::*;
use sel4_sys::seL4_CapInitThreadTCB;
use sel4_sys::seL4_CNode;
use sel4_sys::seL4_CPtr;
use sel4_sys::seL4_Error;
use sel4_sys::seL4_Result;
use sel4_sys::seL4_SchedContext;
use sel4_sys::seL4_SchedControl;
use sel4_sys::seL4_TCB;
use sel4_sys::seL4_TCB_Configure;
use sel4_sys::seL4_TCB_SetSchedParams;
use sel4_sys::seL4_Time;
use sel4_sys::seL4_Word;
use static_assertions::assert_cfg;
assert_cfg!(not(feature = "CONFIG_KERNEL_MCS"));
impl<'a> KataOsModel<'a> {
    // No-ops: scheduler-control caps and SchedContexts only exist under MCS.
    pub fn init_sched_ctrl(&mut self) -> seL4_Result { Ok(()) }
    pub fn init_scs(&self) -> seL4_Result { Ok(()) }
    /// Returns (fault_ep, tempfault_ep) for |cdl_tcb|. Without MCS the
    /// fault ep comes straight from the spec and there is no temporal
    /// fault endpoint (always 0).
    pub fn init_fault_ep(
        &mut self,
        cdl_tcb: &CDL_Object,
    ) -> Result<(seL4_CPtr, seL4_CPtr), seL4_Error> {
        // NB: fault ep cptrs are in the configured thread's cspace.
        Ok((cdl_tcb.tcb_fault_ep(), 0 as seL4_CPtr))
    }
}
// TODO(sleffler): match syscall types
// TODO(sleffler): match syscall types
/// No-op stub: SchedContext configuration only exists under MCS; kept so
/// callers compile identically in both kernel configurations.
pub fn SchedControl_Configure(
    _sched_ctrl: seL4_SchedControl,
    _sel4_sc: seL4_SchedContext,
    _affinity: seL4_Word,
    _sc_budget: seL4_Time,
    _sc_period: seL4_Time,
    _sc_data: seL4_Word,
) -> seL4_Result {
    Ok(())
}
/// Legacy (non-MCS) TCB configuration. Unlike the MCS variant, the fault
/// endpoint is passed directly to seL4_TCB_Configure here.
pub fn TCB_Configure(
    sel4_tcb: seL4_Word,
    sel4_fault_ep: seL4_CPtr,
    sel4_cspace_root: seL4_CNode,
    sel4_cspace_root_data: seL4_Word,
    sel4_vspace_root: seL4_CPtr,
    sel4_vspace_root_data: seL4_Word,
    ipcbuffer_addr: seL4_Word,
    sel4_ipcbuffer: seL4_CPtr,
) -> seL4_Result {
    unsafe {
        seL4_TCB_Configure(
            sel4_tcb,
            sel4_fault_ep,
            sel4_cspace_root,
            sel4_cspace_root_data,
            sel4_vspace_root,
            sel4_vspace_root_data,
            ipcbuffer_addr,
            sel4_ipcbuffer,
        )
    }
}
/// Legacy (non-MCS) scheduling parameters: only mcp and priority exist;
/// the SchedContext and fault-ep arguments are MCS-only and ignored.
pub fn TCB_SchedParams(
    sel4_tcb: seL4_Word,
    mcp: seL4_Word,
    priority: seL4_Word,
    _sel4_sc: seL4_Word,
    _sel4_fault_ep: seL4_CPtr,
) -> seL4_Result {
    unsafe {
        seL4_TCB_SetSchedParams(
            sel4_tcb,
            /*authority=*/ seL4_CapInitThreadTCB as seL4_TCB,
            mcp,
            priority,
        )
    }
}
/// No-op stub: timeout endpoints only exist under MCS.
pub fn TCB_SetTimeoutEndpoint(_sel4_tcb: seL4_Word, _sel4_tempfault_ep: seL4_CPtr) -> seL4_Result {
    Ok(())
}

View File

@@ -0,0 +1,10 @@
// No SMP support.
// TODO(sleffler): maybe merge into arch code
use sel4_sys::seL4_Result;
use sel4_sys::seL4_Word;
use static_assertions::assert_cfg;
assert_cfg!(not(feature = "CONFIG_SMP_SUPPORT"));
/// No-op stub: without CONFIG_SMP_SUPPORT there is only one core.
pub fn TCB_SetAffinity(_sel4_tcb: seL4_Word, _affinity: seL4_Word) -> seL4_Result { Ok(()) }

View File

@@ -0,0 +1,29 @@
// Register Calling Convention.
// Max 4 arguments are passed to threads using registers.
use crate::arch::REG_ARGS;
use crate::KataOsModel;
use capdl::*;
use sel4_sys::seL4_Error;
use sel4_sys::seL4_Word;
use static_assertions::assert_cfg;
assert_cfg!(not(feature = "CONFIG_CAPDL_LOADER_CC_REGISTERS"));
impl<'a> KataOsModel<'a> {
    /// Verifies |cdl_tcb|'s argv fits entirely in registers and returns the
    /// stack pointer unchanged. With the register calling convention nothing
    /// is ever spilled to the stack, so more than REG_ARGS arguments is a
    /// spec error and panics.
    pub fn maybe_spill_tcb_args(
        &self,
        cdl_tcb: &CDL_Object,
        sp: seL4_Word,
    ) -> Result<seL4_Word, seL4_Error> {
        let argc = cdl_tcb.tcb_init_sz();
        // Fix: the original format string had three {} placeholders but only
        // two arguments, which does not compile; supply REG_ARGS explicitly.
        assert!(
            argc <= REG_ARGS,
            "TCB {} has {} arguments; only {} are supported using the register calling convention",
            cdl_tcb.name(),
            argc,
            REG_ARGS,
        );
        Ok(sp)
    }
}

View File

@@ -0,0 +1,16 @@
// No Hypervisor Support.
use crate::KataOsModel;
use capdl::*;
use sel4_sys::seL4_CPtr;
use sel4_sys::seL4_Result;
use static_assertions::assert_cfg;
assert_cfg!(not(any(feature = "CONFIG_ARM_HYPERVISOR_SUPPORT", feature = "CONFIG_VTX")));
impl<'a> KataOsModel<'a> {
    /// No-op stub: VCPU binding only exists with hypervisor support
    /// (ARM hyp or VT-x); signature kept so callers compile unchanged.
    pub fn set_tcb_vcpu(&self, _cdl_tcb: &CDL_Object, _sel4_tcb: seL4_CPtr) -> seL4_Result {
        Ok(())
    }
}

View File

@@ -0,0 +1,13 @@
// SMP support.
// TODO(sleffler): maybe merge into arch code
use sel4_sys::seL4_Result;
use sel4_sys::seL4_TCB_SetAffinity;
use sel4_sys::seL4_Word;
use static_assertions::assert_cfg;
assert_cfg!(feature = "CONFIG_SMP_SUPPORT");
/// Binds |sel4_tcb| to CPU core |affinity| (SMP configurations only).
pub fn TCB_SetAffinity(sel4_tcb: seL4_Word, affinity: seL4_Word) -> seL4_Result {
    // Fix: sel4_sys syscall wrappers are unsafe fns (every other call site
    // in this crate wraps them, e.g. seL4_TCB_SetTimeoutEndpoint); the
    // original called this without an unsafe block.
    // SAFETY: plain kernel invocation; caller supplies a valid TCB cptr.
    unsafe { seL4_TCB_SetAffinity(sel4_tcb, affinity) }
}

View File

@@ -0,0 +1,137 @@
// Spill-to-stack Calling Convention.
// The first REG_ARGS arguments are passed to threads using registers;
// any more arguments are written to the stack.
use crate::arch::seL4_Default_VMAttributes;
use crate::arch::seL4_Page_Map;
use crate::arch::seL4_Page_Unmap;
use crate::arch::PAGE_SIZE;
use crate::arch::PT_SLOT;
use crate::arch::REG_ARGS;
use crate::arch::STACK_ALIGNMENT_BYTES;
use crate::copy_region;
use crate::KataOsModel;
use capdl::CDL_CapType::*;
use capdl::*;
use core::mem::size_of;
use core::ptr;
use sel4_sys::seL4_CPtr;
use sel4_sys::seL4_CapInitThreadVSpace;
use sel4_sys::seL4_CapRights;
use sel4_sys::seL4_Error;
use sel4_sys::seL4_Word;
use static_assertions::assert_cfg;
assert_cfg!(not(feature = "CONFIG_CAPDL_LOADER_CC_REGISTERS"));
impl<'a> KataOsModel<'a> {
    // Check TCB's argv and if needed write arguments to the stack and
    // fixup the stack pointer to match.
    //
    // Arguments beyond REG_ARGS are written to the top of the thread's
    // stack (highest addresses first); the returned sp accounts for both
    // alignment padding and the spilled words. The TCB's stack frame is
    // temporarily mapped into our VSpace through |copy_region|.
    pub fn maybe_spill_tcb_args(
        &self,
        cdl_tcb: &CDL_Object,
        osp: seL4_Word,
    ) -> Result<seL4_Word, seL4_Error> {
        let argc = cdl_tcb.tcb_init_sz();
        let reg_args = REG_ARGS;
        if argc <= reg_args {
            return Ok(osp); // Arguments fit in registers; nothing to do.
        }
        // More arguments than will fit in registers; map the TCB's stack
        // into our address space to write the spillover.
        assert_eq!(
            STACK_ALIGNMENT_BYTES % size_of::<seL4_Word>(),
            0,
            "Stack alignment wrong for argument size"
        );
        let mut sp = osp;
        // Find the TCB's PD.
        let pd = cdl_tcb.get_cap_at(CDL_TCB_VTable_Slot).unwrap().obj_id;
        // NB: the stack pointer will initially be aligned to
        // STACK_ALIGNMENT_BYTES. Any padding required to enforce this
        // alignment will come before any stack arguments.
        let num_stack_args = argc - reg_args; // positive because argc > reg_args
        let args_per_alignment = STACK_ALIGNMENT_BYTES / size_of::<seL4_Word>();
        let num_unaligned_args = num_stack_args % args_per_alignment;
        if num_unaligned_args != 0 {
            let num_padding_args = args_per_alignment - num_unaligned_args;
            let num_padding_bytes = num_padding_args * size_of::<seL4_Word>();
            sp -= num_padding_bytes;
        }
        // Find and map the frame representing the TCB's stack. Note that
        // we do `sp - sizeof(uintptr_t)` because the stack pointer may
        // be on a page boundary.
        let frame = self.get_frame_cap(pd, sp - size_of::<seL4_Word>());
        /* FIXME: The above could actually fail messily if the user has given a
         * spec with stack pointers that point outside the ELF image.
         */
        // NB: platforms that have an NX attribute will add it in
        // seL4_Page_Map when capAllowGrant is false (e.g. arm).
        let attribs = seL4_Default_VMAttributes;
        unsafe {
            seL4_Page_Map(
                frame,
                seL4_CapInitThreadVSpace,
                ptr::addr_of!(copy_region.data[0]) as usize,
                // seL4_ReadWrite
                seL4_CapRights::new(
                    /*grant_reply=*/ 0, /*grant=*/ 0, /*read=*/ 1, /*write=*/ 1,
                ),
                attribs,
            )
        }?;
        // Write spillover arguments to the TCB's stack.
        // Walk argv backwards so the highest-index argument lands at the
        // highest stack address.
        let argv = unsafe { core::slice::from_raw_parts(cdl_tcb.tcb_init(), argc) };
        for i in (reg_args..argc).rev() {
            sp -= size_of::<seL4_Word>();
            // We could support this case with more complicated logic, but
            // choose not to.
            assert!(
                (sp % PAGE_SIZE) != 0,
                "TCB {}'s initial arguments cause its stack to cross a page boundary",
                cdl_tcb.name()
            );
            // NB: copy_region.data is [seL4_Word] but sp is in bytes
            unsafe {
                ptr::write(
                    &mut copy_region.data[(sp % PAGE_SIZE) / size_of::<seL4_Word>()],
                    argv[i],
                )
            };
        }
        // NOTE(review): seL4_ARM_Page_Unify_Instruction is not among this
        // file's visible imports; the arm cfg presumably pulls it in
        // elsewhere — confirm when building for arm.
        #[cfg(target_arch = "arm")]
        unsafe { seL4_ARM_Page_Unify_Instruction(frame, 0, PAGE_SIZE) }?;
        unsafe { seL4_Page_Unmap(frame) }?;
        Ok(sp)
    }
    // Locate page Frame associated with |vaddr| in the page directory
    // object |pd|. This is used for findings the stack of a TCB when
    // doing argv spillover to the stack.
    fn get_frame_cap(&self, pd: CDL_ObjID, vaddr: usize) -> seL4_CPtr {
        self.get_orig_cap(self.get_cdl_frame_cap(pd, vaddr).unwrap().obj_id)
    }
    // Resolves |vaddr| in |pd| to the capDL Frame cap that backs it,
    // handling the case where a large frame is mapped directly at the
    // page-table level.
    fn get_cdl_frame_cap(&self, pd: CDL_ObjID, vaddr: usize) -> Option<&'a CDL_Cap> {
        // arch::get_cdl_frame_pt
        let pt_cap = self.get_cdl_frame_pt(pd, vaddr)?;
        // Check if the PT cap is actually a large frame cap.
        if pt_cap.r#type() == CDL_FrameCap {
            Some(pt_cap)
        } else {
            self.get_object(pt_cap.obj_id).get_cap_at(PT_SLOT(vaddr))
        }
    }
}

View File

@@ -0,0 +1,181 @@
// Static Object Allocation.
// Expect a statically-allocated capDL spec.
use crate::KataOsModel;
use capdl::CDL_ObjectType::*;
use capdl::*;
use log::debug;
use smallvec::SmallVec;
use crate::arch::seL4_ASIDControl_MakePool;
use sel4_sys::seL4_CapASIDControl;
use sel4_sys::seL4_CapInitThreadCNode;
use sel4_sys::seL4_CPtr;
use sel4_sys::seL4_ObjectType;
use sel4_sys::seL4_Result;
use sel4_sys::seL4_Untyped_Retype;
use sel4_sys::seL4_Word;
use sel4_sys::seL4_WordBits;
use static_assertions::assert_cfg;
assert_cfg!(feature = "CONFIG_CAPDL_LOADER_STATIC_ALLOC");
impl<'a> KataOsModel<'a> {
    // Returns the |untyped_num|'th untyped derivation from the spec, or
    // None if the index is out of range.
    fn get_untyped(&self, untyped_num: usize) -> Option<&'a CDL_UntypedDerivation> {
        if untyped_num >= self.spec.num_untyped {
            return None;
        }
        Some(unsafe {
            &core::slice::from_raw_parts(self.spec.untyped, self.spec.num_untyped)[untyped_num]
        })
    }
    // Verify the untypeds in the model correspond to the contents of bootinfo.
    // Each spec untyped is matched to a bootinfo untyped by physical
    // address; the matching bootinfo cptr is recorded for create_objects.
    // Panics on a size mismatch or an unmatched spec untyped.
    pub fn check_untypeds(&mut self) -> seL4_Result {
        let untypedList = unsafe { self.bootinfo.untyped_descs() };
        let mut bi_start = 0;
        for u in 0..self.spec.num_untyped {
            let mut found = false;
            let num_untyped = self.bootinfo.untyped.end - self.bootinfo.untyped.start;
            let ut = self.get_object(self.get_untyped(u).unwrap().untyped);
            assert_eq!(ut.r#type(), CDL_Untyped);
            // TODO(sleffler): doesn't seem correct, why not stop at first?
            // + handling of bi_start seems awkward
            for i in bi_start..num_untyped {
                let bt_ut = &untypedList[i];
                if bt_ut.paddr == ut.paddr().unwrap() {
                    assert_eq!(
                        bt_ut.size_bits() as usize,
                        ut.size_bits(),
                        "Untyped size mismatch, bootinfo {}, capdl {}",
                        bt_ut.size_bits(),
                        ut.size_bits()
                    );
                    self.state
                        .set_untyped_cptr(u, self.bootinfo.untyped.start + i);
                    found = true;
                    // Advance the scan start only when matches land at the
                    // front of the remaining window.
                    if i == bi_start {
                        bi_start += 1;
                    }
                    // XXX no break?
                }
            }
            assert!(found, "Failed to find bootinfo to match untyped {:?}", ut.paddr().unwrap());
        }
        Ok(())
    }
    /*
     * Spec was statically allocated; just run its untyped derivation steps.
     *
     * Each spec untyped lists the children to retype from it, in order, so
     * no allocator is needed. Side effects mirror the dynamic path: cslots
     * are recorded via set_orig_cap, free_slot_start is advanced, and TCB
     * VSpace roots are stashed in self.vspace_roots.
     */
    pub fn create_objects(&mut self) -> seL4_Result {
        debug!("Creating objects...");
        let mut free_slot_index = 0;
        // Collect the roots in a local SmallVec so we can dedup entries
        // before we stash them in self.vpsace_roots. This minimizes the
        // possibility of vpsace_roots spilling to the heap.
        let mut roots = SmallVec::new();
        /* First, allocate most objects and update the spec database with
        the cslot locations. The exception is ASIDPools, where
        create_object only allocates the backing untypeds. */
        for ut_index in 0..self.spec.num_untyped {
            let ud = self.get_untyped(ut_index).unwrap();
            for child in 0..ud.num {
                let obj_id = ud.get_child(child).unwrap();
                let free_slot = self.free_slot_start + free_slot_index;
                let obj = self.get_object(obj_id);
                let capdl_obj_type = obj.r#type();
                let untyped_cptr = self.state.get_untyped_cptr(ut_index);
                // NOTE(review): requires_creation here is a method on the
                // capDL object type (cf. the free function used by the
                // dynamic allocator) — objects needing dynamic allocation
                // cannot appear in a statically-allocated spec.
                assert!(
                    !capdl_obj_type.requires_creation(),
                    "Object {} requires dynamic allocation",
                    obj.name()
                );
                debug!(
                    "Creating object {} in slot {}, from untyped {:x}...",
                    obj.name(),
                    free_slot,
                    untyped_cptr
                );
                self.create_object(obj, obj_id, untyped_cptr, free_slot)?;
                // Capture VSpace roots for later use.
                if obj.r#type() == CDL_TCB {
                    if let Some(root_cap) = obj.get_cap_at(CDL_TCB_VTable_Slot) {
                        roots.push(root_cap.obj_id);
                    }
                }
                self.set_orig_cap(obj_id, free_slot);
                free_slot_index += 1;
            }
        }
        // XXX common with dynamic
        /* Now, we turn the backing untypeds into ASID pools, in the order
        given by the ASID slot allocation policy. This fixes the layout
        inside the kernel's ASID table, which ensures consistency with
        verification models. */
        debug!("Creating ASID pools...");
        for asid_high in 1..self.spec.num_asid_slots {
            let obj_id = self.get_asid(asid_high).unwrap();
            let asid_ut = self.get_orig_cap(obj_id);
            let asid_slot = self.free_slot_start + free_slot_index;
            unsafe {
                seL4_ASIDControl_MakePool(
                    seL4_CapASIDControl,
                    asid_ut,
                    seL4_CapInitThreadCNode,
                    asid_slot,
                    seL4_WordBits as u8,
                )
            }?;
            // update to point to our new ASID pool
            self.set_orig_cap(obj_id, asid_slot);
            free_slot_index += 1;
        }
        // Update the free slot to go past all the objects we just made.
        self.free_slot_start += free_slot_index;
        // Stash the VSpace roots.
        roots.sort();
        roots.dedup();
        self.vspace_roots = roots;
        Ok(())
    }
    // Static-allocation device objects come straight from the designated
    // untyped: a single Retype into |free_slot|, no address probing needed
    // (cf. the dynamic allocator's version of this function).
    pub fn find_device_object(
        &self,
        free_slot: seL4_CPtr,
        untyped_slot: seL4_CPtr,
        sel4_type: seL4_ObjectType,
        obj_size: usize,
        _paddr: seL4_Word,
        _obj_id: CDL_ObjID,
    ) -> seL4_Result {
        unsafe {
            seL4_Untyped_Retype(
                untyped_slot,
                sel4_type.into(),
                obj_size,
                seL4_CapInitThreadCNode,
                /*node_index=*/ 0,
                /*node_depth=*/ 0,
                /*node_offset=*/ free_slot,
                /*no_objects=*/ 1,
            )
        }
    }
}

View File

@@ -0,0 +1,33 @@
// Hypervisor Support.
use crate::KataOsModel;
use capdl::*;
use sel4_sys::seL4_CPtr;
use sel4_sys::seL4_Result;
use static_assertions::assert_cfg;
assert_cfg!(any(feature = "CONFIG_ARM_HYPERVISOR_SUPPORT", feature = "CONFIG_VTX"));
impl<'a> KataOsModel<'a> {
    /// Binds the VCPU named in |cdl_tcb|'s VCPU slot (if any) to |sel4_tcb|.
    /// A TCB with no VCPU cap is left untouched.
    pub fn set_tcb_vcpu(&self, cdl_tcb: &CDL_Object, sel4_tcb: seL4_CPtr) -> seL4_Result {
        // Resolve the capDL VCPU slot to a kernel cptr; 0 means "no VCPU".
        let sel4_vcpu = cdl_tcb
            .get_cap_at(CDL_TCB_VCPU_Slot)
            .map_or(0, |cap| self.get_orig_cap(cap.obj_id));
        if sel4_vcpu != 0 {
            // TODO(sleffler): maybe belongs in arch support
            #[cfg(feature = "CONFIG_ARM_HYPERVISOR_SUPPORT")]
            unsafe { sel4_sys::seL4_ARM_VCPU_SetTCB(sel4_vcpu, sel4_tcb) }?;
            #[cfg(feature = "CONFIG_VTX")]
            unsafe { sel4_sys::seL4_X86_VCPU_SetTCB(sel4_vcpu, sel4_tcb) }?;
        }
        Ok(())
    }
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,4 @@
max_width = 100
fn_call_width = 78
fn_single_line = true
force_multiline_blocks = true

View File

@@ -1,3 +1,5 @@
#![no_std]
pub extern crate capdl;
pub extern crate kata_os_model;
pub extern crate sel4_sys;

View File

@@ -14,6 +14,7 @@ pub const seL4_WordSizeBits: usize = 2;
pub const seL4_PageBits: usize = 12;
pub const seL4_SlotBits: usize = 4;
pub const seL4_TCBBits: usize = 9;
pub const seL4_ReplyBits: usize = 4;
pub const seL4_EndpointBits: usize = 4;
pub const seL4_PageTableEntryBits: usize = 2;
pub const seL4_PageTableIndexBits: usize = 10;
@@ -24,6 +25,11 @@ pub const seL4_NumASIDPoolBits: usize = 5;
pub const seL4_ASIDPoolIndexBits: usize = 4;
pub const seL4_ASIDPoolBits: usize = 12;
#[cfg(feature = "CONFIG_KERNEL_MCS")]
pub const seL4_NotificationBits: usize = 5;
#[cfg(not(feature = "CONFIG_KERNEL_MCS"))]
pub const seL4_NotificationBits: usize = 4;
pub type seL4_RISCV_Page = seL4_CPtr;
pub type seL4_RISCV_PageTable = seL4_CPtr;
pub type seL4_RISCV_ASIDControl = seL4_CPtr;

View File

@@ -1,4 +1,6 @@
set(CAMKES_APP "system" CACHE STRING "The one and only CAmkES application in this project")
set(CAPDL_LOADER_APP "capdl-loader-app" CACHE STRING "")
#set(CAPDL_LOADER_APP "kata-os-rootserver" CACHE STRING "")
set(PLATFORM "sparrow" CACHE STRING "The one and only seL4 platform for Sparrow")
set(KernelSel4Arch "riscv32" CACHE STRING "Specifies 32-bit branch of the seL4 spike platform")